repo_name | hexsha | file_path | code | apis | possible_versions
---|---|---|---|---|---
HastingsGreer/mermaid | [
"bd13c5fc427eb8cd9054973a8eaaeb302078182d",
"ba07883cc3cb5982e4655048a434b4495cb49c6d"
] | [
"mermaid/forward_models.py",
"mermaid/multiscale_optimizer.py"
] | [
"\"\"\"\nPackage defining various dynamic forward models as well as convenience methods to generate the\nright hand sides (RHS) of the related partial differential equations.\n\nCurrently, the following forward models are implemented:\n #. An advection equation for images\n #. An advection equation for maps\n #. The EPDiff-equation parameterized using the vector-valued momentum for images\n #. The EPDiff-equation parameterized using the vector-valued momentum for maps\n #. The EPDiff-equation parameterized using the scalar-valued momentum for images\n #. The EPDiff-equation parameterized using the scalar-valued momentum for maps\n \nThe images are expected to be tensors of dimension: BxCxXxYxZ (or BxCxX in 1D and BxCxXxY in 2D),\nwhere B is the batch-size, C the number of channels, and X, Y, and Z are the spatial coordinate indices.\n\nFuthermore the following (RHSs) are provided\n #. Image advection\n #. Map advection\n #. Scalar conservation law\n #. EPDiff\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nfrom builtins import range\nfrom builtins import object\nfrom abc import ABCMeta, abstractmethod\nimport numpy as np\nfrom . import finite_differences_multi_channel as fdm\nfrom . import utils\nfrom .data_wrapper import MyTensor\nfrom future.utils import with_metaclass\nimport torch.nn as nn\nimport torch\n\n\nclass RHSLibrary(object):\n \"\"\"\n Convenience class to quickly generate various right hand sides (RHSs) of popular partial differential \n equations. In this way new forward models can be written with minimal code duplication.\n \"\"\"\n\n def __init__(self, spacing, use_neumann_BC_for_map=False):\n \"\"\"\n Constructor\n \n :param spacing: Spacing for the images. This will be an array with 1, 2, or 3 entries in 1D, 2D, and 3D respectively. \n \"\"\"\n self.spacing = spacing\n \"\"\"spatial spacing\"\"\"\n self.spacing_min = np.min(spacing)\n \"\"\" min of the spacing\"\"\"\n self.spacing_ratio = spacing/self.spacing_min\n self.fdt_ne = fdm.FD_torch_multi_channel(spacing,mode='neumann_zero')\n \"\"\"torch finite differencing support neumann zero\"\"\"\n self.fdt_le = fdm.FD_torch_multi_channel( spacing, mode='linear')\n \"\"\"torch finite differencing support linear extrapolation\"\"\"\n self.fdt_di = fdm.FD_torch_multi_channel(spacing, mode='dirichlet_zero')\n \"\"\"torch finite differencing support dirichlet zero\"\"\"\n self.dim = len(self.spacing)\n \"\"\"spatial dimension\"\"\"\n self.use_neumann_BC_for_map = use_neumann_BC_for_map\n \"\"\"If True uses zero Neumann boundary conditions also for evolutions of the map, if False uses linear extrapolation\"\"\"\n\n def rhs_advect_image_multiNC(self,I,v):\n '''\n Advects a batch of images which can be multi-channel. 
Expected image format here, is \n BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels\n per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)\n\n \n :math:`-\\\\nabla I^Tv`\n\n \n :param I: Image batch BxCIxXxYxZ\n :param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ\n :return: Returns the RHS of the advection equations involved BxCxXxYxZ\n '''\n\n rhs_ret= self._rhs_advect_image_multiN(I, v )\n return rhs_ret\n\n\n def _rhs_advect_image_multiN(self,I,v):\n \"\"\"\n :param I: One-channel input image: Bx1xXxYxZ\n :param v: velocity field BxCxXxYxZ\n :return: Returns the RHS of the advection equation for one channel BxXxYxZ\n \"\"\"\n\n if self.dim == 1:\n rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1]\n elif self.dim == 2:\n rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]\n elif self.dim == 3:\n rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]-self.fdt_ne.dZc(I)*v[:,2:3]\n else:\n raise ValueError('Only supported up to dimension 3')\n return rhs_ret\n\n\n def rhs_scalar_conservation_multiNC(self, I, v):\n \"\"\"\n Scalar conservation law for a batch of images which can be multi-channel. Expected image format here, is \n BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels\n per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)\n\n :math:`-div(Iv)`\n\n :param I: Image batch BxCIxXxYxZ\n :param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ\n :return: Returns the RHS of the scalar conservation law equations involved BxCxXxYxZ\n \"\"\"\n\n rhs_ret=self._rhs_scalar_conservation_multiN(I, v)\n return rhs_ret\n\n\n\n def _rhs_scalar_conservation_multiN(self, I, v):\n \"\"\"\n :param I: One-channel input image: Bx1xXxYxZ\n :param v: velocity field BxCxXxYxZ\n :return: Returns the RHS of the scalar-conservation law equation for one channel BxXxYxZ\n \"\"\"\n\n if self.dim==1:\n rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1])\n elif self.dim==2:\n rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])\n elif self.dim==3:\n rhs_ret = -self.fdt_ne.dXc(I* v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])-self.fdt_ne.dZc(I*v[:,2:3])\n else:\n raise ValueError('Only supported up to dimension 3')\n return rhs_ret\n\n\n def rhs_lagrangian_evolve_map_multiNC(self, phi, v):\n \"\"\"\n Evolves a set of N maps (for N images). Expected format here, is\n BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels\n per (here the spatial dimension for the map coordinate functions),\n and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D).\n This is used to evolve the map going from source to target image. 
Requires interpolation,\n        so it should, if at all possible, not be used as part of an optimization.\n        Computing the inverse map this way works because the map is defined\n        in the source space: it records where each point moves to (whereas the map in the target space records where each point comes from).\n        In this situation we only need to sample the velocity at that location and accumulate it along the time steps;\n        since the advection function moves the image (or the map-based image) by one step of v, v is shared across the coordinates,\n        so it is safe to compute the inverse map in this way.\n\n        :math:`v\circ\phi`\n\n        :param phi: map batch BxCxXxYxZ\n        :param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ\n        :return: Returns the RHS of the evolution equations involved BxCxXxYxZ\n        \"\"\"\n\n        rhs_ret = utils.compute_warped_image_multiNC(v, phi, spacing=self.spacing, spline_order=1,zero_boundary=False)\n        return rhs_ret\n\n\n    def rhs_advect_map_multiNC(self, phi, v):\n        '''\n        Advects a set of N maps (for N images). Expected format here, is \n        BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels\n        per (here the spatial dimension for the map coordinate functions), \n        and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)\n\n\n        :math:`-D\\phi v`\n\n        :param phi: map batch BxCxXxYxZ\n        :param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ\n        :return: Returns the RHS of the advection equations involved BxCxXxYxZ\n        '''\n\n        sz = phi.size()\n        rhs_ret = self._rhs_advect_map_call(phi, v)\n        return rhs_ret\n\n    def _rhs_advect_map_call(self,phi,v):\n        \"\"\"\n\n        :param phi: map batch BxCxXxYxZ\n        :param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ\n        :return rhsphi: Returns the RHS of the advection equations involved BxCxXxYxZ\n        \"\"\"\n\n        fdc = self.fdt_le # use linear extrapolation boundary conditions\n\n        if self.dim==1:\n            dxc_phi = -fdc.dXc(phi)\n            rhsphi = v[:, 0:1] * dxc_phi\n        elif self.dim==2:\n            dxc_phi = -fdc.dXc(phi)\n            dyc_phi = -fdc.dYc(phi)\n            rhsphi = v[:, 0:1] * dxc_phi + v[:, 1:2] * dyc_phi\n        elif self.dim==3:\n            dxc_phi = -fdc.dXc(phi)\n            dyc_phi = -fdc.dYc(phi)\n            dzc_phi = -fdc.dZc(phi)\n            rhsphi = v[:,0:1]*dxc_phi + v[:,1:2]*dyc_phi + v[:,2:3]*dzc_phi\n        else:\n            raise ValueError('Only supported up to dimension 3')\n        return rhsphi\n\n\n    def rhs_epdiff_multiNC(self, m, v):\n        '''\n        Computes the right hand side of the EPDiff equation of N momenta (for N images). 
\n Expected format here, is BxCxXxYxZ, where B is the number of momenta (batch size), C, \n the number of channels per (here the spatial dimension for the momenta), \n and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)\n\n a new version, where batch is no longer calculated separately\n\n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n\n :param m: momenta batch BxCXxYxZ\n :param v: Velocity fields (this will be one velocity field per momentum) BxCXxYxZ\n :return: Returns the RHS of the EPDiff equations involved BxCXxYxZ\n '''\n\n sz = m.size()\n rhs_ret = MyTensor(sz).zero_()\n rhs_ret = self._rhs_epdiff_call(m, v, rhs_ret)\n return rhs_ret\n\n def _rhs_epdiff_call(self, m, v,rhsm):\n \"\"\"\n :param m: momenta batch BxCxXxYxZ\n :param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ\n :return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ\n \"\"\"\n\n # if self.use_neumann_BC_for_map:\n # fdc = self.fdt_ne # use zero Neumann boundary conditions\n # else:\n # fdc = self.fdt_le # do linear extrapolation\n\n fdc = self.fdt_ne\n #fdc = self.fdt_le\n if self.dim == 1:\n dxc_mv0 = -fdc.dXc(m*v[:,0:1])\n dxc_v = -fdc.dXc(v)\n dxc_v_multi_m = dxc_v * m\n rhsm[:]= dxc_mv0 + dxc_v_multi_m\n\n elif self.dim == 2:\n # (m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm (EPDiff equation)\n dxc_mv0 = -fdc.dXc(m*v[:,0:1])\n dyc_mv1 = -fdc.dYc(m*v[:,1:2])\n dc_mv_sum = dxc_mv0 + dyc_mv1\n dxc_v = -fdc.dXc(v)\n dyc_v = -fdc.dYc(v)\n dxc_v_multi_m = dxc_v * m\n dyc_v_multi_m = dyc_v * m\n dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m, 1)\n dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m, 1)\n rhsm[:,0, :, :] = dc_mv_sum[:,0] + dxc_v_multi_m_sum\n\n rhsm[:,1, :, :] = dc_mv_sum[:,1] + dyc_v_multi_m_sum\n\n elif self.dim == 3:\n dxc_mv0 = -fdc.dXc(m*v[:,0:1])\n dyc_mv1 = -fdc.dYc(m*v[:,1:2])\n dzc_mv2 = -fdc.dZc(m*v[:,2:3])\n dc_mv_sum = dxc_mv0 + dyc_mv1 + dzc_mv2\n dxc_v = -fdc.dXc(v)\n dyc_v = -fdc.dYc(v)\n dzc_v = -fdc.dZc(v)\n dxc_v_multi_m = dxc_v*m\n dyc_v_multi_m = dyc_v*m\n dzc_v_multi_m = dzc_v*m\n dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m,1)\n dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m,1)\n dzc_v_multi_m_sum = torch.sum(dzc_v_multi_m,1)\n\n rhsm[:, 0] = dc_mv_sum[:,0] + dxc_v_multi_m_sum\n\n rhsm[:, 1] = dc_mv_sum[:,1] + dyc_v_multi_m_sum\n\n rhsm[:, 2] = dc_mv_sum[:,2] + dzc_v_multi_m_sum\n\n else:\n raise ValueError('Only supported up to dimension ')\n return rhsm\n\n\n\n def rhs_adapt_epdiff_wkw_multiNC(self, m, v,w, sm_wm,smoother):\n '''\n Computes the right hand side of the EPDiff equation for of N momenta (for N images).\n Expected format here, is BxCxXxYxZ, where B is the number of momenta (batch size), C,\n the number of channels per (here the spatial dimension for the momenta),\n and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)\n\n a new version, where batch is no longer calculated separately\n\n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n\n :param m: momenta batch BxCXxYxZ\n :param v: Velocity fields (this will be one velocity field per momentum) BxCXxYxZ\n :return: Returns the RHS of the EPDiff equations involved BxCXxYxZ\n '''\n\n sz = m.size()\n rhs_ret = MyTensor(sz).zero_()\n rhs_ret = self._rhs_adapt_epdiff_wkw_call(m, v,w,sm_wm,smoother, rhs_ret)\n return rhs_ret\n\n def _rhs_adapt_epdiff_wkw_call(self, m, v,w,sm_wm, smoother, rhsm):\n \"\"\"\n :param m: momenta batch BxCxXxYxZ\n :param sm_wm: smoothed(wm) batch x K x dim x X x Y x ...\n :param w: smoothed(wm) batch x K x X x Y x ...\n :param 
v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ\n :return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ\n \"\"\"\n # if self.use_neumann_BC_for_map:\n # fdc = self.fdt_ne # use zero Neumann boundary conditions\n # else:\n # fdc = self.fdt_le # do linear extrapolation\n\n fdc = self.fdt_ne\n rhs = self._rhs_epdiff_call(m,v,rhsm)\n ret_var = torch.empty_like(rhs)\n # ret_var, rhs should batch x dim x X x Yx ..\n dim = m.shape[1]\n sz = [m.shape[0]]+[1]+list(m.shape[1:]) # batchx1xdimx X x Y\n m = m.view(*sz)\n m_sm_wm = m* sm_wm\n m_sm_wm = m_sm_wm.sum(dim=2)\n sm_m_sm_wm = smoother.smooth(m_sm_wm) # batchx K x X xY...\n dxc_w = fdc.dXc(w)\n dc_w_list = [dxc_w]\n if dim == 2 or dim == 3:\n dyc_w = fdc.dYc(w)\n dc_w_list.append(dyc_w)\n if dim == 3:\n dzc_w = fdc.dZc(w) # batch x K x X xY ...\n dc_w_list.append(dzc_w)\n for i in range(dim):\n ret_var[:, i] = rhs[:, i] + (sm_m_sm_wm* dc_w_list[i]).sum(1)\n\n return ret_var\n\n\n\nclass ForwardModel(with_metaclass(ABCMeta, object)):\n \"\"\"\n Abstract forward model class. Should never be instantiated.\n Derived classes require the definition of f(self,t,x,u,pars) and u(self,t,pars).\n These functions will be used for integration: x'(t) = f(t,x(t),u(t))\n \"\"\"\n\n def __init__(self, sz, spacing, params=None):\n '''\n Constructor of abstract forward model class\n \n :param sz: size of images\n :param spacing: numpy array for spacing in x,y,z directions\n '''\n\n self.dim = spacing.size # spatial dimension of the problem\n \"\"\"spatial dimension\"\"\"\n self.spacing = spacing\n \"\"\"spatial spacing\"\"\"\n self.sz = sz\n \"\"\"image size (BxCxXxYxZ)\"\"\"\n self.params = params\n \"\"\"ParameterDict instance holding parameters\"\"\"\n self.rhs = RHSLibrary(self.spacing)\n \"\"\"rhs library support\"\"\"\n\n if self.dim>3 or self.dim<1:\n raise ValueError('Forward models are currently only supported in dimensions 1 to 3')\n\n self.debug_mode_on =False\n\n @abstractmethod\n def f(self,t,x,u,pars,variables_from_optimizer=None):\n \"\"\"\n Function to be integrated\n \n :param t: time\n :param x: state\n :param u: input\n :param pars: optional parameters\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: the function value, should return a list (to support easy concatenations of states)\n \"\"\"\n\n pass\n\n def u(self,t,pars,variables_from_optimizer=None):\n \"\"\"\n External input\n \n :param t: time\n :param pars: parameters\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: the external input\n \"\"\"\n\n return []\n\n\nclass AdvectMap(ForwardModel):\n \"\"\"\n Forward model to advect an n-D map using a transport equation: :math:`\\\\Phi_t + D\\\\Phi v = 0`.\n v is treated as an external argument and \\Phi is the state\n \"\"\"\n\n def __init__(self, sz, spacing, params=None,compute_inverse_map=False):\n super(AdvectMap,self).__init__(sz,spacing,params)\n self.compute_inverse_map = compute_inverse_map\n \"\"\"If True then computes the inverse map on the fly for a map-based solution\"\"\"\n\n def u(self,t, pars, variables_from_optimizer=None):\n \"\"\"\n External input, to hold the velocity field\n \n :param t: time (ignored; not time-dependent) \n :param pars: assumes an n-D velocity field is passed as the only input argument\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: Simply returns this velocity field\n \"\"\"\n\n return pars['v']\n\n def f(self,t, x, u, 
pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of transport equation: \n \n :math:`-D\\\\phi v`\n \n :param t: time (ignored; not time-dependent) \n :param x: state, here the map, \\Phi, itself (assumes 3D-5D array; [nrI,0,:,:] x-coors; [nrI,1,:,:] y-coors; ...\n :param u: external input, will be the velocity field here\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [phi]\n \"\"\"\n\n if self.compute_inverse_map:\n return [self.rhs.rhs_advect_map_multiNC(x[0], u),self.rhs.rhs_lagrangian_evolve_map_multiNC(x[1], u)]\n else:\n return [self.rhs.rhs_advect_map_multiNC(x[0],u)]\n\nclass AdvectImage(ForwardModel):\n \"\"\"\n Forward model to advect an image using a transport equation: :math:`I_t + \\\\nabla I^Tv = 0`.\n v is treated as an external argument and I is the state\n \"\"\"\n\n def __init__(self, sz, spacing, params=None):\n super(AdvectImage, self).__init__(sz, spacing,params)\n\n\n def u(self,t, pars, variables_from_optimizer=None):\n \"\"\"\n External input, to hold the velocity field\n \n :param t: time (ignored; not time-dependent) \n :param pars: assumes an n-D velocity field is passed as the only input argument\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: Simply returns this velocity field\n \"\"\"\n\n return pars['v']\n\n def f(self,t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of transport equation: :math:`-\\\\nabla I^T v`\n \n :param t: time (ignored; not time-dependent) \n :param x: state, here the image, I, itself (supports multiple images and channels)\n :param u: external input, will be the velocity field here\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [I]\n \"\"\"\n\n return [self.rhs.rhs_advect_image_multiNC(x[0],u)]\n\n\n\nclass EPDiffImage(ForwardModel):\n \"\"\"\n Forward model for the EPdiff equation. State is the momentum, m, and the image I:\n :math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n \n :math:`v=Km`\n \n :math:`I_t+\\\\nabla I^Tv=0`\n \"\"\"\n def __init__(self, sz, spacing, smoother, params=None):\n super(EPDiffImage, self).__init__(sz, spacing,params)\n self.smoother = smoother\n\n def f(self,t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of the EPDiff equation: \n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n \n :math:`-\\\\nabla I^Tv`\n \n :param t: time (ignored; not time-dependent) \n :param x: state, here the vector momentum, m, and the image, I\n :param u: ignored, no external input\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [m,I]\n \"\"\"\n\n # assume x[0] is m and x[1] is I for the state\n m = x[0]\n I = x[1]\n v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I': I}),variables_from_optimizer)\n # print('max(|v|) = ' + str( v.abs().max() ))\n return [self.rhs.rhs_epdiff_multiNC(m,v), self.rhs.rhs_advect_image_multiNC(I,v)]\n\n\nclass EPDiffMap(ForwardModel):\n \"\"\"\n Forward model for the EPDiff equation. 
State is the momentum, m, and the transform, :math:`\\\\phi` \n (mapping the source image to the target image).\n\n :math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n \n :math:`v=Km`\n \n :math:`\\\\phi_t+D\\\\phi v=0`\n \"\"\"\n\n def __init__(self, sz, spacing, smoother, params=None,compute_inverse_map=False):\n super(EPDiffMap, self).__init__(sz,spacing,params)\n self.compute_inverse_map = compute_inverse_map\n \"\"\"If True then computes the inverse map on the fly for a map-based solution\"\"\"\n\n self.smoother = smoother\n self.use_net = True if self.params['smoother']['type'] == 'adaptiveNet' else False\n\n def debugging(self,input,t):\n x = utils.checkNan(input)\n if np.sum(x):\n print(\"find nan at {} step\".format(t))\n print(\"flag m: {}, \".format(x[0]))\n print(\"flag v: {},\".format(x[1]))\n print(\"flag phi: {},\".format(x[2]))\n print(\"flag new_m: {},\".format(x[3]))\n print(\"flag new_phi: {},\".format(x[4]))\n raise ValueError(\"nan error\")\n\n def f(self,t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of the EPDiff equation:\n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm'\n \n :math:`-D\\\\phi v`\n \n :param t: time (ignored; not time-dependent) \n :param x: state, here the image, vector momentum, m, and the map, :math:`\\\\phi`\n :param u: ignored, no external input\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [m,phi]\n \"\"\"\n\n # assume x[0] is m and x[1] is phi for the state\n m = x[0]\n m = m.clamp(max=1., min=-1.)\n phi = x[1]\n\n if self.compute_inverse_map:\n phi_inv = x[2]\n\n if not self.use_net:\n v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'phi':phi}),variables_from_optimizer)\n else:\n v = self.smoother.adaptive_smooth(m, phi, using_map=True)\n\n # print('max(|v|) = ' + str( v.abs().max() ))\n\n if self.compute_inverse_map:\n ret_val= [self.rhs.rhs_epdiff_multiNC(m,v),\n self.rhs.rhs_advect_map_multiNC(phi,v),\n self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]\n else:\n new_m = self.rhs.rhs_epdiff_multiNC(m,v)\n new_phi = self.rhs.rhs_advect_map_multiNC(phi,v)\n ret_val= [new_m, new_phi]\n return ret_val\n\n\n\nclass EPDiffAdaptMap(ForwardModel):\n \"\"\"\n Forward model for the EPDiff equation. State is the momentum, m, and the transform, :math:`\\\\phi`\n (mapping the source image to the target image).\n\n :math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n\n :math:`v=Km`\n\n :math:`\\\\phi_t+D\\\\phi v=0`\n \"\"\"\n\n def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False, update_sm_by_advect= True, update_sm_with_interpolation=True,compute_on_initial_map=True):\n super(EPDiffAdaptMap, self).__init__(sz, spacing, params)\n from . import module_parameters as pars\n from . 
import smoother_factory as sf\n self.compute_inverse_map = compute_inverse_map\n \"\"\"If True then computes the inverse map on the fly for a map-based solution\"\"\"\n\n self.smoother = smoother\n self.update_sm_by_advect = update_sm_by_advect\n self.use_the_first_step_penalty = True\n self.update_sm_with_interpolation = update_sm_with_interpolation\n self.compute_on_initial_map=compute_on_initial_map\n self.update_sm_weight=None\n self.velocity_mask = None\n self.debug_mode_on = False\n s_m_params = pars.ParameterDict()\n s_m_params['smoother']['type'] = 'gaussian'\n s_m_params['smoother']['gaussian_std'] =self.params['smoother']['deep_smoother']['deep_network_local_weight_smoothing']\n self.embedded_smoother = sf.SmootherFactory(sz[2:], spacing).create_smoother(\n s_m_params)\n\n \"\"\" if only take the first step penalty as the total penalty, otherwise accumluate the penalty\"\"\"\n def debug_nan(self, input, t,name=''):\n x = utils.checkNan([input])\n if np.sum(x):\n # print(input[0])\n print(\"find nan at {} step, {} with number {}\".format(t,name,x[0]))\n\n raise ValueError(\"nan error\")\n def init_zero_sm_weight(self,sm_weight):\n self.update_sm_weight = torch.zeros_like(sm_weight).detach()\n\n\n def init_velocity_mask(self,velocity_mask):\n self.velocity_mask = velocity_mask\n\n\n def debug_distrib(self,var,name):\n var = var.detach().cpu().numpy()\n density,_= np.histogram(var,[-100,-10,-1,0,1,10,100],density=True)\n print(\"{} distri:{}\".format(name,density))\n\n\n def f(self, t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of the EPDiff equation:\n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm'\n\n :math:`-D\\\\phi v`\n\n :param t: time (ignored; not time-dependent)\n :param x: state, here the image, vector momentum, m, and the map, :math:`\\\\phi`\n :param u: ignored, no external input\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [m,phi]\n \"\"\"\n\n # assume x[0] is m and x[1] is phi for the state\n m = x[0]\n m=m.clamp(max=1., min=-1.)\n phi = x[1]\n return_val_name = []\n sm_weight = None\n if self.update_sm_by_advect:\n if not self.update_sm_with_interpolation:\n sm_weight_pre = x[2]\n sm_weight = self.embedded_smoother.smooth(sm_weight_pre)\n\n v, extra_ret = self.smoother.smooth(m, None, {'w':sm_weight},multi_output=True)\n if self.velocity_mask is not None:\n v = v* self.velocity_mask\n new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)\n new_sm_weight_pre = self.rhs.rhs_advect_map_multiNC(sm_weight_pre, v)\n new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m, v, new_sm_weight_pre, extra_ret,\n self.embedded_smoother)\n\n ret_val = [new_m, new_phi,new_sm_weight_pre]\n return_val_name =['new_m','new_phi','new_sm_weight']\n else:\n if self.compute_on_initial_map:\n sm_weight = x[2]\n sm_phi = x[3]\n new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, sm_phi, self.spacing, 1,\n zero_boundary=False)\n pre_weight = sm_weight\n new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)\n #print('t{},m min, mean,max {} {} {}'.format(t,m.min().item(),m.mean().item(),m.max().item()))\n v,extra_ret = self.smoother.smooth(m,None,{'w': new_sm_weight},multi_output=True)\n if self.velocity_mask is not None:\n v = v * self.velocity_mask\n\n new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)\n new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)\n 
new_sm_phi = self.rhs.rhs_advect_map_multiNC(sm_phi, v)\n new_sm_weight = self.update_sm_weight.detach()\n ret_val = [new_m, new_phi,new_sm_weight,new_sm_phi]\n return_val_name = ['new_m', 'new_phi', 'new_sm_weight','new_sm_phi']\n else: #todo just attention here is what we currently used\n sm_weight = x[2]\n new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, phi, self.spacing, 1,\n zero_boundary=False)\n\n pre_weight = sm_weight\n new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)\n\n v, extra_ret = self.smoother.smooth(m, None,{'w':new_sm_weight}, multi_output=True)\n\n if self.velocity_mask is not None:\n v = v * self.velocity_mask\n\n new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)\n new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)\n new_sm_weight = self.update_sm_weight.detach()\n ret_val = [new_m, new_phi, new_sm_weight]\n return_val_name = ['new_m', 'new_phi', 'new_sm_weight']\n\n else:\n if not t==0:\n if self.use_the_first_step_penalty:\n self.smoother.disable_penalty_computation()\n else:\n self.smoother.enable_accumulated_penalty()\n\n I = utils.compute_warped_image_multiNC(pars['I0'], phi, self.spacing, 1,zero_boundary=True)\n pars['I'] = I.detach() # TODO check whether I should be detached here\n v = self.smoother.smooth(m, None, pars, variables_from_optimizer)\n if self.velocity_mask is not None:\n v = v * self.velocity_mask\n new_m = self.rhs.rhs_epdiff_multiNC(m, v)\n new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)\n ret_val = [new_m, new_phi]\n return_val_name =['new_m','new_phi']\n\n\n if self.debug_mode_on:\n toshows = [m, v,phi]+ret_val if sm_weight is None else [m, v,phi]+ret_val +[sm_weight]\n name = ['m', 'v','phi']+return_val_name if sm_weight is None else ['m', 'v','phi']+return_val_name +['sm_weight']\n for i, toshow in enumerate(toshows):\n print('t{},{} min, mean,max {} {} {}'.format(t, name[i], toshow.min().item(), toshow.mean().item(),\n toshow.max().item()))\n self.debug_distrib(toshow, name[i])\n self.debug_nan(toshow,t,name[i])\n return ret_val\n\n\n\n # print('max(|v|) = ' + str( v.abs().max() ))\n\n\n\nclass EPDiffScalarMomentum(ForwardModel):\n \"\"\"\n Base class for scalar momentum EPDiff solutions. Defines a smoother that can be commonly used.\n \"\"\"\n\n def __init__(self, sz, spacing, smoother, params):\n super(EPDiffScalarMomentum,self).__init__(sz,spacing,params)\n\n self.smoother = smoother\n\n\nclass EPDiffScalarMomentumImage(EPDiffScalarMomentum):\n \"\"\"\n Forward model for the scalar momentum EPdiff equation. 
State is the scalar momentum, lam, and the image I\n :math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n\n :math:`v=Km`\n\n :math:'m=\\\\lambda\\\\nabla I`\n\n :math:`I_t+\\\\nabla I^Tv=0`\n\n :math:`\\\\lambda_t + div(\\\\lambda v)=0`\n \"\"\"\n\n def __init__(self, sz, spacing, smoother, params=None):\n super(EPDiffScalarMomentumImage, self).__init__(sz, spacing, smoother, params)\n\n def f(self, t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of the EPDiff equation:\n\n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n\n :math:`-\\\\nabla I^Tv`\n\n :math: `-div(\\\\lambda v)`\n\n :param t: time (ignored; not time-dependent) \n :param x: state, here the scalar momentum, lam, and the image, I, itself\n :param u: no external input\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [lam,I]\n \"\"\"\n\n # assume x[0] is \\lambda and x[1] is I for the state\n lam = x[0]\n I = x[1]\n\n # now compute the momentum\n m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)\n v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)\n\n # advection for I, scalar-conservation law for lam\n return [self.rhs.rhs_scalar_conservation_multiNC(lam, v), self.rhs.rhs_advect_image_multiNC(I, v)]\n\n\n\nclass EPDiffScalarMomentumMap(EPDiffScalarMomentum):\n \"\"\"\n Forward model for the scalar momentum EPDiff equation. State is the scalar momentum, lam, the image, I, and the transform, phi.\n :math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n \n :math:`v=Km`\n \n :math:`m=\\\\lambda\\\\nabla I`\n \n :math:`I_t+\\\\nabla I^Tv=0`\n \n :math:`\\\\lambda_t + div(\\\\lambda v)=0`\n \n :math:`\\\\Phi_t+D\\\\Phi v=0`\n \"\"\"\n\n def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False):\n super(EPDiffScalarMomentumMap, self).__init__(sz,spacing, smoother, params)\n self.compute_inverse_map = compute_inverse_map\n \"\"\"If True then computes the inverse map on the fly for a map-based solution\"\"\"\n\n def f(self,t, x, u, pars=None, variables_from_optimizer=None):\n \"\"\"\n Function to be integrated, i.e., right hand side of the EPDiff equation:\n \n :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`\n \n :math:`-\\\\nabla I^Tv`\n \n :math:`-div(\\\\lambda v)`\n \n :math:`-D\\\\Phi v`\n \n :param t: time (ignored; not time-dependent) \n :param x: state, here the scalar momentum, lam, the image, I, and the transform, :math:`\\\\phi`\n :param u: ignored, no external input\n :param pars: ignored (does not expect any additional inputs)\n :param variables_from_optimizer: variables that can be passed from the optimizer\n :return: right hand side [lam,I,phi]\n \"\"\"\n\n # assume x[0] is lam and x[1] is I and x[2] is phi for the state\n lam = x[0]\n I = x[1]\n phi = x[2]\n\n if self.compute_inverse_map:\n phi_inv = x[3]\n\n # now compute the momentum\n m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)\n # todo: replace this by phi again\n #v = self.smoother.smooth(m,None,[phi,True],variables_from_optimizer)\n v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)\n\n if self.compute_inverse_map:\n ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),\n self.rhs.rhs_advect_image_multiNC(I,v),\n self.rhs.rhs_advect_map_multiNC(phi,v),\n 
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]\n else:\n ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),\n self.rhs.rhs_advect_image_multiNC(I,v),\n self.rhs.rhs_advect_map_multiNC(phi,v)]\n\n return ret_val\n",
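The first cell above (`mermaid/forward_models.py`) defines `RHSLibrary`, whose methods return the right hand sides consumed by the forward models. A minimal usage sketch, assuming the `mermaid` package shown above is importable; the tensors are random placeholders in the documented BxCxXxY layout:

```python
# Minimal sketch exercising RHSLibrary in 2D (placeholder data, not a test
# from the repository). Shapes follow the BxCxXxY convention of the docstrings.
import numpy as np
import torch
from mermaid.forward_models import RHSLibrary

spacing = np.array([0.02, 0.02])   # one spacing entry per spatial dimension
rhs_lib = RHSLibrary(spacing)

I = torch.randn(1, 1, 64, 64)      # image batch: B=1, C=1, 64x64 pixels
v = torch.randn(1, 2, 64, 64)      # velocity field: one channel per dimension

# Right hand side of the advection equation -\nabla I^T v; same shape as I.
rhs = rhs_lib.rhs_advect_image_multiNC(I, v)
print(rhs.shape)                   # torch.Size([1, 1, 64, 64])
```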
"\"\"\"\nThis package enables easy single-scale and multi-scale optimization support.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\n# from builtins import zip\n# from builtins import str\n# from builtins import range\n# from builtins import object\nfrom abc import ABCMeta, abstractmethod\nimport os\nimport time\nimport copy\nfrom . import utils\nfrom . import visualize_registration_results as vizReg\nfrom . import custom_optimizers as CO\nimport numpy as np\nimport torch\nfrom .data_wrapper import USE_CUDA, AdaptVal, MyTensor\nfrom . import model_factory as MF\nfrom . import image_sampling as IS\nfrom .metrics import get_multi_metric\nfrom .res_recorder import XlsxRecorder\nfrom .data_utils import make_dir\nfrom torch.utils.data import Dataset, DataLoader\nfrom . import optimizer_data_loaders as OD\nfrom . import fileio as FIO\nfrom . import model_evaluation\n\nfrom collections import defaultdict\nfrom future.utils import with_metaclass\n\nfrom termcolor import colored, cprint\n\n# add some convenience functionality\nclass SimpleRegistration(with_metaclass(ABCMeta, object)):\n \"\"\"\n Abstract optimizer base class.\n \"\"\"\n\n def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):\n \"\"\"\n :param ISource: source image\n :param ITarget: target image\n :param spacing: image spacing\n :param params: parameters\n :param compute_inverse_map: for map-based method the inverse map can be computed on the fly\n \"\"\"\n self.params = params\n self.use_map = self.params['model']['deformation'][('use_map', True, '[True|False] either do computations via a map or directly using the image')]\n self.map_low_res_factor = self.params['model']['deformation'][('map_low_res_factor', 1.0, 'Set to a value in (0,1) if a map-based solution should be computed at a lower internal resolution (image matching is still at full resolution')]\n self.spacing = spacing\n self.ISource = ISource\n self.ITarget = ITarget\n self.sz = sz\n self.compute_inverse_map = compute_inverse_map\n self.default_learning_rate=default_learning_rate\n self.optimizer = None\n\n def get_history(self):\n \"\"\"\n Returns the optimization history as a dictionary. 
Keeps track of energies, iterations counts, and additonal custom measures.\n\n :return: history dictionary\n \"\"\"\n\n if self.optimizer is not None:\n return self.optimizer.get_history()\n else:\n return None\n\n def write_parameters_to_settings(self):\n \"\"\"\n Allows currently computed parameters (if they were optimized) to be written back to an output parameter file\n :return:\n \"\"\"\n if self.optimizer is not None:\n self.optimizer.write_parameters_to_settings()\n\n @abstractmethod\n def register(self):\n \"\"\"\n Abstract method to register the source to the target image\n :return: \n \"\"\"\n pass\n\n def get_optimizer(self):\n \"\"\"\n Returns the optimizer being used (can be used to customize the simple registration if desired)\n :return: optimizer\n \"\"\"\n return self.optimizer\n\n def get_energy(self):\n \"\"\"\n Returns the current energy\n :return: Returns a tuple (energy, similarity energy, regularization energy)\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer.get_energy()\n else:\n return None\n\n def get_warped_label(self):\n \"\"\"\n Returns the warped label\n :return: the warped label\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer.get_warped_label()\n else:\n return None\n\n def get_warped_image(self):\n \"\"\"\n Returns the warped image\n :return: the warped image\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer.get_warped_image()\n else:\n return None\n\n def set_initial_map(self,map0,initial_inverse_map=None):\n \"\"\"\n Sets the initial map for the registrations; by default (w/o setting anything) this will be the identity\n map, but by setting it to a different initial condition one can concatenate transformations.\n\n :param map0:\n :return: n/a\n \"\"\"\n if self.optimizer is not None:\n self.optimizer.set_initial_map(map0, initial_inverse_map)\n # self.optimizer.set_initial_inverse_map(initial_inverse_map)\n\n def set_weight_map(self,weight_map):\n if self.optimizer is not None:\n self.optimizer.set_initial_map(weight_map)\n\n def get_initial_map(self):\n \"\"\"\n Returns the initial map; this will typically be the identity map, but can be set to a different initial\n condition using set_initial_map\n\n :return: returns the initial map (if applicable)\n \"\"\"\n\n if self.optimizer is not None:\n return self.optimizer.get_initial_map()\n else:\n return None\n\n def get_initial_inverse_map(self):\n \"\"\"\n Returns the initial inverse map; this will typically be the identity map, but can be set to a different initial\n condition using set_initial_map\n\n :return: returns the initial map (if applicable)\n \"\"\"\n\n if self.optimizer is not None:\n return self.optimizer.get_initial_inverse_map()\n else:\n return None\n\n def get_map(self):\n \"\"\"\n Returns the deformation map\n :return: deformation map\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer.get_map()\n\n def get_inverse_map(self):\n \"\"\"\n Returns the inverse deformation map if available\n :return: deformation map\n \"\"\"\n if self.optimizer is not None:\n return self.optimizer.get_inverse_map()\n\n def get_model_parameters(self):\n \"\"\"\n Returns the parameters of the model\n\n :return: model parameters \n \"\"\"\n return self.optimizer.get_model_parameters()\n\n def set_model_parameters(self,p):\n \"\"\"\n Sets the parameters of a model\n\n :param p: model parameters\n :return:\n \"\"\"\n self.optimizer.set_model_parameters(p)\n\n def get_model_state_dict(self):\n \"\"\"\n Returns the state dictionary of the mode\n\n 
:return: state dictionary\n \"\"\"\n return self.optimizer.get_model_state_dict()\n\n def set_model_state_dict(self,sd):\n \"\"\"\n Sets the state dictionary of the model\n\n :param sd: state dictionary\n :return:\n \"\"\"\n self.optimizer.set_model_state_dict(sd)\n\n\n\nclass SimpleSingleScaleRegistration(SimpleRegistration):\n \"\"\"\n Simple single scale registration\n \"\"\"\n def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):\n super(SimpleSingleScaleRegistration, self).__init__(ISource,ITarget,spacing,sz,params,compute_inverse_map=compute_inverse_map,default_learning_rate=default_learning_rate)\n self.optimizer = SingleScaleRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n def register(self):\n \"\"\"\n Registers the source to the target image\n :return: n/a\n \"\"\"\n self.optimizer.register(self.ISource, self.ITarget)\n\n\nclass SimpleSingleScaleConsensusRegistration(SimpleRegistration):\n \"\"\"\n Single scale registration making use of consensus optimization (to allow for multiple independent registration\n that can share parameters).\n \"\"\"\n def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):\n super(SimpleSingleScaleConsensusRegistration, self).__init__(ISource,ITarget,spacing,sz,params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n self.optimizer = SingleScaleConsensusRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n def register(self):\n \"\"\"\n Registers the source to the target image\n :return: n/a\n \"\"\"\n self.optimizer.register(self.ISource, self.ITarget)\n\n\nclass SimpleSingleScaleBatchRegistration(SimpleRegistration):\n \"\"\"\n Single scale registration making use of batch optimization (to allow optimizing over many or large images).\n \"\"\"\n def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):\n super(SimpleSingleScaleBatchRegistration, self).__init__(ISource,ITarget,spacing,sz,params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n self.optimizer = SingleScaleBatchRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map,default_learning_rate=default_learning_rate)\n\n def register(self):\n \"\"\"\n Registers the source to the target image\n :return: n/a\n \"\"\"\n self.optimizer.register(self.ISource, self.ITarget)\n\n\nclass SimpleMultiScaleRegistration(SimpleRegistration):\n \"\"\"\n Simple multi scale registration\n \"\"\"\n def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):\n super(SimpleMultiScaleRegistration, self).__init__(ISource, ITarget, spacing,sz,params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n self.optimizer = MultiScaleRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n def register(self):\n \"\"\"\n Registers the source to the target image\n :return: n/a\n \"\"\"\n self.optimizer.register(self.ISource,self.ITarget)\n\n\nclass 
Optimizer(with_metaclass(ABCMeta, object)):\n \"\"\"\n Abstract optimizer base class.\n \"\"\"\n\n def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):\n \"\"\"\n Constructor.\n \n :param sz: image size in BxCxXxYxZ format\n :param spacing: spatial spacing, e.g., [0.1,0.1,0.1] in 3D\n :param useMap: boolean, True if a coordinate map is evolved to warp images, False otherwise\n :param map_low_res_factor: if <1 evolutions happen at a lower resolution; >=1 ignored \n :param params: ParametersDict() instance to hold parameters\n :param compute_inverse_map: for map-based models the inverse map can be computed on the fly\n \"\"\"\n self.sz = sz\n \"\"\"image size\"\"\"\n self.spacing = spacing\n \"\"\"image spacing\"\"\"\n self.lowResSize = None\n \"\"\"low res image size\"\"\"\n self.lowResSpacing = None\n \"\"\"low res image spacing\"\"\"\n self.useMap = useMap\n \"\"\"makes use of map\"\"\"\n self.mapLowResFactor = mapLowResFactor\n \"\"\"if <1 then evolutions are at a lower resolution, but image is compared at the same resolution; >=1 ignored\"\"\"\n if self.mapLowResFactor is not None:\n if self.mapLowResFactor>1:\n print('mapLowResFactor needs to be <=1 but is set to ' + str( self.mapLowResFactor ) + '; ignoring it')\n self.mapLowResFactor = None\n elif self.mapLowResFactor==1:\n print('mapLowResFactor = 1: performing computations at original resolution.')\n self.mapLowResFactor = None\n\n self.compute_inverse_map = compute_inverse_map\n \"\"\"If set to True the inverse map is computed on the fly for map-based models\"\"\"\n self.default_learning_rate = default_learning_rate\n \"\"\"If set, this will be the learning rate that the optimizers used (otherwise, as specified in the json configuration, via params)\"\"\"\n\n self.params = params\n \"\"\"general parameters\"\"\"\n self.rel_ftol = 1e-4\n \"\"\"relative termination tolerance for optimizer\"\"\"\n self.last_successful_step_size_taken = None\n \"\"\"Records the last successful step size an optimizer took (possible use: propogate step size between multiscale levels\"\"\"\n\n self.external_optimizer_parameter_loss = None\n\n if (self.mapLowResFactor is not None):\n self.lowResSize = utils._get_low_res_size_from_size( sz, self.mapLowResFactor )\n self.lowResSpacing = utils._get_low_res_spacing_from_spacing(self.spacing,sz,self.lowResSize)\n self.sampler = IS.ResampleImage()\n\n self.params[('optimizer', {}, 'optimizer settings')]\n self.params[('model', {}, 'general model settings')]\n self.params['model'][('deformation', {}, 'model describing the desired deformation model')]\n self.params['model'][('registration_model', {}, 'general settings for the registration model')]\n\n self.params['model']['deformation']['use_map']= (useMap, '[True|False] either do computations via a map or directly using the image')\n self.params['model']['deformation']['map_low_res_factor'] = (mapLowResFactor, 'Set to a value in (0,1) if a map-based solution should be computed at a lower internal resolution (image matching is still at full resolution')\n\n self.compute_similarity_measure_at_low_res = self.params['model']['deformation'][('compute_similarity_measure_at_low_res',False,'If set to true map is not upsampled and the entire computations proceeds at low res')]\n\n self.rel_ftol = self.params['optimizer']['single_scale'][('rel_ftol',self.rel_ftol,'relative termination tolerance for optimizer')]\n\n self.spline_order = params['model']['registration_model'][('spline_order', 1, 'Spline 
interpolation order; 1 is linear interpolation (default); 3 is cubic spline')]\n \"\"\"order of the spline for interpolations\"\"\"\n\n self.show_iteration_output = True\n self.history = dict()\n\n self.optimizer_has_been_initialized = False\n \"\"\"\n Needs to be set before the actual optimization commences; allows to keep track if all parameters have been set\n and for example to delay external parameter settings\n \"\"\"\n\n def write_parameters_to_settings(self):\n \"\"\"\n Writes current state of optimized parameters back to the json setting file (for example to keep track of optimized weights)\n :return:\n \"\"\"\n pass\n\n def turn_iteration_output_on(self):\n self.show_iteration_output = True\n\n def turn_iteration_output_off(self):\n self.show_iteration_output = False\n\n def get_history(self):\n \"\"\"\n Returns the optimization history as a dictionary. Keeps track of energies, iterations counts, and additonal custom measures.\n\n :return: history dictionary\n \"\"\"\n return self.history\n\n def _add_to_history(self,key,value):\n \"\"\"\n Adds an element to the optimizer history\n\n :param key: history key\n :param value: value that is associated with it\n :return: n/a\n \"\"\"\n if key not in self.history:\n self.history[key] = [value]\n else:\n self.history[key].append(value)\n\n def set_last_successful_step_size_taken(self,lr):\n \"\"\"\n Function to let the optimizer know what step size has been successful previously.\n Useful for example to retain optimization \"memory\" across scales in a multi-scale implementation\n :param lr: step size\n :return: n/a\n \"\"\"\n self.last_successful_step_size_taken=lr\n\n def get_last_successful_step_size_taken(self):\n \"\"\"\n Returns the last successful step size the optimizer has taken (if the optimizer supports querying the step size)\n :return: last successful step size\n \"\"\"\n return self.last_successful_step_size_taken\n\n def set_rel_ftol(self, rel_ftol):\n \"\"\"Sets the relative termination tolerance: :math:`|f(x_i)-f(x_{i-1})|/f(x_i)<tol`\n \n :param rel_ftol: relative termination tolerance for optimizer\n \"\"\"\n self.rel_ftol = rel_ftol\n self.params['optimizer']['single_scale']['rel_ftol'] = (rel_ftol,'relative termination tolerance for optimizer')\n self.rel_ftol = self.params['optimizer']['single_scale']['rel_ftol']\n\n def get_rel_ftol(self):\n \"\"\"\n Returns the optimizer termination tolerance\n \"\"\"\n return self.rel_ftol\n\n\n\n @abstractmethod\n def set_model(self, modelName):\n \"\"\"\n Abstract method to select the model which should be optimized by name\n \n :param modelName: name (string) of the model that should be solved\n \"\"\"\n pass\n\n @abstractmethod\n def optimize(self):\n \"\"\"\n Abstract method to start the optimization\n \"\"\"\n pass\n\n def get_last_successful_step_size_taken(self):\n return self.last_successful_step_size_taken\n\n def get_checkpoint_dict(self):\n \"\"\"\n Returns a dict() object containing the information for the current checkpoint.\n :return: checpoint dictionary\n \"\"\"\n return dict()\n\n def load_checkpoint_dict(self,d,load_optimizer_state=False):\n \"\"\"\n Takes the dictionary from a checkpoint and loads it as the current state of optimizer and model\n\n :param d: dictionary\n :param load_optimizer_state: if set to True the optimizer state will be restored\n :return: n/a\n \"\"\"\n pass\n\n def save_checkpoint(self,filename):\n torch.save(self.get_checkpoint_dict(),filename)\n\n def load_checkpoint(self,filename):\n d = torch.load(filename)\n 
self.load_checkpoint_dict(d)\n\n def set_external_optimizer_parameter_loss(self,opt_parameter_loss):\n \"\"\"\n Allows to set an external method as an optimizer parameter loss\n :param opt_parameter_loss: method which takes shared_model_parameters as its only input\n :return: returns a scalar value which is the loss\n \"\"\"\n self.external_optimizer_parameter_loss = opt_parameter_loss\n\n def get_external_optimizer_parameter_loss(self):\n \"\"\"\n Returns the externally set method for parameter loss. Will be None if none was set.\n :return: method\n \"\"\"\n return self.external_optimizer_parameter_loss\n\n def compute_optimizer_parameter_loss(self,shared_model_parameters):\n \"\"\"\n Returns the optimizer parameter loss. This is the method that should be called to compute this loss.\n Will either evaluate the method optimizer_parameter_loss or if one was externally defined, the\n externally defined one will have priority.\n\n :param shared_model_parameters: paramters that have been declared shared in a model\n :return: parameter loss\n \"\"\"\n if self.external_optimizer_parameter_loss is not None:\n return self.external_optimizer_parameter_loss(shared_model_parameters)\n else:\n return self.optimizer_parameter_loss(shared_model_parameters)\n\n def optimizer_parameter_loss(self,shared_model_parameters):\n \"\"\"\n This allows to define additional terms for the loss which are based on parameters that are shared\n between models (for example for the smoother). Can be used to define a form of consensus optimization.\n :param shared_model_parameters: paramters that have been declared shared in a model\n :return: 0 by default, otherwise the corresponding penalty\n \"\"\"\n return MyTensor(1).zero_()\n\nclass ImageRegistrationOptimizer(Optimizer):\n \"\"\"\n Optimization class for image registration.\n \"\"\"\n\n def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):\n super(ImageRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n self.ISource = None\n \"\"\"source image\"\"\"\n self.lowResISource = None\n \"\"\"if mapLowResFactor <1, a lowres soure image needs to be created to parameterize some of the registration algorithms\"\"\"\n self.lowResITarget = None\n \"\"\"if mapLowResFactor <1, a lowres target image may need to be created to be used as additonal inputs for registration algorithms\"\"\"\n self.ITarget = None\n \"\"\"target image\"\"\"\n self.LSource = None\n \"\"\" source label \"\"\"\n self.LTarget = None\n \"\"\" target label \"\"\"\n self.lowResLSource = None\n \"\"\"if mapLowResFactor <1, a lowres soure label image needs to be created to parameterize some of the registration algorithms\"\"\"\n self.lowResLTarget = None\n \"\"\"if mapLowResFactor <1, a lowres target label image needs to be created to parameterize some of the registration algorithms\"\"\"\n self.initialMap = None\n \"\"\" initial map\"\"\"\n self.initialInverseMap = None\n \"\"\" initial inverse map\"\"\"\n self.weight_map =None\n \"\"\" initial weight map\"\"\"\n self.multi_scale_info_dic = None\n \"\"\" dicts containing full resolution image and label\"\"\"\n self.optimizer_name = None #''lbfgs_ls'\n \"\"\"name of the optimizer to use\"\"\"\n self.optimizer_params = {}\n \"\"\"parameters that should be passed to the optimizer\"\"\"\n self.optimizer = None\n \"\"\"optimizer object itself (to be instantiated)\"\"\"\n self.visualize 
= True\n \"\"\"if True figures are created during the run\"\"\"\n self.visualize_step = 10\n \"\"\"how often the figures are updated; each self.visualize_step-th iteration\"\"\"\n self.nrOfIterations = None\n \"\"\"the maximum number of iterations for the optimizer\"\"\"\n self.current_epoch = None\n \"\"\"Can be set externally, so the optimizer knows in which epoch we are\"\"\"\n self.save_fig=False\n \"\"\" save fig during the visualization\"\"\"\n self.save_fig_path=None\n \"\"\" the path for saving figures\"\"\"\n self.save_fig_num =-1\n \"\"\" the max num of the fig to be saved during one call, set -1 to save all\"\"\"\n self.pair_name=None\n \"\"\" name list of the registration pair \"\"\"\n self.iter_count = 0\n \"\"\" count of the iterations over multi-resolution\"\"\"\n self.recording_step = None\n \"\"\"sets the step-size for recording all intermediate results to the history\"\"\"\n\n def set_recording_step(self, step):\n assert step > 0, 'Recording step needs to be larger than 0'\n self.recording_step = step\n self.history['recording'] = []\n\n def set_current_epoch(self,current_epoch):\n self.current_epoch = current_epoch\n\n def get_current_epoch(self):\n return self.current_epoch\n\n\n\n def turn_visualization_on(self):\n \"\"\"\n Turns on visualization during the run\n \"\"\"\n self.visualize = True\n\n def turn_visualization_off(self):\n \"\"\"\n Turns off visualization during the run\n \"\"\"\n self.visualize = False\n\n def set_visualization(self, vis):\n \"\"\"\n Set if visualization should be on (True) or off (False)\n\n :param vis: visualization status on (True) or off (False)\n \"\"\"\n self.visualize = vis\n\n def get_visualization(self):\n \"\"\"\n Returns the visualization status\n\n :return: Returns True if visualizations will be displayed and False otherwise\n \"\"\"\n return self.visualize\n\n def set_visualize_step(self, nr_step):\n \"\"\"\n Set after how many steps a visualization should be updated\n\n :param nr_step:\n \"\"\"\n self.visualize_step = nr_step\n\n\n def get_visualize_step(self):\n \"\"\"\n Returns after how many steps visualizations are updated\n\n :return: after how many steps visualizations are updated\n \"\"\"\n return self.visualize_step\n\n def set_save_fig(self,save_fig):\n \"\"\"\n :param save_fig: True: save the visualized figs\n :return:\n \"\"\"\n self.save_fig = save_fig\n def get_save_fig(self):\n \"\"\"\n :param save_fig: True: get the visualized figs\n :return:\n \"\"\"\n return self.save_fig\n\n def set_save_fig_path(self, save_fig_path):\n \"\"\"\n the path of saved figures, default is the ../data/expr_name\n :param save_fig_path:\n :return:\n \"\"\"\n self.save_fig_path = save_fig_path\n\n\n\n def get_save_fig_path(self):\n \"\"\"\n the path of saved figures, default is the ../data/expr_name\n :param save_fig_path:\n :return:\n \"\"\"\n return self.save_fig_path\n\n\n def set_save_fig_num(self, save_fig_num=-1):\n \"\"\"\n set the num of the fig to save\n :param save_fig_num:\n :return:\n \"\"\"\n self.save_fig_num = save_fig_num\n\n def get_save_fig_num(self):\n \"\"\"\n set the num of the fig to save\n :param save_fig_num:\n :return:\n \"\"\"\n return self.save_fig_num\n\n def set_expr_name(self, expr_name):\n \"\"\"\n the name of experiments\n :param expr_name:\n :return:\n \"\"\"\n self.expr_name = expr_name\n\n def get_expr_name(self): \n \"\"\"\n the name of experiments\n :param expr_name:\n :return:\n \"\"\"\n return self.expr_name\n\n def set_pair_name(self, pair_name):\n self.pair_name = pair_name\n\n\n def 
get_pair_name(self):\n        return self.pair_name\n\n\n    def register(self, ISource, ITarget):\n        \"\"\"\n        Registers the source to the target image\n        :param ISource: source image\n        :param ITarget: target image\n        :return: n/a\n        \"\"\"\n        self.set_source_image(ISource)\n        self.set_target_image(ITarget)\n        self.optimize()\n        self.write_parameters_to_settings()\n\n    def set_source_image(self, I):\n        \"\"\"\n        Setting the source image which should be deformed to match the target image\n\n        :param I: source image\n        \"\"\"\n        self.ISource = I\n\n    def set_multi_scale_info(self, ISource, ITarget, spacing, LSource=None, LTarget=None):\n        \"\"\"provide full resolution of Image and Label\"\"\"\n        self.multi_scale_info_dic = {'ISource': ISource, 'ITarget': ITarget, 'spacing': spacing, 'LSource': LSource,\n                                     'LTarget': LTarget}\n\n    def _compute_low_res_image(self,I,params,spacing=None):\n        low_res_image = None\n        if self.mapLowResFactor is not None:\n            low_res_image,_ = self.sampler.downsample_image_to_size(I,spacing,self.lowResSize[2::],self.spline_order)\n        return low_res_image\n\n    def _compute_low_res_label_map(self,label_map,params, spacing=None):\n        low_res_label_map = None\n        if self.mapLowResFactor is not None:\n            low_res_label_map, _ = self.sampler.downsample_image_to_size(label_map, spacing, self.lowResSize[2::],\n                                                                         0)\n        return low_res_label_map\n\n    def compute_low_res_image_if_needed(self):\n        \"\"\"To be called before the optimization starts\"\"\"\n        if self.multi_scale_info_dic is None:\n            ISource = self.ISource\n            ITarget = self.ITarget\n            LSource = self.LSource\n            LTarget = self.LTarget\n            spacing = self.spacing\n        else:\n            ISource, ITarget, LSource, LTarget, spacing = self.multi_scale_info_dic['ISource'], self.multi_scale_info_dic['ITarget'],\\\n            self.multi_scale_info_dic['LSource'],self.multi_scale_info_dic['LTarget'],self.multi_scale_info_dic['spacing']\n        if self.mapLowResFactor is not None:\n            self.lowResISource = self._compute_low_res_image(ISource,self.params,spacing)\n            # todo: can be removed to save memory; is more experimental at this point\n            self.lowResITarget = self._compute_low_res_image(ITarget,self.params,spacing)\n            if self.LSource is not None and self.LTarget is not None:\n                self.lowResLSource = self._compute_low_res_label_map(LSource,self.params,spacing)\n                self.lowResLTarget = self._compute_low_res_label_map(LTarget, self.params,spacing)\n\n    def set_source_label(self, LSource):\n        \"\"\"\n        :param LSource:\n        :return:\n        \"\"\"\n        self.LSource = LSource\n\n\n    def set_target_label(self, LTarget):\n        \"\"\"\n        :param LTarget:\n        :return:\n        \"\"\"\n        self.LTarget = LTarget\n\n\n    def get_source_label(self):\n        return self.LSource\n\n    def get_target_label(self):\n        return self.LTarget\n\n    def set_target_image(self, I):\n        \"\"\"\n        Setting the target image which the source image should match after registration\n\n        :param I: target image\n        \"\"\"\n        self.ITarget = I\n\n\n    def set_optimizer_by_name(self, optimizer_name):\n        \"\"\"\n        Set the desired optimizer by name (only lbfgs and adam are currently supported)\n\n        :param optimizer_name: name of the optimizer (string) to be used\n        \"\"\"\n        self.optimizer_name = optimizer_name\n        self.params['optimizer']['name'] = optimizer_name\n\n    def get_optimizer_by_name(self):\n        \"\"\"\n        Get the name (string) of the optimizer that was selected\n\n        :return: name (string) of the optimizer\n        \"\"\"\n        return self.optimizer_name\n\n    def set_optimizer(self, opt):\n        \"\"\"\n        Set the optimizer. 
\n\n def set_optimizer(self, opt):\n \"\"\"\n Set the optimizer. Not by name, but instead by passing the optimizer object which should be instantiated\n\n :param opt: optimizer object\n \"\"\"\n self.optimizer = opt\n\n def get_optimizer(self):\n \"\"\"\n Returns the optimizer object which was set to perform the optimization\n\n :return: optimizer object\n \"\"\"\n return self.optimizer\n\n def set_optimizer_params(self, opt_params):\n \"\"\"\n Set the desired parameters of the optimizer. This is done by passing a dictionary, for example, dict(lr=0.01)\n\n :param opt_params: dictionary holding the parameters of an optimizer\n \"\"\"\n self.optimizer_params = opt_params\n\n\nclass SingleScaleRegistrationOptimizer(ImageRegistrationOptimizer):\n \"\"\"\n Optimizer operating on a single scale. Typically this will be the full image resolution.\n\n .. todo::\n Check what the best way to adapt the tolerances for the pre-defined optimizers;\n tying it to rel_ftol is not really correct.\n \"\"\"\n\n def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):\n super(SingleScaleRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n if self.mapLowResFactor is not None:\n # computes model at a lower resolution than the image similarity\n if self.compute_similarity_measure_at_low_res:\n self.mf = MF.ModelFactory(self.lowResSize, self.lowResSpacing, self.lowResSize, self.lowResSpacing )\n else:\n self.mf = MF.ModelFactory(self.sz, self.spacing, self.lowResSize, self.lowResSpacing )\n else:\n # computes model and similarity at the same resolution\n self.mf = MF.ModelFactory(self.sz, self.spacing, self.sz, self.spacing)\n \"\"\"model factory which will be used to create the model and its loss function\"\"\"\n\n self.model = None\n \"\"\"the model itself\"\"\"\n self.criterion = None\n \"\"\"the loss function\"\"\"\n\n self.initialMap = None\n \"\"\"initial map, will be needed for map-based solutions; by default this will be the identity map, but can be set to something different externally\"\"\"\n self.initialInverseMap = None\n \"\"\"initial inverse map; will be the same as the initial map, unless it was set externally\"\"\"\n self.map0_inverse_external = None\n \"\"\"initial inverse map, set externally, will be needed for map-based solutions; by default this will be the identity map, but can be set to something different externally\"\"\"\n self.map0_external = None\n \"\"\"initial map, set externally\"\"\"\n self.lowResInitialMap = None\n \"\"\"low res initial map, by default the identity map, will be needed for map-based solutions which are computed at lower resolution\"\"\"\n self.lowResInitialInverseMap = None\n \"\"\"low res initial inverse map, by default the identity map, will be needed for map-based solutions which are computed at lower resolution\"\"\"\n self.weight_map = None\n \"\"\"initial weight map; only used by metric-learning models\"\"\"\n self.optimizer_instance = None\n \"\"\"the optimizer instance to perform the actual optimization\"\"\"\n\n c_params = self.params[('optimizer', {}, 'optimizer settings')]\n self.weight_clipping_type = c_params[('weight_clipping_type','none','Type of weight clipping that should be used [l1|l2|l1_individual|l2_individual|l1_shared|l2_shared|None]')]\n self.weight_clipping_type = self.weight_clipping_type.lower()\n \"\"\"Type of weight clipping; applied to weights and bias independently; norm restricted to weight_clipping_value\"\"\"\n if 
self.weight_clipping_type=='none':\n self.weight_clipping_type = None\n if self.weight_clipping_type!='pre_lsm_weights':\n self.weight_clipping_value = c_params[('weight_clipping_value', 1.0, 'Value to which the norm is being clipped')]\n \"\"\"Desired norm after clipping\"\"\"\n\n extent = self.spacing * self.sz[2:]\n max_extent = max(extent)\n\n clip_params = c_params[('gradient_clipping',{},'clipping settings for the gradient for optimization')]\n self.clip_display = clip_params[('clip_display', True, 'If set to True displays if clipping occurred')]\n self.clip_individual_gradient = clip_params[('clip_individual_gradient',False,'If set to True, the gradient for the individual parameters will be clipped')]\n self.clip_individual_gradient_value = clip_params[('clip_individual_gradient_value',max_extent,'Value to which the gradient for the individual parameters is clipped')]\n self.clip_shared_gradient = clip_params[('clip_shared_gradient', True, 'If set to True, the gradient for the shared parameters will be clipped')] # todo: revisit the gradient clipping; without it the optimization may become unstable\n self.clip_shared_gradient_value = clip_params[('clip_shared_gradient_value', 1.0, 'Value to which the gradient for the shared parameters is clipped')]\n\n self.scheduler = None # for the step size scheduler\n self.patience = None # for the step size scheduler\n self._use_external_scheduler = False\n\n self.rec_energy = None\n self.rec_similarityEnergy = None\n self.rec_regEnergy = None\n self.rec_opt_par_loss_energy = None\n self.rec_phiWarped = None\n self.rec_phiInverseWarped = None\n self.rec_IWarped = None\n self.last_energy = None\n self.rel_f = None\n self.rec_custom_optimizer_output_string = ''\n \"\"\"the evaluation information\"\"\"\n self.rec_custom_optimizer_output_values = None\n\n self.delayed_model_parameters = None\n self.delayed_model_parameters_still_to_be_set = False\n self.delayed_model_state_dict = None\n self.delayed_model_state_dict_still_to_be_set = False\n\n # to be able to transfer state and parameters\n self._sgd_par_list = None # holds the list of parameters\n self._sgd_par_names = None # holds the list of names associated with these parameters\n self._sgd_name_to_model_par = None # allows mapping from name to model parameter\n self._sgd_split_shared = None # keeps track if the shared states were split or not\n self._sgd_split_individual = None # keeps track if the individual states were split or not\n self.over_scale_iter_count = None # accumulated iteration count over the different scales\n self.n_scale = None # the index of the current scale; todo: rename and document\n\n\n def write_parameters_to_settings(self):\n if self.model is not None:\n self.model.write_parameters_to_settings()\n\n def get_sgd_split_shared(self):\n return self._sgd_split_shared\n\n def get_sgd_split_indvidual(self):\n return self._sgd_split_individual\n\n def get_checkpoint_dict(self):\n if self.model is not None and self.optimizer_instance is not None:\n d = super(SingleScaleRegistrationOptimizer, self).get_checkpoint_dict()\n d['model'] = dict()\n d['model']['parameters'] = self.model.get_registration_parameters_and_buffers()\n d['model']['size'] = self.model.sz\n d['model']['spacing'] = self.model.spacing\n d['optimizer_state'] = self.optimizer_instance.state_dict()\n return d\n else:\n raise ValueError('Unable to create checkpoint, because the model or the optimizer has not been initialized')\n\n def load_checkpoint_dict(self,d,load_optimizer_state=False):\n if self.model is not None and self.optimizer_instance is 
not None:\n self.model.set_registration_parameters(d['model']['parameters'],d['model']['size'],d['model']['spacing'])\n if load_optimizer_state:\n try:\n self.optimizer_instance.load_state_dict(d['optimizer_state'])\n print('INFO: Was able to load the previous optimizer state from checkpoint data')\n except Exception:\n print('INFO: Could not load the previous optimizer state')\n else:\n print('WARNING: Turned off the loading of the optimizer state')\n else:\n raise ValueError('Cannot load checkpoint dictionary, because the model or the optimizer has not been initialized')\n\n def get_opt_par_energy(self):\n \"\"\"\n Energy for optimizer parameters\n\n :return: energy of the optimizer-parameter loss term\n \"\"\"\n return self.rec_opt_par_loss_energy.cpu().item()\n\n def get_custom_output_values(self):\n \"\"\"\n Custom output values\n\n :return: custom optimizer output values\n \"\"\"\n return self.rec_custom_optimizer_output_values\n\n def get_energy(self):\n \"\"\"\n Returns the current energy\n :return: Returns a tuple (energy, similarity energy, regularization energy)\n \"\"\"\n return self.rec_energy.cpu().item(), self.rec_similarityEnergy.cpu().item(), self.rec_regEnergy.cpu().item()\n\n def get_warped_image(self):\n \"\"\"\n Returns the warped image\n :return: the warped image\n \"\"\"\n if self.useMap:\n cmap = self.get_map()\n # and now warp it\n return utils.compute_warped_image_multiNC(self.ISource, cmap, self.spacing, self.spline_order,zero_boundary=True)\n else:\n return self.rec_IWarped\n\n def get_warped_label(self):\n \"\"\"\n Returns the warped label\n :return: the warped label\n \"\"\"\n if self.useMap:\n cmap = self.get_map()\n return utils.get_warped_label_map(self.LSource, cmap, self.spacing)\n else:\n return None\n\n def get_map(self):\n \"\"\"\n Returns the deformation map\n :return: deformation map\n \"\"\"\n return self.rec_phiWarped\n\n def get_inverse_map(self):\n \"\"\"\n Returns the inverse deformation map\n :return: inverse deformation map\n \"\"\"\n return self.rec_phiInverseWarped\n\n def set_n_scale(self, n_scale):\n \"\"\"\n Sets the index of the current scale (for multi-scale solutions)\n :param n_scale: scale index\n :return: n/a\n \"\"\"\n self.n_scale = n_scale\n\n def set_over_scale_iter_count(self, iter_count):\n self.over_scale_iter_count = iter_count\n\n\n def _create_initial_maps(self):\n if self.useMap:\n # create the identity map [-1,1]^d, since we will use a map-based implementation\n if self.map0_external is not None:\n self.initialMap = self.map0_external\n else:\n id = utils.identity_map_multiN(self.sz, self.spacing)\n self.initialMap = AdaptVal(torch.from_numpy(id))\n\n if self.map0_inverse_external is not None:\n self.initialInverseMap = self.map0_inverse_external\n else:\n id = utils.identity_map_multiN(self.sz, self.spacing)\n self.initialInverseMap = AdaptVal(torch.from_numpy(id))\n\n if self.mapLowResFactor is not None:\n # create a lower resolution map for the computations\n if self.map0_external is None:\n lowres_id = utils.identity_map_multiN(self.lowResSize, self.lowResSpacing)\n self.lowResInitialMap = AdaptVal(torch.from_numpy(lowres_id))\n else:\n sampler = IS.ResampleImage()\n lowres_id, _ = sampler.downsample_image_to_size(self.initialMap , self.spacing,self.lowResSize[2::] , 1,zero_boundary=False)\n self.lowResInitialMap = AdaptVal(lowres_id)\n\n if self.map0_inverse_external is None:\n lowres_id = utils.identity_map_multiN(self.lowResSize, self.lowResSpacing)\n self.lowResInitialInverseMap = AdaptVal(torch.from_numpy(lowres_id))\n else:\n sampler = IS.ResampleImage()\n lowres_inverse_id, _ = 
sampler.downsample_image_to_size(self.initialInverseMap, self.spacing, self.lowResSize[2::],\n 1, zero_boundary=False)\n self.lowResInitialInverseMap = AdaptVal(lowres_inverse_id)\n\n def set_model(self, modelName):\n \"\"\"\n Sets the model that should be solved\n\n :param modelName: name of the model that should be solved (string)\n \"\"\"\n\n self.params['model']['registration_model']['type'] = ( modelName, \"['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with '_map' or '_image' suffix\" )\n\n self.model, self.criterion = self.mf.create_registration_model(modelName, self.params['model'],compute_inverse_map=self.compute_inverse_map)\n print(self.model)\n\n self._create_initial_maps()\n\n def set_initial_map(self,map0,map0_inverse=None):\n \"\"\"\n Sets the initial map (overwrites the default identity map)\n :param map0: initial map\n :param map0_inverse: initial inverse map\n :return: n/a\n \"\"\"\n\n self.map0_external = map0\n self.map0_inverse_external = map0_inverse\n\n if self.initialMap is not None:\n # was already set, so let's modify it\n self._create_initial_maps()\n\n def set_initial_weight_map(self,weight_map,freeze_weight=False):\n \"\"\"\n Sets the initial weight map (used by models with adaptive regularizers)\n :param weight_map: initial weight map\n :param freeze_weight: if True the weight map is frozen during the optimization\n :return: n/a\n \"\"\"\n if self.mapLowResFactor is not None:\n sampler = IS.ResampleImage()\n weight_map, _ = sampler.downsample_image_to_size(weight_map, self.spacing, self.lowResSize[2::], 1,\n zero_boundary=False)\n self.model.local_weights.data = weight_map\n if freeze_weight:\n self.model.freeze_adaptive_regularizer_param()\n\n def get_initial_map(self):\n \"\"\"\n Returns the initial map\n\n :return: initial map\n \"\"\"\n\n if self.initialMap is not None:\n return self.initialMap\n elif self.map0_external is not None:\n return self.map0_external\n else:\n return None\n\n def get_initial_inverse_map(self):\n \"\"\"\n Returns the initial inverse map\n\n :return: initial inverse map\n \"\"\"\n\n if self.initialInverseMap is not None:\n return self.initialInverseMap\n elif self.map0_inverse_external is not None:\n return self.map0_inverse_external\n else:\n return None\n\n def add_similarity_measure(self, sim_name, sim_measure):\n \"\"\"\n Adds a custom similarity measure.\n\n :param sim_name: name of the similarity measure (string)\n :param sim_measure: similarity measure itself (class object that can be instantiated)\n \"\"\"\n self.criterion.add_similarity_measure(sim_name, sim_measure)\n self.params['model']['registration_model']['similarity_measure']['type'] = (sim_name, 'was customized; needs to be explicitly instantiated, cannot be loaded')\n\n def add_model(self, model_name, model_network_class, model_loss_class, use_map, model_description='custom model'):\n \"\"\"\n Adds a custom model and its loss function\n\n :param model_name: name of the model to be added (string)\n :param model_network_class: registration model itself (class object that can be instantiated)\n :param model_loss_class: registration loss (class object that can be instantiated)\n :param use_map: True/False: specifies if model uses a map or not\n :param model_description: optional model description\n \"\"\"\n self.mf.add_model(model_name, model_network_class, model_loss_class, use_map, model_description)\n self.params['model']['registration_model']['type'] = (model_name, 'was customized; needs to be explicitly instantiated, cannot be loaded')\n\n def 
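_example_add_custom_model(self):\n \"\"\"\n Added usage sketch (not part of the original API; MyNet, MyLoss, and MySim are\n hypothetical user-defined classes): shows how the two hooks above would\n typically be combined; note that set_model has to run before\n add_similarity_measure, since the latter needs the instantiated criterion.\n\n ::\n\n opt.add_model('my_model', MyNet, MyLoss, use_map=True)\n opt.set_model('my_model')\n opt.add_similarity_measure('my_sim', MySim)\n \"\"\"\n pass\n\n def 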
set_model_state_dict(self,sd):\n \"\"\"\n Sets the state dictionary of the model\n\n :param sd: state dictionary\n :return: n/a\n \"\"\"\n\n if self.optimizer_has_been_initialized:\n self.model.load_state_dict(sd)\n self.delayed_model_state_dict_still_to_be_set = False\n else:\n self.delayed_model_state_dict_still_to_be_set = True\n self.delayed_model_state_dict = sd\n\n def get_model_state_dict(self):\n \"\"\"\n Returns the state dictionary of the model\n\n :return: state dictionary\n \"\"\"\n return self.model.state_dict()\n\n def set_model_parameters(self, p):\n \"\"\"\n Set the parameters of the registration model\n\n :param p: parameters\n \"\"\"\n\n if self.optimizer_has_been_initialized:\n if (self.useMap) and (self.mapLowResFactor is not None):\n self.model.set_registration_parameters(p, self.lowResSize, self.lowResSpacing)\n else:\n self.model.set_registration_parameters(p, self.sz, self.spacing)\n self.delayed_model_parameters_still_to_be_set = False\n else:\n self.delayed_model_parameters_still_to_be_set = True\n self.delayed_model_parameters = p\n\n def _is_vector(self,d):\n sz = d.size()\n if len(sz)==1:\n return True\n else:\n return False\n\n def _is_tensor(self,d):\n sz = d.size()\n if len(sz)>1:\n return True\n else:\n return False\n\n def _aux_do_weight_clipping_norm(self,pars,desired_norm):\n \"\"\"does weight clipping but only for conv or bias layers (assuming they are named as such); be careful with the naming here\"\"\"\n if self.weight_clipping_value > 0:\n for key in pars:\n # only do the clipping if it is a conv layer or a bias term (containment check;\n # a key may also start with 'conv' or 'bias', hence >=0)\n if key.lower().find('conv')>=0 or key.lower().find('bias')>=0:\n p = pars[key]\n if self._is_vector(p.data):\n # just normalize this vector component-by-component, norm does not matter here as these are only scalars\n p.data = p.data.clamp_(-self.weight_clipping_value, self.weight_clipping_value)\n elif self._is_tensor(p.data):\n # normalize sample-by-sample individually\n for b in range(p.data.size()[0]):\n param_norm = p.data[b, ...].norm(desired_norm)\n if param_norm > self.weight_clipping_value:\n clip_coef = self.weight_clipping_value / param_norm\n p.data[b, ...].mul_(clip_coef)\n else:\n raise ValueError('Unknown data type; I do not know how to clip this')\n\n def _do_shared_weight_clipping_pre_lsm(self):\n multi_gaussian_weights = self.params['model']['registration_model']['forward_model']['smoother'][('multi_gaussian_weights', -1, 'the used multi gaussian weights')]\n if multi_gaussian_weights==-1:\n raise ValueError('The multi-gaussian weights should have been set before')\n multi_gaussian_weights = np.array(multi_gaussian_weights)\n\n sp = self.get_shared_model_parameters()\n for key in sp:\n if key.lower().find('pre_lsm_weights') >= 0:\n p = sp[key]\n sz = p.size() # 0 dim is weight dimension\n if sz[0]!=len(multi_gaussian_weights):\n raise ValueError('Number of multi-Gaussian weights needs to be {}, but got {}'.format(sz[0],len(multi_gaussian_weights)))\n for w in range(sz[0]):\n # this is to assure that the weights are always between 0 and 1 (when using the WeightedLinearSoftmax)\n p[w,...].data.clamp_(0.0-multi_gaussian_weights[w],1.0-multi_gaussian_weights[w])\n\n def _do_individual_weight_clipping_l1(self):\n ip = self.get_individual_model_parameters()\n self._aux_do_weight_clipping_norm(pars=ip,desired_norm=1)\n\n def _do_shared_weight_clipping_l1(self):\n sp = self.get_shared_model_parameters()\n self._aux_do_weight_clipping_norm(pars=sp,desired_norm=1)
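\n\n def _example_clip_tensor_to_norm(self, t, max_norm=1.0, norm_type=2):\n \"\"\"\n Added illustrative sketch (not part of the original API): clips a single\n tensor t so that its norm_type-norm is at most max_norm, mirroring the\n per-sample logic of _aux_do_weight_clipping_norm above.\n \"\"\"\n with torch.no_grad():\n param_norm = t.norm(norm_type)\n if param_norm > max_norm:\n t.mul_(max_norm / param_norm)\n return t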
\n\n def _do_individual_weight_clipping_l2(self):\n ip = self.get_individual_model_parameters()\n self._aux_do_weight_clipping_norm(pars=ip, desired_norm=2)\n\n def _do_shared_weight_clipping_l2(self):\n sp = self.get_shared_model_parameters()\n self._aux_do_weight_clipping_norm(pars=sp, desired_norm=2)\n\n def _do_weight_clipping(self):\n \"\"\"performs weight clipping, if desired\"\"\"\n if self.weight_clipping_type is not None:\n possible_modes = ['l1', 'l2', 'l1_individual', 'l2_individual', 'l1_shared', 'l2_shared', 'pre_lsm_weights']\n if self.weight_clipping_type in possible_modes:\n if self.weight_clipping_type=='l1':\n self._do_shared_weight_clipping_l1()\n self._do_individual_weight_clipping_l1()\n elif self.weight_clipping_type=='l2':\n self._do_shared_weight_clipping_l2()\n self._do_individual_weight_clipping_l2()\n elif self.weight_clipping_type=='l1_individual':\n self._do_individual_weight_clipping_l1()\n elif self.weight_clipping_type=='l2_individual':\n self._do_individual_weight_clipping_l2()\n elif self.weight_clipping_type=='l1_shared':\n self._do_shared_weight_clipping_l1()\n elif self.weight_clipping_type=='l2_shared':\n self._do_shared_weight_clipping_l2()\n elif self.weight_clipping_type=='pre_lsm_weights':\n self._do_shared_weight_clipping_pre_lsm()\n else:\n raise ValueError('Illegal weight clipping type: {}'.format(self.weight_clipping_type))\n else:\n raise ValueError('Weight clipping needs to be: [None|l1|l2|l1_individual|l2_individual|l1_shared|l2_shared|pre_lsm_weights]')\n\n def get_model_parameters(self):\n \"\"\"\n Returns the parameters of the model\n\n :return: model parameters\n \"\"\"\n return self.model.get_registration_parameters()\n\n def set_shared_model_parameters(self,p):\n \"\"\"\n Set only the shared parameters of the model\n\n :param p: shared registration parameters as an ordered dict\n :return: n/a\n \"\"\"\n\n self.model.set_shared_registration_parameters(p)\n\n def get_shared_model_parameters_and_buffers(self):\n \"\"\"\n Returns only the model parameters that are shared between models and the associated shared buffers.\n\n :return: shared model parameters and buffers\n \"\"\"\n return self.model.get_shared_registration_parameters_and_buffers()\n\n def get_shared_model_parameters(self):\n \"\"\"\n Returns only the model parameters that are shared between models.\n\n :return: shared model parameters\n \"\"\"\n return self.model.get_shared_registration_parameters()\n\n def set_individual_model_parameters(self,p):\n \"\"\"\n Set only the individual parameters of the model\n\n :param p: individual registration parameters as an ordered dict\n :return: n/a\n \"\"\"\n\n self.model.set_individual_registration_parameters(p)\n\n def get_individual_model_parameters(self):\n \"\"\"\n Returns only the model parameters that are individual to a model (i.e., not shared).\n\n :return: individual model parameters\n \"\"\"\n return self.model.get_individual_registration_parameters()\n\n def _collect_individual_or_shared_parameters_in_list(self,pars):\n pl = []\n for p_key in pars:\n pl.append(pars[p_key])\n return pl\n\n def load_shared_state_dict(self,sd):\n \"\"\"\n Loads the shared part of a state dictionary\n :param sd: shared state dictionary\n :return: n/a\n \"\"\"\n self.model.load_shared_state_dict(sd)\n\n def shared_state_dict(self):\n \"\"\"\n Returns the shared part of a state dictionary\n :return: shared state dictionary\n \"\"\"\n return self.model.shared_state_dict()\n\n def load_individual_state_dict(self):\n raise NotImplementedError('Not yet implemented')\n\n def individual_state_dict(self):\n raise NotImplementedError('Not yet implemented')
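\n\n # Added usage sketch (an illustration only; desired_size is a hypothetical size\n # array such as [100,100,50]): in a multi-scale run, parameters computed at a\n # coarse scale are transferred to the next finer scale roughly like this:\n #\n # pars, up_spacing = opt.upsample_model_parameters(desired_size)\n # finer_scale_opt.set_model_parameters(pars)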
\n\n def upsample_model_parameters(self, desiredSize):\n \"\"\"\n Upsamples the model parameters\n\n :param desiredSize: desired size after upsampling, e.g., [100,20,50]\n :return: returns a tuple (upsampled_parameters,upsampled_spacing)\n \"\"\"\n return self.model.upsample_registration_parameters(desiredSize)\n\n def downsample_model_parameters(self, desiredSize):\n \"\"\"\n Downsamples the model parameters\n\n :param desiredSize: desired size after downsampling, e.g., [50,50,40]\n :return: returns a tuple (downsampled_parameters,downsampled_spacing)\n \"\"\"\n return self.model.downsample_registration_parameters(desiredSize)\n\n def _set_number_of_iterations_from_multi_scale(self, nrIter):\n \"\"\"\n Same as set_number_of_iterations with the exception that this is not recorded in the parameter structure since it comes from the multi-scale setting\n :param nrIter: number of iterations\n \"\"\"\n self.nrOfIterations = nrIter\n\n def set_number_of_iterations(self, nrIter):\n \"\"\"\n Set the number of iterations of the optimizer\n\n :param nrIter: number of iterations\n \"\"\"\n self.params['optimizer'][('single_scale', {}, 'single scale settings')]\n self.params['optimizer']['single_scale']['nr_of_iterations'] = (nrIter, 'number of iterations')\n\n self.nrOfIterations = nrIter\n\n def get_number_of_iterations(self):\n \"\"\"\n Returns the number of iterations of the solver\n\n :return: number of set iterations\n \"\"\"\n return self.nrOfIterations\n\n def _closure(self):\n self.optimizer_instance.zero_grad()\n # 1) Forward pass: Compute predicted y by passing x to the model\n # 2) Compute loss\n\n # first define variables that will be passed to the model and the criterion (for further use)\n\n over_scale_iter_count = self.iter_count if self.over_scale_iter_count is None else self.over_scale_iter_count + self.iter_count\n opt_variables = {'iter': self.iter_count, 'epoch': self.current_epoch, 'scale': self.n_scale,\n 'over_scale_iter_count': over_scale_iter_count}\n\n self.rec_IWarped, self.rec_phiWarped, self.rec_phiInverseWarped = model_evaluation.evaluate_model_low_level_interface(\n model=self.model,\n I_source=self.ISource,\n opt_variables=opt_variables,\n use_map=self.useMap,\n initial_map=self.initialMap,\n compute_inverse_map=self.compute_inverse_map,\n initial_inverse_map=self.initialInverseMap,\n map_low_res_factor=self.mapLowResFactor,\n sampler=self.sampler,\n low_res_spacing=self.lowResSpacing,\n spline_order=self.spline_order,\n low_res_I_source=self.lowResISource,\n low_res_initial_map=self.lowResInitialMap,\n low_res_initial_inverse_map=self.lowResInitialInverseMap,\n compute_similarity_measure_at_low_res=self.compute_similarity_measure_at_low_res)\n\n # compute the respective losses\n if self.useMap:\n if self.mapLowResFactor is not None and self.compute_similarity_measure_at_low_res:\n loss_overall_energy, sim_energy, reg_energy = self.criterion(self.lowResInitialMap, self.rec_phiWarped,\n self.lowResISource, self.lowResITarget,\n self.lowResISource,\n self.model.get_variables_to_transfer_to_loss_function(),\n opt_variables)\n else:\n loss_overall_energy,sim_energy,reg_energy = self.criterion(self.initialMap, self.rec_phiWarped, self.ISource, self.ITarget, self.lowResISource,\n self.model.get_variables_to_transfer_to_loss_function(),\n opt_variables)\n else:\n loss_overall_energy,sim_energy,reg_energy = self.criterion(self.rec_IWarped, self.ISource, self.ITarget,\n self.model.get_variables_to_transfer_to_loss_function(),\n opt_variables )\n\n # to support consensus optimization we 
have the option of adding a penalty term\n # based on shared parameters\n opt_par_loss_energy = self.compute_optimizer_parameter_loss(self.model.get_shared_registration_parameters())\n loss_overall_energy = loss_overall_energy + opt_par_loss_energy\n loss_overall_energy.backward()\n\n # do gradient clipping\n if self.clip_individual_gradient:\n current_individual_grad_norm = torch.nn.utils.clip_grad_norm_(\n self._collect_individual_or_shared_parameters_in_list(self.get_individual_model_parameters()),\n self.clip_individual_gradient_value)\n\n if self.clip_display:\n if current_individual_grad_norm>self.clip_individual_gradient_value:\n print('INFO: Individual gradient was clipped: {} -> {}'.format(current_individual_grad_norm,self.clip_individual_gradient_value))\n\n if self.clip_shared_gradient:\n current_shared_grad_norm = torch.nn.utils.clip_grad_norm_(\n self._collect_individual_or_shared_parameters_in_list(self.get_shared_model_parameters()),\n self.clip_shared_gradient_value)\n\n if self.clip_display:\n if current_shared_grad_norm > self.clip_shared_gradient_value:\n print('INFO: Shared gradient was clipped: {} -> {}'.format(current_shared_grad_norm,\n self.clip_shared_gradient_value))\n\n self.rec_custom_optimizer_output_string = self.model.get_custom_optimizer_output_string()\n self.rec_custom_optimizer_output_values = self.model.get_custom_optimizer_output_values()\n\n self.rec_energy = loss_overall_energy\n self.rec_similarityEnergy = sim_energy\n self.rec_regEnergy = reg_energy\n self.rec_opt_par_loss_energy = opt_par_loss_energy\n\n # if self.useMap:\n #\n # if self.iter_count % 1 == 0:\n # self.rec_energy, self.rec_similarityEnergy, self.rec_regEnergy = self.criterion.get_energy(\n # self.identityMap, self.rec_phiWarped, self.ISource, self.ITarget, self.lowResISource, self.model.get_variables_to_transfer_to_loss_function())\n # else:\n # if self.iter_count % 1 == 0:\n # self.rec_energy, self.rec_similarityEnergy, self.rec_regEnergy = self.criterion.get_energy(\n # self.rec_IWarped, self.ISource, self.ITarget, self.model.get_variables_to_transfer_to_loss_function())\n\n return loss_overall_energy\n\n def analysis(self, energy, similarityEnergy, regEnergy, opt_par_energy, phi_or_warped_image, custom_optimizer_output_string='', custom_optimizer_output_values=None, force_visualization=False):\n \"\"\"\n Prints out and visualizes the current result\n :param energy: total energy\n :param similarityEnergy: similarity energy\n :param regEnergy: regularization energy\n :param opt_par_energy: optimizer-parameter energy\n :param phi_or_warped_image: map (if map-based) or warped image\n :return: returns tuple: first entry True if termination tolerance was reached, otherwise returns False; second entry True if the image was visualized\n \"\"\"\n\n current_batch_size = phi_or_warped_image.size()[0]\n\n was_visualized = False\n reached_tolerance = False\n\n cur_energy = utils.t2np(energy.float())\n # energy analysis\n\n self._add_to_history('iter', self.iter_count)\n self._add_to_history('energy', cur_energy[0])\n self._add_to_history('similarity_energy', utils.t2np(similarityEnergy.float()))\n self._add_to_history('regularization_energy', utils.t2np(regEnergy.float()))\n self._add_to_history('opt_par_energy', utils.t2np(opt_par_energy.float())[0])\n\n if custom_optimizer_output_values is not None:\n for key in custom_optimizer_output_values:\n self._add_to_history(key,custom_optimizer_output_values[key])\n\n if self.last_energy is not None:\n\n # relative function tolerance: |f(x_i)-f(x_{i+1})|/(1+|f(x_{i+1})|)\n self.rel_f = abs(self.last_energy - cur_energy) / (1 + abs(cur_energy))\n self._add_to_history('relF', self.rel_f[0])
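\n\n # worked example (added note): with f(x_i)=10.0 and f(x_{i+1})=9.99 the\n # relative change is |10.0-9.99|/(1+9.99) ~= 9.1e-4; the optimization is\n # considered converged once this drops below rel_ftol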
\n\n if self.show_iteration_output:\n cprint('{iter:5d}-Tot: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} | optParE={optParE:08.4f} | relF={relF:08.4f} | {cos}'\n .format(iter=self.iter_count,\n energy=utils.get_scalar(cur_energy),\n similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float())),\n regE=utils.get_scalar(utils.t2np(regEnergy.float())),\n optParE=utils.get_scalar(utils.t2np(opt_par_energy.float())),\n relF=utils.get_scalar(self.rel_f),\n cos=custom_optimizer_output_string), 'red')\n cprint('{iter:5d}-Img: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} |'\n .format(iter=self.iter_count,\n energy=utils.get_scalar(cur_energy) / current_batch_size,\n similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float())) / current_batch_size,\n regE=utils.get_scalar(utils.t2np(regEnergy.float())) / current_batch_size), 'blue')\n\n # check if relative convergence tolerance is reached\n if self.rel_f < self.rel_ftol:\n if self.show_iteration_output:\n print('Reached relative function tolerance of ' + str(self.rel_ftol))\n reached_tolerance = True\n\n else:\n self._add_to_history('relF', None)\n if self.show_iteration_output:\n cprint('{iter:5d}-Tot: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} | optParE={optParE:08.4f} | relF= n/a | {cos}'\n .format(iter=self.iter_count,\n energy=utils.get_scalar(cur_energy),\n similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float())),\n regE=utils.get_scalar(utils.t2np(regEnergy.float())),\n optParE=utils.get_scalar(utils.t2np(opt_par_energy.float())),\n cos=custom_optimizer_output_string), 'red')\n cprint('{iter:5d}-Img: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} |'\n .format(iter=self.iter_count,\n energy=utils.get_scalar(cur_energy)/current_batch_size,\n similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float()))/current_batch_size,\n regE=utils.get_scalar(utils.t2np(regEnergy.float()))/current_batch_size),'blue')\n\n iter_count = self.iter_count\n self.last_energy = cur_energy\n\n if self.recording_step is not None:\n if iter_count % self.recording_step == 0 or iter_count == 0:\n if self.useMap:\n if self.compute_similarity_measure_at_low_res:\n I1Warped = utils.compute_warped_image_multiNC(self.lowResISource,\n phi_or_warped_image,\n self.lowResSpacing,\n self.spline_order,\n zero_boundary=False)\n lowResLWarped = None\n if self.lowResLSource is not None:\n lowResLWarped = utils.get_warped_label_map(self.lowResLSource,\n phi_or_warped_image,\n self.spacing)\n self.history['recording'].append({\n 'iter': iter_count,\n 'iS': utils.t2np(self.ISource),\n 'iT': utils.t2np(self.ITarget),\n 'iW': utils.t2np(I1Warped),\n 'iSL': utils.t2np(self.lowResLSource) if self.lowResLSource is not None else None,\n 'iTL': utils.t2np(self.lowResLTarget) if self.lowResLTarget is not None else None,\n 'iWL': utils.t2np(lowResLWarped) if lowResLWarped is not None else None,\n 'phiWarped': utils.t2np(phi_or_warped_image)\n })\n else:\n I1Warped = utils.compute_warped_image_multiNC(self.ISource,\n phi_or_warped_image,\n self.spacing,\n self.spline_order,\n zero_boundary=False)\n LWarped = None\n if self.LSource is not None and self.LTarget is not None:\n LWarped = utils.get_warped_label_map(self.LSource,\n phi_or_warped_image,\n self.spacing)\n self.history['recording'].append({\n 'iter': iter_count,\n 'iS': utils.t2np(self.ISource),\n 'iT': utils.t2np(self.ITarget),\n 'iW': utils.t2np(I1Warped),\n 'iSL': utils.t2np(self.LSource) if self.LSource is not None else None,\n 'iTL': utils.t2np(self.LTarget) if 
self.LTarget is not None else None,\n 'iWL': utils.t2np(LWarped) if LWarped is not None else None,\n 'phiWarped': utils.t2np(phi_or_warped_image)\n })\n else:\n self.history['recording'].append({\n 'iter': iter_count,\n 'iS': utils.t2np(self.ISource),\n 'iT': utils.t2np(self.ITarget),\n 'iW': utils.t2np(phi_or_warped_image)\n })\n\n if self.visualize or self.save_fig:\n visual_param = {}\n visual_param['visualize'] = self.visualize\n visual_param['save_fig'] = self.save_fig\n visual_param['save_fig_num'] = self.save_fig_num\n if self.save_fig:\n visual_param['save_fig_path'] = self.save_fig_path\n visual_param['save_fig_path_byname'] = os.path.join(self.save_fig_path, 'byname')\n visual_param['save_fig_path_byiter'] = os.path.join(self.save_fig_path, 'byiter')\n visual_param['pair_name'] = self.pair_name\n visual_param['iter'] = 'scale_'+str(self.n_scale) + '_iter_' + str(self.iter_count)\n\n if (self.visualize_step and (iter_count % self.visualize_step == 0)) or (iter_count == self.nrOfIterations-1) or force_visualization:\n was_visualized = True\n if self.useMap and self.mapLowResFactor is not None:\n vizImage, vizName = self.model.get_parameter_image_and_name_to_visualize(self.lowResISource)\n else:\n vizImage, vizName = self.model.get_parameter_image_and_name_to_visualize(self.ISource)\n\n if self.useMap:\n if self.compute_similarity_measure_at_low_res:\n I1Warped = utils.compute_warped_image_multiNC(self.lowResISource,\n phi_or_warped_image,\n self.lowResSpacing,\n self.spline_order,\n zero_boundary=False)\n lowResLWarped = utils.get_warped_label_map(self.lowResLSource,\n phi_or_warped_image,\n self.spacing)\n vizReg.show_current_images(iter=iter_count,\n iS=self.lowResISource,\n iT=self.lowResITarget,\n iW=I1Warped,\n iSL=self.lowResLSource,\n iTL=self.lowResLTarget,\n iWL=lowResLWarped,\n vizImages=vizImage,\n vizName=vizName,\n phiWarped=phi_or_warped_image,\n visual_param=visual_param)\n\n else:\n I1Warped = utils.compute_warped_image_multiNC(self.ISource,\n phi_or_warped_image,\n self.spacing,\n self.spline_order,\n zero_boundary=False)\n vizImage = vizImage if len(vizImage)>2 else None\n LWarped = None\n if self.LSource is not None and self.LTarget is not None:\n LWarped = utils.get_warped_label_map(self.LSource,\n phi_or_warped_image,\n self.spacing)\n\n vizReg.show_current_images(iter=iter_count,\n iS=self.ISource,\n iT=self.ITarget,\n iW=I1Warped,\n iSL=self.LSource,\n iTL=self.LTarget,\n iWL=LWarped,\n vizImages=vizImage,\n vizName=vizName,\n phiWarped=phi_or_warped_image,\n visual_param=visual_param)\n else:\n vizReg.show_current_images(iter=iter_count,\n iS=self.ISource,\n iT=self.ITarget,\n iW=phi_or_warped_image,\n vizImages=vizImage,\n vizName=vizName,\n phiWarped=None,\n visual_param=visual_param)\n\n return reached_tolerance, was_visualized\n\n def _debugging_saving_intermid_img(self,img=None,is_label_map=False, append=''):\n folder_path = os.path.join(self.save_fig_path,'debugging')\n folder_path = os.path.join(folder_path, self.pair_name[0])\n make_dir(folder_path)\n file_name = 'scale_'+str(self.n_scale) + '_iter_' + str(self.iter_count)+append\n file_name=file_name.replace('.','_')\n if is_label_map:\n file_name += '_label'\n path = os.path.join(folder_path,file_name+'.nii.gz')\n im_io = FIO.ImageIO()\n im_io.write(path, np.squeeze(img.detach().cpu().numpy()))\n\n # todo: write these parameter/optimizer functions also for shared parameters and all parameters\n def set_sgd_shared_model_parameters_and_optimizer_states(self, pars):\n \"\"\"\n Set the shared model 
parameters and states that may be stored by the optimizer such as the momentum.\n Expects as input what get_sgd_shared_model_parameters creates as output,\n but potentially multiple copies of it (as generated by a pyTorch dataloader). I.e., it takes in a dataloader sample.\n NOTE: currently only supports SGD\n\n :param pars: parameter list as produced by get_sgd_shared_model_parameters\n :return: n/a\n \"\"\"\n if self.optimizer_instance is None:\n raise ValueError('Optimizer not yet created')\n\n if (self._sgd_par_list is None) or (self._sgd_par_names is None):\n raise ValueError(\n 'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')\n\n if len(pars) == 0:\n print('WARNING: found no values')\n return\n\n # the optimizer (if properly initialized) already holds pointers to the model parameters and the optimizer states\n # so we can set everything in one swoop here\n\n # loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)\n # this input will represent a sample from a pytorch dataloader\n\n # wrap the parameters in a list if needed (so we can mirror the setup from get_sgd_...)\n if type(pars) == list:\n use_pars = pars\n else:\n use_pars = [pars]\n\n for p in use_pars:\n if 'is_shared' in p:\n if p['is_shared']:\n current_name = p['name']\n\n assert (torch.is_tensor(p['model_params']))\n current_model_params = p['model_params']\n\n if 'momentum_buffer' in p:\n assert (torch.is_tensor(p['momentum_buffer']))\n current_momentum_buffer = p['momentum_buffer']\n else:\n current_momentum_buffer = None\n\n # now we need to match this with the parameters and the state of the SGD optimizer\n model_par = self._sgd_name_to_model_par[current_name]\n model_par.data.copy_(current_model_params)\n\n # and now do the same with the state\n param_state = self.optimizer_instance.state[model_par]\n if 'momentum_buffer' in param_state:\n param_state['momentum_buffer'].copy_(current_momentum_buffer)\n\n def set_sgd_individual_model_parameters_and_optimizer_states(self, pars):\n \"\"\"\n Set the individual model parameters and states that may be stored by the optimizer such as the momentum.\n Expects as input what get_sgd_individual_model_parameters_and_optimizer_states creates as output,\n but potentially multiple copies of it (as generated by a pyTorch dataloader).
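 Each entry is a dictionary with keys like\n 'name', 'is_shared', 'model_params', and optionally 'momentum_buffer' (a\n description added here, inferred from the getter methods below).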
\n In other words, it takes in a dataloader sample.\n NOTE: currently only supports SGD\n\n :param pars: parameter list as produced by get_sgd_individual_model_parameters_and_optimizer_states\n :return: n/a\n \"\"\"\n if self.optimizer_instance is None:\n raise ValueError('Optimizer not yet created')\n\n if (self._sgd_par_list is None) or (self._sgd_par_names is None):\n raise ValueError(\n 'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')\n\n if len(pars) == 0:\n print('WARNING: found no values')\n return\n\n # the optimizer (if properly initialized) already holds pointers to the model parameters and the optimizer states\n # so we can set everything in one swoop here\n\n # loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)\n # this input will represent a sample from a pytorch dataloader\n\n # wrap the parameters in a list if needed (so we can mirror the setup from get_sgd_...)\n if type(pars)==list:\n use_pars = pars\n else:\n use_pars = [pars]\n\n for p in use_pars:\n if 'is_shared' in p:\n if not p['is_shared'][0]: # need to grab the first one, because the dataloader replicated these entries\n current_name = p['name'][0]\n\n assert( torch.is_tensor(p['model_params']))\n current_model_params = p['model_params']\n\n if 'momentum_buffer' in p:\n assert( torch.is_tensor(p['momentum_buffer']) )\n current_momentum_buffer = p['momentum_buffer']\n else:\n current_momentum_buffer = None\n\n # now we need to match this with the parameters and the state of the SGD optimizer\n model_par = self._sgd_name_to_model_par[current_name]\n model_par.data.copy_(current_model_params)\n\n # and now do the same with the state\n param_state = self.optimizer_instance.state[model_par]\n if 'momentum_buffer' in param_state:\n param_state['momentum_buffer'].copy_(current_momentum_buffer)\n\n def _convert_obj_with_parameters_to_obj_with_tensors(self, p):\n \"\"\"\n Converts structures that consist of lists and dictionaries with parameters to tensors\n\n :param p: parameter structure\n :return: object with parameters converted to tensors\n \"\"\"\n\n if type(p) == list:\n ret_p = []\n for e in p:\n ret_p.append(self._convert_obj_with_parameters_to_obj_with_tensors(e))\n return ret_p\n elif type(p) == dict:\n ret_p = dict()\n for key in p:\n ret_p[key] = self._convert_obj_with_parameters_to_obj_with_tensors(p[key])\n return ret_p\n elif type(p) == torch.nn.parameter.Parameter:\n return p.data\n else:\n return p\n\n def get_sgd_shared_model_parameters(self):\n \"\"\"\n Gets the model parameters that are shared.\n\n :return: list of parameter-group dictionaries holding the shared parameters\n \"\"\"\n\n if self.optimizer_instance is None:\n raise ValueError('Optimizer not yet created')\n\n if (self._sgd_par_list is None) or (self._sgd_par_names is None):\n raise ValueError(\n 'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')\n\n d = []\n\n # loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)\n for group in self.optimizer_instance.param_groups:\n\n group_dict = dict()\n group_dict['params'] = []\n\n for p in group['params']:\n current_group_params = dict()\n # let's first see if this is a shared state\n if self._sgd_par_names[p]['is_shared']:\n # keep track of the names so we can batch them and read them back in\n current_group_params.update(self._sgd_par_names[p])\n # and store the current parameter values as tensors\n current_group_params['model_params'] = self._convert_obj_with_parameters_to_obj_with_tensors(p)\n
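\n # (added note) unlike the individual variant below, no optimizer state such as\n # momentum buffers is exported for shared parameters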
\n group_dict['params'].append(current_group_params)\n\n d.append(group_dict)\n\n return d\n\n\n def get_sgd_individual_model_parameters_and_optimizer_states(self):\n \"\"\"\n Gets the individual model parameters and states that may be stored by the optimizer such as the momentum.\n NOTE: currently only supports SGD\n\n :return: list of parameter-group dictionaries holding the individual parameters and optimizer state\n \"\"\"\n if self.optimizer_instance is None:\n raise ValueError('Optimizer not yet created')\n\n if (self._sgd_par_list is None) or (self._sgd_par_names is None):\n raise ValueError(\n 'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')\n\n d = []\n\n # loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)\n for group in self.optimizer_instance.param_groups:\n\n group_dict = dict()\n group_dict['weight_decay'] = group['weight_decay']\n group_dict['momentum'] = group['momentum']\n group_dict['dampening'] = group['dampening']\n group_dict['nesterov'] = group['nesterov']\n group_dict['lr'] = group['lr']\n\n group_dict['params'] = []\n\n for p in group['params']:\n current_group_params = dict()\n # let's first see if this is a shared state\n if not self._sgd_par_names[p]['is_shared']:\n # keep track of the names so we can batch them and read them back in\n current_group_params.update(self._sgd_par_names[p])\n # now deal with the optimizer state if available\n current_group_params['model_params'] = self._convert_obj_with_parameters_to_obj_with_tensors(p)\n if group['momentum'] != 0:\n param_state = self.optimizer_instance.state[p]\n if 'momentum_buffer' in param_state:\n current_group_params['momentum_buffer'] = self._convert_obj_with_parameters_to_obj_with_tensors(param_state['momentum_buffer'])\n\n group_dict['params'].append(current_group_params)\n\n d.append(group_dict)\n\n return d\n\n def _remove_state_variables_for_individual_parameters(self,individual_pars):\n \"\"\"\n Removes the optimizer state for individual parameters.\n This is required at the beginning as we do not want to reuse the SGD momentum, for example, for an unrelated registration.\n\n :param individual_pars: individual parameters as returned by get_sgd_individual_model_parameters_and_optimizer_states\n :return: n/a\n \"\"\"\n\n if self.optimizer_instance is None:\n raise ValueError('Optimizer not yet created')\n\n if (self._sgd_par_list is None) or (self._sgd_par_names is None):\n raise ValueError(\n 'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')\n\n # loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)\n for group in self.optimizer_instance.param_groups:\n\n for p in group['params']:\n # let's first see if this is a shared state\n if not self._sgd_par_names[p]['is_shared']:\n # we want to delete the state of this one\n self.optimizer_instance.state.pop(p)\n\n\n def _create_optimizer_parameter_dictionary(self,individual_pars, shared_pars,\n settings_individual=dict(), settings_shared=dict()):\n\n par_list = []\n \"\"\"List of parameters that can directly be passed to an optimizer; different list elements define different parameter groups\"\"\"\n par_names = dict()\n \"\"\"dictionary which maps from a parameter's id (i.e., its memory address) to its description: name/is_shared\"\"\"\n # name is the name of the variable\n # is_shared keeps track of whether a parameter was declared shared (as opposed to individual, which we need for registrations)\n\n names_to_par = dict()\n \"\"\"dictionary which maps from a parameter name back to the 
parameter\"\"\"\n\n # first deal with the individual parameters\n pl_ind, par_to_name_ind = utils.get_parameter_list_and_par_to_name_dict_from_parameter_dict(individual_pars)\n #cd = {'params': pl_ind}\n cd = {'params': [p for p in pl_ind if p.requires_grad]}\n cd.update(settings_individual)\n par_list.append(cd)\n # add all the names\n for current_par, key in zip(pl_ind, par_to_name_ind):\n par_names[key] = {'name': par_to_name_ind[key], 'is_shared': False}\n names_to_par[par_to_name_ind[key]] = current_par\n\n # now deal with the shared parameters\n pl_shared, par_to_name_shared = utils.get_parameter_list_and_par_to_name_dict_from_parameter_dict(shared_pars)\n #cd = {'params': pl_shared}\n cd = {'params': [p for p in pl_shared if p.requires_grad]}\n cd.update(settings_shared)\n par_list.append(cd)\n for current_par, key in zip(pl_shared, par_to_name_shared):\n par_names[key] = {'name': par_to_name_shared[key], 'is_shared': True}\n names_to_par[par_to_name_shared[key]] = current_par\n\n return par_list, par_names, names_to_par\n\n def _write_out_shared_parameters(self, model_pars, filename):\n\n # just write out the ones that are shared\n for group in model_pars:\n if 'params' in group:\n was_shared_group = False # there can only be one\n # create lists that will hold the information for the different batches\n cur_pars = []\n\n # now iterate through the current parameter list\n for p in group['params']:\n needs_to_be_saved = True\n if 'is_shared' in p:\n if not p['is_shared']:\n needs_to_be_saved = False\n\n if needs_to_be_saved:\n # we found a shared entry\n was_shared_group = True\n cur_pars.append(p)\n\n # now we have the parameter list for one of the elements of the batch and we can write it out\n if was_shared_group: # otherwise will be overwritten by a later parameter group\n torch.save(cur_pars, filename)\n\n\n def _write_out_individual_parameters(self, model_pars, filenames):\n\n batch_size = len(filenames)\n\n # just write out the ones that are individual\n for group in model_pars:\n if 'params' in group:\n was_individual_group = False # there can only be one\n # create lists that will hold the information for the different batches\n for b in range(batch_size):\n cur_pars = []\n\n # now iterate through the current parameter list\n for p in group['params']:\n if 'is_shared' in p:\n # we found an individual entry\n if not p['is_shared']:\n was_individual_group = True\n # now go through this dictionary, extract the current batch info in it,\n # and append it to the current batch parameter list\n cur_dict = dict()\n for p_el in p:\n if p_el == 'name':\n cur_dict['name'] = p[p_el]\n elif p_el == 'is_shared':\n cur_dict['is_shared'] = p[p_el]\n else:\n # this will be a tensor so we need to extract the information for the current batch\n cur_dict[p_el] = p[p_el][b, ...]\n\n cur_pars.append(cur_dict)\n\n # now we have the parameter list for one of the elements of the batch and we can write it out\n if was_individual_group: # otherwise will be overwritten by a later parameter group\n torch.save(cur_pars, filenames[b])\n\n def _get_optimizer_instance(self):\n\n if (self.model is None) or (self.criterion is None):\n raise ValueError('Please specify a model to solve with set_model first')\n\n # first check if an optimizer was specified externally\n\n if self.optimizer is not None:\n # simply instantiate it\n if self.optimizer_name is not None:\n print('Warning: optimizer name = ' + str(self.optimizer_name) +\n ' specified, but ignored since optimizer was set explicitly')\n opt_instance = 
self.optimizer(self.model.parameters(), **self.optimizer_params)\n return opt_instance\n else:\n # select it by name\n # TODO: Check what the best way to adapt the tolerances is here; tying it to rel_ftol is not really correct\n if self.optimizer_name is None:\n raise ValueError('Need to select an optimizer')\n elif self.optimizer_name == 'lbfgs_ls':\n if self.last_successful_step_size_taken is not None:\n desired_lr = self.last_successful_step_size_taken\n else:\n desired_lr = 1.0\n max_iter = self.params['optimizer']['lbfgs'][('max_iter',1,'maximum number of iterations')]\n max_eval = self.params['optimizer']['lbfgs'][('max_eval',5,'maximum number of evaluations')]\n history_size = self.params['optimizer']['lbfgs'][('history_size',5,'Size of the optimizer history')]\n line_search_fn = self.params['optimizer']['lbfgs'][('line_search_fn','backtracking','Type of line search function')]\n\n opt_instance = CO.LBFGS_LS(self.model.parameters(),\n lr=desired_lr, max_iter=max_iter, max_eval=max_eval,\n tolerance_grad=self.rel_ftol * 10, tolerance_change=self.rel_ftol,\n history_size=history_size, line_search_fn=line_search_fn)\n return opt_instance\n elif self.optimizer_name == 'sgd':\n #if self.last_successful_step_size_taken is not None:\n # desired_lr = self.last_successful_step_size_taken\n #else:\n\n if self.default_learning_rate is not None:\n current_default_learning_rate = self.default_learning_rate\n self.params['optimizer']['sgd']['individual']['lr'] = current_default_learning_rate\n self.params['optimizer']['sgd']['shared']['lr'] = current_default_learning_rate\n\n else:\n current_default_learning_rate = 0.01\n\n desired_lr_individual = self.params['optimizer']['sgd']['individual'][('lr',current_default_learning_rate,'desired learning rate')]\n sgd_momentum_individual = self.params['optimizer']['sgd']['individual'][('momentum',0.9,'sgd momentum')]\n sgd_dampening_individual = self.params['optimizer']['sgd']['individual'][('dampening',0.0,'sgd dampening')]\n sgd_weight_decay_individual = self.params['optimizer']['sgd']['individual'][('weight_decay',0.0,'sgd weight decay')]\n sgd_nesterov_individual = self.params['optimizer']['sgd']['individual'][('nesterov',True,'use Nesterov scheme')]\n\n desired_lr_shared = self.params['optimizer']['sgd']['shared'][('lr', current_default_learning_rate, 'desired learning rate')]\n sgd_momentum_shared = self.params['optimizer']['sgd']['shared'][('momentum', 0.9, 'sgd momentum')]\n sgd_dampening_shared = self.params['optimizer']['sgd']['shared'][('dampening', 0.0, 'sgd dampening')]\n sgd_weight_decay_shared = self.params['optimizer']['sgd']['shared'][('weight_decay', 0.0, 'sgd weight decay')]\n sgd_nesterov_shared = self.params['optimizer']['sgd']['shared'][('nesterov', True, 'use Nesterov scheme')]\n\n settings_shared = {'momentum': sgd_momentum_shared,\n 'dampening': sgd_dampening_shared,\n 'weight_decay': sgd_weight_decay_shared,\n 'nesterov': sgd_nesterov_shared,\n 'lr': desired_lr_shared}\n\n settings_individual = {'momentum': sgd_momentum_individual,\n 'dampening': sgd_dampening_individual,\n 'weight_decay': sgd_weight_decay_individual,\n 'nesterov': sgd_nesterov_individual,\n 'lr': desired_lr_individual}\n\n self._sgd_par_list, self._sgd_par_names, self._sgd_name_to_model_par = self._create_optimizer_parameter_dictionary(\n self.model.get_individual_registration_parameters(),\n self.model.get_shared_registration_parameters(),\n settings_individual=settings_individual,\n settings_shared=settings_shared)\n\n opt_instance = 
torch.optim.SGD(self._sgd_par_list)\n\n return opt_instance\n elif self.optimizer_name == 'adam':\n if self.last_successful_step_size_taken is not None:\n desired_lr = self.last_successful_step_size_taken\n else:\n if self.default_learning_rate is not None:\n current_default_learning_rate = self.default_learning_rate\n self.params['optimizer']['adam']['lr'] = current_default_learning_rate\n else:\n current_default_learning_rate = 0.01\n desired_lr = self.params['optimizer']['adam'][('lr',current_default_learning_rate,'desired learning rate')]\n\n adam_betas = self.params['optimizer']['adam'][('betas',[0.9,0.999],'adam betas')]\n adam_eps = self.params['optimizer']['adam'][('eps',self.rel_ftol,'adam eps')]\n adam_weight_decay = self.params['optimizer']['adam'][('weight_decay',0.0,'adam weight decay')]\n opt_instance = torch.optim.Adam(self.model.parameters(), lr=desired_lr,\n betas=adam_betas,\n eps=adam_eps,\n weight_decay=adam_weight_decay)\n return opt_instance\n else:\n raise ValueError('Optimizer = ' + str(self.optimizer_name) + ' not yet supported')\n\n def _set_all_still_missing_parameters(self):\n\n if self.optimizer_name is None:\n self.optimizer_name = self.params['optimizer'][('name','lbfgs_ls','Optimizer (lbfgs_ls|adam|sgd)')]\n\n if self.model is None:\n model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map', \"['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'\")]\n self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False' )]\n self.set_model( model_name )\n\n if self.nrOfIterations is None: # not externally set, so this will not be a multi-scale solution\n self.params['optimizer'][('single_scale', {}, 'single scale settings')]\n self.nrOfIterations = self.params['optimizer']['single_scale'][('nr_of_iterations', 10, 'number of iterations')]\n\n # get the optimizer\n if self.optimizer_instance is None:\n self.optimizer_instance = self._get_optimizer_instance()\n\n if USE_CUDA:\n self.model = self.model.cuda()\n\n self.compute_low_res_image_if_needed()\n self.optimizer_has_been_initialized = True\n\n def set_scheduler_patience(self,patience):\n self.params['optimizer']['scheduler']['patience'] = patience\n self.scheduler_patience = patience\n\n def set_scheduler_patience_silent(self,patience):\n self.scheduler_patience = patience\n\n def get_scheduler_patience(self):\n return self.scheduler_patience\n\n def _set_use_external_scheduler(self):\n self._use_external_scheduler = True\n\n def _set_use_internal_scheduler(self):\n self._use_external_scheduler = False\n\n def _get_use_external_scheduler(self):\n return self._use_external_scheduler\n\n def _get_dictionary_to_pass_to_integrator(self):\n \"\"\"\n This is experimental to allow passing additional parameters to integrators/smoothers, etc.\n\n :return: dictionary\n \"\"\"\n\n d = dict()\n\n if self.mapLowResFactor is not None:\n d['I0'] = self.lowResISource\n d['I1'] = self.lowResITarget\n else:\n d['I0'] = self.ISource\n d['I1'] = self.ITarget\n\n return d\n\n def optimize(self):\n \"\"\"\n Do the single scale optimization\n \"\"\"\n\n self._set_all_still_missing_parameters()\n\n # in this way model parameters can be \"set\" before the optimizer has been properly initialized\n if self.delayed_model_parameters_still_to_be_set:\n print('Setting model parameters, delayed')\n self.set_model_parameters(self.delayed_model_parameters)\n\n if 
self.delayed_model_state_dict_still_to_be_set:\n print('Setting model state dict, delayed')\n self.set_model_state_dict(self.delayed_model_state_dict)\n\n # this allows passing additional parameters to the smoothers for all models and smoothers\n self.model.set_dictionary_to_pass_to_integrator(self._get_dictionary_to_pass_to_integrator())\n self.criterion.set_dictionary_to_pass_to_smoother(self._get_dictionary_to_pass_to_integrator())\n\n # optimize for a few steps\n start = time.time()\n\n self.last_energy = None\n could_not_find_successful_step = False\n\n if not self._use_external_scheduler:\n self.use_step_size_scheduler = self.params['optimizer'][('use_step_size_scheduler',True,'If set to True the step sizes are reduced if no progress is made')]\n\n if self.use_step_size_scheduler:\n self.params['optimizer'][('scheduler', {}, 'parameters for the ReduceLROnPlateau scheduler')]\n self.scheduler_verbose = self.params['optimizer']['scheduler'][\n ('verbose', True, 'if True prints out changes in learning rate')]\n self.scheduler_factor = self.params['optimizer']['scheduler'][('factor', 0.5, 'reduction factor')]\n self.scheduler_patience = self.params['optimizer']['scheduler'][\n ('patience', 10, 'how many steps without reduction before LR is changed')]\n\n if self.use_step_size_scheduler and self.scheduler is None:\n self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer_instance, 'min',\n verbose=self.scheduler_verbose,\n factor=self.scheduler_factor,\n patience=self.scheduler_patience)\n\n self.iter_count = 0\n for iter in range(self.nrOfIterations):\n\n # take a step of the optimizer\n # for p in self.optimizer_instance._params:\n # p.data = p.data.float()\n\n current_loss = self.optimizer_instance.step(self._closure)\n\n # do weight clipping if it is desired\n self._do_weight_clipping()\n\n # an external scheduler may for example be used in batch optimization\n if not self._use_external_scheduler:\n if self.use_step_size_scheduler:\n self.scheduler.step(current_loss.item())\n\n if hasattr(self.optimizer_instance,'last_step_size_taken'):\n self.last_successful_step_size_taken = self.optimizer_instance.last_step_size_taken()\n\n if self.last_successful_step_size_taken==0.0:\n print('Optimizer was not able to find a successful step. 
Stopping iterations.')\n could_not_find_successful_step = True\n if iter==0:\n print('The gradient was likely too large or the optimization started from an optimal point.')\n print('If this behavior is unexpected try adjusting the settings of the similiarity measure or allow the optimizer to try out smaller steps.')\n\n # to make sure warped images and the map is correct, call closure once more\n self._closure()\n\n if self.useMap:\n vis_arg = self.rec_phiWarped\n else:\n vis_arg = self.rec_IWarped\n\n tolerance_reached, was_visualized = self.analysis(self.rec_energy, self.rec_similarityEnergy,\n self.rec_regEnergy, self.rec_opt_par_loss_energy,\n vis_arg,\n self.rec_custom_optimizer_output_string,\n self.rec_custom_optimizer_output_values)\n\n if tolerance_reached or could_not_find_successful_step:\n if tolerance_reached:\n print('Terminating optimization, because the desired tolerance was reached.')\n\n # force the output of the last image in this case, if it has not been visualized previously\n if not was_visualized and (self.visualize or self.save_fig):\n _, _ = self.analysis(self.rec_energy, self.rec_similarityEnergy,\n self.rec_regEnergy, self.rec_opt_par_loss_energy,\n vis_arg,\n self.rec_custom_optimizer_output_string,\n self.rec_custom_optimizer_output_values,\n force_visualization=True)\n break\n\n self.iter_count = iter+1\n\n if self.show_iteration_output:\n cprint('-->Elapsed time {:.5f}[s]'.format(time.time() - start), 'green')\n\n\nclass SingleScaleBatchRegistrationOptimizer(ImageRegistrationOptimizer):\n\n def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):\n\n super(SingleScaleBatchRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n self.params[('optimizer', {}, 'optimizer settings')]\n cparams = self.params['optimizer']\n cparams[('batch_settings', {}, 'settings for the batch or optimizer')]\n cparams = cparams['batch_settings']\n\n self.batch_size = cparams[('batch_size',2,'how many images per batch (if set larger or equal to the number of images, it will be processed as one batch')]\n \"\"\"how many images per batch\"\"\"\n\n self.shuffle = cparams[('shuffle', True, 'if batches should be shuffled between epochs')]\n \"\"\"shuffle batches between epochshow many images per batch\"\"\"\n\n self.num_workers = cparams[('num_workers',0,'Number of workers to read the data. 
Set it to zero on the GPU or use >0 at your own risk.')]\n \"\"\"number of workers to read the data\"\"\"\n\n self.nr_of_epochs = cparams[('nr_of_epochs', 1,'how many epochs')]\n \"\"\"how many iterations for batch; i.e., how often to iterate over the entire dataset = epochs\"\"\"\n\n self.parameter_output_dir = cparams[('parameter_output_dir','parameters','output directory to store the shared and the individual parameters during the iterations')]\n \"\"\"output directory to store the shared and the individual parameters during the iterations\"\"\"\n\n self.individual_parameter_output_dir = os.path.join(self.parameter_output_dir,'individual')\n self.shared_parameter_output_dir = os.path.join(self.parameter_output_dir,'shared')\n\n self.start_from_previously_saved_parameters = cparams[('start_from_previously_saved_parameters',True,'If set to true checks already for the first batch of files in the output directories exist and uses them if they do.')]\n \"\"\"If true then checks if previously saved parameter files exists and load them at the beginning already\"\"\"\n\n self.individual_checkpoint_output_directory = os.path.join(self.individual_parameter_output_dir,'checkpoints')\n self.shared_checkpoint_output_directory = os.path.join(self.shared_parameter_output_dir,'checkpoints')\n\n self.checkpoint_interval = cparams[('checkpoint_interval',0,'after how many epochs, checkpoints are saved; if set to 0, checkpoint will not be saved')]\n \"\"\"after how many epochs checkpoints are saved\"\"\"\n\n self.verbose_output = cparams[('verbose_output',False,'turns on verbose output')]\n\n self.show_sample_optimizer_output = cparams[('show_sample_optimizer_output',False,'If true shows the energies during optimizaton of a sample')]\n \"\"\"Shows iterations for each sample being optimized\"\"\"\n\n self.also_eliminate_shared_state_between_samples_during_first_epoch = \\\n self.params['optimizer']['sgd'][('also_eliminate_shared_state_between_samples_during_first_epoch', False,\n 'if set to true all states are eliminated, otherwise only the individual ones')]\n\n self.use_step_size_scheduler = self.params['optimizer'][('use_step_size_scheduler', True, 'If set to True the step sizes are reduced if no progress is made')]\n self.scheduler = None\n\n if self.use_step_size_scheduler:\n self.params['optimizer'][('scheduler', {}, 'parameters for the ReduceLROnPlateau scheduler')]\n self.scheduler_verbose = self.params['optimizer']['scheduler'][\n ('verbose', True, 'if True prints out changes in learning rate')]\n self.scheduler_factor = self.params['optimizer']['scheduler'][('factor', 0.75, 'reduction factor')]\n self.scheduler_patience = self.params['optimizer']['scheduler'][\n ('patience', 5, 'how many steps without reduction before LR is changed')]\n\n self.model_name = None\n self.add_model_name = None\n self.add_model_networkClass = None\n self.add_model_lossClass = None\n self.addSimName = None\n self.addSimMeasure = None\n\n self.ssOpt = None\n\n def write_parameters_to_settings(self):\n if self.ssOpt is not None:\n self.ssOpt.write_parameters_to_settings()\n\n def add_similarity_measure(self, simName, simMeasure):\n \"\"\"\n Adds a custom similarity measure\n\n :param simName: name of the similarity measure (string)\n :param simMeasure: the similarity measure itself (an object that can be instantiated)\n \"\"\"\n self.addSimName = simName\n self.addSimMeasure = simMeasure\n\n\n def set_model(self, modelName):\n \"\"\"\n Sets the model that should be solved\n\n :param modelName: name of the model that 
should be solved (string)\n \"\"\"\n\n self.model_name = modelName\n\n def add_model(self, add_model_name, add_model_networkClass, add_model_lossClass):\n \"\"\"\n Adds a custom model to be optimized over\n\n :param add_model_name: name of the model (string)\n :param add_model_networkClass: network model itself (as an object that can be instantiated)\n :param add_model_lossClass: loss of the model (as an object that can be instantiated)\n \"\"\"\n self.add_model_name = add_model_name\n self.add_model_networkClass = add_model_networkClass\n self.add_model_lossClass = add_model_lossClass\n\n\n def get_checkpoint_dict(self):\n d = super(SingleScaleBatchRegistrationOptimizer, self).get_checkpoint_dict()\n if self.ssOpt is not None:\n d['shared_parameters'] = self.ssOpt.get_shared_model_parameters_and_buffers()\n return d\n\n def load_checkpoint_dict(self, d, load_optimizer_state=False):\n super(SingleScaleBatchRegistrationOptimizer, self).load_checkpoint_dict(d)\n if 'shared_parameters' in d:\n if self.ssOpt is not None:\n self.ssOpt.set_shared_model_parameters(d['shared_parameters'])\n else:\n raise ValueError('checkpoint does not contain: consensus_dual')\n\n def get_warped_image(self):\n \"\"\"\n Returns the warped image\n :return: the warped image\n \"\"\"\n\n p = dict()\n p['warped_images'] = []\n\n print('get_warped_image: not yet implemented')\n\n return p\n\n def get_map(self):\n \"\"\"\n Returns the deformation map\n :return: deformation map\n \"\"\"\n\n p = dict()\n p['phi'] = []\n\n print('get_map: not yet implemented')\n\n return p\n\n def get_inverse_map(self):\n \"\"\"\n Returns the inverse deformation map\n :return: deformation map\n \"\"\"\n\n p = dict()\n p['phi_inv'] = []\n\n print('get_inverse_map: not yet implemented')\n\n return p\n\n\n def get_model_parameters(self):\n \"\"\"\n Returns the parameters of the model\n\n :return: model parameters\n \"\"\"\n p = dict()\n if self.ssOpt is not None:\n p['shared_parameters'] = self.ssOpt.get_shared_model_parameters_and_buffers()\n\n return p\n\n def set_model_parameters(self, p):\n raise ValueError('Setting model parameters not yet supported by batch optimizer')\n\n def _set_all_still_missing_parameters(self):\n if self.model_name is None:\n model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map',\n \"['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'\")]\n self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False')]\n self.set_model(model_name)\n\n if self.optimizer_name is None:\n self.optimizer_name = self.params['optimizer'][('name', 'sgd', 'Optimizer (lbfgs|adam|sgd)')]\n\n self.optimizer_has_been_initialized = True\n\n def _create_single_scale_optimizer(self,batch_size):\n ssOpt = SingleScaleRegistrationOptimizer(batch_size, self.spacing, self.useMap, self.mapLowResFactor, self.params, compute_inverse_map=self.compute_inverse_map, default_learning_rate=self.default_learning_rate)\n\n if ((self.add_model_name is not None) and\n (self.add_model_networkClass is not None) and\n (self.add_model_lossClass is not None)):\n ssOpt.add_model(self.add_model_name, self.add_model_networkClass, self.add_model_lossClass)\n\n # now set the actual model we want to solve\n ssOpt.set_model(self.model_name)\n\n if (self.addSimName is not None) and (self.addSimMeasure is not None):\n ssOpt.add_similarity_measure(self.addSimName, self.addSimMeasure)\n\n if 
self.optimizer_name is not None:\n ssOpt.set_optimizer_by_name(self.optimizer_name)\n else:\n raise ValueError('Optimizers need to be specified by name of consensus optimization at the moment.')\n\n ssOpt.set_rel_ftol(self.get_rel_ftol())\n\n ssOpt.set_visualization(self.get_visualization())\n ssOpt.set_visualize_step(self.get_visualize_step())\n\n return ssOpt\n\n def _get_individual_checkpoint_filenames(self,output_directory,idx,epoch_iter):\n filenames = []\n for v in idx:\n filenames.append(os.path.join(output_directory,'checkpoint_individual_parameter_pair_{:05d}_epoch_{:05d}.pt'.format(v,epoch_iter)))\n return filenames\n\n def _get_shared_checkpoint_filename(self,output_directory,epoch_iter):\n\n filename = os.path.join(output_directory,'checkpoint_shared_parameters_epoch_{:05d}.pt'.format(epoch_iter))\n return filename\n\n def _create_all_output_directories(self):\n\n if not os.path.exists(self.parameter_output_dir):\n os.makedirs(self.parameter_output_dir)\n print('Creating directory: ' + self.parameter_output_dir)\n\n if not os.path.exists(self.individual_parameter_output_dir):\n os.makedirs(self.individual_parameter_output_dir)\n print('Creating directory: ' + self.individual_parameter_output_dir)\n\n if not os.path.exists(self.shared_parameter_output_dir):\n os.makedirs(self.shared_parameter_output_dir)\n print('Creating directory: ' + self.shared_parameter_output_dir)\n\n if not os.path.exists(self.individual_checkpoint_output_directory):\n os.makedirs(self.individual_checkpoint_output_directory)\n print('Creating directory: ' + self.individual_checkpoint_output_directory)\n\n if not os.path.exists(self.shared_checkpoint_output_directory):\n os.makedirs(self.shared_checkpoint_output_directory)\n print('Creating directory: ' + self.shared_checkpoint_output_directory)\n\n\n def _get_shared_parameter_filename(self,output_dir):\n return os.path.join(output_dir,'shared_parameters.pt')\n\n def optimize(self):\n \"\"\"\n The optimizer to optimize over batches of images\n\n :return: n/a\n \"\"\"\n\n #todo: maybe switch loading and writing individual parameters to individual states; this would assure that all states (such as running averages, etc.) are included and not only parameters\n\n if self.optimizer is not None:\n raise ValueError('Custom optimizers are currently not supported for batch optimization.\\\n Set the optimizer by name (e.g., in the json configuration) instead. Should be some form of stochastic gradient descent.')\n\n\n self._set_all_still_missing_parameters()\n self._create_all_output_directories()\n\n iter_offset = 0\n\n if torch.is_tensor(self.ISource) or torch.is_tensor(self.ITarget):\n raise ValueError('Batch optimizer expects lists of filenames as inputs for the source and target images')\n\n registration_data_set = OD.PairwiseRegistrationDataset(output_directory=self.individual_parameter_output_dir,\n source_image_filenames=self.ISource,\n target_image_filenames=self.ITarget,\n params=self.params)\n\n nr_of_datasets = len(registration_data_set)\n if nr_of_datasets<self.batch_size:\n print('INFO: nr of datasets is smaller than batch-size. 
Reducing batch size to ' + str(nr_of_datasets))\n self.batch_size=nr_of_datasets\n\n if nr_of_datasets%self.batch_size!=0:\n raise ValueError('nr_of_datasets = {}; batch_size = {}: Number of registration pairs needs to be divisible by the batch size.'.format(nr_of_datasets,self.batch_size))\n\n dataloader = DataLoader(registration_data_set, batch_size=self.batch_size,\n shuffle=self.shuffle, num_workers=self.num_workers)\n\n self.ssOpt = None\n last_batch_size = None\n\n nr_of_samples = nr_of_datasets//self.batch_size\n\n last_energy = None\n last_sim_energy = None\n last_reg_energy = None\n last_opt_energy = None\n\n shared_parameter_filename = self._get_shared_parameter_filename(self.shared_parameter_output_dir)\n\n load_individual_parameters_during_first_epoch = False\n load_shared_parameters_before_first_epoch = False\n\n if self.start_from_previously_saved_parameters:\n # check if there are files in the output_directory\n has_all_filenames = True\n for idx in range(len(self.ISource)):\n cur_filename = registration_data_set._get_parameter_filename(idx)\n if not os.path.isfile(cur_filename):\n has_all_filenames = False\n break\n\n load_individual_parameters_during_first_epoch = has_all_filenames\n load_shared_parameters_before_first_epoch = os.path.isfile(shared_parameter_filename)\n\n if load_individual_parameters_during_first_epoch:\n print('INFO: Will load the individual parameters from the previous run in directory ' + self.individual_parameter_output_dir + ' for initialization.')\n else:\n print('INFO: Will NOT load the individual parameters from the previous run in directory ' + self.individual_parameter_output_dir + ' for initialization.')\n\n if load_shared_parameters_before_first_epoch:\n print('INFO: Will load the shared parameter file ' + shared_parameter_filename + ' before computing the first epoch')\n else:\n print('INFO: Will NOT load the shared parameter file ' + shared_parameter_filename + ' before computing the first epoch')\n\n for iter_epoch in range(iter_offset,self.nr_of_epochs+iter_offset):\n if self.verbose_output:\n print('Computing epoch ' + str(iter_epoch + 1) + ' of ' + str(iter_offset+self.nr_of_epochs))\n\n cur_running_energy = 0.0\n cur_running_sim_energy = 0.0\n cur_running_reg_energy = 0.0\n cur_running_opt_energy = 0.0\n\n cur_min_energy = None\n cur_max_energy = None\n cur_min_sim_energy = None\n cur_max_sim_energy = None\n cur_min_reg_energy = None\n cur_max_reg_energy = None\n cur_min_opt_energy = None\n cur_max_opt_energy = None\n\n for i, sample in enumerate(dataloader, 0):\n\n # get the data from the dataloader\n current_source_batch = AdaptVal(sample['ISource'])\n current_target_batch = AdaptVal(sample['ITarget'])\n\n # create the optimizer\n batch_size = current_source_batch.size()\n if (batch_size != last_batch_size) and (last_batch_size is not None):\n raise ValueError('Ooops, this should not have happened.')\n\n initialize_optimizer = False\n if (batch_size != last_batch_size) or (self.ssOpt is None):\n initialize_optimizer = True\n # we need to create a new optimizer; otherwise optimizer already exists\n self.ssOpt = self._create_single_scale_optimizer(batch_size)\n\n # images need to be set before calling _set_all_still_missing_parameters\n self.ssOpt.set_source_image(current_source_batch)\n self.ssOpt.set_target_image(current_target_batch)\n self.ssOpt.set_current_epoch(iter_epoch)\n\n if initialize_optimizer:\n # to make sure we have the model initialized, force parameter installation\n self.ssOpt._set_all_still_missing_parameters()\n # 
since this is chunked-up we increase the patience\n self.ssOpt._set_use_external_scheduler()\n\n if self.show_sample_optimizer_output:\n self.ssOpt.turn_iteration_output_on()\n else:\n self.ssOpt.turn_iteration_output_off()\n\n if self.use_step_size_scheduler and self.scheduler is None:\n self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.ssOpt.optimizer_instance, 'min',\n verbose=self.scheduler_verbose,\n factor=self.scheduler_factor,\n patience=self.scheduler_patience)\n\n if load_shared_parameters_before_first_epoch:\n print('Loading the shared parameters/state.')\n self.ssOpt.load_shared_state_dict(torch.load(shared_parameter_filename))\n\n last_batch_size = batch_size\n\n if iter_epoch!=0 or load_individual_parameters_during_first_epoch: # only load the individual parameters after the first epoch\n if 'individual_parameter' in sample:\n current_individual_parameters = sample['individual_parameter']\n if current_individual_parameters is not None:\n if self.verbose_output:\n print('INFO: loading current individual optimizer state')\n self.ssOpt.set_sgd_individual_model_parameters_and_optimizer_states(current_individual_parameters)\n else:\n print('WARNING: could not find previous parameter file')\n else:\n # this is the case when optimization is run for the first time for a batch or if previous results should not be used\n # In this case we want to have a fresh start for the initial conditions\n par_file = os.path.join(self.individual_parameter_output_dir,'default_init.pt')\n if i==0:\n # this is the first time, so we store the individual parameters\n torch.save(self.ssOpt.get_individual_model_parameters(),par_file)\n else:\n # now we load them\n if self.verbose_output:\n print('INFO: forcing the initial individual parameters to default')\n self.ssOpt.set_individual_model_parameters(torch.load(par_file))\n # and we need to kill the optimizer state (to get rid of the previous momentum)\n if self.also_eliminate_shared_state_between_samples_during_first_epoch:\n if self.verbose_output:\n print('INFO: discarding the entire optimizer state')\n self.ssOpt.optimizer_instance.state = defaultdict(dict)\n else:\n if self.verbose_output:\n print('INFO: discarding current *individual* optimizer states only')\n self.ssOpt._remove_state_variables_for_individual_parameters(self.ssOpt.get_sgd_individual_model_parameters_and_optimizer_states())\n\n\n if self.visualize:\n if i == 0:\n # to avoid excessive graphical output\n self.ssOpt.turn_visualization_on()\n else:\n self.ssOpt.turn_visualization_off()\n else:\n self.ssOpt.turn_visualization_off()\n\n self.ssOpt.optimize()\n\n cur_energy,cur_sim_energy,cur_reg_energy = self.ssOpt.get_energy()\n cur_opt_energy = self.ssOpt.get_opt_par_energy()\n\n cur_running_energy += 1./nr_of_samples*cur_energy\n cur_running_sim_energy += 1./nr_of_samples*cur_sim_energy\n cur_running_reg_energy += 1./nr_of_samples*cur_reg_energy\n cur_running_opt_energy += 1./nr_of_samples*cur_opt_energy\n\n if i==0:\n cur_min_energy = cur_energy\n cur_max_energy = cur_energy\n cur_min_sim_energy = cur_sim_energy\n cur_max_sim_energy = cur_sim_energy\n cur_min_reg_energy = cur_reg_energy\n cur_max_reg_energy = cur_reg_energy\n cur_min_opt_energy = cur_opt_energy\n cur_max_opt_energy = cur_opt_energy\n else:\n cur_min_energy = min(cur_energy,cur_min_energy)\n cur_max_energy = max(cur_energy,cur_max_energy)\n cur_min_sim_energy = min(cur_sim_energy,cur_min_sim_energy)\n cur_max_sim_energy = max(cur_sim_energy,cur_max_sim_energy)\n cur_min_reg_energy = 
min(cur_reg_energy,cur_min_reg_energy)\n cur_max_reg_energy = max(cur_reg_energy,cur_max_reg_energy)\n cur_min_opt_energy = min(cur_opt_energy,cur_min_opt_energy)\n cur_max_opt_energy = max(cur_opt_energy,cur_max_opt_energy)\n\n # need to save this index by index so we can shuffle\n self.ssOpt._write_out_individual_parameters(self.ssOpt.get_sgd_individual_model_parameters_and_optimizer_states(),sample['individual_parameter_filename'])\n\n if self.checkpoint_interval>0:\n if (iter_epoch%self.checkpoint_interval==0) or (iter_epoch==self.nr_of_epochs+iter_offset-1):\n if self.verbose_output:\n print('Writing out individual checkpoint data for epoch ' + str(iter_epoch) + ' for sample ' + str(i+1) + '/' + str(nr_of_samples))\n individual_filenames = self._get_individual_checkpoint_filenames(self.individual_checkpoint_output_directory,sample['idx'],iter_epoch)\n self.ssOpt._write_out_individual_parameters(self.ssOpt.get_sgd_individual_model_parameters_and_optimizer_states(),individual_filenames)\n\n if i==nr_of_samples-1:\n if self.verbose_output:\n print('Writing out shared checkpoint data for epoch ' + str(iter_epoch))\n shared_filename = self._get_shared_checkpoint_filename(self.shared_checkpoint_output_directory,iter_epoch)\n self.ssOpt._write_out_shared_parameters(self.ssOpt.get_sgd_shared_model_parameters(),shared_filename)\n\n if self.show_sample_optimizer_output:\n if (last_energy is not None) and (last_sim_energy is not None) and (last_reg_energy is not None):\n print('\\n\\nEpoch {:05d}: Last energies : E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}], optE=[{:2.5f}]'\\\n .format(iter_epoch-1,last_energy,last_sim_energy,last_reg_energy,last_opt_energy))\n print(' / image: Last energies : E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}]' \\\n .format(last_energy/batch_size[0], last_sim_energy/batch_size[0], last_reg_energy/batch_size[0]))\n else:\n print('\\n\\n')\n\n last_energy = cur_running_energy\n last_sim_energy = cur_running_sim_energy\n last_reg_energy = cur_running_reg_energy\n last_opt_energy = cur_running_opt_energy\n\n if self.show_sample_optimizer_output:\n print('Epoch {:05d}: Current energies: E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}], optE=[{:2.5f}]'\\\n .format(iter_epoch,last_energy, last_sim_energy,last_reg_energy,last_opt_energy))\n print(' / image: Current energies: E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}]' \\\n .format(last_energy/batch_size[0], last_sim_energy/batch_size[0], last_reg_energy/batch_size[0]))\n else:\n print('Epoch {:05d}: Current energies: E={:2.5f}:[{:1.2f},{:1.2f}], simE={:2.5f}:[{:1.2f},{:1.2f}], regE={:2.5f}:[{:1.2f},{:1.2f}], optE={:1.2f}:[{:1.2f},{:1.2f}]'\\\n .format(iter_epoch, last_energy, cur_min_energy, cur_max_energy,\n last_sim_energy, cur_min_sim_energy, cur_max_sim_energy,\n last_reg_energy, cur_min_reg_energy, cur_max_reg_energy,\n last_opt_energy, cur_min_opt_energy, cur_max_opt_energy))\n print(' / image: Current energies: E={:2.5f}:[{:1.2f},{:1.2f}], simE={:2.5f}:[{:1.2f},{:1.2f}], regE={:2.5f}:[{:1.2f},{:1.2f}]' \\\n .format(last_energy/batch_size[0], cur_min_energy/batch_size[0], cur_max_energy/batch_size[0],\n last_sim_energy/batch_size[0], cur_min_sim_energy/batch_size[0], cur_max_sim_energy/batch_size[0],\n last_reg_energy/batch_size[0], cur_min_reg_energy/batch_size[0], cur_max_reg_energy/batch_size[0]))\n\n if self.show_sample_optimizer_output:\n print('\\n\\n')\n\n if self.use_step_size_scheduler:\n self.scheduler.step(last_energy)\n\n print('Writing out shared parameter/state file to ' + shared_parameter_filename )\n 
torch.save(self.ssOpt.shared_state_dict(),shared_parameter_filename)\n\n\nclass SingleScaleConsensusRegistrationOptimizer(ImageRegistrationOptimizer):\n\n def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):\n\n super(SingleScaleConsensusRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n\n self.params[('optimizer', {}, 'optimizer settings')]\n cparams = self.params['optimizer']\n cparams[('consensus_settings', {}, 'settings for the consensus optimizer')]\n cparams = cparams['consensus_settings']\n\n self.sigma = cparams[('sigma', 1.0, 'sigma/2 is multiplier for squared augmented Lagrangian penalty')]\n \"\"\"Multiplier for squared augmented Lagrangian penalty\"\"\"\n\n self.nr_of_epochs = cparams[('nr_of_epochs', 1, 'how many iterations for consensus; i.e., how often to iterate over the entire dataset')]\n \"\"\"how many iterations for consensus; i.e., how often to iterate over the entire dataset\"\"\"\n self.batch_size = cparams[('batch_size',1,'how many images per batch (if set larger or equal to the number of images, it will be processed as one batch')]\n \"\"\"how many images per batch\"\"\"\n self.save_intermediate_checkpoints = cparams[('save_intermediate_checkpoints',False,'when set to True checkpoints are retained for each batch iterations')]\n \"\"\"when set to True checkpoints are retained for each batch iterations\"\"\"\n\n self.checkpoint_output_directory = cparams[('checkpoint_output_directory','checkpoints','directory where the checkpoints will be stored')]\n \"\"\"output directory where the checkpoints will be saved\"\"\"\n\n self.save_consensus_state_checkpoints = cparams[('save_consensus_state_checkpoints',True,'saves the current consensus state; typically only the individual states are saved as checkpoints')]\n \"\"\"saves the current consensus state; typically only the individual states are saved as checkpoints\"\"\"\n\n self.continue_from_last_checkpoint = cparams[('continue_from_last_checkpoint',False,'If true then iterations are resumed from last checkpoint. Allows restarting an optimization')]\n \"\"\"allows restarting an optimization by continuing from the last checkpoint\"\"\"\n\n self.load_optimizer_state_from_checkpoint = cparams[('load_optimizer_state_from_checkpoint',True,'If set to False only the state of the model is loaded when resuming from a checkpoint')]\n \"\"\"If set to False only the state of the model is loaded when resuming from a checkpoint\"\"\"\n\n self.nr_of_batches = None\n self.nr_of_images = None\n\n self.current_consensus_state = None\n self.current_consensus_dual = None\n self.next_consensus_state = None\n self.last_shared_state = None\n\n self.model_name = None\n self.add_model_name = None\n self.add_model_networkClass = None\n self.add_model_lossClass = None\n self.addSimName = None\n self.addSimMeasure = None\n\n self.iter_offset = None\n\n self.ssOpt = None\n\n def write_parameters_to_settings(self):\n if self.ssOpt is not None:\n self.ssOpt.write_parameters_to_settings()\n\n def _consensus_penalty_loss(self,shared_model_parameters):\n \"\"\"\n This allows to define additional terms for the loss which are based on parameters that are shared\n between models (for example for the smoother). 
Can be used to define a form of consensus optimization.\n :param shared_model_parameters: parameters that have been declared shared in a model\n :return: 0 by default, otherwise the corresponding penalty\n \"\"\"\n additional_loss = MyTensor(1).zero_()\n total_number_of_parameters = 1\n for k in shared_model_parameters:\n total_number_of_parameters += shared_model_parameters[k].numel()\n additional_loss += ((shared_model_parameters[k]\\\n -self.current_consensus_state[k]\\\n -self.current_consensus_dual[k])**2).sum()\n\n\n additional_loss *= self.sigma/(2.0*total_number_of_parameters)\n\n #print('sigma=' + str(self.sigma) + '; additional loss = ' + str( additional_loss.data.cpu().numpy()))\n\n return additional_loss\n\n def _set_state_to_zero(self,state):\n # set all the individual parameters to zero\n for k in state:\n state[k].zero_()\n\n def _add_scaled_difference_to_state(self,state,model_shared_state,current_dual,scaling_factor):\n for k in state:\n state[k] += scaling_factor*(model_shared_state[k]-current_dual[k])\n\n def _create_single_scale_optimizer(self,batch_size,consensus_penalty):\n\n ssOpt = SingleScaleRegistrationOptimizer(batch_size, self.spacing, self.useMap, self.mapLowResFactor, self.params, compute_inverse_map=self.compute_inverse_map, default_learning_rate=self.default_learning_rate)\n\n if ((self.add_model_name is not None) and\n (self.add_model_networkClass is not None) and\n (self.add_model_lossClass is not None)):\n ssOpt.add_model(self.add_model_name, self.add_model_networkClass, self.add_model_lossClass)\n\n # now set the actual model we want to solve\n ssOpt.set_model(self.model_name)\n\n if (self.addSimName is not None) and (self.addSimMeasure is not None):\n ssOpt.add_similarity_measure(self.addSimName, self.addSimMeasure)\n\n # setting the optimizer\n #if self.optimizer is not None:\n # ssOpt.set_optimizer(self.optimizer)\n # ssOpt.set_optimizer_params(self.optimizer_params)\n #elif self.optimizer_name is not None:\n if self.optimizer_name is not None:\n ssOpt.set_optimizer_by_name(self.optimizer_name)\n else:\n raise ValueError('Optimizers need to be specified by name of consensus optimization at the moment.')\n\n ssOpt.set_rel_ftol(self.get_rel_ftol())\n\n ssOpt.set_visualization(self.get_visualization())\n ssOpt.set_visualize_step(self.get_visualize_step())\n\n if consensus_penalty:\n ssOpt.set_external_optimizer_parameter_loss(self._consensus_penalty_loss)\n\n return ssOpt\n\n def _initialize_consensus_variables_if_needed(self,ssOpt):\n if self.current_consensus_state is None:\n self.current_consensus_state = copy.deepcopy(ssOpt.get_shared_model_parameters())\n self._set_state_to_zero(self.current_consensus_state)\n\n if self.current_consensus_dual is None:\n self.current_consensus_dual = copy.deepcopy(self.current_consensus_state)\n self._set_state_to_zero(self.current_consensus_dual)\n\n if self.last_shared_state is None:\n self.last_shared_state = copy.deepcopy(self.current_consensus_state)\n self._set_state_to_zero(self.last_shared_state)\n\n if self.next_consensus_state is None:\n self.next_consensus_state = copy.deepcopy(self.current_consensus_dual) # also make it zero\n self._set_state_to_zero(self.next_consensus_state)\n\n def add_similarity_measure(self, simName, simMeasure):\n \"\"\"\n Adds a custom similarity measure\n\n :param simName: name of the similarity measure (string)\n :param simMeasure: the similarity measure itself (an object that can be instantiated)\n \"\"\"\n self.addSimName = simName\n self.addSimMeasure = simMeasure\n\n\n def 
set_model(self, modelName):\n \"\"\"\n Sets the model that should be solved\n\n :param modelName: name of the model that should be solved (string)\n \"\"\"\n\n self.model_name = modelName\n\n def add_model(self, add_model_name, add_model_networkClass, add_model_lossClass):\n \"\"\"\n Adds a custom model to be optimized over\n\n :param add_model_name: name of the model (string)\n :param add_model_networkClass: network model itself (as an object that can be instantiated)\n :param add_model_lossClass: loss of the model (as an object that can be instantiated)\n \"\"\"\n self.add_model_name = add_model_name\n self.add_model_networkClass = add_model_networkClass\n self.add_model_lossClass = add_model_lossClass\n\n def get_checkpoint_dict(self):\n d = super(SingleScaleConsensusRegistrationOptimizer, self).get_checkpoint_dict()\n d['consensus_dual'] = self.current_consensus_dual\n return d\n\n def load_checkpoint_dict(self, d, load_optimizer_state=False):\n super(SingleScaleConsensusRegistrationOptimizer, self).load_checkpoint_dict(d)\n if 'consensus_dual' in d:\n self.current_consensus_dual = d['consensus_dual']\n else:\n raise ValueError('checkpoint does not contain: consensus_dual')\n\n def _custom_load_checkpoint(self,ssOpt,filename):\n d = torch.load(filename)\n ssOpt.load_checkpoint_dict(d)\n self.load_checkpoint_dict(d)\n\n def _custom_single_batch_load_checkpoint(self,ssOpt,filename):\n d = torch.load(filename)\n if self.load_optimizer_state_from_checkpoint:\n ssOpt.load_checkpoint_dict(d,load_optimizer_state=True)\n\n def _custom_save_checkpoint(self,ssOpt,filename):\n sd = ssOpt.get_checkpoint_dict()\n\n # todo: maybe make this optional to save storage\n sd['res'] = dict()\n sd['res']['Iw'] = ssOpt.get_warped_image()\n sd['res']['phi'] = ssOpt.get_map()\n\n cd = self.get_checkpoint_dict()\n # now merge these two dictionaries\n sd.update(cd)\n # and now save it\n torch.save(sd,filename)\n\n def _copy_state(self,state_to,state_from):\n\n for key in state_to:\n if key in state_from:\n state_to[key].copy_(state_from[key])\n else:\n raise ValueError('Could not copy key ' + key)\n\n\n def _set_all_still_missing_parameters(self):\n\n if self.model_name is None:\n model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map', \"['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'\")]\n self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False' )]\n self.set_model( model_name )\n\n if self.optimizer_name is None:\n self.optimizer_name = self.params['optimizer'][('name','lbfgs_ls','Optimizer (lbfgs|adam|sgd)')]\n\n self.optimizer_has_been_initialized = True\n\n def get_warped_image(self):\n \"\"\"\n Returns the warped image\n :return: the warped image\n \"\"\"\n\n p = dict()\n p['warped_images'] = []\n for current_batch in range(self.nr_of_batches):\n current_checkpoint_filename = self._get_checkpoint_filename(current_batch, self.iter_offset+self.nr_of_epochs - 1)\n dc = torch.load(current_checkpoint_filename)\n p['warped_images'].append(dc['res']['Iw'])\n\n return p\n\n\n def get_map(self):\n \"\"\"\n Returns the deformation map\n :return: deformation map\n \"\"\"\n\n p = dict()\n p['phi'] = []\n for current_batch in range(self.nr_of_batches):\n current_checkpoint_filename = self._get_checkpoint_filename(current_batch, self.iter_offset+self.nr_of_epochs - 1)\n dc = torch.load(current_checkpoint_filename)\n 
p['phi'].append(dc['res']['phi'])\n\n return p\n\n def get_model_parameters(self):\n \"\"\"\n Returns the parameters of the model\n\n :return: model parameters\n \"\"\"\n p = dict()\n p['consensus_state'] = self.current_consensus_state\n p['registration_pars'] = []\n for current_batch in range(self.nr_of_batches):\n current_checkpoint_filename = self._get_checkpoint_filename(current_batch,self.iter_offset+self.nr_of_epochs-1)\n dc = torch.load(current_checkpoint_filename)\n d = dict()\n d['model'] = dc['model']\n d['consensus_dual'] = dc['consensus_dual']\n p['registration_pars'].append(d)\n\n return p\n\n def set_model_parameters(self, p):\n raise ValueError('Setting model parameters not yet supported by consensus optimizer')\n\n def _get_checkpoint_filename(self,batch_nr,batch_iter):\n if self.save_intermediate_checkpoints:\n return os.path.join(self.checkpoint_output_directory,\n \"checkpoint_batch{:05d}_iter{:05d}.pt\".format(batch_nr,batch_iter))\n else:\n return os.path.join(self.checkpoint_output_directory,\n \"checkpoint_batch{:05d}.pt\".format(batch_nr))\n\n def _get_consensus_checkpoint_filename(self,batch_iter):\n return os.path.join(self.checkpoint_output_directory,\n \"consensus_state_iter{:05d}.pt\".format(batch_iter))\n\n def _optimize_as_single_batch(self,resume_from_iter=None):\n \"\"\"\n Does optimization where everything is represented as a single batch. This is essentially like an individual\n optimization, but supports checkpointing.\n\n :param resume_from_iter: resumes computations from this iteration (assumes the corresponding checkpoint exists here)\n :return: n/a\n \"\"\"\n\n if resume_from_iter is not None:\n self.iter_offset = resume_from_iter+1\n print('Resuming from checkpoint iteration: ' + str(resume_from_iter))\n else:\n self.iter_offset = 0\n\n for iter_batch in range(self.iter_offset,self.nr_of_epochs+self.iter_offset):\n print('Computing epoch ' + str(iter_batch + 1) + ' of ' + str(self.iter_offset+self.nr_of_epochs))\n\n all_histories = []\n current_batch = 0 # there is only one batch, this one\n\n current_source_batch = self.ISource[:, ...].data\n current_target_batch = self.ITarget[:, ...].data\n current_batch_image_size = np.array(current_source_batch.size())\n\n # there is not consensus penalty here as this is technically not consensus optimization\n # todo: could ultimately replace the single scale optimizer; here used to write out checkpoints\n self.ssOpt = self._create_single_scale_optimizer(current_batch_image_size, consensus_penalty=False)\n\n # needs to be set before calling _set_all_still_missing_parameters\n self.ssOpt.set_source_image(current_source_batch)\n self.ssOpt.set_target_image(current_target_batch)\n\n # to make sure we have the model initialized, force parameter installation\n self.ssOpt._set_all_still_missing_parameters()\n\n # this loads the optimizer state and the model state, but here not the self.current_consensus_dual\n if iter_batch>0:\n previous_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch - 1)\n self._custom_single_batch_load_checkpoint(self.ssOpt, previous_checkpoint_filename)\n\n self.ssOpt.optimize()\n\n if (current_batch == self.nr_of_batches - 1) and (iter_batch == self.nr_of_epochs - 1):\n # the last time we run this\n all_histories.append(self.ssOpt.get_history())\n\n current_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch)\n self._custom_save_checkpoint(self.ssOpt, current_checkpoint_filename)\n\n self._add_to_history('batch_history', 
copy.deepcopy(all_histories))\n\n\n def _optimize_with_multiple_batches(self, resume_from_iter=None):\n \"\"\"\n Does consensus optimization over multiple batches.\n\n :param resume_from_iter: resumes computations from this iteration (assumes the corresponding checkpoint exists here)\n :return: n/a\n \"\"\"\n\n if resume_from_iter is not None:\n iter_offset = resume_from_iter+1\n print('Resuming from checkpoint iteration: ' + str(resume_from_iter))\n else:\n iter_offset = 0\n\n for iter_batch in range(iter_offset,self.nr_of_epochs+iter_offset):\n print('Computing epoch ' + str(iter_batch+1) + ' of ' + str(iter_offset+self.nr_of_epochs))\n\n next_consensus_initialized = False\n all_histories = []\n\n for current_batch in range(self.nr_of_batches):\n\n from_image = current_batch*self.batch_size\n to_image = min(self.nr_of_images,(current_batch+1)*self.batch_size)\n\n nr_of_images_in_batch = to_image-from_image\n\n current_source_batch = self.ISource[from_image:to_image, ...].data\n current_target_batch = self.ITarget[from_image:to_image, ...].data\n current_batch_image_size = np.array(current_source_batch.size())\n\n print('Computing image pair batch ' + str(current_batch+1) + ' of ' + str(self.nr_of_batches) +\n ' of batch iteration ' + str(iter_batch+1) + ' of ' + str(iter_offset+self.nr_of_epochs))\n print('Image range: [' + str(from_image) + ',' + str(to_image) + ')')\n\n # create new optimizer\n if iter_batch==0:\n # do not apply the penalty the first time around\n self.ssOpt = self._create_single_scale_optimizer(current_batch_image_size,consensus_penalty=False)\n else:\n self.ssOpt = self._create_single_scale_optimizer(current_batch_image_size,consensus_penalty=True)\n\n # to make sure we have the model initialized, force parameter installation\n self.ssOpt._set_all_still_missing_parameters()\n\n if iter_batch==0:\n # in the first round just initialize the shared state with what was computed previously\n if self.last_shared_state is not None:\n self.ssOpt.set_shared_model_parameters(self.last_shared_state)\n\n self._initialize_consensus_variables_if_needed(self.ssOpt)\n\n if not next_consensus_initialized:\n self._set_state_to_zero(self.next_consensus_state)\n next_consensus_initialized = True\n\n if iter_batch==0:\n # for the first time, just set the dual to zero\n self._set_state_to_zero(self.current_consensus_dual)\n # load the last\n else:\n # this loads the optimizer state and the model state and also self.current_consensus_dual\n previous_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch-1)\n self._custom_load_checkpoint(self.ssOpt,previous_checkpoint_filename)\n\n # first update the dual variable (we do this now that we have the consensus state still\n self._add_scaled_difference_to_state(self.current_consensus_dual,\n self.ssOpt.get_shared_model_parameters(),\n self.current_consensus_state,-1.0)\n\n\n self.ssOpt.set_source_image(current_source_batch)\n self.ssOpt.set_target_image(current_target_batch)\n\n self.ssOpt.optimize()\n\n self._copy_state(self.last_shared_state,self.ssOpt.get_shared_model_parameters())\n\n if (current_batch==self.nr_of_batches-1) and (iter_batch==self.nr_of_epochs-1):\n # the last time we run this\n all_histories.append( self.ssOpt.get_history() )\n\n # update the consensus state (is done via next_consensus_state as\n # self.current_consensus_state is used as part of the optimization for all optimizations in the batch\n self._add_scaled_difference_to_state(self.next_consensus_state,\n 
self.ssOpt.get_shared_model_parameters(),\n self.current_consensus_dual,float(nr_of_images_in_batch)/float(self.nr_of_images))\n\n current_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch)\n self._custom_save_checkpoint(self.ssOpt,current_checkpoint_filename)\n\n self._add_to_history('batch_history', copy.deepcopy(all_histories))\n self._copy_state(self.current_consensus_state, self.next_consensus_state)\n\n if self.save_consensus_state_checkpoints:\n consensus_filename = self._get_consensus_checkpoint_filename(iter_batch)\n torch.save({'consensus_state':self.current_consensus_state},consensus_filename)\n\n\n def _get_checkpoint_iter_with_complete_batch(self,start_at_iter):\n\n if start_at_iter<0:\n print('Could NOT find a complete checkpoint batch.')\n return None\n\n is_complete_batch = True\n for current_batch in range(self.nr_of_batches):\n cfilename = self._get_checkpoint_filename(current_batch, start_at_iter)\n if os.path.isfile(cfilename):\n print('Checkpoint file: ' + cfilename + \" exists.\")\n else:\n print('Checkpoint file: ' + cfilename + \" does NOT exist.\")\n is_complete_batch = False\n break\n\n if is_complete_batch:\n print('Found complete batch for batch iteration ' + str(start_at_iter))\n return start_at_iter\n else:\n return self._get_checkpoint_iter_with_complete_batch(start_at_iter-1)\n\n\n def _get_last_checkpoint_iteration_from_checkpoint_files(self):\n \"\"\"\n Looks through the checkpoint files and checks which ones were the last saved ones.\n This allows for picking up the iterations after a completed or terminated optimization.\n Also checks that the same number of batches are used, otherwise an optimization cannot be resumed\n from a checkpoint.\n\n :return: last iteration performed for complete batch\n \"\"\"\n\n print('Attempting to resume optimization from checkpoint data.')\n print('Searching for existing checkpoint data ...')\n\n # first find all the computed iters\n largest_found_iter = None\n\n if self.save_intermediate_checkpoints:\n current_iter_batch = 0\n while os.path.isfile(self._get_checkpoint_filename(0,current_iter_batch)):\n print('Found checkpoint iteration: ' + str(current_iter_batch) + ' : ' + self._get_checkpoint_filename(0,current_iter_batch))\n largest_found_iter = current_iter_batch\n current_iter_batch +=1\n\n else:\n if os.path.isfile(self._get_checkpoint_filename(0,0)):\n print('Found checkpoint: ' + str(self._get_checkpoint_filename(0,0)))\n largest_found_iter = 0\n \n if largest_found_iter is None:\n print('Could not find any checkpoint data from which to resume.')\n return None\n else:\n largest_iter_with_complete_batch = self._get_checkpoint_iter_with_complete_batch(largest_found_iter)\n return largest_iter_with_complete_batch\n\n def optimize(self):\n\n \"\"\"\n This optimizer performs consensus optimization:\n\n 1) (u_i_shared,u_i_individual)^{k+1} = argmin \\sum_i f_i(u_i_shared,u_i_individual) + \\sigma/2\\|u_i_shared-u_consensus^k-z_i^k\\|^2\n 2) (u_consensus)^{k+1} = 1/n\\sum_{i=1}^n ((u_i_shared)^{k+1}-z_i^k)\n 3) z_i^{k+1} = z_i^k-((u_i_shared)^{k+1}-u_consensus_{k+1})\n\n :return: n/a\n \"\"\"\n\n if self.optimizer is not None:\n raise ValueError('Custom optimizers are currently not supported for consensus optimization.\\\n Set the optimizer by name (e.g., in the json configuration) instead.')\n\n self._set_all_still_missing_parameters()\n\n # todo: support reading images from file\n self.nr_of_images = self.ISource.size()[0]\n self.nr_of_batches = 
np.ceil(float(self.nr_of_images)/float(self.batch_size)).astype('int')\n\n if self.continue_from_last_checkpoint:\n last_checkpoint_iteration = self._get_last_checkpoint_iteration_from_checkpoint_files()\n else:\n last_checkpoint_iteration = None\n\n if self.nr_of_batches==1:\n compute_as_single_batch = True\n else:\n compute_as_single_batch = False\n\n if not os.path.exists(self.checkpoint_output_directory):\n os.makedirs(self.checkpoint_output_directory)\n\n if compute_as_single_batch:\n self._optimize_as_single_batch(resume_from_iter=last_checkpoint_iteration)\n else:\n self._optimize_with_multiple_batches(resume_from_iter=last_checkpoint_iteration)\n\n\nclass MultiScaleRegistrationOptimizer(ImageRegistrationOptimizer):\n \"\"\"\n Class to perform multi-scale optimization. Essentially puts a loop around multiple calls of the\n single scale optimizer and starts with the registration of downsampled images. When moving up\n the hierarchy, the registration parameters are upsampled from the solution at the previous lower resolution\n \"\"\"\n\n def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None ):\n super(MultiScaleRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)\n self.scaleFactors = None\n \"\"\"At what image scales optimization should be computed\"\"\"\n self.scaleIterations = None\n \"\"\"number of iterations per scale\"\"\"\n\n self.addSimName = None\n \"\"\"name of the similarity measure to be added\"\"\"\n self.addSimMeasure = None\n \"\"\"similarity measure itself that should be added\"\"\"\n self.add_model_name = None\n \"\"\"name of the model that should be added\"\"\"\n self.add_model_networkClass = None\n \"\"\"network object of the model to be added\"\"\"\n self.add_model_lossClass = None\n \"\"\"loss object of the model to be added\"\"\"\n self.model_name = None\n \"\"\"name of the model to be added (if specified by name; gets dominated by specifying an optimizer directly\"\"\"\n self.ssOpt = None\n \"\"\"Single scale optimizer\"\"\"\n self.params['optimizer'][('multi_scale', {}, 'multi scale settings')]\n\n def write_parameters_to_settings(self):\n if self.ssOpt is not None:\n self.ssOpt.write_parameters_to_settings()\n\n def add_similarity_measure(self, simName, simMeasure):\n \"\"\"\n Adds a custom similarity measure\n\n :param simName: name of the similarity measure (string)\n :param simMeasure: the similarity measure itself (an object that can be instantiated)\n \"\"\"\n self.addSimName = simName\n self.addSimMeasure = simMeasure\n\n def set_model(self, modelName):\n \"\"\"\n Set the model to be optimized over by name\n\n :param modelName: the name of the model (string)\n \"\"\"\n self.model_name = modelName\n\n def set_initial_map(self, map0, map0_inverse=None):\n \"\"\"\n Sets the initial map (overwrites the default identity map)\n :param map0: intial map\n :return: n/a\n \"\"\"\n if self.ssOpt is None:\n self.initialMap = map0\n self.initialInverseMap = map0_inverse\n\n def set_initial_weight_map(self,weight_map,freeze_weight=False):\n if self.ssOpt is None:\n self.weight_map = weight_map\n self.freeze_weight = freeze_weight\n\n def set_pair_name(self,pair_name):\n # f = lambda name: os.path.split(name)\n # get_in = lambda x: os.path.splitext(f(x)[1])[0]\n # get_fn = lambda x: f(f(x)[0])[1]\n # get_img_name = lambda x: get_fn(x)+'_'+get_in(x)\n # img_pair_name = 
[get_img_name(pair_name[0])+'_'+get_img_name(pair_name[1]) for pair_name in pair_names]\n self.pair_name = pair_name\n\n def set_save_fig_path(self, save_fig_path):\n \"\"\"\n the path of saved figures, default is the ../data/expr_name\n :param save_fig_path:\n :return:\n \"\"\"\n self.save_fig_path = os.path.join(save_fig_path, self.expr_name)\n\n\n\n def add_model(self, add_model_name, add_model_networkClass, add_model_lossClass, use_map):\n \"\"\"\n Adds a custom model to be optimized over\n\n :param add_model_name: name of the model (string)\n :param add_model_networkClass: network model itself (as an object that can be instantiated)\n :param add_model_lossClass: loss of the model (as an object that can be instantiated)\n :param use_map: if set to true, model using a map, otherwise direcly works with the image\n \"\"\"\n self.add_model_name = add_model_name\n self.add_model_networkClass = add_model_networkClass\n self.add_model_lossClass = add_model_lossClass\n self.add_model_use_map = use_map\n\n def set_scale_factors(self, scaleFactors):\n \"\"\"\n Set the scale factors for the solution. Should be in decending order, e.g., [1.0, 0.5, 0.25]\n\n :param scaleFactors: scale factors for the multi-scale solution hierarchy\n \"\"\"\n\n self.params['optimizer']['multi_scale']['scale_factors'] = (scaleFactors, 'how images are scaled')\n self.scaleFactors = scaleFactors\n\n def set_number_of_iterations_per_scale(self, scaleIterations):\n \"\"\"\n Sets the number of iterations that will be performed per scale of the multi-resolution hierarchy. E.g, [50,100,200]\n\n :param scaleIterations: number of iterations per scale (array)\n \"\"\"\n\n self.params['optimizer']['multi_scale']['scale_iterations'] = (scaleIterations, 'number of iterations per scale')\n self.scaleIterations = scaleIterations\n\n def _get_desired_size_from_scale(self, origSz, scale):\n\n osz = np.array(list(origSz))\n dsz = osz\n dsz[2::] = (np.round( scale*osz[2::] )).astype('int')\n\n return dsz\n\n def get_energy(self):\n \"\"\"\n Returns the current energy\n :return: Returns a tuple (energy, similarity energy, regularization energy)\n \"\"\"\n if self.ssOpt is not None:\n return self.ssOpt.get_energy()\n else:\n return None\n\n def get_warped_image(self):\n \"\"\"\n Returns the warped image\n :return: the warped image\n \"\"\"\n if self.ssOpt is not None:\n return self.ssOpt.get_warped_image()\n else:\n return None\n\n\n def get_warped_label(self):\n \"\"\"\n Returns the warped label\n :return: the warped label\n \"\"\"\n if self.ssOpt is not None:\n return self.ssOpt.get_warped_label()\n else:\n return None\n\n def get_map(self):\n \"\"\"\n Returns the deformation map\n :return: deformation map\n \"\"\"\n if self.ssOpt is not None:\n return self.ssOpt.get_map()\n else:\n return None\n\n def get_inverse_map(self):\n \"\"\"\n Returns the inverse deformation map\n :return: deformation map\n \"\"\"\n if self.ssOpt is not None:\n return self.ssOpt.get_inverse_map()\n else:\n return None\n\n def get_model_parameters(self):\n \"\"\"\n Returns the parameters of the model\n\n :return: model parameters\n \"\"\"\n if self.ssOpt is not None:\n return self.ssOpt.get_model_parameters()\n else:\n return None\n\n def set_model_parameters(self,p):\n raise ValueError('Setting model parameters not yet supported for multi-scale optimizer')\n\n def _set_all_still_missing_parameters(self):\n\n self.scaleFactors = self.params['optimizer']['multi_scale'][('scale_factors', [1.0, 0.5, 0.25], 'how images are scaled')]\n self.scaleIterations = 
self.params['optimizer']['multi_scale'][('scale_iterations', [10, 20, 20], 'number of iterations per scale')]\n\n if (self.optimizer is None) and (self.optimizer_name is None):\n self.optimizer_name = self.params['optimizer'][('name','lbfgs_ls','Optimizer (lbfgs|adam|sgd)')]\n\n if self.model_name is None:\n model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map', "['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'")]\n self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False' )]\n self.set_model( model_name )\n\n self.optimizer_has_been_initialized = True\n\n def optimize(self):\n \"\"\"\n Perform the actual multi-scale optimization\n \"\"\"\n self._set_all_still_missing_parameters()\n\n if (self.ISource is None) or (self.ITarget is None):\n raise ValueError('Source and target images need to be set first')\n\n upsampledParameters = None\n upsampledParameterSpacing = None\n upsampledSz = None\n lastSuccessfulStepSizeTaken = None\n\n nrOfScales = len(self.scaleFactors)\n\n # check that we have the right number of iteration parameters\n assert (nrOfScales == len(self.scaleIterations))\n\n print('Performing multiscale optimization with scales: ' + str(self.scaleFactors))\n\n # go from lowest to highest scale\n reverseScales = self.scaleFactors[-1::-1]\n reverseIterations = self.scaleIterations[-1::-1]\n over_scale_iter_count = 0\n\n for en_scale in enumerate(reverseScales):\n print('Optimizing for scale = ' + str(en_scale[1]))\n\n # create the images\n currentScaleFactor = en_scale[1]\n currentScaleNumber = en_scale[0]\n\n currentDesiredSz = self._get_desired_size_from_scale(self.ISource.size(), currentScaleFactor)\n\n currentNrOfIterations = reverseIterations[currentScaleNumber]\n\n ISourceC, spacingC = self.sampler.downsample_image_to_size(self.ISource, self.spacing, currentDesiredSz[2::],self.spline_order)\n ITargetC, spacingC = self.sampler.downsample_image_to_size(self.ITarget, self.spacing, currentDesiredSz[2::],self.spline_order)\n LSourceC = None\n LTargetC = None\n if self.LSource is not None and self.LTarget is not None:\n LSourceC, spacingC = self.sampler.downsample_image_to_size(self.LSource, self.spacing, currentDesiredSz[2::],0)\n LTargetC, spacingC = self.sampler.downsample_image_to_size(self.LTarget, self.spacing, currentDesiredSz[2::],0)\n initialMap = None\n initialInverseMap = None\n weight_map=None\n if self.initialMap is not None:\n initialMap,_ = self.sampler.downsample_image_to_size(self.initialMap,self.spacing, currentDesiredSz[2::],1,zero_boundary=False)\n if self.initialInverseMap is not None:\n initialInverseMap,_ = self.sampler.downsample_image_to_size(self.initialInverseMap,self.spacing, currentDesiredSz[2::],1,zero_boundary=False)\n if self.weight_map is not None:\n weight_map,_ =self.sampler.downsample_image_to_size(self.weight_map,self.spacing, currentDesiredSz[2::],1,zero_boundary=False)\n szC = np.array(ISourceC.size()) # this assumes the BxCxXxYxZ format\n mapLowResFactor = None if currentScaleNumber==0 else self.mapLowResFactor\n self.ssOpt = SingleScaleRegistrationOptimizer(szC, spacingC, self.useMap, mapLowResFactor, self.params, compute_inverse_map=self.compute_inverse_map,default_learning_rate=self.default_learning_rate)\n print('Setting learning rate to ' + str( lastSuccessfulStepSizeTaken ))\n self.ssOpt.set_last_successful_step_size_taken( lastSuccessfulStepSizeTaken )\n self.ssOpt.set_initial_map(initialMap,initialInverseMap)\n\n if ((self.add_model_name is not None) and\n (self.add_model_networkClass is not None) and\n (self.add_model_lossClass is not None)):\n self.ssOpt.add_model(self.add_model_name, self.add_model_networkClass, self.add_model_lossClass, use_map=self.add_model_use_map)\n\n # now set the actual model we want to solve\n self.ssOpt.set_model(self.model_name)\n if weight_map is not None:\n self.ssOpt.set_initial_weight_map(weight_map,self.freeze_weight)\n\n\n if (self.addSimName is not None) and (self.addSimMeasure is not None):\n self.ssOpt.add_similarity_measure(self.addSimName, self.addSimMeasure)\n\n # setting the optimizer\n if self.optimizer is not None:\n self.ssOpt.set_optimizer(self.optimizer)\n self.ssOpt.set_optimizer_params(self.optimizer_params)\n elif self.optimizer_name is not None:\n self.ssOpt.set_optimizer_by_name(self.optimizer_name)\n\n self.ssOpt.set_rel_ftol(self.get_rel_ftol())\n\n self.ssOpt.set_visualization(self.get_visualization())\n self.ssOpt.set_visualize_step(self.get_visualize_step())\n self.ssOpt.set_n_scale(en_scale[1])\n self.ssOpt.set_over_scale_iter_count(over_scale_iter_count)\n\n if self.get_save_fig():\n self.ssOpt.set_expr_name(self.get_expr_name())\n self.ssOpt.set_save_fig(self.get_save_fig())\n self.ssOpt.set_save_fig_path(self.get_save_fig_path())\n self.ssOpt.set_save_fig_num(self.get_save_fig_num())\n self.ssOpt.set_pair_name(self.get_pair_name())\n self.ssOpt.set_n_scale(en_scale[1])\n self.ssOpt.set_source_label(self.get_source_label())\n self.ssOpt.set_target_label(self.get_target_label())\n\n\n self.ssOpt.set_source_image(ISourceC)\n self.ssOpt.set_target_image(ITargetC)\n self.ssOpt.set_multi_scale_info(self.ISource,self.ITarget,self.spacing,self.LSource,self.LTarget)\n if self.LSource is not None and self.LTarget is not None:\n self.ssOpt.set_source_label(LSourceC)\n self.ssOpt.set_target_label(LTargetC)\n\n if upsampledParameters is not None:\n # check that the upsampled parameters are consistent with the downsampled images\n spacingError = False\n expectedSpacing = None\n\n if mapLowResFactor is not None:\n expectedSpacing = utils._get_low_res_spacing_from_spacing(spacingC, szC, upsampledSz)\n # the spacing of the upsampled parameters will be different\n if not (abs(expectedSpacing - upsampledParameterSpacing) < 0.000001).all():\n spacingError = True\n elif not (abs(spacingC - upsampledParameterSpacing) < 0.000001).all():\n expectedSpacing = spacingC\n spacingError = True\n\n if spacingError:\n print(expectedSpacing)\n print(upsampledParameterSpacing)\n raise ValueError('Upsampled parameters and downsampled images are of inconsistent dimension')\n\n # now that everything is fine, we can use the upsampled parameters\n print('Explicitly setting the optimization parameters')\n self.ssOpt.set_model_parameters(upsampledParameters)\n\n # do the actual optimization\n print('Optimizing for at most ' + str(currentNrOfIterations) + ' iterations')\n self.ssOpt._set_number_of_iterations_from_multi_scale(currentNrOfIterations)\n self.ssOpt.optimize()\n\n self._add_to_history('scale_nr',currentScaleNumber)\n self._add_to_history('scale_factor',currentScaleFactor)\n self._add_to_history('ss_history',self.ssOpt.get_history())\n\n lastSuccessfulStepSizeTaken = self.ssOpt.get_last_successful_step_size_taken()\n over_scale_iter_count += currentNrOfIterations\n\n # if we are not at the very last scale, then upsample the parameters\n if currentScaleNumber != nrOfScales - 1:\n # we need to revert the downsampling to the next higher level\n scaleTo = reverseScales[currentScaleNumber + 1]\n upsampledSz = self._get_desired_size_from_scale(self.ISource.size(), scaleTo)\n print('Before')\n print(upsampledSz)\n if self.useMap:\n if self.mapLowResFactor is not None:\n # parameters are upsampled differently here, because they are computed at low res\n upsampledSz = utils._get_low_res_size_from_size(upsampledSz,self.mapLowResFactor)\n print(self.mapLowResFactor)\n print('After')\n print(upsampledSz)\n upsampledParameters, upsampledParameterSpacing = self.ssOpt.upsample_model_parameters(upsampledSz[2::])\n"
] | [
[
"torch.empty_like",
"numpy.min",
"torch.sum",
"torch.zeros_like",
"numpy.histogram",
"numpy.sum"
],
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.load",
"torch.utils.data.DataLoader",
"torch.is_tensor",
"torch.from_numpy",
"numpy.round",
"torch.optim.SGD",
"numpy.array",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bekirduran/AI_Puzzle_Solver | [
"7e8c007802d1e4596dd09edd97bafeb7a4ff7f61"
] | [
"com/puzzlesolver/cross_over.py"
] | [
"import numpy as np\n\n\n# This class generating new list item given first of list item row and second of list item row\nclass Crossover:\n\n @staticmethod\n def crossover(best):\n row_begin_index = 0\n row_half = 2\n\n cross_list = []\n for i in range(len(best) - 1):\n first_part1 = best[i][row_begin_index:row_half, :]\n first_part2 = best[i + 1][row_half:, :]\n\n cross_list.append(np.concatenate((first_part1, first_part2)))\n\n second_part1 = best[i][row_half:, :]\n second_part2 = best[i + 1][row_begin_index:row_half, :]\n\n cross_list.append(np.concatenate((second_part2, second_part1)))\n return cross_list\n"
] | [
[
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
huonw/tensorflow | [
"85f47254af7cc230a4a031998dffe770b7edbb9d",
"85f47254af7cc230a4a031998dffe770b7edbb9d",
"85f47254af7cc230a4a031998dffe770b7edbb9d"
] | [
"tensorflow/python/compiler/tensorrt/test/trt_mode_test.py",
"tensorflow/python/keras/engine/training_dataset_test.py",
"tensorflow/python/keras/engine/base_layer_utils.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model script to test TF-TensorRT integration.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom unittest import SkipTest # pylint: disable=g-importing-member\n\nfrom tensorflow.compiler.tf2tensorrt.wrap_py_utils import get_linked_tensorrt_version\nfrom tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass TrtModeTestBase(trt_test.TfTrtIntegrationTestBase):\n \"\"\"Test squeeze on batch dim and some unary operations in TF-TRT.\"\"\"\n\n def GraphFn(self, x1):\n q = math_ops.abs(x1)\n q = q + 1.0\n q = q * 3.0\n q = array_ops.squeeze(q, 0)\n q = math_ops.abs(q)\n q = q + 5.0\n return array_ops.identity(q, name=\"output_0\")\n\n def GetParams(self):\n \"\"\"The input has 1 as a first dimension, which is removed by the squeeze.\n\n op in the graph.\n\n In explicit batch mode, TensorRT can convert the whole graph. In this mode\n it is possible to manipulate the batch dimension using the squeeze op.\n\n In implicit batch mode TensorRT cannot convert the whole graph. 
We are not\n allowed to manipulate (squeeze) the first dimension in implicit batch mode.\n Therefore the graph will be converted using multiple segments.\n \"\"\"\n return self.BuildParams(self.GraphFn, dtypes.float32, [[1, 12, 5]],\n [[12, 5]])\n\n def GetConversionParams(self, run_params, implicit_batch=False):\n \"\"\"Return a TrtConversionParams for test.\"\"\"\n\n conversion_params = super(TrtModeTestBase,\n self).GetConversionParams(run_params)\n rewriter_config = self.GetTrtRewriterConfig(\n run_params=run_params,\n conversion_params=conversion_params,\n use_implicit_batch=implicit_batch)\n return conversion_params._replace(rewriter_config_template=rewriter_config)\n\n @classmethod\n def setUpClass(cls):\n if cls is TrtModeTestBase:\n raise SkipTest(\"TrtModeTestBase defines base class for other test.\")\n super(TrtModeTestBase, cls).setUpClass()\n\n\nclass ImplicitBatchTest(TrtModeTestBase):\n\n def GetConversionParams(self, run_params):\n \"\"\"Return a TrtConversionParams for test using implicit batch mdoe.\"\"\"\n return super(ImplicitBatchTest, self).GetConversionParams(run_params, True)\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Check that the expected engine is built.\n\n Args:\n run_params: the run parameters.\n\n Returns:\n the expected engines to build.\n\n The squeeze op is not converted by TensorRT in implicit batch mode.\n Because of this we have two TRTEngineOp in the graphs: one for the\n subgraph before 'squeeze(q,0)', and another one for the rest of the ops\n after the 'squeeze(q,0)'.\n \"\"\"\n return [\"TRTEngineOp_0\", \"TRTEngineOp_1\"]\n\n\nclass ExplicitBatchTest(TrtModeTestBase):\n\n def GetParams(self):\n \"\"\"We specify input/output masks with static (known) shapes.\"\"\"\n return self.BuildParamsWithMask(\n self.GraphFn,\n dtypes.float32, [[1, 12, 5]], [[12, 5]],\n input_mask=[[True, True, True]],\n output_mask=[[True, True]])\n\n def GetConversionParams(self, run_params):\n \"\"\"Return a TrtConversionParams for test that enables explicit batch.\"\"\"\n return super(ExplicitBatchTest, self).GetConversionParams(run_params, False)\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Check that the expected engine is built.\n\n Args:\n run_params: the run parameters.\n\n Returns:\n the expected engines to build.\n\n In explicit batch mode the whole graph is converted using a single engine.\n \"\"\"\n return [\"TRTEngineOp_0\"]\n\n def ShouldRunTest(self, run_params):\n # Only run for TRT 6 and above.\n ver = get_linked_tensorrt_version()\n return ver[0] >= 6 and (not run_params.use_calibration)\n\n\nclass DynamicShapesTest(TrtModeTestBase):\n \"\"\"Test with dynamic input shapes.\n\n DynamicShapesTest is different from ExplicitBatchTest in that it uses input\n and output masks to change the input and output shapes to unknown shapes.\n \"\"\"\n\n def GetParams(self):\n \"\"\"We specify input/output mask with dynamic (unknown) shapes.\"\"\"\n return self.BuildParamsWithMask(\n self.GraphFn,\n dtypes.float32, [[1, 12, 5]], [[12, 5]],\n input_mask=[[False, False, False]],\n output_mask=[[False, False]])\n\n def GetConversionParams(self, run_params):\n \"\"\"Return a TrtConversionParams for test that enables explicit batch.\"\"\"\n return super(DynamicShapesTest, self).GetConversionParams(run_params, False)\n\n def ExpectedEnginesToBuild(self, run_params):\n \"\"\"Return the expected engines to build.\"\"\"\n return [\"TRTEngineOp_0\"]\n\n def ShouldRunTest(self, run_params):\n # Only run for TRT 6 and above.\n ver = 
get_linked_tensorrt_version()\n return ver[0] >= 6 and (not run_params.use_calibration)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for training routines.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport sys\n\nimport numpy as np\nimport six\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.data.experimental.ops import cardinality\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import callbacks\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import metrics as metrics_module\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging as logging\n\n\nclass BatchCounterCallback(callbacks.Callback):\n\n def __init__(self):\n self.batch_begin_count = 0\n self.batch_end_count = 0\n\n def on_batch_begin(self, *args, **kwargs):\n self.batch_begin_count += 1\n\n def on_batch_end(self, *args, **kwargs):\n self.batch_end_count += 1\n\n\nclass TestTrainingWithDataset(keras_parameterized.TestCase):\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_calling_model_on_same_dataset(self):\n model = testing_utils.get_small_mlp(1, 4, input_dim=3)\n optimizer = 'rmsprop'\n loss = 'mse'\n metrics = ['mae']\n model.compile(\n optimizer,\n loss,\n metrics=metrics,\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n inputs = np.zeros((10, 3), np.float32)\n targets = np.zeros((10, 4), np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10)\n\n # Call fit with validation data\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,\n validation_data=dataset, validation_steps=2)\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,\n validation_data=dataset, validation_steps=2)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_training_and_eval_methods_on_dataset(self):\n model = testing_utils.get_small_mlp(1, 4, input_dim=3)\n optimizer = 'rmsprop'\n loss = 'mse'\n metrics = ['mae', metrics_module.CategoricalAccuracy()]\n model.compile(\n optimizer,\n loss,\n metrics=metrics,\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n inputs = np.zeros((10, 3), np.float32)\n targets = np.zeros((10, 4), np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat() # Infinite dataset.\n dataset = dataset.batch(10)\n\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)\n model.evaluate(dataset, 
steps=2, verbose=1)\n model.predict(dataset, steps=2)\n\n # Test with validation data\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,\n validation_data=dataset, validation_steps=2)\n\n # Test with validation split\n with self.assertRaises(ValueError):\n model.fit(dataset,\n epochs=1, steps_per_epoch=2, verbose=0,\n validation_split=0.5, validation_steps=2)\n\n # Test with sample weight.\n sample_weight = np.random.random((10,))\n with self.assertRaisesRegexp(\n ValueError, r'`sample_weight` argument is not supported .+dataset'):\n model.fit(\n dataset,\n epochs=1,\n steps_per_epoch=2,\n verbose=0,\n sample_weight=sample_weight)\n\n with self.assertRaisesRegexp(\n ValueError, '(you should not specify a target)|'\n '(`y` argument is not supported when using dataset as input.)'):\n model.fit(dataset, dataset,\n epochs=1, steps_per_epoch=2, verbose=0)\n\n # With an infinite dataset, `steps_per_epoch`/`steps` argument is required.\n with self.assertRaises(ValueError):\n model.fit(dataset, epochs=1, verbose=0)\n with self.assertRaises(ValueError):\n model.evaluate(dataset, verbose=0)\n with self.assertRaises(ValueError):\n model.predict(dataset, verbose=0)\n\n @keras_parameterized.run_with_all_model_types(exclude_models='sequential')\n @keras_parameterized.run_all_keras_modes\n def test_training_and_eval_methods_on_multi_input_output_dataset(self):\n input_a = keras.layers.Input(shape=(3,), name='input_1')\n input_b = keras.layers.Input(shape=(3,), name='input_2')\n dense = keras.layers.Dense(4, name='dense')\n dropout = keras.layers.Dropout(0.5, name='dropout')\n branch_a = [input_a, dense]\n branch_b = [input_b, dense, dropout]\n\n model = testing_utils.get_multi_io_model(branch_a, branch_b)\n model.compile(\n optimizer='rmsprop',\n loss='mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n input_a_np = np.random.random((10, 3)).astype(dtype=np.float32)\n input_b_np = np.random.random((10, 3)).astype(dtype=np.float32)\n output_d_np = np.random.random((10, 4)).astype(dtype=np.float32)\n output_e_np = np.random.random((10, 4)).astype(dtype=np.float32)\n\n # Test with tuples\n dataset_tuple = dataset_ops.Dataset.from_tensor_slices((\n (input_a_np, input_b_np), (output_d_np, output_e_np)))\n dataset_tuple = dataset_tuple.repeat(100)\n dataset_tuple = dataset_tuple.batch(10)\n\n model.fit(dataset_tuple, epochs=1, steps_per_epoch=2, verbose=1)\n model.evaluate(dataset_tuple, steps=2, verbose=1)\n\n # Test with dict\n input_dict = {'input_1': input_a_np, 'input_2': input_b_np}\n if testing_utils.get_model_type() == 'subclass':\n output_dict = {'output_1': output_d_np, 'output_2': output_e_np}\n else:\n output_dict = {'dense': output_d_np, 'dropout': output_e_np}\n\n dataset_dict = dataset_ops.Dataset.from_tensor_slices((\n input_dict, output_dict))\n dataset_dict = dataset_dict.repeat(100)\n dataset_dict = dataset_dict.batch(10)\n\n model.fit(dataset_dict, epochs=1, steps_per_epoch=2, verbose=1)\n model.evaluate(dataset_dict, steps=2, verbose=1)\n\n predict_dataset_dict = dataset_ops.Dataset.from_tensor_slices(\n input_dict)\n predict_dataset_dict = predict_dataset_dict.repeat(100)\n predict_dataset_dict = predict_dataset_dict.batch(10)\n model.predict(predict_dataset_dict, steps=1)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_dataset_with_sample_weights(self):\n model = testing_utils.get_small_mlp(1, 4, input_dim=3)\n optimizer = 'rmsprop'\n loss = 
'mse'\n metrics = ['mae', metrics_module.CategoricalAccuracy()]\n model.compile(\n optimizer,\n loss,\n metrics=metrics,\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n inputs = np.zeros((10, 3), np.float32)\n targets = np.zeros((10, 4), np.float32)\n sample_weights = np.ones((10), np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets,\n sample_weights))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10)\n\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)\n model.evaluate(dataset, steps=2, verbose=1)\n model.predict(dataset, steps=2)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_dataset_with_sample_weights_correctness(self):\n x = keras.layers.Input(shape=(1,), name='input')\n y = keras.layers.Dense(\n 1, kernel_initializer='ones', bias_initializer='zeros', name='dense')(x)\n model = keras.Model(x, y)\n optimizer = 'rmsprop'\n loss = 'mse'\n model.compile(optimizer, loss)\n inputs = np.array([[0], [1], [2], [3]], np.float32)\n targets = np.array([[2], [4], [6], [8]], np.float32)\n sample_weights = np.array([0.25, 0.5, 0.75, 1], np.float32)\n ds = dataset_ops.Dataset.from_tensor_slices((inputs, targets,\n sample_weights)).batch(2)\n result = model.evaluate(ds, verbose=1)\n # The per sample loss is multipled by the corresponding sample weight. The\n # average of these weighted losses is the return value of the `evaluate`\n # call. For example, in the test above the average weighted loss is\n # calculated in the following manner:\n # ((2-0)^2) * 0.25 + ((4-1)^2) * 0.5 + ((6-2)^2 * 0.75) + ((8-3)^2 * 1)\n # equals 42.5 / 4 = 10.625\n self.assertEqual(result, 10.625)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_dataset_with_sparse_labels(self):\n model = testing_utils.get_small_mlp(1, 4, input_dim=3)\n optimizer = 'rmsprop'\n model.compile(\n optimizer,\n loss='sparse_categorical_crossentropy',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n inputs = np.zeros((10, 3), dtype=np.float32)\n targets = np.random.randint(0, 4, size=10, dtype=np.int32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10)\n\n model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)\n\n @keras_parameterized.run_all_keras_modes\n def test_dataset_fit_correctness(self):\n class SumLayer(keras.layers.Layer):\n\n def build(self, _):\n self.w = self.add_weight('w', ())\n\n def call(self, inputs):\n return keras.backend.sum(inputs, axis=1, keepdims=True) + self.w * 0\n\n model = keras.Sequential([SumLayer(input_shape=(2,))])\n model.compile(\n 'rmsprop',\n loss='mae',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n inputs = np.zeros((40, 2), dtype=np.float32)\n inputs[10:20, :] = 2\n inputs[20:30, :] = 1\n inputs[30:, :] = 4\n targets = np.zeros((40, 1), dtype=np.float32)\n\n # Test correctness with `steps_per_epoch`.\n train_dataset = dataset_ops.Dataset.from_tensor_slices(\n (inputs, targets)).batch(10)\n val_dataset = dataset_ops.Dataset.from_tensor_slices(\n (inputs, targets)).batch(10)\n history = model.fit(train_dataset,\n epochs=2, steps_per_epoch=2, verbose=1,\n validation_data=val_dataset, validation_steps=2)\n 
self.assertAllClose(history.history['loss'],\n [inputs[:20].sum() / 20, inputs[20:].sum() / 20])\n # The validation dataset will be reset at the end of each validation run.\n self.assertAllClose(history.history['val_loss'],\n [inputs[:20].sum() / 20, inputs[:20].sum() / 20])\n\n # Test correctness with dataset reset.\n train_dataset = dataset_ops.Dataset.from_tensor_slices(\n (inputs, targets)).batch(10)\n val_dataset = dataset_ops.Dataset.from_tensor_slices(\n (inputs, targets)).batch(10)\n history = model.fit(train_dataset,\n epochs=2, verbose=1, validation_data=val_dataset)\n self.assertAllClose(\n history.history['loss'],\n [inputs.sum() / 40, inputs.sum() / 40])\n self.assertAllClose(\n history.history['val_loss'],\n [inputs.sum() / 40, inputs.sum() / 40])\n\n def test_dataset_input_shape_validation(self):\n with ops.get_default_graph().as_default(), self.cached_session():\n model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)\n model.compile(optimizer='rmsprop', loss='mse')\n\n # User forgets to batch the dataset\n inputs = np.zeros((10, 3))\n targets = np.zeros((10, 4))\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat(100)\n\n with self.assertRaisesRegexp(\n ValueError,\n r'expected (.*?) to have shape \\(3,\\) but got array with shape \\(1,\\)'\n ):\n model.train_on_batch(dataset)\n\n # Wrong input shape\n inputs = np.zeros((10, 5))\n targets = np.zeros((10, 4))\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10)\n\n with self.assertRaisesRegexp(ValueError,\n r'expected (.*?) to have shape \\(3,\\)'):\n model.train_on_batch(dataset)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_finite_dataset_known_cardinality_no_steps_arg(self):\n model = testing_utils.get_small_mlp(1, 4, input_dim=3)\n model.compile(\n 'rmsprop',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n inputs = np.zeros((100, 3), dtype=np.float32)\n targets = np.random.randint(0, 4, size=100, dtype=np.int32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.batch(10)\n\n batch_counter = BatchCounterCallback()\n history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])\n\n self.assertLen(history.history['loss'], 2)\n self.assertEqual(batch_counter.batch_end_count, 20)\n model.evaluate(dataset)\n out = model.predict(dataset)\n self.assertEqual(out.shape[0], 100)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_finite_dataset_unknown_cardinality_no_steps_arg(self):\n model = testing_utils.get_small_mlp(1, 4, input_dim=3)\n model.compile(\n 'rmsprop',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n inputs = np.zeros((100, 3), dtype=np.float32)\n targets = np.random.randint(0, 4, size=100, dtype=np.int32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.filter(lambda x, y: True).batch(10)\n self.assertEqual(keras.backend.get_value(cardinality.cardinality(dataset)),\n cardinality.UNKNOWN)\n\n batch_counter = BatchCounterCallback()\n history = model.fit(dataset, epochs=2, verbose=1, callbacks=[batch_counter])\n\n self.assertLen(history.history['loss'], 2)\n self.assertEqual(batch_counter.batch_end_count, 20)\n 
model.evaluate(dataset)\n out = model.predict(dataset)\n self.assertEqual(out.shape[0], 100)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_finite_dataset_unknown_cardinality_no_step_with_train_and_val(self):\n\n class CaptureStdout(object):\n\n def __enter__(self):\n self._stdout = sys.stdout\n string_io = six.StringIO()\n sys.stdout = string_io\n self._stringio = string_io\n return self\n\n def __exit__(self, *args):\n self.output = self._stringio.getvalue()\n sys.stdout = self._stdout\n\n model = testing_utils.get_small_mlp(1, 4, input_dim=3)\n model.compile(\n 'rmsprop',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n inputs = np.zeros((100, 3), dtype=np.float32)\n targets = np.random.randint(0, 4, size=100, dtype=np.int32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.filter(lambda x, y: True).batch(10)\n self.assertEqual(\n keras.backend.get_value(cardinality.cardinality(dataset)),\n cardinality.UNKNOWN)\n\n batch_counter = BatchCounterCallback()\n with CaptureStdout() as capture:\n history = model.fit(\n dataset,\n epochs=2,\n callbacks=[batch_counter],\n validation_data=dataset.take(3))\n\n lines = capture.output.splitlines()\n\n self.assertIn('10/10', lines[-1])\n\n self.assertLen(history.history['loss'], 2)\n self.assertEqual(batch_counter.batch_begin_count, 21)\n self.assertEqual(batch_counter.batch_end_count, 20)\n model.evaluate(dataset)\n out = model.predict(dataset)\n self.assertEqual(out.shape[0], 100)\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_finite_dataset_unknown_cardinality_out_of_data(self):\n model = testing_utils.get_small_mlp(1, 4, input_dim=3)\n model.compile(\n 'rmsprop',\n 'mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n inputs = np.zeros((100, 3), dtype=np.float32)\n targets = np.random.randint(0, 4, size=100, dtype=np.int32)\n dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))\n dataset = dataset.filter(lambda x, y: True).batch(10)\n self.assertEqual(\n keras.backend.get_value(cardinality.cardinality(dataset)),\n cardinality.UNKNOWN)\n\n batch_counter = BatchCounterCallback()\n with test.mock.patch.object(logging, 'warning') as mock_log:\n # steps_per_epoch (200) is greater than the dataset size (100). As this is\n # unexpected, training will stop and not make it to the second epoch.\n history = model.fit(\n dataset,\n epochs=2,\n verbose=1,\n callbacks=[batch_counter],\n steps_per_epoch=200)\n self.assertIn(\n 'ran out of data; interrupting training.', str(mock_log.call_args))\n self.assertIn(\n 'can generate at least '\n '`steps_per_epoch * epochs` batches (in this case, 400 batches). 
'\n 'You may need to use the repeat() function when '\n 'building your dataset.', str(mock_log.call_args))\n\n self.assertLen(history.history['loss'], 1)\n self.assertEqual(batch_counter.batch_end_count, 10)\n model.evaluate(dataset)\n out = model.predict(dataset)\n self.assertEqual(out.shape[0], 100)\n\n @keras_parameterized.run_all_keras_modes\n def test_with_external_loss(self):\n inp = keras.Input(shape=(4,), name='inp1')\n out = keras.layers.Dense(2)(inp)\n model = keras.Model(inp, out)\n model.add_loss(math_ops.reduce_mean(out))\n model.compile('rmsprop')\n x = np.ones((10, 4))\n\n # dataset contains only features, no labels.\n dataset = dataset_ops.Dataset.from_tensor_slices(x).repeat(10).batch(10)\n model.fit(dataset)\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n def test_train_eval_with_steps(self):\n # See b/142880049 for more details.\n inp = keras.Input(shape=(4,), name='inp1')\n out = keras.layers.Dense(2)(inp)\n model = keras.Model(inp, out)\n model.compile(\n 'rmsprop', loss='mse',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n inputs = np.zeros((100, 4), dtype=np.float32)\n targets = np.random.randint(0, 2, size=100, dtype=np.int32)\n training_ds = dataset_ops.Dataset.from_tensor_slices(\n (inputs, targets)).repeat().batch(10)\n\n # Create eval dataset with generator, so that dataset won't contain the\n # overall size metadata. Without eval_steps, we expect to run through all\n # the data in this dataset every epoch.\n def gen():\n for _ in range(100):\n yield (np.zeros(4, dtype=np.float32),\n np.random.randint(0, 2, size=1, dtype=np.int32))\n eval_ds = dataset_ops.Dataset.from_generator(\n generator=gen,\n output_types=('float64', 'int32'),\n output_shapes=([4], [1])).batch(100)\n batch_counter = BatchCounterCallback()\n\n model.fit(\n training_ds,\n steps_per_epoch=10,\n epochs=10,\n validation_data=eval_ds,\n callbacks=[batch_counter]\n )\n\n # Expect 10 batch from training per epoch.\n self.assertEqual(batch_counter.batch_end_count, 100)\n\n\nclass TestMetricsWithDatasets(keras_parameterized.TestCase):\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n def test_metrics_correctness_with_dataset(self):\n layers = [\n keras.layers.Dense(8, activation='relu', input_dim=4,\n kernel_initializer='ones'),\n keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')\n ]\n\n model = testing_utils.get_model_from_layers(layers, (4,))\n\n model.compile(\n loss='binary_crossentropy',\n metrics=['accuracy', metrics_module.BinaryAccuracy()],\n optimizer='rmsprop',\n run_eagerly=testing_utils.should_run_eagerly(),\n experimental_run_tf_function=testing_utils.should_run_tf_function())\n\n np.random.seed(123)\n x = np.random.randint(10, size=(100, 4)).astype(np.float32)\n y = np.random.randint(2, size=(100, 1)).astype(np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((x, y))\n dataset = dataset.batch(10)\n outs = model.evaluate(dataset, steps=10)\n self.assertEqual(np.around(outs[1], decimals=1), 0.5)\n self.assertEqual(np.around(outs[2], decimals=1), 0.5)\n\n y = np.zeros((100, 1), dtype=np.float32)\n dataset = dataset_ops.Dataset.from_tensor_slices((x, y))\n dataset = dataset.repeat(100)\n dataset = dataset.batch(10)\n outs = model.evaluate(dataset, steps=10)\n self.assertEqual(outs[1], 0.)\n self.assertEqual(outs[2], 0.)\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains private utilities used mainly by the base Layer class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport threading\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import control_flow_v2_func_graphs\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import init_ops_v2\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.training.tracking import base as tracking\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_contextlib\n\n_call_context = threading.local()\n\n\ndef create_mean_metric(value, name=None):\n # import keras will import base_layer and then this module, and metric relies\n # on base_layer, which result into a cyclic dependency.\n from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top\n metric_obj = metrics_module.Mean(name=name, dtype=value.dtype)\n return metric_obj, metric_obj(value)\n\n\ndef make_variable(name,\n shape=None,\n dtype=dtypes.float32,\n initializer=None,\n trainable=None,\n caching_device=None,\n validate_shape=True,\n constraint=None,\n use_resource=None,\n collections=None,\n synchronization=tf_variables.VariableSynchronization.AUTO,\n aggregation=tf_variables.VariableAggregation.NONE,\n partitioner=None): # pylint: disable=unused-argument\n \"\"\"Temporary util to create a variable (relies on `variable_scope.variable`).\n\n Some reuse-related technicalities prevent us from using\n `variable_scope.get_variable()` directly, so we use a subcomponent\n that has fewer constraints (`variable_scope.variable()`).\n\n In the longer term, it seems like a similar \"default variable creator\" method\n should exist in `Trackable` instead. When this happens, we can get\n rid of this temporary solution.\n\n TODO(fchollet): remove this method when no longer needed.\n\n Arguments:\n name: Variable name.\n shape: Variable shape.\n dtype: The type of the variable. Defaults to `self.dtype` or `float32`.\n initializer: Initializer instance (callable).\n trainable: Whether the variable should be part of the layer's\n \"trainable_variables\" (e.g. variables, biases)\n or \"non_trainable_variables\" (e.g. 
BatchNorm mean, stddev).\n Note, if the current variable scope is marked as non-trainable\n then this parameter is ignored and any added variables are also\n marked as non-trainable. `trainable` defaults to `True` unless\n `synchronization` is set to `ON_READ`.\n caching_device: Passed to `tf.Variable`.\n validate_shape: Passed to `tf.Variable`.\n constraint: Constraint instance (callable).\n use_resource: Whether to use a `ResourceVariable`.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses\n when to synchronize. If `synchronization` is set to `ON_READ`,\n `trainable` must not be set to `True`.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n partitioner: Not handled at this time.\n\n Returns:\n Variable instance.\n \"\"\"\n initializing_from_value = False\n if initializer is not None and not callable(initializer):\n initializing_from_value = True\n\n if initializing_from_value:\n init_val = initializer\n variable_dtype = None\n else:\n # Instantiate initializer if provided initializer is a type object.\n if isinstance(\n initializer,\n (type(init_ops.Initializer), type(init_ops_v2.Initializer))):\n initializer = initializer()\n init_val = lambda: initializer(shape, dtype=dtype)\n variable_dtype = dtype.base_dtype\n if use_resource is None:\n use_resource = True\n\n # TODO(apassos,rohanj) figure out how to remove collections from here so we\n # can remove the V1.\n variable_shape = tensor_shape.TensorShape(shape)\n return tf_variables.VariableV1(\n initial_value=init_val,\n name=name,\n trainable=trainable,\n caching_device=caching_device,\n dtype=variable_dtype,\n validate_shape=validate_shape,\n constraint=constraint,\n use_resource=use_resource,\n collections=collections,\n synchronization=synchronization,\n aggregation=aggregation,\n shape=variable_shape if variable_shape else None)\n\n\ndef collect_previous_mask(input_tensors):\n \"\"\"Retrieves the output mask(s) of the previous node.\n\n Arguments:\n input_tensors: An arbitrary structure of Tensors.\n\n Returns:\n A mask tensor or list of mask tensors.\n \"\"\"\n\n def _collect_previous_mask(x):\n return getattr(x, '_keras_mask', None)\n\n return nest.map_structure(_collect_previous_mask, input_tensors)\n\n\ndef have_all_keras_metadata(tensors):\n return all(hasattr(x, '_keras_history') for x in nest.flatten(tensors))\n\n\ndef generate_placeholders_from_shape(shape):\n return array_ops.placeholder(shape=shape, dtype=backend.floatx())\n\n\ndef create_keras_history(tensors):\n \"\"\"Wraps TensorFlow Operations for compatibility with the Functional API.\n\n This method checks to see if a Tensor in `tensors` is missing Keras metadata\n and has its origin in a Keras `Input` Layer. 
If so, this method will replace\n the raw TensorFlow Operations that created this tensor with\n `TensorFlowOpLayer` instances that create identical operations.\n\n Any Tensors not originating from a Keras `Input` Layer will be treated as\n constants when constructing `TensorFlowOpLayer` instances.\n\n Arguments:\n tensors: A structure of Tensors, some of which come from raw TensorFlow\n operations and need to have Keras metadata assigned to them.\n\n Returns:\n keras_tensors: The Tensors found that came from a Keras Layer.\n \"\"\"\n _, created_layers = _create_keras_history_helper(tensors, set(), [])\n return created_layers\n\n\ndef _create_keras_history_helper(tensors, processed_ops, created_layers):\n \"\"\"Helper method for `create_keras_history`.\n\n Arguments:\n tensors: A structure of Tensors for which to create Keras metadata.\n processed_ops: Set. TensorFlow operations that have already been wrapped in\n `TensorFlowOpLayer` instances.\n created_layers: List. The `TensorFlowOpLayer` instances created.\n\n Returns:\n Tuple. First element is the updated set of TensorFlow Operations that\n have been wrapped in `TensorFlowOpLayer` instances. Second element is\n a list of the `TensorFlowOpLayer` instances created.\n \"\"\"\n # Import of `base_layer` needed in order to create `TensorFlowOpLayer`.\n # Cannot be imported at top because of circular dependencies.\n # TODO(omalleyt): Resolve circular dependency.\n from tensorflow.python.keras.engine import base_layer # pylint: disable=g-import-not-at-top\n tensor_list = nest.flatten(tensors)\n for tensor in tensor_list:\n if getattr(tensor, '_keras_history', None) is not None:\n continue\n op = tensor.op # The Op that created this Tensor.\n if op not in processed_ops:\n if op.type.startswith('Sparse'):\n lambda_example = \"\"\"\n weights_mult = lambda x: tf.sparse.sparse_dense_matmul(x, weights)\n output = tf.keras.layers.Lambda(weights_mult)(input)\n \"\"\"\n raise ValueError(\n 'Sparse ops are not supported with functional models with built-in '\n 'layer wrapping. Please wrap the sparse ops in a Lambda layer like'\n ': \\n{lambda_example}\\n'.format(lambda_example=lambda_example))\n\n # Recursively set `_keras_history`.\n op_inputs = list(op.inputs)\n constants = {}\n layer_inputs = []\n for i, op_input in enumerate(op_inputs):\n if uses_keras_history(op_input):\n layer_inputs.append(op_input)\n else:\n # Treat any value not originating from a `keras.Input` as\n # a constant. Variables cannot be supported.\n ds_with_session = (\n distribution_strategy_context.in_cross_replica_context() and\n not ops.executing_eagerly_outside_functions())\n using_xla = control_flow_util.GraphOrParentsInXlaContext(\n ops.get_default_graph())\n if ds_with_session or using_xla:\n # In Legacy Graph mode, evaluating here makes Session be\n # configured improperly. 
The downside of this is that saving\n # via `get_config` breaks, but SavedModel still works.\n constants[i] = op_input\n else:\n with ops.init_scope():\n constants[i] = backend.function([], op_input)([])\n layer_inputs = unnest_if_single_tensor(layer_inputs)\n processed_ops, created_layers = _create_keras_history_helper(\n layer_inputs, processed_ops, created_layers)\n name = op.name\n node_def = op.node_def.SerializeToString()\n op_layer = base_layer.TensorFlowOpLayer(\n node_def, constants=constants, name=name)\n created_layers.append(op_layer)\n op_layer._add_inbound_node( # pylint: disable=protected-access\n layer_inputs, op.outputs)\n processed_ops.update([op])\n return processed_ops, created_layers\n\n\ndef unnest_if_single_tensor(input_tensors):\n # Preserve compatibility with older configs\n flat_input_tensors = nest.flatten(input_tensors)\n # If this is a single element but not a dict, unwrap. If this is a dict,\n # assume the first layer expects a dict (as is the case with a\n # DenseFeatures layer); pass through.\n if not isinstance(input_tensors, dict) and len(flat_input_tensors) == 1:\n input_tensors = flat_input_tensors[0]\n return input_tensors\n\n\ndef needs_keras_history(tensors, ignore_call_context=False):\n \"\"\"Check if any Tensors need to be wrapped in TensorFlowOpLayers.\n\n This will never return True inside a sublayer, because sublayers\n do not need to create Keras History. Otherwise, this returns True\n if one or more of `tensors` originates from a `keras.Input` and\n does not have `_keras_history` set.\n\n Arguments:\n tensors: An arbitrary nested structure of Tensors.\n ignore_call_context: Whether to ignore the check of if currently\n outside of a `call` context. This is `True` when creating\n KerasHistory inside `Node`, where we always know that Tensors\n are being used with the Functional API.\n\n Returns:\n Bool, whether at least one Tensor needs to be wrapped.\n \"\"\"\n input_tensors = nest.flatten(tensors)\n if call_context().in_call and not ignore_call_context:\n return False\n if all(\n getattr(tensor, '_keras_history', None) is not None\n for tensor in input_tensors):\n # KerasHistory already set.\n return False\n return uses_keras_history(tensors)\n\n\ndef is_in_keras_graph():\n \"\"\"Returns if currently executing inside of a Keras graph.\"\"\"\n return call_context().in_keras_graph\n\n\ndef is_in_eager_or_tf_function():\n \"\"\"Returns if in eager mode or inside of a tf.function.\"\"\"\n return context.executing_eagerly() or is_in_tf_function()\n\n\ndef is_in_tf_function():\n \"\"\"Returns if inside of a tf.function.\"\"\"\n # Check if running in V1 graph mode.\n if not ops.executing_eagerly_outside_functions():\n return False\n if not ops.inside_function():\n return False\n # Check if inside Keras FuncGraph.\n if is_in_keras_graph():\n return False\n # Check for a v1 `wrap_function` FuncGraph.\n graph = ops.get_default_graph()\n if (getattr(graph, 'name', False) and\n graph.name.startswith('wrapped_function')):\n return False\n return True\n\n\ndef uses_keras_history(tensors):\n \"\"\"Check if at least one Tensor originates from a `keras.Input`.\n\n This is `True` if at least one Tensor has its origin in a `keras.Input`.\n Any Tensor that originates from a `keras.Input` will have a dependency\n Tensor with a `_keras_history` attribute attached. 
Tensors that have\n already been checked to not originate from a `keras.Input`\n are marked as `_keras_history_checked`.\n\n Arguments:\n tensors: An arbitrary nested structure of Tensors.\n\n Returns:\n Bool, whether at least one Tensor originates from a `keras.Input`.\n \"\"\"\n checked_tensors = set()\n tensors_to_check = nest.flatten(tensors)\n\n while tensors_to_check:\n new_tensors_to_check = []\n for tensor in tensors_to_check:\n if id(tensor) in checked_tensors:\n continue\n\n checked_tensors.add(id(tensor))\n\n if getattr(tensor, '_keras_history_checked', None) is not None:\n continue\n if getattr(tensor, '_keras_history', None) is not None:\n return True\n\n try:\n new_tensors_to_check.extend(tensor.op.inputs)\n except AttributeError:\n # In case `tensor` is a Variable created in an Eager context.\n pass\n\n tensors_to_check = new_tensors_to_check\n\n # Mark that these Tensors have been checked once for `_keras_history`,\n # and should not be checked again for performance reasons.\n mark_checked(tensors)\n return False\n\n\ndef mark_checked(tensors):\n \"\"\"Marks that these Tensors should not be tracked.\n\n This prevents Layers from attempting to create TensorFlowOpLayers\n for these Tensors.\n\n Arguments:\n tensors: An arbitrary structure of Tensors.\n \"\"\"\n\n def _mark_checked(tensor):\n tensor._keras_history_checked = True # pylint: disable=protected-access\n\n nest.map_structure(_mark_checked, tensors)\n\n\ndef call_context():\n \"\"\"Returns currently active `CallContext`.\"\"\"\n if getattr(_call_context, 'call_context', None) is None:\n _call_context.call_context = CallContext()\n return _call_context.call_context\n\n\nclass CallContext(object):\n \"\"\"Keeps track of properties currently inside a Layer/Model's `call`.\n\n Attributes:\n layer: The `Layer` whose `call` is currently active.\n inputs: The inputs to the currently active `Layer`.\n frozen: Whether currently executing inside a `Layer` with `trainable` set to\n `False`.\n in_call: Whether currently inside the `call` of a Layer.\n training: Whether currently executing in training or inference mode.\n in_keras_graph: Whether executing inside the Keras Graph.\n saving: Whether currently saving to SavedModel.\n \"\"\"\n\n def __init__(self):\n self.layer = None\n self.inputs = None\n self.frozen = False\n self.in_call = False\n self.training = None\n self._in_keras_graph = False\n self.saving = False\n\n @tf_contextlib.contextmanager\n def enter(self, layer, inputs, build_graph, training, saving=None):\n \"\"\"Push a Layer and its inputs and state onto the current call context.\"\"\"\n prev_layer = self.layer\n prev_inputs = self.inputs\n prev_frozen = self.frozen\n prev_in_call = self.in_call\n prev_training = self.training\n prev_in_keras_graph = self._in_keras_graph\n prev_saving = self.saving\n\n self.layer = layer\n self.inputs = inputs\n self.frozen = self.frozen or not layer.trainable\n self.in_call = True\n self.training = training\n self._in_keras_graph = (\n self._in_keras_graph or\n (build_graph and\n getattr(backend.get_graph(), 'name', None) == 'keras_graph'))\n self.saving = prev_saving if saving is None else saving\n\n try:\n yield\n finally:\n self.layer = prev_layer\n self.inputs = prev_inputs\n self.frozen = prev_frozen\n self.in_call = prev_in_call\n self.training = prev_training\n self._in_keras_graph = prev_in_keras_graph\n self.saving = prev_saving\n\n @property\n def in_keras_graph(self):\n # Returns True even if in a subgraph of the Keras graph, such as those\n # created by control flow 
ops.\n if context.executing_eagerly():\n return False\n return (self._in_keras_graph or\n getattr(backend.get_graph(), 'name', None) == 'keras_graph')\n\n\ndef training_arg_passed_to_call(argspec, args, kwargs):\n \"\"\"Returns whether a user passed the `training` argument in `__call__`.\"\"\"\n # `argspec.args` starts with ['self', 'inputs']\n full_args = dict(zip(argspec.args[2:], args))\n full_args.update(kwargs)\n return 'training' in full_args and full_args['training'] is not None\n\n\ndef autocast_context_manager(dtype):\n \"\"\"Returns a context manager to autocast AutoCastVariables.\n\n Under this context manager, AutoCastVariables will be casted to `dtype` if\n `dtype` is floating-point. Otherwise, AutoCastVariables will not be casted.\n\n Args:\n dtype: The dtype to cast AutoCastVariables to, or None.\n\n Returns:\n A context manager to automatically cast AutoCastVariables.\n \"\"\"\n if dtype and not dtypes.as_dtype(dtype).is_floating:\n dtype = None\n return ops.get_default_graph()._enable_auto_casting_variables(dtype) # pylint: disable=protected-access\n\n\ndef is_subclassed(layer):\n \"\"\"Returns True if the object is a subclassed layer or subclassed model.\"\"\"\n return (layer.__module__.find('keras.engine') == -1 and\n layer.__module__.find('keras.layers') == -1)\n\n\ndef from_saved_model(layer):\n \"\"\"Returns whether the layer is loaded from a SavedModel.\"\"\"\n return layer.__module__.find('keras.saving.saved_model') != -1\n\n\ndef check_graph_consistency(tensor=None, method='add_loss', force_raise=False):\n \"\"\"Checks that tensors passed to `add_*` method match the Keras graph.\n\n When one of the `add_*` method is called inside a V2 conditional branch,\n the underlying tensor gets created in a FuncGraph managed by control_flow_v2.\n We need to raise clear error messages in such cases.\n\n Arguments:\n tensor: Tensor to check, or `False` if it is known that an error\n should be raised.\n method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}.\n force_raise: If an error should be raised regardless of `tensor`.\n\n Raises:\n RuntimeError: In case of an out-of-graph tensor.\n \"\"\"\n if (force_raise or\n (ops.executing_eagerly_outside_functions() and\n hasattr(tensor, 'graph') and\n isinstance(tensor.graph,\n (control_flow_v2_func_graphs.CondBranchFuncGraph,\n control_flow_v2_func_graphs.WhileCondFuncGraph,\n control_flow_v2_func_graphs.WhileBodyFuncGraph)))):\n if method == 'activity_regularizer':\n bad_example = \"\"\"\n class TestModel(tf.keras.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(name='test_model')\n self.dense = tf.keras.layers.Dense(2, activity_regularizer='l2')\n\n def call(self, x, training=None):\n if training:\n return self.dense(x)\n else:\n return self.dense(x)\n \"\"\"\n correct_example = \"\"\"\n class TestModel(tf.keras.Model):\n\n def __init__(self):\n super(TestModel, self).__init__(name='test_model')\n self.dense = tf.keras.layers.Dense(2, activity_regularizer='l2')\n\n def call(self, x, training=None):\n return self.dense(x)\n \"\"\"\n raise RuntimeError(\n 'You are using a layer with `activity_regularizer` in a control flow '\n 'branch, e.g.:\\n{bad_example}\\nThis is currently not supported. '\n 'Please move your call to the layer with `activity_regularizer` out '\n 'of the control flow branch, e.g.:\\n{correct_example}\\n'\n 'You can also resolve this by marking your outer model/layer dynamic'\n ' (eager-only) by passing `dynamic=True` to the layer constructor. 
'\n 'Any kind of control flow is supported with dynamic layers. '\n 'Note that using `dynamic=True` requires you to implement static '\n 'shape inference in the `compute_output_shape(input_shape)` '\n 'method.'.format(\n bad_example=bad_example, correct_example=correct_example))\n\n if method == 'add_metric':\n bad_example = \"\"\"\n def call(self, inputs, training=None):\n if training:\n metric = compute_metric(inputs)\n self.add_metric(metric, name='my_metric', aggregation='mean')\n return inputs\n \"\"\"\n correct_example = \"\"\"\n def call(self, inputs, training=None):\n if training:\n metric = compute_metric(inputs)\n else:\n metric = 0.\n self.add_metric(metric, name='my_metric', aggregation='mean')\n return inputs\n \"\"\"\n elif method == 'add_loss':\n bad_example = \"\"\"\n def call(self, inputs, training=None):\n if training:\n loss = compute_loss(inputs)\n self.add_loss(loss)\n return inputs\n \"\"\"\n correct_example = \"\"\"\n def call(self, inputs, training=None):\n if training:\n loss = compute_loss(inputs)\n else:\n loss = 0.\n self.add_loss(loss)\n return inputs\n \"\"\"\n else:\n bad_example = \"\"\"\n def call(self, inputs, training=None):\n if training:\n self.add_update(self.w.assign_add(1))\n return inputs\n \"\"\"\n correct_example = \"\"\"\n def call(self, inputs, training=None):\n if training:\n increment = 1\n else:\n increment = 0\n self.add_update(self.w.assign_add(increment))\n return inputs\n \"\"\"\n raise RuntimeError(\n 'You are using the method `{method}` in a control flow branch '\n 'in your layer, e.g.:\\n{bad_example}\\n'\n 'This is not currently supported. '\n 'Please move your call to {method} out of the control flow branch, '\n 'e.g.:\\n{correct_example}\\n'\n 'You can also resolve this by marking your layer '\n 'as dynamic (eager-only) by passing '\n '`dynamic=True` to the layer constructor. '\n 'Any kind of control flow is supported with dynamic layers. 
'\n 'Note that using `dynamic=True` requires you '\n 'to implement static shape inference '\n 'in the `compute_output_shape(input_shape)` method.'.format(\n method=method,\n bad_example=bad_example,\n correct_example=correct_example))\n\n\ndef mark_as_return(outputs, acd):\n \"\"\"Marks `outputs` as the return values for automatic control deps.\"\"\"\n\n def _mark_as_return(tensor):\n \"\"\"Marks `tensor` as the return value for automatic control deps.\"\"\"\n if not tensor_util.is_tensor(tensor):\n return tensor\n\n # pylint: disable=protected-access\n return_tensor = acd.mark_as_return(tensor)\n if getattr(tensor, '_keras_mask', None) is not None:\n return_tensor._keras_mask = acd.mark_as_return(tensor._keras_mask)\n else:\n return_tensor._keras_mask = None\n\n # Handle TensorFlow Probability attached metadata.\n # TODO(b/132076537): Remove this once TFP uses `CompositeTensor`.\n if getattr(tensor, '_tfp_distribution', None) is not None:\n return_tensor._tfp_distribution = tensor._tfp_distribution\n\n return return_tensor\n # pylint: enable=protected-access\n\n return nest.map_structure(_mark_as_return, outputs)\n\n\ndef default(method):\n \"\"\"Decorates a method to detect overrides in subclasses.\"\"\"\n method._is_default = True # pylint: disable=protected-access\n return method\n\n\nV2_DTYPE_BEHAVIOR = None\n\n\n# These two functions are not exported because we plan on removing them in the\n# future.\ndef enable_v2_dtype_behavior():\n \"\"\"Enable the V2 dtype behavior for Keras layers.\n\n By default, the V2 dtype behavior is enabled in TensorFlow 2.\n\n When enabled, the dtype of Keras layers defaults to floatx (which is typically\n float32) instead of None. In addition, layers will automatically cast\n floating-point inputs to the layer's dtype.\n\n For example, once enabled, the following block will run a Conv2D layer\n in float32:\n\n ```python\n x = tf.ones((4, 4, 4, 4), dtype='float64')\n layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)\n print(layer.dtype) # Float32 when enabled. None when disabled.\n # When enabled, will cast inputs to the layer's dtype, which is float32. When\n # disabled, will do no casting, so the layer is done in float64.\n y = layer(x)\n ```\n\n A layer author can opt-out their layer from the automatic input casting by\n passing `autocast=False` to the base Layer's constructor. This disables the\n autocasting part of the V2 behavior for that layer, but not the defaulting to\n floatx part of the V2 behavior.\n\n When a global `tf.keras.mixed_precision.experimental.Policy` is set, the\n layer's dtype will default to the global policy instead of floatx. 
Layers\n will automatically cast inputs to the policy's compute_dtype.\n \"\"\"\n global V2_DTYPE_BEHAVIOR\n V2_DTYPE_BEHAVIOR = True\n\n\ndef disable_v2_dtype_behavior():\n \"\"\"Disables the V2 dtype behavior for Keras layers.\n\n See `enable_v2_dtype_behavior`.\n\n This function will be removed in the future.\n \"\"\"\n global V2_DTYPE_BEHAVIOR\n V2_DTYPE_BEHAVIOR = False\n\n\ndef v2_dtype_behavior_enabled():\n \"\"\"Returns True if the V2 dtype behavior is enabled.\"\"\"\n if V2_DTYPE_BEHAVIOR is None:\n return tf2.enabled()\n return V2_DTYPE_BEHAVIOR\n\n\nclass TrackableWeightHandler(object):\n \"\"\"Keras wrapper for handling tracking.Trackable object saving and restoring.\n\n This class handles Trackables in both V1 and V2 modes, ensuring that they can\n be saved and restored with the correct data and without adding additional ops\n on every save.\n\n Attributes:\n trackable: The trackable to wrap.\n num_tensors: The number of tensors that this trackable requires for saving.\n \"\"\"\n\n def __init__(self, trackable):\n if not isinstance(trackable, tracking.Trackable):\n raise ValueError('%s is not a Trackable object.' % (trackable,))\n self._trackable = trackable\n\n # TODO(b/141682913): Figure out why this is private and fix it.\n saveables = trackable._gather_saveables_for_checkpoint().values() # pylint: disable=protected-access\n if len(saveables) != 1:\n raise ValueError('Only Trackables with one Saveable are supported.')\n saveable = list(saveables)[0]\n\n if ops.executing_eagerly_outside_functions():\n # If we're in eager mode, we need to defer calling the Trackable's\n # saveable() callable until data export time.\n # However, it is safe to call the saveable as many times as we want, so\n # we will call it now to figure out how many tensors this Trackable will\n # produce.\n self._saveable = saveable\n self._num_tensors = len(self._saveable().specs)\n self._setter = lambda weights: self._saveable().restore(weights, None)\n self._getter = lambda: [spec.tensor for spec in self._saveable().specs]\n else:\n # If we're in Graph mode, we need to evaluate the Saveable only once and\n # cache the resulting restore graph. Failing to do this will result in\n # new assignment ops being added to the graph each time set_weights() is\n # called.\n self._placeholder_tensors = []\n self._saveable = saveable()\n self._num_tensors = len(self._saveable.specs)\n for spec in self._saveable.specs:\n tensor = spec.tensor\n self._placeholder_tensors.append(\n array_ops.placeholder(tensor.dtype, tensor.shape))\n self._assign_op = self._saveable.restore(self._placeholder_tensors, None)\n self._setter = self._set_weights_v1\n self._getter = lambda: [spec.tensor for spec in self._saveable.specs]\n\n @property\n def num_tensors(self):\n return self._num_tensors\n\n def set_weights(self, weights):\n if len(weights) != self._num_tensors:\n raise ValueError(\n ('Weight handler for trackable %s received the wrong number of ' +\n 'weights: expected %s, got %s.') %\n (self._trackable, self._num_tensors, len(weights)))\n self._setter(weights)\n\n def get_tensors(self):\n return self._getter()\n\n def _set_weights_v1(self, weights):\n feed_dict = {}\n for idx, tensor in enumerate(weights):\n feed_dict[self._placeholder_tensors[idx]] = tensor\n backend.get_session().run(self._assign_op, feed_dict)\n\n\n# TODO(kathywu): This is a temporary hack. When a network of layers is revived\n# from SavedModel, only the top-level layer will have losses. 
This causes issues\n# in eager mode because the child layers may have graph losses\n# (thus model.losses returns a mix of Eager and graph tensors). To fix this,\n# whenever eager losses are added to one layer, add eager losses to all\n# child layers. This causes `.losses` to only return eager losses.\nREVIVED_LOSS_PLACEHOLDER = (\n 'This layer\\'s losses have been added to the parent layer.')\n"
] | [
[
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.compiler.tf2tensorrt.wrap_py_utils.get_linked_tensorrt_version",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.identity"
],
[
"tensorflow.python.keras.testing_utils.get_model_from_layers",
"tensorflow.python.keras.layers.Dense",
"numpy.around",
"numpy.random.randint",
"tensorflow.python.keras.backend.sum",
"tensorflow.python.keras.testing_utils.get_small_mlp",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.testing_utils.get_model_type",
"tensorflow.python.data.experimental.ops.cardinality.cardinality",
"numpy.zeros",
"tensorflow.python.keras.metrics.BinaryAccuracy",
"tensorflow.python.keras.keras_parameterized.run_all_keras_modes",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.keras.Model",
"tensorflow.python.keras.testing_utils.should_run_tf_function",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.keras.layers.Dropout",
"tensorflow.python.keras.testing_utils.get_small_functional_mlp",
"numpy.array",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.keras.testing_utils.should_run_eagerly",
"numpy.random.random",
"tensorflow.python.keras.Input",
"numpy.random.seed",
"tensorflow.python.keras.metrics.CategoricalAccuracy",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_generator",
"tensorflow.python.keras.keras_parameterized.run_with_all_model_types",
"numpy.ones",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.keras.testing_utils.get_multi_io_model",
"tensorflow.python.platform.test.mock.patch.object"
],
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.keras.metrics.Mean",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.keras.backend.function",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops.inside_function",
"tensorflow.python.keras.engine.base_layer.TensorFlowOpLayer",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.keras.backend.get_session",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.distribute.distribution_strategy_context.in_cross_replica_context",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.ops.variables.VariableV1",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.tf2.enabled",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.util.nest.flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.4",
"2.3",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
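The base_layer_utils code in the row above toggles the V2 dtype behavior through a module-level tri-state flag: None defers to the framework default, while True/False record an explicit user override. Below is a minimal, self-contained sketch of that pattern; `_framework_default` is a hypothetical stand-in for `tf2.enabled()`, and none of this is TensorFlow code itself.

# A minimal sketch of the tri-state feature-flag pattern used above.
# `_framework_default` is an assumed stand-in for tf2.enabled().

_V2_DTYPE_BEHAVIOR = None  # tri-state: None = defer to default, True/False = explicit user choice


def _framework_default() -> bool:
    # assumed default; in the real code this would be tf2.enabled()
    return True


def enable_v2_dtype_behavior() -> None:
    global _V2_DTYPE_BEHAVIOR
    _V2_DTYPE_BEHAVIOR = True


def disable_v2_dtype_behavior() -> None:
    global _V2_DTYPE_BEHAVIOR
    _V2_DTYPE_BEHAVIOR = False


def v2_dtype_behavior_enabled() -> bool:
    # an explicit override wins; otherwise fall back to the default
    if _V2_DTYPE_BEHAVIOR is None:
        return _framework_default()
    return _V2_DTYPE_BEHAVIOR


if __name__ == "__main__":
    print(v2_dtype_behavior_enabled())  # True (framework default)
    disable_v2_dtype_behavior()
    print(v2_dtype_behavior_enabled())  # False (explicit override)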
hlahkar/transformers | [
"c19d04623eacfbc2c452397a5eda0fde42db3fc5"
] | [
"src/transformers/training_args.py"
] | [
"# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dataclasses\nimport json\nimport os\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required\nfrom .trainer_utils import EvaluationStrategy\nfrom .utils import logging\n\n\nif is_torch_available():\n import torch\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\nlogger = logging.get_logger(__name__)\n\n\ndef default_logdir() -> str:\n \"\"\"\n Same default as PyTorch\n \"\"\"\n import socket\n from datetime import datetime\n\n current_time = datetime.now().strftime(\"%b%d_%H-%M-%S\")\n return os.path.join(\"runs\", current_time + \"_\" + socket.gethostname())\n\n\n@dataclass\nclass TrainingArguments:\n \"\"\"\n TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop\n itself**.\n\n Using :class:`~transformers.HfArgumentParser` we can turn this class into argparse arguments to be able to specify\n them on the command line.\n\n\n\n\n Parameters:\n output_dir (:obj:`str`):\n The output directory where the model predictions and checkpoints will be written.\n overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):\n If :obj:`True`, overwrite the content of the output directory. Use this to continue training if\n :obj:`output_dir` points to a checkpoint directory.\n do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run training or not. This argument is not directly used by :class:`~transformers.Trainer`, it's\n intended to be used by your training/evaluation scripts instead. See the `example scripts\n <https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.\n do_eval (:obj:`bool`, `optional`):\n Whether to run evaluation on the dev set or not. Will be set to :obj:`True` if :obj:`evaluation_strategy`\n is different from :obj:`\"no\"`. This argument is not directly used by :class:`~transformers.Trainer`, it's\n intended to be used by your training/evaluation scripts instead. See the `example scripts\n <https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.\n do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to run predictions on the test set or not. This argument is not directly used by\n :class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See\n the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more\n details.\n evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`\"no\"`):\n The evaluation strategy to adopt during training. 
Possible values are:\n\n * :obj:`\"no\"`: No evaluation is done during training.\n * :obj:`\"steps\"`: Evaluation is done (and logged) every :obj:`eval_steps`.\n * :obj:`\"epoch\"`: Evaluation is done at the end of each epoch.\n\n prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):\n When performing evaluation and predictions, only returns the loss.\n per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):\n The batch size per GPU/TPU core/CPU for training.\n per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):\n The batch size per GPU/TPU core/CPU for evaluation.\n gradient_accumulation_steps (:obj:`int`, `optional`, defaults to 1):\n Number of updates steps to accumulate the gradients for, before performing a backward/update pass.\n\n .. warning::\n\n When using gradient accumulation, one step is counted as one step with backward pass. Therefore,\n logging, evaluation, save will be conducted every ``gradient_accumulation_steps * xxx_step`` training\n examples.\n eval_accumulation_steps (:obj:`int`, `optional`):\n Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If\n left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but\n requires more memory).\n learning_rate (:obj:`float`, `optional`, defaults to 5e-5):\n The initial learning rate for Adam.\n weight_decay (:obj:`float`, `optional`, defaults to 0):\n The weight decay to apply (if not zero).\n adam_beta1 (:obj:`float`, `optional`, defaults to 0.9):\n The beta1 for the Adam optimizer.\n adam_beta2 (:obj:`float`, `optional`, defaults to 0.999):\n The beta2 for the Adam optimizer.\n adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):\n Epsilon for the Adam optimizer.\n max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):\n Maximum gradient norm (for gradient clipping).\n num_train_epochs(:obj:`float`, `optional`, defaults to 3.0):\n Total number of training epochs to perform (if not an integer, will perform the decimal part percents of\n the last epoch before stopping training).\n max_steps (:obj:`int`, `optional`, defaults to -1):\n If set to a positive number, the total number of training steps to perform. Overrides\n :obj:`num_train_epochs`.\n warmup_steps (:obj:`int`, `optional`, defaults to 0):\n Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.\n logging_dir (:obj:`str`, `optional`):\n Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.\n logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to log and evaluate the first :obj:`global_step` or not.\n logging_steps (:obj:`int`, `optional`, defaults to 500):\n Number of update steps between two logs.\n save_steps (:obj:`int`, `optional`, defaults to 500):\n Number of updates steps before two checkpoint saves.\n save_total_limit (:obj:`int`, `optional`):\n If a value is passed, will limit the total amount of checkpoints. 
Deletes the older checkpoints in\n :obj:`output_dir`.\n no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to not use CUDA even when it is available or not.\n seed (:obj:`int`, `optional`, defaults to 42):\n Random seed for initialization.\n fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.\n fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):\n For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details\n on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.\n local_rank (:obj:`int`, `optional`, defaults to -1):\n During distributed training, the rank of the process.\n tpu_num_cores (:obj:`int`, `optional`):\n When training on TPU, the number of TPU cores (automatically passed by launcher script).\n debug (:obj:`bool`, `optional`, defaults to :obj:`False`):\n When training on TPU, whether to print debug metrics or not.\n dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)\n or not.\n eval_steps (:obj:`int`, `optional`):\n Number of update steps between two evaluations if :obj:`evaluation_strategy=\"steps\"`. Will default to the\n same value as :obj:`logging_steps` if not set.\n dataloader_num_workers (:obj:`int`, `optional`, defaults to 0):\n Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the\n main process.\n past_index (:obj:`int`, `optional`, defaults to -1):\n Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc`XLNet <../model_doc/xlnet>` can\n make use of the past hidden states for their predictions. If this argument is set to a positive int, the\n ``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model\n at the next training step under the keyword argument ``mems``.\n run_name (:obj:`str`, `optional`):\n A descriptor for the run. Notably used for wandb logging.\n disable_tqdm (:obj:`bool`, `optional`):\n Whether or not to disable the tqdm progress bars. Will default to :obj:`True` if the logging level is set\n to warn or lower (default), :obj:`False` otherwise.\n remove_unused_columns (:obj:`bool`, `optional`, defaults to :obj:`True`):\n If using `nlp.Dataset` datasets, whether or not to automatically remove the columns unused by the model\n forward method.\n\n (Note that this behavior is not implemented for :class:`~transformers.TFTrainer` yet.)\n label_names (:obj:`List[str]`, `optional`):\n The list of keys in your dictionary of inputs that correspond to the labels.\n\n Will eventually default to :obj:`[\"labels\"]` except if the model used is one of the\n :obj:`XxxForQuestionAnswering` in which case it will default to :obj:`[\"start_positions\",\n \"end_positions\"]`.\n load_best_model_at_end (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to load the best model found during training at the end of training.\n\n .. note::\n\n When set to :obj:`True`, the parameters :obj:`save_steps` will be ignored and the model will be saved\n after each evaluation.\n metric_for_best_model (:obj:`str`, `optional`):\n Use in conjunction with :obj:`load_best_model_at_end` to specify the metric to use to compare two different\n models. 
Must be the name of a metric returned by the evaluation with or without the prefix :obj:`\"eval_\"`.\n Will default to :obj:`\"loss\"` if unspecified and :obj:`load_best_model_at_end=True` (to use the evaluation\n loss).\n\n If you set this value, :obj:`greater_is_better` will default to :obj:`True`. Don't forget to set it to\n :obj:`False` if your metric is better when lower.\n greater_is_better (:obj:`bool`, `optional`):\n Use in conjunction with :obj:`load_best_model_at_end` and :obj:`metric_for_best_model` to specify if better\n models should have a greater metric or not. Will default to:\n\n - :obj:`True` if :obj:`metric_for_best_model` is set to a value that isn't :obj:`\"loss\"` or\n :obj:`\"eval_loss\"`.\n - :obj:`False` if :obj:`metric_for_best_model` is not set, or set to :obj:`\"loss\"` or :obj:`\"eval_loss\"`.\n model_parallel (:obj:`bool`, `optional`, defaults to :obj:`False`):\n If there are more than one devices, whether to use model parallelism to distribute the model's modules\n across devices or not.\n ignore_data_skip (:obj:`bool`, `optional`, defaults to :obj:`False`):\n When resuming training, whether or not to skip the epochs and batches to get the data loading at the same\n stage as in the previous training. If set to :obj:`True`, the training will begin faster (as that skipping\n step can take a long time) but will not yield the same results as the interrupted training would have.\n \"\"\"\n\n output_dir: str = field(\n metadata={\"help\": \"The output directory where the model predictions and checkpoints will be written.\"}\n )\n overwrite_output_dir: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Overwrite the content of the output directory.\"\n \"Use this to continue training if output_dir points to a checkpoint directory.\"\n )\n },\n )\n\n do_train: bool = field(default=False, metadata={\"help\": \"Whether to run training.\"})\n do_eval: bool = field(default=None, metadata={\"help\": \"Whether to run eval on the dev set.\"})\n do_predict: bool = field(default=False, metadata={\"help\": \"Whether to run predictions on the test set.\"})\n model_parallel: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"If there are more than one devices, whether to use model parallelism to distribute the \"\n \"model's modules across devices.\"\n )\n },\n )\n evaluation_strategy: EvaluationStrategy = field(\n default=\"no\",\n metadata={\"help\": \"Run evaluation during training at each logging step.\"},\n )\n prediction_loss_only: bool = field(\n default=False,\n metadata={\"help\": \"When performing evaluation and predictions, only returns the loss.\"},\n )\n\n per_device_train_batch_size: int = field(\n default=8, metadata={\"help\": \"Batch size per GPU/TPU core/CPU for training.\"}\n )\n per_device_eval_batch_size: int = field(\n default=8, metadata={\"help\": \"Batch size per GPU/TPU core/CPU for evaluation.\"}\n )\n\n per_gpu_train_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Deprecated, the use of `--per_device_train_batch_size` is preferred. 
\"\n \"Batch size per GPU/TPU core/CPU for training.\"\n },\n )\n per_gpu_eval_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Deprecated, the use of `--per_device_eval_batch_size` is preferred.\"\n \"Batch size per GPU/TPU core/CPU for evaluation.\"\n },\n )\n\n gradient_accumulation_steps: int = field(\n default=1,\n metadata={\"help\": \"Number of updates steps to accumulate before performing a backward/update pass.\"},\n )\n eval_accumulation_steps: Optional[int] = field(\n default=None,\n metadata={\"help\": \"Number of predictions steps to accumulate before moving the tensors to the CPU.\"},\n )\n\n learning_rate: float = field(default=5e-5, metadata={\"help\": \"The initial learning rate for Adam.\"})\n weight_decay: float = field(default=0.0, metadata={\"help\": \"Weight decay if we apply some.\"})\n adam_beta1: float = field(default=0.9, metadata={\"help\": \"Beta1 for Adam optimizer\"})\n adam_beta2: float = field(default=0.999, metadata={\"help\": \"Beta2 for Adam optimizer\"})\n adam_epsilon: float = field(default=1e-8, metadata={\"help\": \"Epsilon for Adam optimizer.\"})\n max_grad_norm: float = field(default=1.0, metadata={\"help\": \"Max gradient norm.\"})\n\n num_train_epochs: float = field(default=3.0, metadata={\"help\": \"Total number of training epochs to perform.\"})\n max_steps: int = field(\n default=-1,\n metadata={\"help\": \"If > 0: set total number of training steps to perform. Override num_train_epochs.\"},\n )\n warmup_steps: int = field(default=0, metadata={\"help\": \"Linear warmup over warmup_steps.\"})\n\n logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={\"help\": \"Tensorboard log dir.\"})\n logging_first_step: bool = field(default=False, metadata={\"help\": \"Log the first global_step\"})\n logging_steps: int = field(default=500, metadata={\"help\": \"Log every X updates steps.\"})\n save_steps: int = field(default=500, metadata={\"help\": \"Save checkpoint every X updates steps.\"})\n save_total_limit: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"Limit the total amount of checkpoints.\"\n \"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints\"\n )\n },\n )\n no_cuda: bool = field(default=False, metadata={\"help\": \"Do not use CUDA even when it is available\"})\n seed: int = field(default=42, metadata={\"help\": \"random seed for initialization\"})\n\n fp16: bool = field(\n default=False,\n metadata={\"help\": \"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\"},\n )\n fp16_opt_level: str = field(\n default=\"O1\",\n metadata={\n \"help\": (\n \"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\"\n )\n },\n )\n local_rank: int = field(default=-1, metadata={\"help\": \"For distributed training: local_rank\"})\n\n tpu_num_cores: Optional[int] = field(\n default=None, metadata={\"help\": \"TPU: Number of TPU cores (automatically passed by launcher script)\"}\n )\n tpu_metrics_debug: bool = field(\n default=False,\n metadata={\"help\": \"Deprecated, the use of `--debug` is preferred. 
TPU: Whether to print debug metrics\"},\n )\n debug: bool = field(default=False, metadata={\"help\": \"Whether to print debug metrics on TPU\"})\n\n dataloader_drop_last: bool = field(\n default=False, metadata={\"help\": \"Drop the last incomplete batch if it is not divisible by the batch size.\"}\n )\n eval_steps: int = field(default=None, metadata={\"help\": \"Run an evaluation every X steps.\"})\n dataloader_num_workers: int = field(\n default=0,\n metadata={\n \"help\": \"Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process.\"\n },\n )\n\n past_index: int = field(\n default=-1,\n metadata={\"help\": \"If >=0, uses the corresponding part of the output as the past state for next step.\"},\n )\n\n run_name: Optional[str] = field(\n default=None, metadata={\"help\": \"An optional descriptor for the run. Notably used for wandb logging.\"}\n )\n disable_tqdm: Optional[bool] = field(\n default=None, metadata={\"help\": \"Whether or not to disable the tqdm progress bars.\"}\n )\n\n remove_unused_columns: Optional[bool] = field(\n default=True, metadata={\"help\": \"Remove columns not required by the model when using an nlp.Dataset.\"}\n )\n label_names: Optional[List[str]] = field(\n default=None, metadata={\"help\": \"The list of keys in your dictionary of inputs that correspond to the labels.\"}\n )\n\n load_best_model_at_end: Optional[bool] = field(\n default=False,\n metadata={\"help\": \"Whether or not to load the best model found during training at the end of training.\"},\n )\n metric_for_best_model: Optional[str] = field(\n default=None, metadata={\"help\": \"The metric to use to compare two different models.\"}\n )\n greater_is_better: Optional[bool] = field(\n default=None, metadata={\"help\": \"Whether the `metric_for_best_model` should be maximized or not.\"}\n )\n ignore_data_skip: bool = field(\n default=False,\n metadata={\n \"help\": \"When resuming training, whether or not to skip the first epochs and batches to get to the same training data.\"\n },\n )\n\n def __post_init__(self):\n if self.disable_tqdm is None:\n self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN\n self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy)\n if self.do_eval is False and self.evaluation_strategy != EvaluationStrategy.NO:\n self.do_eval = True\n if self.eval_steps is None:\n self.eval_steps = self.logging_steps\n\n if self.load_best_model_at_end and self.metric_for_best_model is None:\n self.metric_for_best_model = \"loss\"\n if self.greater_is_better is None and self.metric_for_best_model is not None:\n self.greater_is_better = self.metric_for_best_model not in [\"loss\", \"eval_loss\"]\n if self.run_name is None:\n self.run_name = self.output_dir\n\n if is_torch_available() and self.device.type != \"cuda\" and self.fp16:\n raise ValueError(\"AMP (`--fp16`) can only be used on CUDA devices.\")\n\n @property\n def train_batch_size(self) -> int:\n \"\"\"\n The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).\n \"\"\"\n if self.per_gpu_train_batch_size:\n logger.warning(\n \"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future \"\n \"version. 
Using `--per_device_train_batch_size` is preferred.\"\n )\n per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size\n if not self.model_parallel:\n train_batch_size = per_device_batch_size * max(1, self.n_gpu)\n else:\n train_batch_size = per_device_batch_size\n return train_batch_size\n\n @property\n def eval_batch_size(self) -> int:\n \"\"\"\n The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).\n \"\"\"\n if self.per_gpu_eval_batch_size:\n logger.warning(\n \"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future \"\n \"version. Using `--per_device_eval_batch_size` is preferred.\"\n )\n per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size\n if not self.model_parallel:\n eval_batch_size = per_device_batch_size * max(1, self.n_gpu)\n else:\n eval_batch_size = per_device_batch_size\n return eval_batch_size\n\n @cached_property\n @torch_required\n def _setup_devices(self) -> Tuple[\"torch.device\", int]:\n logger.info(\"PyTorch: setting up devices\")\n if self.no_cuda:\n device = torch.device(\"cpu\")\n n_gpu = 0\n elif is_torch_tpu_available():\n device = xm.xla_device()\n n_gpu = 0\n elif self.local_rank == -1:\n # if n_gpu is > 1 we'll use nn.DataParallel.\n # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`\n # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will\n # trigger an error that a device index is missing. Index 0 takes into account the\n # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`\n # will use the first GPU in that env, i.e. GPU#1\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n # Here, we'll use torch.distributed.\n # Initializes the distributed backend which will take care of synchronizing nodes/GPUs\n torch.distributed.init_process_group(backend=\"nccl\")\n device = torch.device(\"cuda\", self.local_rank)\n n_gpu = 1\n\n if device.type == \"cuda\":\n torch.cuda.set_device(device)\n\n return device, n_gpu\n\n @property\n @torch_required\n def device(self) -> \"torch.device\":\n \"\"\"\n The device used by this process.\n \"\"\"\n return self._setup_devices[0]\n\n @property\n @torch_required\n def n_gpu(self):\n \"\"\"\n The number of GPUs used by this process.\n\n Note:\n This will only be greater than one when you have multiple GPUs available but are not using distributed\n training. For distributed training, it will always be 1.\n \"\"\"\n return self._setup_devices[1]\n\n @property\n @torch_required\n def parallel_mode(self):\n \"\"\"\n The current mode used for parallelism if multiple GPUs/TPU cores are available. 
One of:\n\n - :obj:`ParallelMode.NOT_PARALLEL`: no parallelism (CPU or one GPU).\n - :obj:`ParallelMode.NOT_DISTRIBUTED`: several GPUs in one single process (uses :obj:`torch.nn.DataParallel`).\n - :obj:`ParallelMode.DISTRIBUTED`: several GPUs, each having its own process (uses\n :obj:`torch.nn.DistributedDataParallel`).\n - :obj:`ParallelMode.TPU`: several TPU cores.\n \"\"\"\n if is_torch_tpu_available():\n return ParallelMode.TPU\n elif self.local_rank != -1:\n return ParallelMode.DISTRIBUTED\n elif self.n_gpu > 1:\n return ParallelMode.NOT_DISTRIBUTED\n else:\n return ParallelMode.NOT_PARALLEL\n\n def to_dict(self):\n \"\"\"\n Serializes this instance while replacing `Enum` members by their values (for JSON serialization support).\n \"\"\"\n d = dataclasses.asdict(self)\n for k, v in d.items():\n if isinstance(v, Enum):\n d[k] = v.value\n return d\n\n def to_json_string(self):\n \"\"\"\n Serializes this instance to a JSON string.\n \"\"\"\n return json.dumps(self.to_dict(), indent=2)\n\n def to_sanitized_dict(self) -> Dict[str, Any]:\n \"\"\"\n Sanitized serialization to use with TensorBoard’s hparams\n \"\"\"\n d = self.to_dict()\n d = {**d, **{\"train_batch_size\": self.train_batch_size, \"eval_batch_size\": self.eval_batch_size}}\n\n valid_types = [bool, int, float, str]\n if is_torch_available():\n valid_types.append(torch.Tensor)\n\n return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}\n\n\nclass ParallelMode(Enum):\n NOT_PARALLEL = \"not_parallel\"\n NOT_DISTRIBUTED = \"not_distributed\"\n DISTRIBUTED = \"distributed\"\n TPU = \"tpu\"\n"
] | [
[
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.device_count"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
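The TrainingArguments dataclass in the row above attaches a `metadata={"help": ...}` dict to each `dataclasses.field` so that `HfArgumentParser` can expose every field as a command-line flag. Below is a minimal sketch of that dataclass-to-argparse pattern; `MiniArgs` and `build_parser` are illustrative names, not the transformers API, and the two fields are invented for the example.

import argparse
from dataclasses import MISSING, dataclass, field, fields


@dataclass
class MiniArgs:
    # hypothetical fields for illustration; not the real TrainingArguments
    output_dir: str = field(metadata={"help": "Where to write checkpoints."})
    learning_rate: float = field(default=5e-5, metadata={"help": "Initial learning rate."})


def build_parser(cls) -> argparse.ArgumentParser:
    # turn each dataclass field into a CLI flag, using metadata["help"] as the help text
    parser = argparse.ArgumentParser()
    for f in fields(cls):
        if f.default is MISSING:
            parser.add_argument(f"--{f.name}", type=f.type, required=True,
                                help=f.metadata.get("help", ""))
        else:
            parser.add_argument(f"--{f.name}", type=f.type, default=f.default,
                                help=f.metadata.get("help", ""))
    return parser


if __name__ == "__main__":
    ns = build_parser(MiniArgs).parse_args(["--output_dir", "runs"])
    print(MiniArgs(**vars(ns)))  # MiniArgs(output_dir='runs', learning_rate=5e-05)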
vedanthpadigelwar/AI_projects | [
"885bbe76800f9a449414b3735ab4a4c4bd2e7aa0"
] | [
"test method/tensorflow2.0/deep-sort-yolov4/demo.py"
] | [
"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom timeit import time\nimport warnings\nimport cv2\nimport numpy as np\nfrom PIL import Image\nfrom yolo import YOLO\n\nfrom deep_sort import preprocessing\nfrom deep_sort import nn_matching\nfrom deep_sort.detection import Detection\nfrom deep_sort.detection_yolo import Detection_YOLO\nfrom deep_sort.tracker import Tracker\nfrom tools import generate_detections as gdet\nimport imutils.video\nfrom videocaptureasync import VideoCaptureAsync\n\nwarnings.filterwarnings('ignore')\n\n\ndef main(yolo):\n\n # Definition of the parameters\n max_cosine_distance = 0.3\n nn_budget = None\n nms_max_overlap = 1.0\n\n # Deep SORT\n model_filename = 'model_data/mars-small128.pb'\n encoder = gdet.create_box_encoder(model_filename, batch_size=1)\n\n metric = nn_matching.NearestNeighborDistanceMetric(\n \"cosine\", max_cosine_distance, nn_budget)\n tracker = Tracker(metric)\n\n tracking = True\n writeVideo_flag = True\n asyncVideo_flag = False\n\n file_path = 'video.webm'\n if asyncVideo_flag:\n video_capture = VideoCaptureAsync(file_path)\n else:\n video_capture = cv2.VideoCapture(file_path)\n\n if asyncVideo_flag:\n video_capture.start()\n\n if writeVideo_flag:\n if asyncVideo_flag:\n w = int(video_capture.cap.get(3))\n h = int(video_capture.cap.get(4))\n else:\n w = int(video_capture.get(3))\n h = int(video_capture.get(4))\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))\n frame_index = -1\n\n fps = 0.0\n fps_imutils = imutils.video.FPS().start()\n\n while True:\n ret, frame = video_capture.read() # frame shape 640*480*3\n if ret != True:\n break\n\n t1 = time.time()\n\n image = Image.fromarray(frame[..., ::-1]) # bgr to rgb\n boxes, confidence, classes = yolo.detect_image(image)\n\n if tracking:\n features = encoder(frame, boxes)\n\n detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in\n zip(boxes, confidence, classes, features)]\n else:\n detections = [Detection_YOLO(bbox, confidence, cls) for bbox, confidence, cls in\n zip(boxes, confidence, classes)]\n\n # Run non-maxima suppression.\n boxes = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n indices = preprocessing.non_max_suppression(\n boxes, nms_max_overlap, scores)\n detections = [detections[i] for i in indices]\n\n if tracking:\n # Call the tracker\n tracker.predict()\n tracker.update(detections)\n\n for track in tracker.tracks:\n if not track.is_confirmed() or track.time_since_update > 1:\n continue\n bbox = track.to_tlbr()\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(\n bbox[2]), int(bbox[3])), (255, 255, 255), 2)\n cv2.putText(frame, \"ID: \" + str(track.track_id), (int(bbox[0]), int(bbox[1])), 0,\n 1.5e-3 * frame.shape[0], (0, 255, 0), 1)\n\n for det in detections:\n bbox = det.to_tlbr()\n score = \"%.2f\" % round(det.confidence * 100, 2) + \"%\"\n cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(\n bbox[2]), int(bbox[3])), (255, 0, 0), 2)\n if len(classes) > 0:\n cls = det.cls\n cv2.putText(frame, str(cls) + \" \" + score, (int(bbox[0]), int(bbox[3])), 0,\n 1.5e-3 * frame.shape[0], (0, 255, 0), 1)\n\n cv2.imshow('', frame)\n\n if writeVideo_flag: # and not asyncVideo_flag:\n # save a frame\n out.write(frame)\n frame_index = frame_index + 1\n\n fps_imutils.update()\n\n if not asyncVideo_flag:\n fps = (fps + (1./(time.time()-t1))) / 2\n print(\"FPS = %f\" % 
(fps))\n\n # Press Q to stop!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n fps_imutils.stop()\n print('imutils FPS: {}'.format(fps_imutils.fps()))\n\n if asyncVideo_flag:\n video_capture.stop()\n else:\n video_capture.release()\n\n if writeVideo_flag:\n out.release()\n\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main(YOLO())\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
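The demo in the row above filters YOLO detections with `preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)` before handing them to the Deep SORT tracker. Below is a minimal numpy sketch of greedy non-maximum suppression for boxes in (top-left x, top-left y, width, height) format, matching the `Detection.tlwh` layout. It uses the common IoU criterion; the deep_sort helper's exact overlap measure may differ, so treat this as an assumption-laden illustration rather than the library's implementation.

import numpy as np


def nms_tlwh(boxes: np.ndarray, max_overlap: float, scores: np.ndarray) -> list:
    """Greedy NMS over (top-left x, top-left y, width, height) boxes."""
    if len(boxes) == 0:
        return []
    x1, y1 = boxes[:, 0], boxes[:, 1]
    x2, y2 = x1 + boxes[:, 2], y1 + boxes[:, 3]
    areas = boxes[:, 2] * boxes[:, 3]
    order = np.argsort(scores)[::-1]  # highest-scoring boxes first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # intersection of the kept box with all remaining candidates
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= max_overlap]  # drop boxes that overlap too much
    return keep


boxes = np.array([[0, 0, 10, 10], [1, 1, 10, 10], [50, 50, 5, 5]], dtype=float)
scores = np.array([0.9, 0.8, 0.7])
print(nms_tlwh(boxes, 0.5, scores))  # [0, 2]: the near-duplicate box 1 is suppressed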
ktpolanski/scirpy | [
"2d6e3a6347ad54425a8dea635fa04609aaf33c57",
"2d6e3a6347ad54425a8dea635fa04609aaf33c57"
] | [
"scirpy/tests/test_util.py",
"scirpy/_tools/_chain_qc.py"
] | [
"from scirpy.util import (\n _is_na,\n _is_false,\n _is_true,\n _normalize_counts,\n _is_symmetric,\n _reduce_nonzero,\n _translate_dna_to_protein,\n)\nfrom scirpy.util.graph import layout_components\nfrom itertools import combinations\nimport igraph as ig\nimport numpy as np\nimport pandas as pd\nimport numpy.testing as npt\nimport pytest\nimport scipy.sparse\nfrom .fixtures import adata_tra\n\nimport warnings\n\n\ndef test_reduce_nonzero():\n A = np.array([[0, 0, 3], [1, 2, 5], [7, 0, 0]])\n B = np.array([[1, 0, 3], [2, 1, 0], [6, 0, 5]])\n A_csr = scipy.sparse.csr_matrix(A)\n B_csr = scipy.sparse.csr_matrix(B)\n A_csc = scipy.sparse.csc_matrix(A)\n B_csc = scipy.sparse.csc_matrix(B)\n\n expected = np.array([[1, 0, 3], [1, 1, 5], [6, 0, 5]])\n\n with pytest.raises(ValueError):\n _reduce_nonzero(A, B)\n npt.assert_equal(_reduce_nonzero(A_csr, B_csr).toarray(), expected)\n npt.assert_equal(_reduce_nonzero(A_csc, B_csc).toarray(), expected)\n npt.assert_equal(_reduce_nonzero(A_csr, A_csr.copy()).toarray(), A_csr.toarray())\n\n\ndef test_is_symmatric():\n M = np.array([[1, 2, 2], [2, 1, 3], [2, 3, 1]])\n S_csr = scipy.sparse.csr_matrix(M)\n S_csc = scipy.sparse.csc_matrix(M)\n S_lil = scipy.sparse.lil_matrix(M)\n assert _is_symmetric(M)\n assert _is_symmetric(S_csr)\n assert _is_symmetric(S_csc)\n assert _is_symmetric(S_lil)\n\n M = np.array([[1, 2, 2], [2, 1, np.nan], [2, np.nan, np.nan]])\n S_csr = scipy.sparse.csr_matrix(M)\n S_csc = scipy.sparse.csc_matrix(M)\n S_lil = scipy.sparse.lil_matrix(M)\n assert _is_symmetric(M)\n assert _is_symmetric(S_csr)\n assert _is_symmetric(S_csc)\n assert _is_symmetric(S_lil)\n\n M = np.array([[1, 2, 2], [2, 1, 3], [3, 2, 1]])\n S_csr = scipy.sparse.csr_matrix(M)\n S_csc = scipy.sparse.csc_matrix(M)\n S_lil = scipy.sparse.lil_matrix(M)\n assert not _is_symmetric(M)\n assert not _is_symmetric(S_csr)\n assert not _is_symmetric(S_csc)\n assert not _is_symmetric(S_lil)\n\n\ndef test_is_na():\n warnings.filterwarnings(\"error\")\n assert _is_na(None)\n assert _is_na(np.nan)\n assert _is_na(\"nan\")\n assert not _is_na(42)\n assert not _is_na(\"Foobar\")\n assert not _is_na(dict())\n array_test = np.array([\"None\", \"nan\", None, np.nan, \"foobar\"])\n array_expect = np.array([True, True, True, True, False])\n array_test_bool = np.array([True, False, True])\n array_expect_bool = np.array([False, False, False])\n\n npt.assert_equal(_is_na(array_test), array_expect)\n npt.assert_equal(_is_na(pd.Series(array_test)), array_expect)\n\n npt.assert_equal(_is_na(array_test_bool), array_expect_bool)\n npt.assert_equal(_is_na(pd.Series(array_test_bool)), array_expect_bool)\n\n\ndef test_is_false():\n warnings.filterwarnings(\"error\")\n assert _is_false(False)\n assert _is_false(0)\n assert _is_false(\"\")\n assert _is_false(\"False\")\n assert _is_false(\"false\")\n assert not _is_false(42)\n assert not _is_false(True)\n assert not _is_false(\"true\")\n assert not _is_false(\"foobar\")\n assert not _is_false(np.nan)\n assert not _is_false(None)\n assert not _is_false(\"nan\")\n assert not _is_false(\"None\")\n array_test = np.array(\n [\"False\", \"false\", 0, 1, True, False, \"true\", \"Foobar\", np.nan, \"nan\"],\n dtype=object,\n )\n array_test_str = array_test.astype(\"str\")\n array_expect = np.array(\n [True, True, True, False, False, True, False, False, False, False]\n )\n array_test_bool = np.array([True, False, True])\n array_expect_bool = np.array([False, True, False])\n\n npt.assert_equal(_is_false(array_test), array_expect)\n 
npt.assert_equal(_is_false(array_test_str), array_expect)\n npt.assert_equal(_is_false(pd.Series(array_test)), array_expect)\n npt.assert_equal(_is_false(pd.Series(array_test_str)), array_expect)\n npt.assert_equal(_is_false(array_test_bool), array_expect_bool)\n npt.assert_equal(_is_false(pd.Series(array_test_bool)), array_expect_bool)\n\n\ndef test_is_true():\n warnings.filterwarnings(\"error\")\n assert not _is_true(False)\n assert not _is_true(0)\n assert not _is_true(\"\")\n assert not _is_true(\"False\")\n assert not _is_true(\"false\")\n assert not _is_true(\"0\")\n assert not _is_true(np.nan)\n assert not _is_true(None)\n assert not _is_true(\"nan\")\n assert not _is_true(\"None\")\n assert _is_true(42)\n assert _is_true(True)\n assert _is_true(\"true\")\n assert _is_true(\"foobar\")\n assert _is_true(\"True\")\n array_test = np.array(\n [\"False\", \"false\", 0, 1, True, False, \"true\", \"Foobar\", np.nan, \"nan\"],\n dtype=object,\n )\n array_test_str = array_test.astype(\"str\")\n array_expect = np.array(\n [False, False, False, True, True, False, True, True, False, False]\n )\n array_test_bool = np.array([True, False, True])\n array_expect_bool = np.array([True, False, True])\n\n npt.assert_equal(_is_true(array_test), array_expect)\n npt.assert_equal(_is_true(array_test_str), array_expect)\n npt.assert_equal(_is_true(pd.Series(array_test)), array_expect)\n npt.assert_equal(_is_true(pd.Series(array_test_str)), array_expect)\n npt.assert_equal(_is_true(array_test_bool), array_expect_bool)\n npt.assert_equal(_is_true(pd.Series(array_test_bool)), array_expect_bool)\n\n\[email protected]\ndef group_df():\n return pd.DataFrame().assign(\n cell=[\"c1\", \"c2\", \"c3\", \"c4\", \"c5\", \"c6\"],\n sample=[\"s2\", \"s1\", \"s2\", \"s2\", \"s2\", \"s1\"],\n )\n\n\ndef test_normalize_counts(group_df):\n with pytest.raises(ValueError):\n _normalize_counts(group_df, True, None)\n\n npt.assert_equal(_normalize_counts(group_df, False), [1] * 6)\n npt.assert_equal(\n _normalize_counts(group_df, \"sample\"), [0.25, 0.5, 0.25, 0.25, 0.25, 0.5]\n )\n npt.assert_equal(\n _normalize_counts(group_df, True, \"sample\"), [0.25, 0.5, 0.25, 0.25, 0.25, 0.5]\n )\n\n\ndef test_layout_components():\n g = ig.Graph()\n\n # add 100 unconnected nodes\n g.add_vertices(100)\n\n # add 50 2-node components\n g.add_vertices(100)\n g.add_edges([(ii, ii + 1) for ii in range(100, 200, 2)])\n\n # add 33 3-node components\n g.add_vertices(100)\n for ii in range(200, 299, 3):\n g.add_edges([(ii, ii + 1), (ii, ii + 2), (ii + 1, ii + 2)])\n\n # add a couple of larger components\n n = 300\n for ii in np.random.randint(4, 30, size=10):\n g.add_vertices(ii)\n g.add_edges(combinations(range(n, n + ii), 2))\n n += ii\n\n layout_components(g, arrange_boxes=\"size\", component_layout=\"fr\")\n try:\n layout_components(g, arrange_boxes=\"rpack\", component_layout=\"fr\")\n except ImportError:\n warnings.warn(\n \"The 'rpack' layout-test was skipped because rectangle \"\n \"packer is not installed. \"\n )\n layout_components(g, arrange_boxes=\"squarify\", component_layout=\"fr\")\n\n\ndef test_translate_dna_to_protein(adata_tra):\n for nt, aa in zip(adata_tra.obs[\"IR_VJ_1_cdr3_nt\"], adata_tra.obs[\"IR_VJ_1_cdr3\"]):\n assert _translate_dna_to_protein(nt) == aa\n",
"from ..util import _is_na, _is_true, deprecated\nfrom anndata import AnnData\nfrom typing import Union, Sequence, Tuple\nimport numpy as np\nfrom scanpy import logging\n\n\n@deprecated(\"Use `tl.chain_qc` instead.\")\ndef chain_pairing(\n adata: AnnData, *, inplace: bool = True, key_added: str = \"chain_pairing\"\n) -> Union[None, np.ndarray]:\n \"\"\"Categorize cells based on how many TRA and TRB chains they have.\n\n Parameters\n ----------\n adata\n Annotated data matrix\n inplace\n If True, adds a column to adata.obs\n key_added\n Column name to add to 'obs'\n\n Returns\n -------\n Depending on the value of `inplace`, either\n returns a Series with a chain pairing category for each cell\n or adds a `chain_pairing` column to `adata`.\n \"\"\"\n res = chain_qc(\n adata,\n inplace=inplace,\n key_added=(\"receptor_type\", \"receptor_subtype\", key_added),\n )\n if not inplace:\n return res[2]\n\n\ndef chain_qc(\n adata: AnnData,\n *,\n inplace: bool = True,\n key_added: Sequence[str] = (\"receptor_type\", \"receptor_subtype\", \"chain_pairing\"),\n) -> Union[None, Tuple[np.ndarray]]:\n \"\"\"Perform quality control based on the receptor-chain pairing configuration.\n\n Categorizes cells into their receptor types and according to their chain pairing\n status. The function adds three columns to `adata.obs`, two containing a coarse\n and fine annotation of receptor types, a third classifying cells according\n to the number of matched receptor types.\n\n `receptor_type` can be one of the following\n * `TCR` (all cells that contain any combination of TRA/TRB/TRG/TRD chains,\n but no IGH/IGK/IGL chains)\n * `BCR` (all cells that contain any combination of IGH/IGK/IGL chains,\n but no TCR chains)\n * `ambiguous` (all cells that contain both BCR and TCR chains)\n * `multichain` (all cells with more than two VJ or more than two VDJ chains)\n * `no IR` (all cells without any detected immune receptor)\n\n `receptor_subtype` can be one of the following\n * `TRA+TRB` (all cells that have only TRA and/or TRB chains)\n * `TRG+TRD` (all cells that have only TRG and/or TRD chains)\n * `IGH` (all cells that have only IGH chains, but no IGL or IGK)\n * `IGH+IGL` (all cells that have only IGH and IGL chains)\n * `IGH+IGK` (all cells that have only IGH and IGK chains)\n * `multichain` (all cells with more than two VJ or more than two VDJ chains)\n * `ambiguous` (all cells that are none of the above, e.g. TRA+TRD, TRA+IGH or,\n IGH+IGK as the primary and IGH+IGL as the secondary receptor)\n * `no IR` (all cells without any detected immune receptor)\n\n `chain_pairing` can be one of the following\n * `single pair` (all cells that have exactely one matched VJ and VDJ chain)\n * `orphan VJ` (all cells that have only one VJ chain)\n * `orphan VDJ` (all cells that have only one VDJ chain)\n * `extra VJ` (all cells that have a matched pair of VJ and VDJ chains plus an\n additional VJ-chain)\n * `extra VDJ` (analogous)\n * `two full chains` (all cells that have two matched pairs of VJ and VDJ chains)\n * `ambiguous` (all cells that have unmatched chains, i.e. 
that have been\n classified as an `ambiguous` receptor_subtype)\n * `multichain` (all cells with more than two VJ or more than two VDJ chains)\n * `no IR` (all chains with not immune receptor chains)\n\n Parameters\n ----------\n adata\n Annotated data matrix\n inplace\n If True, adds columns to to adata\n key_added\n Tuple specifying the column names for the coarse and fine receptor type\n annotation, respectively\n\n Returns\n -------\n Depending on the value of `inplace` either adds three columns to\n `adata.obs` or returns a tuple with three numpy arrays containing\n the annotations.\n \"\"\"\n x = adata.obs\n\n # initalize result arrays\n string_length = len(\"multichain\")\n res_receptor_type = np.empty(dtype=f\"<U{string_length}\", shape=(x.shape[0],))\n res_receptor_subtype = np.empty(dtype=f\"<U{string_length}\", shape=(x.shape[0],))\n\n mask_has_ir = _is_true(x[\"has_ir\"].values)\n mask_multichain = mask_has_ir & _is_true(x[\"multi_chain\"].values)\n\n vj_loci = x.loc[:, [\"IR_VJ_1_locus\", \"IR_VJ_2_locus\"]].values\n vdj_loci = x.loc[:, [\"IR_VDJ_1_locus\", \"IR_VDJ_2_locus\"]].values\n\n # Build masks for receptor chains\n has_tra = (vj_loci == \"TRA\").any(axis=1)\n has_trg = (vj_loci == \"TRG\").any(axis=1)\n has_igk = (vj_loci == \"IGK\").any(axis=1)\n has_igl = (vj_loci == \"IGL\").any(axis=1)\n\n has_trb = (vdj_loci == \"TRB\").any(axis=1)\n has_trd = (vdj_loci == \"TRD\").any(axis=1)\n has_igh = (vdj_loci == \"IGH\").any(axis=1)\n\n has_tr = has_tra | has_trg | has_trb | has_trd\n has_ig = has_igk | has_igl | has_igh\n\n # Combine masks into receptor types and subtypes\n type_is_t = has_tr & ~has_ig\n type_is_b = ~has_tr & has_ig\n\n subtype_is_tab = (has_tra | has_trb) & ~(has_trg | has_trd | has_ig)\n subtype_is_tgd = (has_trg | has_trd) & ~(has_tra | has_trb | has_ig)\n subtype_is_ighk = (has_igk) & ~(has_tr | has_igl)\n subtype_is_ighl = (has_igl) & ~(has_tr | has_igk)\n # orphan IGH\n subtype_is_igh = (has_igh) & ~(has_igk | has_igl | has_tr)\n\n # Apply masks for receptor type\n res_receptor_type[:] = \"ambiguous\"\n res_receptor_type[~mask_has_ir] = \"no IR\"\n res_receptor_type[type_is_t] = \"TCR\"\n res_receptor_type[type_is_b] = \"BCR\"\n res_receptor_type[mask_multichain] = \"multichain\"\n\n # Apply masks for receptor subtypes\n res_receptor_subtype[:] = \"ambiguous\"\n res_receptor_subtype[~mask_has_ir] = \"no IR\"\n res_receptor_subtype[subtype_is_tab] = \"TRA+TRB\"\n res_receptor_subtype[subtype_is_tgd] = \"TRG+TRD\"\n res_receptor_subtype[subtype_is_igh] = \"IGH\"\n res_receptor_subtype[subtype_is_ighl] = \"IGH+IGL\"\n res_receptor_subtype[subtype_is_ighk] = \"IGH+IGK\"\n res_receptor_subtype[mask_multichain] = \"multichain\"\n\n res_chain_pairing = _chain_pairing(\n adata, res_receptor_subtype == \"ambiguous\", mask_has_ir, mask_multichain\n )\n\n if inplace:\n col_receptor_type, col_receptor_subtype, col_chain_pairing = key_added\n adata.obs[col_receptor_type] = res_receptor_type\n adata.obs[col_receptor_subtype] = res_receptor_subtype\n adata.obs[col_chain_pairing] = res_chain_pairing\n else:\n return (res_receptor_type, res_receptor_subtype, res_chain_pairing)\n\n\ndef _chain_pairing(\n adata: AnnData,\n mask_ambiguous: np.ndarray,\n mask_has_ir: np.ndarray,\n mask_multichain: np.ndarray,\n) -> np.ndarray:\n \"\"\"Annotate chain pairing categories.\n\n Parameters:\n -----------\n mask_ambiguous\n boolean array of the same length as `adata.obs`, marking\n which cells have an ambiguous receptor configuration.\n \"\"\"\n x = adata.obs\n string_length = 
len(\"two full chains\")\n results = np.empty(dtype=f\"<U{string_length}\", shape=(x.shape[0],))\n\n logging.debug(\"Done initalizing\")\n\n mask_has_vj1 = ~_is_na(x[\"IR_VJ_1_cdr3\"].values)\n mask_has_vdj1 = ~_is_na(x[\"IR_VDJ_1_cdr3\"].values)\n mask_has_vj2 = ~_is_na(x[\"IR_VJ_2_cdr3\"].values)\n mask_has_vdj2 = ~_is_na(x[\"IR_VDJ_2_cdr3\"].values)\n\n logging.debug(\"Done with masks\")\n\n for m in [mask_has_vj1, mask_has_vdj1, mask_has_vj2, mask_has_vdj2]:\n # no cell can have a cdr3 sequence but no TCR\n assert np.setdiff1d(np.where(m)[0], np.where(mask_has_ir)[0]).size == 0\n\n results[~mask_has_ir] = \"no IR\"\n results[mask_has_vj1] = \"orphan VJ\"\n results[mask_has_vdj1] = \"orphan VDJ\"\n results[mask_has_vj1 & mask_has_vdj1] = \"single pair\"\n results[mask_has_vj1 & mask_has_vdj1 & mask_has_vj2] = \"extra VJ\"\n results[mask_has_vj1 & mask_has_vdj1 & mask_has_vdj2] = \"extra VDJ\"\n results[\n mask_has_vj1 & mask_has_vdj1 & mask_has_vj2 & mask_has_vdj2\n ] = \"two full chains\"\n results[mask_ambiguous] = \"ambiguous\"\n results[mask_multichain] = \"multichain\"\n\n assert \"\" not in results, \"One or more chains are not characterized\"\n\n return results\n"
] | [
[
"numpy.array",
"pandas.Series",
"pandas.DataFrame",
"numpy.random.randint"
],
[
"numpy.where",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
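The chain_qc/_chain_pairing code in the row above classifies cells by painting a result array with boolean masks, applying progressively more specific categories so that later (higher-priority) assignments overwrite earlier ones. Below is a minimal numpy sketch of that pattern on toy masks; the data is made up for illustration and is not scirpy output.

import numpy as np

# toy per-cell masks (invented data, not scirpy output)
has_vj = np.array([True, True, False, True, False])
has_vdj = np.array([True, False, True, True, False])
multichain = np.array([False, False, False, True, False])

# start from a catch-all label, then overwrite with more specific categories;
# the last assignment to a cell wins, so higher-priority masks come later
labels = np.full(has_vj.shape, "no IR", dtype="<U11")
labels[has_vj] = "orphan VJ"
labels[has_vdj] = "orphan VDJ"
labels[has_vj & has_vdj] = "single pair"
labels[multichain] = "multichain"  # highest priority, applied last

print(labels)  # ['single pair' 'orphan VJ' 'orphan VDJ' 'multichain' 'no IR']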
mauicv/alibi | [
"30fea76391c255963c8818c2b54aa615b0d6f858",
"30fea76391c255963c8818c2b54aa615b0d6f858"
] | [
"alibi/explainers/anchors/anchor_image.py",
"alibi/models/tensorflow/cfrl_models.py"
] | [
"import copy\nimport logging\nfrom functools import partial\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union\n\nimport numpy as np\nfrom skimage.segmentation import felzenszwalb, quickshift, slic\n\nfrom alibi.api.defaults import DEFAULT_DATA_ANCHOR_IMG, DEFAULT_META_ANCHOR\nfrom alibi.api.interfaces import Explainer, Explanation\nfrom alibi.exceptions import (AlibiPredictorCallException,\n AlibiPredictorReturnTypeError)\nfrom alibi.utils.wrappers import ArgmaxTransformer\n\nfrom .anchor_base import AnchorBaseBeam\nfrom .anchor_explanation import AnchorExplanation\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_SEGMENTATION_KWARGS = {\n 'felzenszwalb': {},\n 'quickshift': {},\n 'slic': {'n_segments': 10, 'compactness': 10, 'sigma': .5}\n} # type: Dict[str, Dict]\n\n\ndef scale_image(image: np.ndarray, scale: tuple = (0, 255)) -> np.ndarray:\n \"\"\"\n Scales an image in a specified range.\n\n Parameters\n ----------\n image\n Image to be scale.\n scale\n The scaling interval.\n\n Returns\n -------\n img_scaled\n Scaled image.\n \"\"\"\n\n img_max, img_min = image.max(), image.min()\n img_std = (image - img_min) / (img_max - img_min)\n img_scaled = img_std * (scale[1] - scale[0]) + scale[0]\n\n return img_scaled\n\n\nclass AnchorImageSampler:\n def __init__(\n self,\n predictor: Callable,\n segmentation_fn: Callable,\n custom_segmentation: bool,\n image: np.ndarray,\n images_background: Optional[np.ndarray] = None,\n p_sample: float = 0.5,\n n_covered_ex: int = 10,\n ):\n \"\"\"\n Initialize anchor image sampler.\n\n Parameters\n ----------\n predictor\n A callable that takes a `numpy` array of `N` data points as inputs and returns `N` outputs.\n segmentation_fn\n Function used to segment the images.\n image\n Image to be explained.\n images_background\n Images to overlay superpixels on.\n p_sample\n Probability for a pixel to be represented by the average value of its superpixel.\n n_covered_ex\n How many examples where anchors apply to store for each anchor sampled during search\n (both examples where prediction on samples agrees/disagrees with `desired_label` are stored).\n \"\"\"\n self.predictor = predictor\n self.segmentation_fn = segmentation_fn\n self.custom_segmentation = custom_segmentation\n self.image = image\n self.images_background = images_background\n self.n_covered_ex = n_covered_ex\n self.p_sample = p_sample\n self.segments = self.generate_superpixels(image)\n self.segment_labels = list(np.unique(self.segments))\n self.instance_label = self.predictor(image[np.newaxis, ...])[0]\n\n def __call__(\n self, anchor: Tuple[int, tuple], num_samples: int, compute_labels: bool = True\n ) -> List[Union[np.ndarray, float, int]]:\n \"\"\"\n Sample images from a perturbation distribution by masking randomly chosen superpixels\n from the original image and replacing them with pixel values from superimposed images\n if background images are provided to the explainer. 
Otherwise, the superpixels from the\n original image are replaced with their average values.\n\n Parameters\n ----------\n anchor\n - ``int`` - order of anchor in the batch.\n - ``tuple`` - features (= superpixels) present in the proposed anchor.\n num_samples\n Number of samples used.\n compute_labels\n If ``True``, an array of comparisons between predictions on perturbed samples and\n instance to be explained is returned.\n\n Returns\n -------\n If ``compute_labels=True``, a list containing the following is returned\n\n - `covered_true` - perturbed examples where the anchor applies and the model prediction on perturbed is the \\\n same as the instance prediction.\n\n - `covered_false` - perturbed examples where the anchor applies and the model prediction on pertrurbed sample \\\n is NOT the same as the instance prediction.\n\n - `labels` - `num_samples` ints indicating whether the prediction on the perturbed sample matches (1) \\\n the label of the instance to be explained or not (0).\n\n - `data` - Matrix with 1s and 0s indicating whether the values in a superpixel will remain unchanged (1) or \\\n will be perturbed (0), for each sample.\n\n - `1.0` - indicates exact coverage is not computed for this algorithm.\n\n - `anchor[0]` - position of anchor in the batch request\n\n Otherwise, a list containing the data matrix only is returned.\n \"\"\"\n\n if compute_labels:\n raw_data, data = self.perturbation(anchor[1], num_samples)\n labels = self.compare_labels(raw_data)\n covered_true = raw_data[labels][: self.n_covered_ex]\n covered_true = [scale_image(img) for img in covered_true]\n covered_false = raw_data[np.logical_not(labels)][: self.n_covered_ex]\n covered_false = [scale_image(img) for img in covered_false]\n # coverage set to -1.0 as we can't compute 'true'coverage for this model\n\n return [covered_true, covered_false, labels.astype(int), data, -1.0, anchor[0]] # type: ignore\n\n else:\n data = self._choose_superpixels(num_samples)\n data[:, anchor[1]] = 1 # superpixels in candidate anchor are not perturbed\n\n return [data]\n\n def compare_labels(self, samples: np.ndarray) -> np.ndarray:\n \"\"\"\n Compute the agreement between a classifier prediction on an instance to be explained\n and the prediction on a set of samples which have a subset of perturbed superpixels.\n\n Parameters\n ----------\n samples\n Samples whose labels are to be compared with the instance label.\n\n Returns\n -------\n A boolean array indicating whether the prediction was the same as the instance label.\n \"\"\"\n\n return self.predictor(samples) == self.instance_label\n\n def _choose_superpixels(\n self, num_samples: int, p_sample: float = 0.5\n ) -> np.ndarray:\n \"\"\"\n Generates a binary mask of dimension [num_samples, M] where M is the number of\n image superpixels (segments).\n\n Parameters\n ----------\n num_samples\n Number of perturbed images to be generated\n p_sample:\n The probability that a superpixel is perturbed\n\n Returns\n -------\n data\n Binary 2D mask, where each non-zero entry in a row indicates that\n the values of the particular image segment will not be perturbed.\n \"\"\"\n\n n_features = len(self.segment_labels)\n data = np.random.choice(\n [0, 1], num_samples * n_features, p=[p_sample, 1 - p_sample]\n )\n data = data.reshape((num_samples, n_features))\n\n return data\n\n def perturbation(\n self, anchor: tuple, num_samples: int\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Perturbs an image by altering the values of selected superpixels. 
If a dataset of image\n backgrounds is provided to the explainer, then the superpixels are replaced with the\n equivalent superpixels from the background image. Otherwise, the superpixels are replaced\n by their average value.\n\n Parameters\n ----------\n anchor:\n Contains the superpixels whose values are not going to be perturbed.\n num_samples:\n Number of perturbed samples to be returned.\n\n Returns\n -------\n imgs\n A `[num_samples, H, W, C]` array of perturbed images.\n segments_mask\n A `[num_samples, M]` binary mask, where `M` is the number of image superpixels\n segments. 1 indicates the values in that particular superpixels are not\n perturbed.\n \"\"\"\n\n image = self.image\n segments = self.segments\n backgrounds: Union[np.ndarray, List[None]]\n\n # choose superpixels to be perturbed\n segments_mask = self._choose_superpixels(num_samples, p_sample=self.p_sample)\n segments_mask[:, anchor] = 1\n\n # for each sample, need to sample one of the background images if provided\n if self.images_background is not None:\n backgrounds = np.random.choice(\n range(len(self.images_background)),\n segments_mask.shape[0],\n replace=True,\n )\n else:\n backgrounds = [None] * segments_mask.shape[0]\n # create fudged image where the pixel value in each superpixel is set to the\n # average over the superpixel for each channel\n fudged_image = image.copy()\n n_channels = image.shape[-1]\n for x in np.unique(segments):\n fudged_image[segments == x] = [\n np.mean(image[segments == x][:, i]) for i in range(n_channels)\n ]\n\n pert_imgs = []\n for mask, background_idx in zip(segments_mask, backgrounds):\n temp = copy.deepcopy(image)\n to_perturb = np.where(mask == 0)[0]\n # create mask for each superpixel not present in the sample\n mask = np.zeros(segments.shape).astype(bool)\n for superpixel in to_perturb:\n mask[segments == superpixel] = True\n if background_idx is not None:\n # replace values with those of background image\n temp[mask] = self.images_background[background_idx][mask] # type: ignore[index]\n else:\n # ... or with the averaged superpixel value\n temp[mask] = fudged_image[mask]\n pert_imgs.append(temp)\n\n return np.array(pert_imgs), segments_mask\n\n def generate_superpixels(self, image: np.ndarray) -> np.ndarray:\n \"\"\"\n Generates superpixels from (i.e., segments) an image.\n\n Parameters\n ----------\n image\n A grayscale or RGB image.\n\n Returns\n -------\n A `[H, W]` array of integers. 
Each integer is a segment (superpixel) label.\n \"\"\"\n\n image_preproc = self._preprocess_img(image)\n\n return self.segmentation_fn(image_preproc)\n\n def _preprocess_img(self, image: np.ndarray) -> np.ndarray:\n \"\"\"\n Applies necessary transformations to the image prior to segmentation.\n\n Parameters\n ----------\n image\n A grayscale or RGB image.\n\n Returns\n -------\n A preprocessed image.\n \"\"\"\n\n # Grayscale images are repeated across channels\n if not self.custom_segmentation and image.shape[-1] == 1:\n image_preproc = np.repeat(image, 3, axis=2)\n else:\n image_preproc = image.copy()\n\n return image_preproc\n\n\nclass AnchorImage(Explainer):\n def __init__(self,\n predictor: Callable[[np.ndarray], np.ndarray],\n image_shape: tuple,\n dtype: Type[np.generic] = np.float32,\n segmentation_fn: Any = 'slic',\n segmentation_kwargs: Optional[dict] = None,\n images_background: Optional[np.ndarray] = None,\n seed: Optional[int] = None) -> None:\n \"\"\"\n Initialize anchor image explainer.\n\n Parameters\n ----------\n predictor\n A callable that takes a `numpy` array of `N` data points as inputs and returns `N` outputs.\n image_shape\n Shape of the image to be explained. The channel axis is expected to be last.\n dtype\n A `numpy` scalar type that corresponds to the type of input array expected by `predictor`. This may be\n used to construct arrays of the given type to be passed through the `predictor`. For most use cases\n this argument should have no effect, but it is exposed for use with predictors that would break when\n called with an array of unsupported type.\n segmentation_fn\n Any of the built in segmentation function strings: ``'felzenszwalb'``, ``'slic'`` or ``'quickshift'`` or\n a custom segmentation function (callable) which returns an image mask with labels for each superpixel.\n See http://scikit-image.org/docs/dev/api/skimage.segmentation.html for more info.\n segmentation_kwargs\n Keyword arguments for the built in segmentation functions.\n images_background\n Images to overlay superpixels on.\n seed\n If set, ensures different runs with the same input will yield same explanation.\n\n Raises\n ------\n :py:class:`alibi.exceptions.AlibiPredictorCallException`\n If calling `predictor` fails at runtime.\n :py:class:`alibi.exceptions.AlibiPredictorReturnTypeError`\n If the return type of `predictor` is not `np.ndarray`.\n \"\"\"\n super().__init__(meta=copy.deepcopy(DEFAULT_META_ANCHOR))\n np.random.seed(seed)\n\n # TODO: this logic needs improvement. We should check against a fixed set of strings\n # for built-ins instead of any `str`.\n if isinstance(segmentation_fn, str) and segmentation_kwargs is None:\n try:\n segmentation_kwargs = DEFAULT_SEGMENTATION_KWARGS[segmentation_fn]\n except KeyError:\n logger.warning(\n 'DEFAULT_SEGMENTATION_KWARGS did not contain any entry'\n 'for segmentation method {}. No kwargs will be passed to'\n 'the segmentation function!'.format(segmentation_fn)\n )\n segmentation_kwargs = {}\n elif callable(segmentation_fn) and segmentation_kwargs:\n logger.warning(\n 'Specified both a segmentation function to create superpixels and '\n 'keyword arguments for built-in segmentation functions. 
By default '\n 'the specified segmentation function will be used.'\n )\n\n # set the predictor\n self.image_shape = tuple(image_shape) # coerce lists\n self.dtype = dtype\n self.predictor = self._transform_predictor(predictor)\n\n # segmentation function is either a user-defined function or one of the values in\n fn_options = {'felzenszwalb': felzenszwalb, 'slic': slic, 'quickshift': quickshift}\n if callable(segmentation_fn):\n self.custom_segmentation = True\n self.segmentation_fn = segmentation_fn\n else:\n self.custom_segmentation = False\n self.segmentation_fn = partial(fn_options[segmentation_fn], **segmentation_kwargs) # type: ignore[arg-type]\n\n self.images_background = images_background\n # a superpixel is perturbed with prob 1 - p_sample\n self.p_sample = 0.5 # type: float\n\n # update metadata\n self.meta['params'].update(\n custom_segmentation=self.custom_segmentation,\n segmentation_kwargs=segmentation_kwargs,\n p_sample=self.p_sample,\n seed=seed,\n image_shape=self.image_shape,\n images_background=self.images_background\n )\n if not self.custom_segmentation:\n self.meta['params'].update(segmentation_fn=segmentation_fn)\n else:\n self.meta['params'].update(segmentation_fn='custom')\n\n def generate_superpixels(self, image: np.ndarray) -> np.ndarray:\n \"\"\"\n Generates superpixels from (i.e., segments) an image.\n\n Parameters\n ----------\n image\n A grayscale or RGB image.\n\n Returns\n -------\n A `[H, W]` array of integers. Each integer is a segment (superpixel) label.\n \"\"\"\n\n image_preproc = self._preprocess_img(image)\n\n return self.segmentation_fn(image_preproc)\n\n def _preprocess_img(self, image: np.ndarray) -> np.ndarray:\n \"\"\"\n Applies necessary transformations to the image prior to segmentation.\n\n Parameters\n ----------\n image\n A grayscale or RGB image.\n\n Returns\n -------\n A preprocessed image.\n \"\"\"\n\n # Grayscale images are repeated across channels\n if not self.custom_segmentation and image.shape[-1] == 1:\n image_preproc = np.repeat(image, 3, axis=2)\n else:\n image_preproc = image.copy()\n\n return image_preproc\n\n def explain(self, # type: ignore[override]\n image: np.ndarray,\n p_sample: float = 0.5,\n threshold: float = 0.95,\n delta: float = 0.1,\n tau: float = 0.15,\n batch_size: int = 100,\n coverage_samples: int = 10000,\n beam_size: int = 1,\n stop_on_first: bool = False,\n max_anchor_size: Optional[int] = None,\n min_samples_start: int = 100,\n n_covered_ex: int = 10,\n binary_cache_size: int = 10000,\n cache_margin: int = 1000,\n verbose: bool = False,\n verbose_every: int = 1,\n **kwargs: Any) -> Explanation:\n \"\"\"\n Explain instance and return anchor with metadata.\n\n Parameters\n ----------\n image\n Image to be explained.\n p_sample\n Probability for a pixel to be represented by the average value of its superpixel.\n threshold\n Minimum precision threshold.\n delta\n Used to compute `beta`.\n tau\n Margin between lower confidence bound and minimum precision of upper bound.\n batch_size\n Batch size used for sampling.\n coverage_samples\n Number of samples used to estimate coverage from during result search.\n beam_size\n The number of anchors extended at each step of new anchors construction.\n stop_on_first\n If ``True``, the beam search algorithm will return the first anchor that has satisfies the\n probability constraint.\n max_anchor_size\n Maximum number of features in result.\n min_samples_start\n Min number of initial samples.\n n_covered_ex\n How many examples where anchors apply to store for each anchor 
sampled during search\n (both examples where prediction on samples agrees/disagrees with `desired_label` are stored).\n binary_cache_size\n The result search pre-allocates `binary_cache_size` batches for storing the binary arrays\n returned during sampling.\n cache_margin\n When only ``max(cache_margin, batch_size)`` positions in the binary cache remain empty, a new cache\n of the same size is pre-allocated to continue buffering samples.\n verbose\n Display updates during the anchor search iterations.\n verbose_every\n Frequency of displayed iterations during anchor search process.\n\n Returns\n -------\n explanation\n `Explanation` object containing the anchor explaining the instance with additional metadata as attributes.\n See usage at `AnchorImage examples`_ for details.\n\n .. _AnchorImage examples:\n https://docs.seldon.io/projects/alibi/en/stable/methods/Anchors.html\n \"\"\"\n # get params for storage in meta\n params = locals()\n remove = ['image', 'self']\n for key in remove:\n params.pop(key)\n\n sampler = AnchorImageSampler(\n predictor=self.predictor,\n segmentation_fn=self.segmentation_fn,\n custom_segmentation=self.custom_segmentation,\n image=image,\n images_background=self.images_background,\n p_sample=p_sample,\n n_covered_ex=n_covered_ex,\n )\n\n # get anchors and add metadata\n mab = AnchorBaseBeam(\n samplers=[sampler],\n sample_cache_size=binary_cache_size,\n cache_margin=cache_margin,\n **kwargs)\n result = mab.anchor_beam(\n desired_confidence=threshold,\n delta=delta,\n epsilon=tau,\n batch_size=batch_size,\n coverage_samples=coverage_samples,\n beam_size=beam_size,\n stop_on_first=stop_on_first,\n max_anchor_size=max_anchor_size,\n min_samples_start=min_samples_start,\n verbose=verbose,\n verbose_every=verbose_every,\n **kwargs,\n ) # type: Any\n\n return self._build_explanation(\n image, result, sampler.instance_label, params, sampler\n )\n\n def _build_explanation(\n self,\n image: np.ndarray,\n result: dict,\n predicted_label: int,\n params: dict,\n sampler: AnchorImageSampler,\n ) -> Explanation:\n \"\"\"\n Uses the metadata returned by the anchor search algorithm together with\n the instance to be explained to build an explanation object.\n\n Parameters\n ----------\n image\n Instance to be explained.\n result\n Dictionary containing the search anchor and metadata.\n predicted_label\n Label of the instance to be explained.\n params\n Parameters passed to `:py:meth:alibi.explainers.anchor_image.AnchorImage.explain`.\n \"\"\"\n\n result['instance'] = image\n result['instances'] = np.expand_dims(image, 0)\n result['prediction'] = np.array([predicted_label])\n\n # overlay image with anchor mask\n anchor = self.overlay_mask(image, sampler.segments, result['feature'])\n exp = AnchorExplanation('image', result)\n\n # output explanation dictionary\n data = copy.deepcopy(DEFAULT_DATA_ANCHOR_IMG)\n data.update(\n anchor=anchor,\n segments=sampler.segments,\n precision=exp.precision(),\n coverage=exp.coverage(),\n raw=exp.exp_map\n )\n\n # create explanation object\n explanation = Explanation(meta=copy.deepcopy(self.meta), data=data)\n\n # params passed to explain\n explanation.meta['params'].update(params)\n return explanation\n\n def overlay_mask(self, image: np.ndarray, segments: np.ndarray, mask_features: list,\n scale: tuple = (0, 255)) -> np.ndarray:\n \"\"\"\n Overlay image with mask described by the mask features.\n\n Parameters\n ----------\n image\n Image to be explained.\n segments\n Superpixels.\n mask_features\n List with superpixels present in mask.\n 
scale\n            Pixel scale for masked image.\n\n        Returns\n        -------\n        masked_image\n            Image overlaid with mask.\n        \"\"\"\n\n        mask = np.zeros(segments.shape)\n        for f in mask_features:\n            mask[segments == f] = 1\n        image = scale_image(image, scale=scale)\n        masked_image = (image * np.expand_dims(mask, 2)).astype(int)\n\n        return masked_image\n\n    def _transform_predictor(self, predictor: Callable) -> Callable:\n        # check if predictor returns predicted class or prediction probabilities for each class\n        # if needed adjust predictor so it returns the predicted class\n        x = np.zeros((1,) + self.image_shape, dtype=self.dtype)\n        try:\n            prediction = predictor(x)\n        except Exception as e:\n            msg = f\"Predictor failed to be called on {type(x)} of shape {x.shape} and dtype {x.dtype}. \" \\\n                  f\"Check that the parameter `image_shape` is correctly specified.\"\n            raise AlibiPredictorCallException(msg) from e\n\n        if not isinstance(prediction, np.ndarray):\n            msg = f\"Expected predictor return type to be {np.ndarray} but got {type(prediction)}.\"\n            raise AlibiPredictorReturnTypeError(msg)\n\n        if np.argmax(prediction.shape) == 0:\n            return predictor\n        else:\n            transformer = ArgmaxTransformer(predictor)\n            return transformer\n\n    def reset_predictor(self, predictor: Callable) -> None:\n        \"\"\"\n        Resets the predictor function.\n\n        Parameters\n        ----------\n        predictor\n            New predictor function.\n        \"\"\"\n        self.predictor = self._transform_predictor(predictor)\n",
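For orientation, here is a minimal usage sketch for the AnchorImage explainer defined above. The toy predictor, random image, and slic keyword arguments are hypothetical stand-ins; any callable mapping an [N, H, W, C] array to class probabilities (or labels) would work.

# Hypothetical end-to-end sketch: a toy two-class "model" that scores images
# by mean pixel intensity, explained on a random 28x28 grayscale image.
import numpy as np
from alibi.explainers import AnchorImage

def predictor(x: np.ndarray) -> np.ndarray:
    p = x.mean(axis=(1, 2, 3))             # one score per image
    return np.stack([1.0 - p, p], axis=1)  # (N, 2) class probabilities

explainer = AnchorImage(
    predictor,
    image_shape=(28, 28, 1),
    segmentation_fn='slic',
    segmentation_kwargs={'n_segments': 15, 'compactness': 20},
    seed=0,
)
image = np.random.rand(28, 28, 1).astype(np.float32)
explanation = explainer.explain(image, threshold=0.95, p_sample=0.5)
print(explanation.data['precision'], explanation.data['coverage'])

Because the toy predictor returns a (N, 2) probability array, _transform_predictor wraps it in ArgmaxTransformer so the beam search sees hard labels.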
"\"\"\"\nThis module contains the Tensorflow implementation of models used for the Counterfactual with Reinforcement Learning\nexperiments for both data modalities (image and tabular).\n\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom typing import List\n\n\nclass MNISTClassifier(keras.Model):\n \"\"\"\n MNIST classifier used in the experiments for Counterfactual with Reinforcement Learning. The model consists of two\n convolutional layers having 64 and 32 channels and a kernel size of 2 with ReLU nonlinearities, followed by\n maxpooling of size 2 and dropout of 0.3. The convolutional block is followed by a fully connected layer of 256 with\n ReLU nonlinearity, and finally a fully connected layer is used to predict the class logits (10 in MNIST case).\n \"\"\"\n\n def __init__(self, output_dim: int = 10, **kwargs) -> None:\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n output_dim\n Output dimension\n \"\"\"\n super().__init__(**kwargs)\n\n self.conv1 = keras.layers.Conv2D(64, 2, padding=\"same\", activation=\"relu\")\n self.maxpool1 = keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)\n self.dropout1 = keras.layers.Dropout(0.3)\n self.conv2 = keras.layers.Conv2D(32, 2, padding=\"same\", activation=\"relu\")\n self.maxpool2 = keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)\n self.dropout2 = keras.layers.Dropout(0.3)\n self.flatten = keras.layers.Flatten()\n self.fc1 = keras.layers.Dense(256, activation=\"relu\")\n self.fc2 = keras.layers.Dense(output_dim)\n\n def call(self, x: tf.Tensor, training: bool = True, **kwargs) -> tf.Tensor:\n \"\"\"\n Forward pass.\n\n Parameters\n ----------\n x\n Input tensor.\n training\n Training flag.\n **kwargs\n Other arguments. Not used.\n\n Returns\n -------\n Classification logits.\n \"\"\"\n x = self.dropout1(self.maxpool1(self.conv1(x)), training=training)\n x = self.dropout2(self.maxpool2(self.conv2(x)), training=training)\n x = self.fc2(self.fc1(self.flatten(x)))\n return x\n\n\nclass MNISTEncoder(keras.Model):\n \"\"\"\n MNIST encoder used in the experiments for the Counterfactual with Reinforcement Learning. The model\n consists of 3 convolutional layers having 16, 8 and 8 channels and a kernel size of 3, with ReLU nonlinearities.\n Each convolutional layer is followed by a maxpooling layer of size 2. Finally, a fully connected layer\n follows the convolutional block with a tanh nonlinearity. The tanh clips the output between [-1, 1], required\n in the DDPG algorithm (e.g., [act_low, act_high]). The embedding dimension used in the paper is 32, although\n this can vary.\n \"\"\"\n\n def __init__(self, latent_dim: int, **kwargs) -> None:\n \"\"\"\n Constructor.\n\n Parameters\n ----------\n latent_dim\n Latent dimension.\n \"\"\"\n super().__init__(**kwargs)\n\n self.conv1 = keras.layers.Conv2D(16, 3, padding=\"same\", activation=\"relu\")\n self.maxpool1 = keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)\n self.conv2 = keras.layers.Conv2D(8, 3, padding=\"same\", activation=\"relu\")\n self.maxpool2 = keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)\n self.conv3 = keras.layers.Conv2D(8, 3, padding=\"same\", activation=\"relu\")\n self.maxpool3 = keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)\n self.flatten = keras.layers.Flatten()\n self.fc1 = keras.layers.Dense(latent_dim, activation='tanh')\n\n def call(self, x: tf.Tensor, **kwargs) -> tf.Tensor:\n \"\"\"\n Forward pass.\n\n Parameters\n ----------\n x\n Input tensor.\n **kwargs\n Other arguments. 
Not used.\n\n        Returns\n        -------\n        Encoding representation having each component in the interval [-1, 1].\n        \"\"\"\n        x = self.maxpool1(self.conv1(x))\n        x = self.maxpool2(self.conv2(x))\n        x = self.maxpool3(self.conv3(x))\n        x = self.fc1(self.flatten(x))\n        return x\n\n\nclass MNISTDecoder(keras.Model):\n    \"\"\"\n    MNIST decoder used in the Counterfactual with Reinforcement Learning experiments. The model consists of a fully\n    connected layer of 128 units with ReLU activation followed by a convolutional block. The convolutional block\n    consists of 4 convolutional layers having 8, 8, 8 and 1 channels and a kernel size of 3. Each convolutional layer,\n    except the last one, has ReLU nonlinearities and is followed by an up-sampling layer of size 2. The final layer\n    uses a sigmoid activation to clip the output values in [0, 1].\n    \"\"\"\n\n    def __init__(self, **kwargs) -> None:\n        \"\"\" Constructor. \"\"\"\n        super().__init__(**kwargs)\n\n        self.fc1 = keras.layers.Dense(128, activation=\"relu\")\n        self.reshape = keras.layers.Reshape((4, 4, 8))\n        self.conv1 = keras.layers.Conv2D(8, (3, 3), padding=\"same\", activation=\"relu\")\n        self.up1 = keras.layers.UpSampling2D(size=(2, 2))\n        self.conv2 = keras.layers.Conv2D(8, (3, 3), padding=\"same\", activation=\"relu\")\n        self.up2 = keras.layers.UpSampling2D(size=(2, 2))\n        self.conv3 = keras.layers.Conv2D(8, (3, 3), padding=\"valid\", activation=\"relu\")\n        self.up3 = keras.layers.UpSampling2D(size=(2, 2))\n        self.conv4 = keras.layers.Conv2D(1, (3, 3), padding=\"same\", activation=\"sigmoid\")\n\n    def call(self, x: tf.Tensor, **kwargs) -> tf.Tensor:\n        \"\"\"\n        Forward pass.\n\n        Parameters\n        ----------\n        x\n            Input tensor.\n        **kwargs\n            Other arguments. Not used.\n\n        Returns\n        -------\n        Decoded input having each component in the interval [0, 1].\n        \"\"\"\n        x = self.reshape(self.fc1(x))\n        x = self.up1(self.conv1(x))\n        x = self.up2(self.conv2(x))\n        x = self.up3(self.conv3(x))\n        x = self.conv4(x)\n        return x\n\n\nclass ADULTEncoder(keras.Model):\n    \"\"\"\n    ADULT encoder used in the Counterfactual with Reinforcement Learning experiments. The model consists of\n    two fully connected layers with ReLU and tanh nonlinearities. The tanh nonlinearity clips the embedding in [-1, 1]\n    as required in the DDPG algorithm (e.g., [act_low, act_high]). The layers' dimensions used in the paper are\n    128 and 15, although those can vary as they were selected to generalize across many datasets.\n    \"\"\"\n\n    def __init__(self, hidden_dim: int, latent_dim: int, **kwargs):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        hidden_dim\n            Hidden dimension.\n        latent_dim\n            Latent dimension.\n        \"\"\"\n        super().__init__(**kwargs)\n        self.fc1 = keras.layers.Dense(hidden_dim)\n        self.fc2 = keras.layers.Dense(latent_dim)\n\n    def call(self, x: tf.Tensor, **kwargs) -> tf.Tensor:\n        \"\"\"\n        Forward pass.\n\n        Parameters\n        ----------\n        x\n            Input tensor.\n        **kwargs\n            Other arguments. Not used.\n\n        Returns\n        -------\n        Encoding representation having each component in the interval [-1, 1].\n        \"\"\"\n        x = tf.nn.relu(self.fc1(x))\n        x = tf.nn.tanh(self.fc2(x))\n        return x\n\n\nclass ADULTDecoder(keras.Model):\n    \"\"\"\n    ADULT decoder used in the Counterfactual with Reinforcement Learning experiments. The model consists\n    of a fully connected layer with ReLU nonlinearity, and a multiheaded layer, one for each categorical feature and\n    a single head for the rest of numerical features. 
The hidden dimension used in the paper is 128.\n    \"\"\"\n\n    def __init__(self, hidden_dim: int, output_dims: List[int], **kwargs):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        hidden_dim\n            Hidden dimension.\n        output_dims\n            List of output dimensions.\n        \"\"\"\n        super().__init__(**kwargs)\n\n        self.fc1 = keras.layers.Dense(hidden_dim)\n        self.fcs = [keras.layers.Dense(dim) for dim in output_dims]\n\n    def call(self, x: tf.Tensor, **kwargs) -> List[tf.Tensor]:\n        \"\"\"\n        Forward pass.\n\n        Parameters\n        ----------\n        x\n            Input tensor.\n        **kwargs\n            Other arguments. Not used.\n\n        Returns\n        -------\n        List of reconstructions of the input tensor. First element corresponds to the reconstruction of all the \\\n        numerical features if they exist, and the rest of the elements correspond to each categorical feature.\n        \"\"\"\n        x = tf.nn.relu(self.fc1(x))\n        xs = [fc(x) for fc in self.fcs]\n        return xs\n"
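As a quick sanity check on the MNIST autoencoder pair above, the following sketch traces tensor shapes through one encode/decode round trip; the batch size and random input are illustrative only.

# Shape check for MNISTEncoder/MNISTDecoder as defined above.
import tensorflow as tf

encoder = MNISTEncoder(latent_dim=32)
decoder = MNISTDecoder()

x = tf.random.uniform((4, 28, 28, 1))  # batch of 4 MNIST-sized images
z = encoder(x)                         # (4, 32); tanh keeps components in [-1, 1]
x_rec = decoder(z)                     # (4, 28, 28, 1); sigmoid keeps components in [0, 1]
# Decoder path: Dense(128) -> reshape (4, 4, 8) -> conv/upsample (8, 8, 8)
# -> conv/upsample (16, 16, 8) -> 'valid' conv (14, 14, 8) -> upsample (28, 28, 8)
# -> final 1-channel 'same' conv (28, 28, 1).

The 'valid' padding in the third convolution is what brings 16x16 down to 14x14 so that the last upsampling lands exactly on 28x28.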
] | [
[
"numpy.logical_not",
"numpy.expand_dims",
"numpy.random.seed",
"numpy.random.choice",
"numpy.unique",
"numpy.argmax",
"numpy.mean",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.where"
],
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
kpe/tensor2tensor | [
"453c473030c354a3d9a4c27b12bcec8942334bf4",
"453c473030c354a3d9a4c27b12bcec8942334bf4",
"453c473030c354a3d9a4c27b12bcec8942334bf4",
"453c473030c354a3d9a4c27b12bcec8942334bf4",
"453c473030c354a3d9a4c27b12bcec8942334bf4",
"453c473030c354a3d9a4c27b12bcec8942334bf4",
"453c473030c354a3d9a4c27b12bcec8942334bf4",
"453c473030c354a3d9a4c27b12bcec8942334bf4"
] | [
"tensor2tensor/models/research/moe.py",
"tensor2tensor/data_generators/algorithmic_math_test.py",
"tensor2tensor/data_generators/problem_test.py",
"tensor2tensor/rl/restarter_test.py",
"tensor2tensor/models/research/transformer_aux.py",
"tensor2tensor/data_generators/conll_ner.py",
"tensor2tensor/v2/t2t.py",
"tensor2tensor/data_generators/wiki_revision.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Mixture-of-experts code.\n\nInterfaces and algorithms are under development and subject to rapid change\nwithout notice.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport mesh_tensorflow as mtf\nimport tensorflow as tf\n\n\ndef transformer_moe_layer_v1(inputs, output_dim, hparams, train,\n master_dtype=tf.bfloat16,\n slice_dtype=tf.float32):\n \"\"\"Local mixture of experts that works well on TPU.\n\n Adapted from the paper https://arxiv.org/abs/1701.06538\n\n Note: until the algorithm and inferface solidify, we pass in a hyperparameters\n dictionary in order not to complicate the interface in mtf_transformer.py .\n Once this code moves out of \"research\", we should pass the hyperparameters\n separately.\n\n Hyperparameters used:\n hparams.moe_num_experts: number of experts\n hparams.moe_hidden_size: size of hidden layer in each expert\n hparams.moe_group_size: size of each \"group\" for gating purposes\n hparams.moe_capacity_factor_train: a float\n hparams.moe_capacity_factor_eval: a float\n hparams.moe_gating: a string\n + all hyperparmeters used by _top_2_gating()\n\n The number of parameters in the gating network is:\n (input_dim.size * hparams.num_experts) +\n\n The number of parameters in the experts themselves is:\n (hparams.num_experts\n * (input_dim.size + output_dim.size)\n * hparams.moe_hidden_size)\n\n The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting\n of the representations of all positions in a batch of sequences.\n\n Each position of each sequence is sent to 0-2 experts. The expert\n choices and the combination weights are determined by a learned gating\n function.\n\n This function returns a small auxiliary loss that should be added to the\n training loss of the model. This loss helps to balance expert usage.\n Without the loss, it is very likely that a few experts will be trained and\n the rest will starve.\n\n Several hacks are necessary to get around current TPU limitations:\n\n - To ensure static shapes, we enforce (by truncation/padding)\n that each sequence send the same number of elements to each expert.\n\n It would make more sense to enforce this equality over the entire batch,\n but due to our hacked-up gather-by-matmul implementation, we need to divide\n the batch into \"groups\". For each group, the same number of elements\n are sent to each expert.\n\n TODO(noam): Factor this code better. 
We want to be able to substitute\n different code for the experts themselves.\n\n Args:\n inputs: a mtf.Tensor with shape [<batch_dims...>, length_dim, input_dim]\n output_dim: a mtf.Dimension (for Transformer, this is input_dim)\n hparams: model hyperparameters\n train: a boolean\n master_dtype: a tf.dtype\n slice_dtype: a tf.dtype\n\n Returns:\n outputs: a Tensor with shape [<batch_dims...>, length_dim, output_dim]\n loss: a mtf scalar\n\n Raises:\n ValueError: on unrecognized hparams.moe_gating\n \"\"\"\n orig_inputs = inputs\n input_dim = inputs.shape.dims[-1]\n hidden_dim = mtf.Dimension(\"expert_hidden\", hparams.moe_hidden_size)\n experts_dim = mtf.Dimension(\"experts\", hparams.moe_num_experts)\n group_size_dim = mtf.Dimension(\"group\", hparams.moe_group_size)\n batch_dim = mtf.Dimension(\n orig_inputs.shape[0].name,\n orig_inputs.shape.size // (group_size_dim.size * input_dim.size))\n inputs = mtf.reshape(inputs, [batch_dim, group_size_dim, input_dim])\n\n # Each sequence sends expert_capacity positions to each expert.\n capacity_factor = (\n hparams.moe_capacity_factor_train if train else\n hparams.moe_capacity_factor_eval)\n expert_capacity = min(\n group_size_dim.size,\n int((group_size_dim.size * capacity_factor) / experts_dim.size))\n expert_capacity_dim = mtf.Dimension(\"expert_capacity\", expert_capacity)\n\n experts_dim_unsplit = mtf.Dimension(\"expert_unsplit\", experts_dim.size)\n batch_dim_unsplit = mtf.Dimension(\"batch_unsplit\", batch_dim.size)\n\n if hparams.moe_gating == \"top_2\":\n dispatch_tensor, combine_tensor, loss = _top_2_gating(\n inputs=inputs,\n outer_expert_dims=None,\n experts_dim=experts_dim_unsplit,\n expert_capacity_dim=expert_capacity_dim,\n hparams=hparams,\n train=train)\n else:\n raise ValueError(\"unknown hparams.moe_gating=%s\" % hparams.moe_gating)\n\n # put num_experts dimension first to make split easier in alltoall\n expert_inputs = mtf.einsum([inputs, dispatch_tensor], mtf.Shape(\n [experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))\n\n expert_inputs = mtf.reshape(expert_inputs, mtf.Shape(\n [experts_dim, batch_dim_unsplit, expert_capacity_dim, input_dim]))\n\n # Now feed the expert inputs through the experts.\n h = mtf.layers.dense(\n expert_inputs, hidden_dim, expert_dims=[experts_dim],\n activation=mtf.relu, use_bias=False, master_dtype=master_dtype,\n slice_dtype=slice_dtype, name=\"x0\")\n expert_output = mtf.layers.dense(\n h, output_dim, expert_dims=[experts_dim], use_bias=False,\n master_dtype=master_dtype, slice_dtype=slice_dtype, name=\"x1\")\n\n expert_output = mtf.reshape(expert_output, mtf.Shape(\n [experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim]))\n\n output = mtf.einsum([expert_output, combine_tensor], mtf.Shape(\n [batch_dim, group_size_dim, output_dim]))\n\n output = mtf.reshape(output, orig_inputs.shape.dims[:-1] + [output_dim])\n\n return output, loss * hparams.moe_loss_coef\n\n\ndef transformer_moe_layer_v2(inputs, output_dim, hparams, train,\n master_dtype=tf.bfloat16, slice_dtype=tf.float32):\n \"\"\"2-level mixture of experts.\n\n Adapted from the paper https://arxiv.org/abs/1701.06538\n\n Note: until the algorithm and inferface solidify, we pass in a hyperparameters\n dictionary in order not to complicate the interface in mtf_transformer.py .\n Once this code moves out of \"research\", we should pass the hyperparameters\n separately.\n\n Hyperparameters used:\n hparams.moe_num_experts: number of experts\n hparams.moe_hidden_size: size of hidden layer in each expert\n 
hparams.moe_group_size: size of each \"group\" for gating purposes\n hparams.moe_capacity_factor_train: a float\n hparams.moe_capacity_factor_eval: a float\n hparams.moe_capacity_factor_second_level: a float\n hparams.moe_gating: a string\n + all hyperparmeters used by _top_2_gating()\n\n One set of params for experts in first level and different of hparams\n per expert in the second level.\n The number of parameters in the gating network is:\n (input_dim.size * (hparams.num_experts) +\n (moe_hidden_size * hparams.num_experts) * hparams.num_experts\n\n\n The number of parameters in the experts themselves is:\n (hparams.num_experts\n * (input_dim.size + output_dim.size)\n * hparams.moe_hidden_size)\n\n The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting\n of the representations of all positions in a batch of sequences.\n\n Each position of each sequence is sent to 0-3 experts. The expert\n choices and the combination weights are determined by a learned gating\n function.\n\n This function returns a small auxiliary loss that should be added to the\n training loss of the model. This loss helps to balance expert usage.\n Without the loss, it is very likely that a few experts will be trained and\n the rest will starve.\n\n Several hacks are necessary to get around current TPU limitations:\n\n - To ensure static shapes, we enforce (by truncation/padding)\n that each sequence send the same number of elements to each expert.\n\n It would make more sense to enforce this equality over the entire batch,\n but due to our hacked-up gather-by-matmul implementation, we need to divide\n the batch into \"groups\". For each group, the same number of elements\n are sent to each expert.\n\n TODO(noam): Factor this code better. We want to be able to substitute\n different code for the experts themselves.\n\n Dimensions cheat sheet:\n a, b: batch size\n l: original sequence length\n m: input depth\n n: output depth\n g, h: number of groups\n s, t: group size\n x, y: number of experts\n c, d: expert capacity\n\n input: [a0, b1, l, m]\n input: [a0, g1, s, m]\n dispatch_tensor_x: [a0, g1, s, x, c]\n expert_input: [a0, g1, x, c, m]\n alltoall: [a0, g, x1, c, m]\n alltoall: [a0, g, x1, c, m]\n transpose: [x1, a0, g, c, m]\n reshape: [x1, h0, s, m]\n assignment2: [x1, h0, t, y, d]\n expert_input2: [x1, h0, y, d, m]\n alltoall: [x1, h, y0, d, m]\n ...\n reverse of that\n\n gating params 0: [m, x]\n gating params 1: [x1, m, y]\n\n expert params:\n [x1, y0, m, hidden]\n [x1, y0, hidden, n]\n\n Args:\n inputs: a mtf.Tensor with shape [a, b, l, m]\n output_dim: a mtf.Dimension (for Transformer, this is input_dim)\n hparams: model hyperparameters\n train: a boolean\n master_dtype: a tf.dtype\n slice_dtype: a tf.dtype\n\n Returns:\n outputs: a Tensor with shape [a, b, l, n]\n loss: a mtf scalar\n\n Raises:\n ValueError: on unrecognized hparams.moe_gating\n \"\"\"\n insert_outer_batch_dim = (len(inputs.shape.dims) == 3)\n if insert_outer_batch_dim:\n inputs = mtf.reshape(\n inputs, [mtf.Dimension(\"outer_batch\", 1)] + inputs.shape.dims)\n\n assert len(hparams.moe_num_experts) == 2\n a0, b1, l, m = inputs.shape.dims\n hidden_dim = mtf.Dimension(\"expert_hidden\", hparams.moe_hidden_size)\n x1 = mtf.Dimension(\"expert_x\", hparams.moe_num_experts[0])\n y0 = mtf.Dimension(\"expert_y\", hparams.moe_num_experts[1])\n x = mtf.Dimension(\"expert_x_unsplit\", hparams.moe_num_experts[0])\n y = mtf.Dimension(\"expert_y_unsplit\", hparams.moe_num_experts[1])\n n = output_dim\n\n # We \"cheat\" here and look at 
the mesh shape and layout. This is to ensure\n # that the number of groups (g.size) is a multiple of the mesh dimension\n # over which those groups are split.\n num_groups, group_size = _split_into_groups(\n b1.size * l.size, hparams.moe_group_size,\n mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, b1))\n g1 = mtf.Dimension(b1.name, num_groups)\n g = mtf.Dimension(b1.name + \"_unsplit\", g1.size)\n s = mtf.Dimension(\"group_size_x\", group_size)\n\n # Each sequence sends (at most?) expert_capacity positions to each expert.\n # Static expert_capacity dimension is needed for expert batch sizes\n capacity_factor = (\n hparams.moe_capacity_factor_train if train else\n hparams.moe_capacity_factor_eval)\n expert_capacity = min(s.size, int((s.size * capacity_factor) / x.size))\n expert_capacity = max(expert_capacity, 4)\n c = mtf.Dimension(\"expert_capacity_x\", expert_capacity)\n\n # We \"cheat\" here and look at the mesh shape and layout. This is to ensure\n # that the number of groups (h.size) is a multiple of the mesh dimension\n # over which those groups are split.\n num_groups, group_size = _split_into_groups(\n a0.size * g.size * c.size,\n hparams.moe_group_size,\n mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, a0))\n t = mtf.Dimension(\"group_size_y\", group_size)\n h0 = mtf.Dimension(a0.name, num_groups)\n h = mtf.Dimension(a0.name + \"_unsplit\", h0.size)\n\n expert_capacity = min(\n t.size,\n int((t.size * hparams.moe_capacity_factor_second_level) / y.size))\n expert_capacity = max(expert_capacity, 4)\n d = mtf.Dimension(\"expert_capacity_y\", expert_capacity)\n\n # First level of expert routing\n # Reshape the inner batch size to a multiple of group_dim g1 and\n # group_size_dim s.\n inputs = mtf.reshape(inputs, [a0, g1, s, m])\n\n # Get the assignments for the first level.\n # dispatch_tensor_x has shape [a0, g1, s, x, c]\n if hparams.moe_gating == \"top_2\":\n dispatch_tensor_x, combine_tensor_x, loss_outer = _top_2_gating(\n inputs=inputs,\n outer_expert_dims=None,\n experts_dim=x,\n expert_capacity_dim=c,\n hparams=hparams,\n train=train)\n else:\n raise ValueError(\"unknown hparams.moe_gating=%s\" % hparams.moe_gating)\n\n # Now create expert_inputs based on the assignments.\n # put num_experts dimension first to make split easier in alltoall\n expert_inputs_x = mtf.einsum([inputs, dispatch_tensor_x], [x, a0, g1, c, m])\n\n # we construct an \"importance\" Tensor for the inputs to the second-level\n # gating. The importance of an input is 1.0 if it represents the\n # first-choice expert-group and 0.5 if it represents the second-choice expert\n # group. This is used by the second-level gating.\n importance = mtf.reduce_sum(combine_tensor_x, output_shape=[x, a0, g1, c])\n importance = 0.5 * (\n mtf.to_float(mtf.greater(importance, 0.5)) +\n mtf.to_float(mtf.greater(importance, 0.0)))\n\n # First level, all to all. 
Here we change the split dimension from g1 to x1.\n expert_inputs_x = mtf.reshape(expert_inputs_x, mtf.Shape(\n [x1, a0, g, c, m]))\n importance = mtf.reshape(importance, [x1, a0, g, c])\n\n # Second level of expert routing\n # Reshape the expert_inputs outer batch dim to be a multiple of group_dim h0\n # and group_size_dim t.\n inputs_y = mtf.reshape(expert_inputs_x, [x1, h0, t, m])\n importance = mtf.reshape(importance, [x1, h0, t])\n\n # Get the assignments for the second level.\n # dispatch_tensor_y has shape [x1, h0, t, y, d]\n if hparams.moe_gating == \"top_2\":\n dispatch_tensor_y, combine_tensor_y, loss_inner = _top_2_gating(\n inputs=inputs_y,\n outer_expert_dims=[x1],\n experts_dim=y,\n expert_capacity_dim=d,\n hparams=hparams,\n train=train,\n importance=importance)\n else:\n raise ValueError(\"unknown hparams.moe_gating=%s\" % hparams.moe_gating)\n\n # Now create expert_inputs based on the assignments.\n # put num_experts dimension first to make split easier in alltoall\n expert_inputs_y = mtf.einsum([inputs_y, dispatch_tensor_y], [y, x1, h0, d, m])\n\n # Second level, all to all. Here we change the split dimension from h0 to y0.\n expert_inputs_y = mtf.reshape(expert_inputs_y, mtf.Shape(\n [y0, x1, h, d, m]))\n\n hidden_output = mtf.layers.dense(\n expert_inputs_y, hidden_dim, expert_dims=[y0, x1],\n activation=mtf.relu, use_bias=False, master_dtype=master_dtype,\n slice_dtype=slice_dtype, name=\"expert0\")\n expert_output = mtf.layers.dense(\n hidden_output, output_dim, expert_dims=[y0, x1],\n use_bias=False, master_dtype=master_dtype, slice_dtype=slice_dtype,\n name=\"expert1\")\n\n # NOW COMBINE EXPERT OUTPUTS (reversing everything we have done)\n # expert_output has shape [y0, x1, h, d, n]\n\n # alltoall\n expert_output = mtf.reshape(expert_output, mtf.Shape(\n [y, x1, h0, d, n]))\n\n # combine results from inner level\n output_y = mtf.einsum([expert_output, combine_tensor_y], [x1, h0, t, n])\n\n # Reshape the combined tensor from inner level to now contain outer_batch_dim\n # a0 and group_dim g\n output = mtf.reshape(output_y, [x1, a0, g, c, n])\n\n # alltoall from expert_dim x to group_dim g1\n expert_output_x = mtf.reshape(output, mtf.Shape([x, a0, g1, c, n]))\n\n # combine results from outer level\n output_x = mtf.einsum([expert_output_x, combine_tensor_x], [a0, g1, s, n])\n\n # Reshape the combined tensor to now contain inner_batch_dim\n # b1 and the original sequence length\n output = mtf.reshape(output_x, [a0, b1, l, n])\n if insert_outer_batch_dim:\n output = mtf.reshape(output, [b1, l, n])\n return output, (loss_outer + loss_inner) * hparams.moe_loss_coef\n\n\ndef _top_2_gating(\n inputs, outer_expert_dims, experts_dim, expert_capacity_dim,\n hparams, train, importance=None):\n \"\"\"Compute gating for mixture-of-experts in TensorFlow.\n\n Note: until the algorithm and inferface solidify, we pass in a hyperparameters\n dictionary in order not to complicate the interface in mtf_transformer.py .\n Once this code moves out of \"research\", we should pass the hyperparameters\n separately.\n\n Hyperparameters used:\n hparams.moe_use_second_place_loss: a boolean\n hparams.moe_second_policy_train: a string\n hparams.moe_second_policy_eval: a string\n hparams.moe_second_threshold: a float\n\n The returned forward assignment is a tensor used to map (via einsum) from the\n inputs to the expert_inputs. Likewise, the returned combine_tensor is\n used to map (via einsum) from the expert outputs to the outputs. Both the\n forward and backward assignments are mostly zeros. 
The shapes of the tensors\n are as follows.\n\n inputs: [<batch_dims>, group_size_dim, input_dim]\n importance: [<batch_dims>, group_size_dim]\n dispatch_tensor:\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n expert_inputs:\n [<batch_dims>, experts_dim, expert_capacity_dim, input_dim]\n\n expert_outputs: [<batch_dims>, experts_dim, expert_capacity_dim, output_dim]\n combine_tensor:\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n outputs: [<batch_dims>, group_size_dim, output_dim]\n\n \"importance\" is an optional tensor with one floating-point value for each\n input vector. If the importance of an input is 1.0, then we send it to\n up to 2 experts. If 0.0 < importance < 1.0, then we send it to at most\n one expert. If importance == 0.0, then we send it to no experts.\n\n We use \"importance\" at the second-level gating function of a hierarchical\n mixture of experts. Inputs to the first-choice expert-group get importance\n 1.0. Inputs to the second-choice expert group get importance 0.5.\n Inputs that represent padding get importance 0.0.\n\n Args:\n inputs: a mtf.Tensor with shape [<batch_dims>, group_size_dim, input_dim]\n outer_expert_dims: an optional list of dimensions. This is for the case\n where we are at an inner level of a hierarchical MoE.\n experts_dim: a Dimension (the number of experts)\n expert_capacity_dim: a Dimension (number of examples per group per expert)\n hparams: model hyperparameters.\n train: a boolean\n importance: an optional tensor with shape [<batch_dims>, group_size_dim]\n\n Returns:\n dispatch_tensor: a Tensor with shape\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n combine_tensor: a Tensor with shape\n [<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]\n loss: a mtf scalar\n\n Raises:\n ValueError: on illegal hyperparameters\n \"\"\"\n group_size_dim, unused_input_dim = inputs.shape.dims[-2:]\n\n raw_gates = mtf.softmax(mtf.layers.dense(\n inputs, experts_dim, use_bias=False,\n expert_dims=outer_expert_dims), experts_dim)\n\n # The internals of this function run in float32.\n # bfloat16 seems to reduce quality.\n raw_gates = mtf.to_float(raw_gates)\n\n expert_capacity_f = float(expert_capacity_dim.size)\n\n # FIND TOP 2 EXPERTS PER POSITON\n # Find the top expert for each position. 
shape=[batch, group]\n index_1, gate_1 = mtf.top_1(raw_gates, experts_dim)\n # [batch, group, experts]\n mask_1 = mtf.one_hot(index_1, experts_dim, dtype=raw_gates.dtype)\n density_1_proxy = raw_gates\n if importance is not None:\n mask_1 *= mtf.to_float(mtf.equal(importance, 1.0))\n gate_1 *= mtf.to_float(mtf.equal(importance, 1.0))\n density_1_proxy *= mtf.to_float(mtf.equal(importance, 1.0))\n gates_without_top_1 = raw_gates * (1.0 - mask_1)\n # [batch, group]\n index_2, gate_2 = mtf.top_1(gates_without_top_1, experts_dim)\n # [batch, group, experts]\n mask_2 = mtf.one_hot(index_2, experts_dim, dtype=raw_gates.dtype)\n if importance is not None:\n mask_2 *= mtf.to_float(mtf.greater(importance, 0.0))\n\n denom = gate_1 + gate_2 + 1e-9\n gate_1 /= denom\n gate_2 /= denom\n\n # BALANCING LOSSES\n # shape = [batch, experts]\n # We want to equalize the fraction of the batch assigned to each expert\n density_1 = mtf.reduce_mean(mask_1, reduced_dim=group_size_dim)\n # Something continuous that is correlated with what we want to equalize.\n density_1_proxy = mtf.reduce_mean(density_1_proxy, reduced_dim=group_size_dim)\n density_1 = mtf.Print(\n density_1, [mtf.reduce_mean(density_1, output_shape=[experts_dim])],\n \"density_1\", summarize=1000)\n loss = (mtf.reduce_mean(density_1_proxy * density_1)\n * float(experts_dim.size * experts_dim.size))\n\n if hparams.moe_use_second_place_loss:\n # Also add a loss to encourage all experts to be used equally also as the\n # second-place expert. Experimentally, this seems to be a wash.\n # We want to equalize the fraction of the batch assigned to each expert:\n density_2 = mtf.reduce_mean(mask_2, reduced_dim=group_size_dim)\n # As a proxy for density_2, we renormalize the raw gates after the top one\n # has been removed.\n normalized = gates_without_top_1 / (\n mtf.reduce_sum(gates_without_top_1, reduced_dim=experts_dim) + 1e-9)\n density_2_proxy = mtf.reduce_mean(normalized, reduced_dim=group_size_dim)\n loss_2 = (mtf.reduce_mean(density_2_proxy * density_2)\n * float(experts_dim.size * experts_dim.size))\n loss += loss_2 * 0.5\n\n # Depending on the policy in the hparams, we may drop out some of the\n # second-place experts.\n policy = (\n hparams.moe_second_policy_train if train else\n hparams.moe_second_policy_eval)\n threshold = (\n hparams.moe_second_threshold_train if train else\n hparams.moe_second_threshold_eval)\n if policy == \"all\":\n # Use second-place experts for all examples.\n pass\n elif policy == \"none\":\n # Never use second-place experts for all examples.\n mask_2 = mtf.zeros_like(mask_2)\n elif policy == \"threshold\":\n # Use second-place experts if gate_2 > threshold.\n mask_2 *= mtf.to_float(mtf.greater(gate_2, threshold))\n elif policy == \"random\":\n # Use second-place experts with probablity min(1.0, gate_2 / threshold).\n mask_2 *= mtf.to_float(\n mtf.less(mtf.random_uniform(gate_2.mesh, gate_2.shape),\n gate_2 / max(threshold, 1e-9)))\n else:\n raise ValueError(\"Unknown policy %s\" % policy)\n mask_2 = mtf.Print(\n mask_2, [mtf.reduce_mean(mask_2, output_shape=[experts_dim])],\n \"density_2\", summarize=1000)\n\n # COMPUTE ASSIGNMENT TO EXPERTS\n # [batch, group, experts]\n # This is the position within the expert's mini-batch for this sequence\n position_in_expert_1 = mtf.cumsum(\n mask_1, group_size_dim, exclusive=True) * mask_1\n # Remove the elements that don't fit. 
[batch, group, experts]\n mask_1 *= mtf.to_float(mtf.less(position_in_expert_1, expert_capacity_f))\n # [batch, experts]\n # How many examples in this sequence go to this expert\n mask_1_count = mtf.reduce_sum(mask_1, reduced_dim=group_size_dim)\n # [batch, group] - mostly ones, but zeros where something didn't fit\n mask_1_flat = mtf.reduce_sum(mask_1, reduced_dim=experts_dim)\n # [batch, group]\n position_in_expert_1 = mtf.reduce_sum(\n position_in_expert_1, reduced_dim=experts_dim)\n # Weight assigned to first expert. [batch, group]\n gate_1 *= mask_1_flat\n\n # [batch, group, experts]\n position_in_expert_2 = (\n mtf.cumsum(mask_2, group_size_dim, exclusive=True) + mask_1_count)\n position_in_expert_2 *= mask_2\n mask_2 *= mtf.to_float(mtf.less(position_in_expert_2, expert_capacity_f))\n # mask_2_count = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)\n mask_2_flat = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)\n gate_2 *= mask_2_flat\n position_in_expert_2 = mtf.reduce_sum(\n position_in_expert_2, reduced_dim=experts_dim)\n\n # [batch, group, experts, expert_capacity]\n combine_tensor = (\n gate_1 * mask_1_flat\n * mtf.one_hot(index_1, experts_dim)\n * mtf.one_hot(mtf.to_int32(position_in_expert_1), expert_capacity_dim) +\n gate_2 * mask_2_flat\n * mtf.one_hot(index_2, experts_dim)\n * mtf.one_hot(mtf.to_int32(position_in_expert_2), expert_capacity_dim))\n\n combine_tensor = mtf.cast(combine_tensor, inputs.dtype)\n loss = mtf.cast(loss, inputs.dtype)\n\n dispatch_tensor = mtf.cast(\n mtf.cast(combine_tensor, tf.bool), combine_tensor.dtype)\n\n return dispatch_tensor, combine_tensor, loss\n\n\ndef set_default_moe_hparams(hparams):\n \"\"\"Add necessary hyperparameters for mixture-of-experts.\"\"\"\n hparams.moe_num_experts = 16\n hparams.moe_loss_coef = 1e-2\n hparams.add_hparam(\"moe_gating\", \"top_2\")\n # Experts have fixed capacity per batch. 
We need some extra capacity\n  # in case gating is not perfectly balanced.\n  # moe_capacity_factor_* should be set to a value >=1.\n  hparams.add_hparam(\"moe_capacity_factor_train\", 1.25)\n  hparams.add_hparam(\"moe_capacity_factor_eval\", 2.0)\n  hparams.add_hparam(\"moe_capacity_factor_second_level\", 1.0)\n  # Each expert has a hidden layer with this size.\n  hparams.add_hparam(\"moe_hidden_size\", 4096)\n  # Divide inputs into groups of this size before gating.\n  # Each group sends the same number of inputs to each expert.\n  # Ideally, the group size would be the whole batch, but this is expensive\n  # due to our use of matrix multiplication for reordering.\n  hparams.add_hparam(\"moe_group_size\", 1024)\n  # For top_2 gating, whether to impose an additional loss in order to make\n  # the experts equally used as the second-place expert.\n  hparams.add_hparam(\"moe_use_second_place_loss\", 0)\n  # In top_2 gating, policy for whether to use a second-place expert.\n  # Legal values are:\n  #    \"all\": always\n  #    \"none\": never\n  #    \"threshold\": if gate value > the given threshold\n  #    \"random\": if gate value > threshold*random_uniform(0,1)\n  hparams.add_hparam(\"moe_second_policy_train\", \"random\")\n  hparams.add_hparam(\"moe_second_policy_eval\", \"random\")\n  hparams.add_hparam(\"moe_second_threshold_train\", 0.2)\n  hparams.add_hparam(\"moe_second_threshold_eval\", 0.2)\n\n\ndef _split_into_groups(n, max_group_size, mesh_dim_size):\n  \"\"\"Helper function for figuring out how to split a dimension into groups.\n\n  We have a dimension with size n and we want to split it into\n  two dimensions: n = num_groups * group_size\n\n  group_size should be the largest possible value meeting the constraints:\n    group_size <= max_group_size\n    (num_groups = n/group_size) is a multiple of mesh_dim_size\n\n  Args:\n    n: an integer\n    max_group_size: an integer\n    mesh_dim_size: an integer\n\n  Returns:\n    num_groups: an integer\n    group_size: an integer\n\n  Raises:\n    ValueError: if n is not a multiple of mesh_dim_size\n  \"\"\"\n  if n % mesh_dim_size != 0:\n    raise ValueError(\n        \"n=%d is not a multiple of mesh_dim_size=%d\" % (n, mesh_dim_size))\n  num_groups = max(1, n // max_group_size)\n  while (num_groups % mesh_dim_size != 0 or n % num_groups != 0):\n    num_groups += 1\n  group_size = n // num_groups\n  tf.logging.info(\n      \"_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d)\"\n      \" = (num_groups=%d group_size=%d)\" %\n      (n, max_group_size, mesh_dim_size, num_groups, group_size))\n  return num_groups, group_size\n",
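The grouping arithmetic in _split_into_groups is easy to misread, so here is a plain-Python mirror of the same loop (a sketch without the mtf/tf.logging dependencies) together with one worked case.

def split_into_groups(n, max_group_size, mesh_dim_size):
    # mirrors _split_into_groups above, minus logging
    if n % mesh_dim_size != 0:
        raise ValueError("n=%d is not a multiple of mesh_dim_size=%d"
                         % (n, mesh_dim_size))
    num_groups = max(1, n // max_group_size)
    while num_groups % mesh_dim_size != 0 or n % num_groups != 0:
        num_groups += 1
    return num_groups, n // num_groups

# 1024 positions, at most 100 per group, split over 4 mesh devices:
# the initial guess of 10 groups is bumped up to 16, the first value that
# is both a multiple of 4 and a divisor of 1024, giving groups of size 64.
assert split_into_groups(1024, 100, 4) == (16, 64)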
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensor2tensor.data_generators.algorithmic_math.\"\"\"\n# TODO(rsepassi): This test is flaky. Disable, remove, or update.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport six\nimport sympy\nfrom tensor2tensor.data_generators import algorithmic_math\n\nimport tensorflow as tf\n\n\nclass AlgorithmicMathTest(tf.test.TestCase):\n\n def testAlgebraInverse(self):\n dataset_objects = algorithmic_math.math_dataset_init(26)\n counter = 0\n for d in algorithmic_math.algebra_inverse(26, 0, 3, 10):\n counter += 1\n decoded_input = dataset_objects.int_decoder(d[\"inputs\"])\n solve_var, expression = decoded_input.split(\":\")\n lhs, rhs = expression.split(\"=\")\n\n # Solve for the solve-var.\n result = sympy.solve(\"%s-(%s)\" % (lhs, rhs), solve_var)\n target_expression = dataset_objects.int_decoder(d[\"targets\"])\n\n # Check that the target and sympy's solutions are equivalent.\n self.assertEqual(\n 0, sympy.simplify(str(result[0]) + \"-(%s)\" % target_expression))\n self.assertEqual(counter, 10)\n\n def testAlgebraSimplify(self):\n dataset_objects = algorithmic_math.math_dataset_init(8, digits=5)\n counter = 0\n for d in algorithmic_math.algebra_simplify(8, 0, 3, 10):\n counter += 1\n expression = dataset_objects.int_decoder(d[\"inputs\"])\n target = dataset_objects.int_decoder(d[\"targets\"])\n\n # Check that the input and output are equivalent expressions.\n self.assertEqual(0, sympy.simplify(\"%s-(%s)\" % (expression, target)))\n self.assertEqual(counter, 10)\n\n def testCalculusIntegrate(self):\n dataset_objects = algorithmic_math.math_dataset_init(\n 8, digits=5, functions={\"log\": \"L\"})\n counter = 0\n for d in algorithmic_math.calculus_integrate(8, 0, 3, 10):\n counter += 1\n decoded_input = dataset_objects.int_decoder(d[\"inputs\"])\n var, expression = decoded_input.split(\":\")\n target = dataset_objects.int_decoder(d[\"targets\"])\n\n for fn_name, fn_char in six.iteritems(dataset_objects.functions):\n target = target.replace(fn_char, fn_name)\n\n # Take the derivative of the target.\n derivative = str(sympy.diff(target, var))\n\n # Check that the derivative of the integral equals the input.\n self.assertEqual(0, sympy.simplify(\"%s-(%s)\" % (expression, derivative)))\n self.assertEqual(counter, 10)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test for common problem functionalities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized # for assertLen\nimport numpy as np\n\nfrom tensor2tensor.data_generators import algorithmic\nfrom tensor2tensor.data_generators import problem as problem_module\nfrom tensor2tensor.data_generators import problem_hparams\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.utils import test_utils\n\nimport tensorflow as tf\ntf.compat.v1.enable_eager_execution()\n\n\ndef assert_tensors_equal(sess, t1, t2, n):\n \"\"\"Compute tensors `n` times and ensure that they are equal.\"\"\"\n\n for _ in range(n):\n\n v1, v2 = sess.run([t1, t2])\n\n if v1.shape != v2.shape:\n return False\n\n if not np.all(v1 == v2):\n return False\n\n return True\n\n\nclass ProblemTest(parameterized.TestCase, tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls):\n algorithmic.TinyAlgo.setup_for_test()\n\n @test_utils.run_in_graph_mode_only()\n def testNoShuffleDeterministic(self):\n problem = algorithmic.TinyAlgo()\n dataset = problem.dataset(mode=tf.estimator.ModeKeys.TRAIN,\n data_dir=algorithmic.TinyAlgo.data_dir,\n shuffle_files=False)\n\n tensor1 = dataset.make_one_shot_iterator().get_next()[\"targets\"]\n tensor2 = dataset.make_one_shot_iterator().get_next()[\"targets\"]\n\n with tf.Session() as sess:\n self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20))\n\n @test_utils.run_in_graph_mode_only()\n def testNoShufflePreprocess(self):\n\n problem = algorithmic.TinyAlgo()\n dataset1 = problem.dataset(mode=tf.estimator.ModeKeys.TRAIN,\n data_dir=algorithmic.TinyAlgo.data_dir,\n shuffle_files=False, preprocess=False)\n dataset2 = problem.dataset(mode=tf.estimator.ModeKeys.TRAIN,\n data_dir=algorithmic.TinyAlgo.data_dir,\n shuffle_files=False, preprocess=True)\n\n tensor1 = dataset1.make_one_shot_iterator().get_next()[\"targets\"]\n tensor2 = dataset2.make_one_shot_iterator().get_next()[\"targets\"]\n\n with tf.Session() as sess:\n self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20))\n\n @test_utils.run_in_graph_and_eager_modes()\n def testProblemHparamsModality(self):\n problem = problem_hparams.TestProblem(input_vocab_size=2,\n target_vocab_size=3)\n p_hparams = problem.get_hparams()\n self.assertEqual(p_hparams.modality[\"inputs\"],\n modalities.ModalityType.SYMBOL)\n self.assertEqual(p_hparams.modality[\"targets\"],\n modalities.ModalityType.SYMBOL)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testProblemHparamsInputOnlyModality(self):\n class InputOnlyProblem(problem_module.Problem):\n\n def hparams(self, defaults, model_hparams):\n hp = defaults\n hp.modality = {\"inputs\": modalities.ModalityType.SYMBOL}\n hp.vocab_size = {\"inputs\": 2}\n\n problem = InputOnlyProblem(False, False)\n p_hparams = problem.get_hparams()\n 
self.assertEqual(p_hparams.modality[\"inputs\"],\n modalities.ModalityType.SYMBOL)\n self.assertLen(p_hparams.modality, 1)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testProblemHparamsTargetOnlyModality(self):\n class TargetOnlyProblem(problem_module.Problem):\n\n def hparams(self, defaults, model_hparams):\n hp = defaults\n hp.modality = {\"targets\": modalities.ModalityType.SYMBOL}\n hp.vocab_size = {\"targets\": 3}\n\n problem = TargetOnlyProblem(False, False)\n p_hparams = problem.get_hparams()\n self.assertEqual(p_hparams.modality[\"targets\"],\n modalities.ModalityType.SYMBOL)\n self.assertLen(p_hparams.modality, 1)\n\n @test_utils.run_in_graph_and_eager_modes()\n def testDataFilenames(self):\n problem = algorithmic.TinyAlgo()\n\n num_shards = 10\n shuffled = False\n data_dir = \"/tmp\"\n\n # Test training_filepaths and data_filepaths give the same list on\n # appropriate arguments.\n self.assertAllEqual(\n problem.training_filepaths(data_dir, num_shards, shuffled),\n problem.data_filepaths(problem_module.DatasetSplit.TRAIN, data_dir,\n num_shards, shuffled))\n\n self.assertAllEqual(\n problem.dev_filepaths(data_dir, num_shards, shuffled),\n problem.data_filepaths(problem_module.DatasetSplit.EVAL, data_dir,\n num_shards, shuffled))\n\n self.assertAllEqual(\n problem.test_filepaths(data_dir, num_shards, shuffled),\n problem.data_filepaths(problem_module.DatasetSplit.TEST, data_dir,\n num_shards, shuffled))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for rl_utils.\"\"\"\n\nimport os\n\nfrom tensor2tensor.rl.restarter import Restarter\n\nimport tensorflow as tf\n\n\nTEST_MODE_1 = \"mode1\"\nTEST_MODE_2 = \"mode2\"\nTEST_NUM_STEPS = 2\n\n\nclass RestarterTest(tf.test.TestCase):\n\n def setUp(self):\n self.out_dir = tf.test.get_temp_dir()\n tf.gfile.DeleteRecursively(self.out_dir)\n tf.gfile.MkDir(self.out_dir)\n\n def create_checkpoint(self, global_step):\n checkpoint_name = \"model.ckpt-{}\".format(global_step)\n for suffix in (\"index\", \"meta\", \"data-00000-of-00001\"):\n filename = \"{}.{}\".format(checkpoint_name, suffix)\n # Just create the file.\n with tf.gfile.Open(os.path.join(self.out_dir, filename), \"w\") as f:\n f.write(\"\")\n tf.train.update_checkpoint_state(self.out_dir, checkpoint_name)\n\n def run_single_mode(self, mode, target_local_step, target_global_step):\n restarter = Restarter(mode, self.out_dir, target_local_step)\n with restarter.training_loop():\n self.create_checkpoint(target_global_step)\n\n def assert_first_run(self, restarter, steps_to_go, target_global_step):\n self.assertFalse(restarter.should_skip)\n self.assertFalse(restarter.restarting)\n self.assertEqual(restarter.steps_to_go, steps_to_go)\n self.assertEqual(restarter.target_global_step, target_global_step)\n\n def test_runs_in_single_mode(self):\n restarter = Restarter(\n TEST_MODE_1, self.out_dir, target_local_step=TEST_NUM_STEPS\n )\n self.assert_first_run(\n restarter, steps_to_go=TEST_NUM_STEPS, target_global_step=TEST_NUM_STEPS\n )\n\n def test_runs_in_two_modes(self):\n global_step = TEST_NUM_STEPS\n local_steps = {\n TEST_MODE_1: TEST_NUM_STEPS,\n TEST_MODE_2: 0\n }\n self.run_single_mode(TEST_MODE_1, local_steps[TEST_MODE_1], global_step)\n\n for mode in [TEST_MODE_2, TEST_MODE_1]:\n global_step += TEST_NUM_STEPS\n local_steps[mode] += TEST_NUM_STEPS\n restarter = Restarter(\n mode, self.out_dir, target_local_step=local_steps[mode]\n )\n self.assert_first_run(\n restarter, steps_to_go=TEST_NUM_STEPS, target_global_step=global_step\n )\n with restarter.training_loop():\n self.create_checkpoint(global_step)\n\n def test_skips_already_done(self):\n self.run_single_mode(\n TEST_MODE_1, target_local_step=TEST_NUM_STEPS,\n target_global_step=TEST_NUM_STEPS\n )\n\n restarter = Restarter(\n TEST_MODE_1, self.out_dir, target_local_step=TEST_NUM_STEPS\n )\n # We should skip the training as those steps are already completed.\n self.assertTrue(restarter.should_skip)\n\n def test_restarts_after_interruption(self):\n # Run some initial training first.\n self.run_single_mode(\n TEST_MODE_1, target_local_step=TEST_NUM_STEPS,\n target_global_step=TEST_NUM_STEPS\n )\n global_step = TEST_NUM_STEPS\n\n restarter = Restarter(\n TEST_MODE_2, self.out_dir, target_local_step=2\n )\n with self.assertRaises(RuntimeError):\n global_step += 1\n with restarter.training_loop():\n self.create_checkpoint(global_step)\n # Simulate training 
interruption after the first step.\n raise RuntimeError\n restarter = Restarter(\n TEST_MODE_2, self.out_dir, target_local_step=2\n )\n\n self.assertFalse(restarter.should_skip)\n self.assertTrue(restarter.restarting)\n # Training should resume after the first step.\n self.assertEqual(restarter.steps_to_go, 1)\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
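The tests above pin down the Restarter contract (should_skip, restarting, steps_to_go, training_loop). A training driver would typically use it along these lines; note that out_dir, train_some_steps, and save_checkpoint are hypothetical placeholders, not part of the module.

from tensor2tensor.rl.restarter import Restarter

restarter = Restarter("world_model", out_dir, target_local_step=1000)
if not restarter.should_skip:
    with restarter.training_loop():
        # steps_to_go accounts for work already checkpointed, so an
        # interrupted run resumes instead of starting over
        train_some_steps(restarter.steps_to_go)        # hypothetical helper
        save_checkpoint(restarter.target_global_step)  # hypothetical helper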
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transformer with auxiliary losses from https://arxiv.org/abs/1803.00144.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.models import transformer\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\ndef shift_and_pad(tensor, shift, axis=0):\n \"\"\"Shifts and pads with zero along an axis.\n\n Example:\n shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2]\n shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0]\n\n Args:\n tensor: Tensor; to be shifted and padded.\n shift: int; number of positions to shift by.\n axis: int; along which axis to shift and pad.\n\n Returns:\n A Tensor with the same shape as the input tensor.\n \"\"\"\n shape = tensor.shape\n rank = len(shape)\n assert 0 <= abs(axis) < rank\n\n length = int(shape[axis])\n assert 0 <= abs(shift) < length\n\n paddings = [(0, 0)] * rank\n begin = [0] * rank\n size = [-1] * rank\n\n if shift > 0:\n paddings[axis] = (shift, 0)\n size[axis] = length - shift\n elif shift < 0:\n paddings[axis] = (0, -shift)\n begin[axis] = -shift\n\n ret = tf.pad(tf.slice(tensor, begin, size), paddings)\n\n return ret\n\n\[email protected]_model\nclass TransformerAux(transformer.Transformer):\n \"\"\"Attention net. See file docstring.\"\"\"\n\n def _extract_shift_values(self):\n \"\"\"Parses the shift string.\n\n The hparams should contain the key shift_values, which maps to a\n comma-separated string of integers. These integers specify the number of\n timesteps to predict/reconstruct to compute auxiliary losses.\n\n For instance, \"-4,2,6\" means to reconstruct the target 4 steps before and\n predict the targets 2 steps and 6 steps ahead.\n\n Returns:\n List of int != 0 shift values to compute the auxiliary losses.\n \"\"\"\n shift_values_str = self._hparams.get(\"shift_values\", \"\")\n shift_values = [int(x) for x in shift_values_str.split(\",\")]\n\n tf.logging.info(\n \"Computing auxiliary losses for the following shifts: %s\",\n shift_values)\n\n return shift_values\n\n def auxiliary_loss(self, body_output, features, shift):\n \"\"\"Auxiliary predict loss.\n\n Args:\n body_output: Tensor with shape [batch_size, decoder_length, hidden_dim].\n features: Map of features to the model. 
Must contain the following:\n \"targets\": Target decoder outputs.\n [batch_size, decoder_length, 1, hidden_dim]\n shift: int != 0, amount to shift/pad the target sequence.\n If shift > 0, it represents the number of previous timesteps to\n reconstruct; if shift < 0, it represents the number of future timesteps\n to predict.\n\n Returns:\n A 2-tuple of the numerator and denominator of the cross-entropy loss.\n\n Raises:\n ValueError: if features does not contain a targets_raw tensor.\n \"\"\"\n assert isinstance(shift, int) and shift != 0\n name = \"reconst_%d\" % shift if shift > 0 else \"predict_%d\" % abs(shift)\n\n if features and \"targets_raw\" in features:\n targets = features[\"targets_raw\"]\n targets = common_layers.flatten4d3d(targets)\n else:\n raise ValueError(\n \"Feature map must contain a targets_raw tensor.\")\n\n with tf.variable_scope(name):\n logits = self.top(body_output, features)\n labels = shift_and_pad(targets, shift, axis=1)\n return common_layers.padded_cross_entropy(\n logits,\n labels,\n self._hparams.label_smoothing)\n\n def body(self, features):\n \"\"\"Transformer main model_fn.\n\n Args:\n features: Map of features to the model. Should contain the following:\n \"inputs\": Transformer inputs.\n [batch_size, input_length, 1, hidden_dim].\n \"targets\": Target decoder outputs.\n [batch_size, target_length, 1, hidden_dim]\n \"target_space_id\": A scalar int from data_generators.problem.SpaceID.\n\n Returns:\n A 2-tuple containing:\n Logit tensor. [batch_size, decoder_length, vocab_size]\n Map of keys to loss tensors. Should contain the following:\n \"training\": Training loss (shift == 0).\n \"auxiliary\": Auxiliary loss (shift != 0).\n \"\"\"\n output = super(TransformerAux, self).body(features)\n output, losses = self._normalize_body_output(output)\n\n aux = 0.0\n for shift in self._extract_shift_values():\n loss_num, loss_den = self.auxiliary_loss(output, features, shift)\n aux += loss_num / loss_den\n losses[\"auxiliary\"] = aux\n\n return output, losses\n\n\[email protected]_hparams\ndef transformer_aux_base():\n \"\"\"Set of hyperparameters.\"\"\"\n hparams = transformer.transformer_base()\n hparams.shared_embedding_and_softmax_weights = False\n hparams.add_hparam(\"shift_values\", \"1,2,3,4\")\n return hparams\n\n\[email protected]_hparams\ndef transformer_aux_tiny():\n \"\"\"Set of hyperparameters.\"\"\"\n hparams = transformer.transformer_tiny()\n hparams.shared_embedding_and_softmax_weights = False\n hparams.add_hparam(\"shift_values\", \"1,2\")\n return hparams\n",
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for CoNLL dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport zipfile\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_problems\nfrom tensor2tensor.utils import registry\nimport tensorflow as tf\n\n\[email protected]_problem\nclass Conll2002Ner(text_problems.Text2textTmpdir):\n \"\"\"Base class for CoNLL2002 problems.\"\"\"\n\n def source_data_files(self, dataset_split):\n \"\"\"Files to be passed to generate_samples.\"\"\"\n raise NotImplementedError()\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n del data_dir\n\n url = \"https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/conll2002.zip\" # pylint: disable=line-too-long\n compressed_filename = os.path.basename(url)\n compressed_filepath = os.path.join(tmp_dir, compressed_filename)\n generator_utils.maybe_download(tmp_dir, compressed_filename, url)\n\n compressed_dir = compressed_filepath.strip(\".zip\")\n\n filenames = self.source_data_files(dataset_split)\n for filename in filenames:\n filepath = os.path.join(compressed_dir, filename)\n if not tf.gfile.Exists(filepath):\n with zipfile.ZipFile(compressed_filepath, \"r\") as corpus_zip:\n corpus_zip.extractall(tmp_dir)\n with tf.gfile.GFile(filepath, mode=\"r\") as cur_file:\n words, tags = [], []\n for line in cur_file:\n line_split = line.strip().split()\n if not line_split:\n yield {\n \"inputs\": str.join(\" \", words),\n \"targets\": str.join(\" \", tags)\n }\n words, tags = [], []\n continue\n words.append(line_split[0])\n tags.append(line_split[2])\n if words:\n yield {\"inputs\": str.join(\" \", words), \"targets\": str.join(\" \", tags)}\n\n\[email protected]_problem\nclass Conll2002EsNer(Conll2002Ner):\n \"\"\"Problem spec for CoNLL2002 Spanish named entity task.\"\"\"\n TRAIN_FILES = [\"esp.train\"]\n EVAL_FILES = [\"esp.testa\", \"esp.testb\"]\n\n def source_data_files(self, dataset_split):\n is_training = dataset_split == problem.DatasetSplit.TRAIN\n return self.TRAIN_FILES if is_training else self.EVAL_FILES\n\n\[email protected]_problem\nclass Conll2002NlNer(Conll2002Ner):\n \"\"\"Problem spec for CoNLL2002 Dutch named entity task.\"\"\"\n TRAIN_FILES = [\"ned.train\"]\n EVAL_FILES = [\"ned.testa\", \"ned.testb\"]\n\n def source_data_files(self, dataset_split):\n is_training = dataset_split == problem.DatasetSplit.TRAIN\n return self.TRAIN_FILES if is_training else self.EVAL_FILES\n",
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"T2T models, configs and main training functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\n\nimport gin\n\nfrom tensor2tensor import problems\nfrom tensor2tensor.utils import data_reader\nfrom tensor2tensor.v2.models import basic\nfrom tensor2tensor.v2.models import resnet\nfrom tensor2tensor.v2.models import transformer\n\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\n\n# Since there are few models and configs for now, we use this simple registry.\n# TODO(lukaszkaiser): find a better way to do this or remove altogether.\n_MODEL_REGISTRY = {\n \"basic_fc_relu\": lambda: basic.BasicFcRelu,\n \"basic_fc_large\": basic.basic_fc_large,\n \"basic_fc_relu_v2\": lambda: basic.BasicFcReluV2,\n \"resnet\": lambda: resnet.Resnet,\n \"transformer\": transformer.transformer_base_single_gpu,\n}\n\n\ndef train_and_eval_dataset(dataset_name, data_dir):\n \"\"\"Return train and evaluation datasets, feature info and supervised keys.\n\n Args:\n dataset_name: a string, the name of the dataset; if it starts with \"v1_\"\n then we'll search T2T Problem registry for it, otherwise we assume it\n is a dataset from TFDS and load it from there.\n data_dir: directory where the data is located.\n\n Returns:\n a 4-tuple consisting of:\n * the train tf.data.Dataset\n * the eval tf.data.Dataset\n * information about features: a python dictionary with feature names\n as keys and an object as value that provides .shape and .num_classes.\n * supervised_keys: information what's the input and what's the target,\n ie., a pair of lists with input and target feature names.\n \"\"\"\n if dataset_name.startswith(\"v1_\"):\n return _train_and_eval_dataset_v1(dataset_name[3:], data_dir)\n dataset_builder = tfds.builder(dataset_name, data_dir=data_dir)\n info = dataset_builder.info\n splits = dataset_builder.info.splits\n if tfds.Split.TRAIN not in splits:\n raise ValueError(\"To train we require a train split in the dataset.\")\n if tfds.Split.VALIDATION not in splits and \"test\" not in splits:\n raise ValueError(\"We require a validation or test split in the dataset.\")\n eval_split = tfds.Split.VALIDATION\n if tfds.Split.VALIDATION not in splits:\n eval_split = tfds.Split.TEST\n train, valid = tfds.load(\n name=dataset_name, split=[tfds.Split.TRAIN, eval_split])\n keys = None\n if info.supervised_keys:\n keys = ([info.supervised_keys[0]], [info.supervised_keys[1]])\n return train, valid, info.features, keys\n\n\ndef _make_info(shape_list, num_classes):\n \"\"\"Create an info-like tuple for feature given some shapes and vocab size.\"\"\"\n feature_info = collections.namedtuple(\"FeatureInfo\", [\"shape\", \"num_classes\"])\n cur_shape = list(shape_list[0])\n # We need to merge the provided shapes, put None where they disagree.\n for shape in shape_list:\n if len(shape) != 
len(cur_shape):\n raise ValueError(\"Shapes need to have the same number of dimensions.\")\n for i in range(len(shape)):\n if cur_shape[i] is not None:\n if shape[i] != cur_shape[i]:\n cur_shape[i] = None\n return feature_info(cur_shape, num_classes)\n\n\ndef _select_features(example, feature_list=None):\n \"\"\"Select a subset of features from the example dict.\"\"\"\n feature_list = feature_list or [\"inputs\", \"targets\"]\n return {f: example[f] for f in feature_list}\n\n\ndef _train_and_eval_dataset_v1(problem_name, data_dir):\n \"\"\"Return train and evaluation datasets, feature info and supervised keys.\"\"\"\n problem = problems.problem(problem_name)\n train_dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, data_dir)\n train_dataset = train_dataset.map(_select_features)\n eval_dataset = problem.dataset(tf.estimator.ModeKeys.EVAL, data_dir)\n eval_dataset = eval_dataset.map(_select_features)\n supervised_keys = ([\"inputs\"], [\"targets\"])\n hparams = problem.get_hparams()\n # We take a few training examples to guess the shapes.\n input_shapes, target_shapes = [], []\n for example in train_dataset.take(3):\n input_shapes.append(example[\"inputs\"].shape.as_list())\n target_shapes.append(example[\"targets\"].shape.as_list())\n input_vocab_size = hparams.vocab_size[\"inputs\"]\n target_vocab_size = hparams.vocab_size[\"targets\"]\n input_info = _make_info(input_shapes, input_vocab_size)\n target_info = _make_info(target_shapes, target_vocab_size)\n info = {\"inputs\": input_info, \"targets\": target_info}\n return train_dataset, eval_dataset, info, supervised_keys\n\n\[email protected](blacklist=[\"dataset\", \"training\"])\ndef preprocess_fn(dataset, training, max_target_length=-1):\n def target_right_length(_, target):\n if max_target_length < 1 or not training:\n return tf.constant(True)\n return tf.less(tf.shape(target)[0], max_target_length + 1)\n dataset = dataset.filter(target_right_length)\n return dataset\n\n\[email protected](blacklist=[\"dataset\", \"training\", \"shapes\", \"target_names\"])\ndef batch_fn(dataset, training, shapes, target_names,\n batch_size=32, eval_batch_size=32, bucket_batch_length=32,\n bucket_max_length=256, bucket_min_length=8,\n bucket_length_step=1.1, buckets=None):\n \"\"\"Batching function.\"\"\"\n del target_names\n # If bucketing is not specified, check if target shapes are variable.\n cur_batch_size = batch_size if training else eval_batch_size\n if buckets is None:\n variable_target_shapes = False\n target_shape = shapes[1]\n for dim in target_shape:\n if dim is None:\n variable_target_shapes = True\n tf.logging.info(\"Heuristically setting bucketing to %s based on shapes \"\n \"of target tensors.\" % variable_target_shapes)\n if variable_target_shapes:\n batch_size_per_token = cur_batch_size * bucket_batch_length\n scheme = data_reader.batching_scheme(batch_size_per_token,\n bucket_max_length,\n bucket_min_length,\n bucket_length_step,\n drop_long_sequences=training)\n buckets = (scheme[\"boundaries\"], scheme[\"batch_sizes\"])\n\n if buckets:\n tf.logging.info(\"Bucketing with buckets %s.\" % str(buckets))\n def example_length(_, target):\n return tf.shape(target)[0]\n boundaries, batch_sizes = buckets\n dataset = dataset.apply(tf.data.experimental.bucket_by_sequence_length(\n example_length, boundaries, batch_sizes))\n else:\n dataset = dataset.padded_batch(cur_batch_size, shapes)\n return dataset\n\n\ndef shuffle_and_batch_data(dataset, target_names, features_info, training):\n \"\"\"Shuffle and batch the given dataset.\"\"\"\n def 
append_targets(example):\n \"\"\"Append targets to the example dictionary. Needed for Keras.\"\"\"\n if len(target_names) == 1:\n return (example, example[target_names[0]])\n targets = {}\n for name in target_names:\n targets[name] = example[name]\n return (example, targets)\n dataset = dataset.map(append_targets)\n if training:\n dataset = dataset.repeat()\n shapes = {k: features_info[k].shape for k in features_info}\n shapes = (shapes, shapes[target_names[0]])\n dataset = dataset.shuffle(128)\n dataset = preprocess_fn(dataset, training)\n dataset = batch_fn(dataset, training, shapes, target_names)\n return dataset.prefetch(8)\n\n\[email protected]()\nclass T2TLearningRateSchedule(\n tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"A LearningRateSchedule that uses a T2T config.\"\"\"\n\n def __init__(self, schedule=None, constant=0.1, warmup_steps=200):\n \"\"\"Applies the give T2T schedule string with the given parameters.\"\"\"\n super(T2TLearningRateSchedule, self).__init__()\n self.schedule = schedule or \"constant * linear_warmup * rsqrt_decay\"\n self.constant = constant\n self.warmup_steps = warmup_steps\n\n def __call__(self, step):\n ret = tf.constant(1.0)\n for name in [n.strip() for n in self.schedule.split(\"*\")]:\n if name == \"constant\":\n ret *= self.constant\n elif name == \"linear_warmup\":\n ret *= tf.minimum(1.0, step / self.warmup_steps)\n elif name == \"rsqrt_decay\":\n ret *= tf.rsqrt(tf.maximum(step, self.warmup_steps))\n else:\n raise ValueError(\"Unknown factor %s.\" % name)\n tf.contrib.summary.scalar(\"learning_rate\", ret)\n return ret\n\n def get_config(self):\n return {\n \"schedule\": self.schedule,\n \"constant\": self.constant,\n \"warmup_steps\": self.warmup_steps,\n }\n\n\[email protected](blacklist=[\"model\"])\ndef optimize_fn(model,\n optimizer=None,\n learning_rate_schedule=None,\n loss=None,\n metrics=None):\n \"\"\"Compile the model in Keras.\"\"\"\n learning_rate_schedule = learning_rate_schedule or T2TLearningRateSchedule()\n if optimizer:\n optimizer = optimizer(learning_rate=learning_rate_schedule)\n else: # We use Adam by default with adjusted parameters.\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=learning_rate_schedule,\n beta_1=0.9, beta_2=0.997, epsilon=1e-9)\n metrics = metrics or [tf.keras.metrics.sparse_categorical_accuracy]\n def xent_loss(y, x):\n return tf.keras.backend.sparse_categorical_crossentropy(\n y, x, from_logits=True)\n loss = loss or xent_loss\n return model.compile(optimizer=optimizer,\n loss=loss,\n metrics=metrics)\n\n\n# We include in gin config everything that could be useful to share between\n# users, so when it gets saved in a .gin file it can be re-ran with few flags.\[email protected](blacklist=[\"data_dir\", \"output_dir\"])\ndef train_fn(data_dir=None, output_dir=None,\n model_class=gin.REQUIRED, dataset=gin.REQUIRED,\n input_names=None, target_names=None,\n train_steps=1000, eval_steps=1, eval_frequency=100):\n \"\"\"Train the given model on the given dataset.\n\n Args:\n data_dir: Directory where the data is located.\n output_dir: Directory where to put the logs and checkpoints.\n model_class: The model class to train.\n dataset: The name of the dataset to train on.\n input_names: List of strings with the names of the features on input.\n target_names: List of strings with the names of the target features.\n train_steps: for how many steps to train.\n eval_steps: for how many steps to do evaluation.\n eval_frequency: how often (every this many steps) to run evaluation.\n \"\"\"\n 
train_data, eval_data, features_info, keys = train_and_eval_dataset(\n dataset, data_dir)\n if input_names is None:\n input_names = keys[0]\n if target_names is None:\n target_names = keys[1]\n # TODO(lukaszkaiser): The use of distribution strategy below fails like this:\n # .../keras/models.py\", line 93, in _clone_functional_model\n # for layer in model._input_layers:\n # AttributeError: 'BasicFcRelu' object has no attribute '_input_layers'\n # strategy = tf.distribute.MirroredStrategy()\n # with strategy.scope():\n model = model_class(features_info=features_info,\n input_names=input_names, target_names=target_names)\n optimize_fn(model)\n train_batches = shuffle_and_batch_data(\n train_data, target_names, features_info, training=True)\n eval_batches = shuffle_and_batch_data(\n eval_data, target_names, features_info, training=False)\n # Need to run one training step just to get optimizer variables to load.\n model.fit(train_batches, epochs=1, steps_per_epoch=1)\n\n # Training loop.\n callbacks = []\n callbacks.append(tf.keras.callbacks.History())\n callbacks.append(tf.keras.callbacks.BaseLogger())\n last_epoch = 0\n if output_dir is not None:\n callbacks.append(tf.keras.callbacks.TensorBoard(log_dir=output_dir))\n output_format = os.path.join(output_dir, \"model-{epoch:05d}\")\n callbacks.append(tf.keras.callbacks.ModelCheckpoint(\n filepath=output_format, save_weights_only=True))\n checkpoints = tf.gfile.Glob(os.path.join(output_dir, \"model-*\"))\n # Take basenames and strip the \"model-\" prefix.\n checkpoints = [os.path.basename(ckpt)[6:] for ckpt in checkpoints]\n # Get epoch numbers from the filenames and sort to obtain last epoch.\n epoch_numbers = [int(ckpt[:5]) for ckpt in checkpoints if len(ckpt) > 4]\n epoch_numbers.sort()\n if epoch_numbers:\n last_epoch = epoch_numbers[-1]\n saved_path = os.path.join(output_dir, \"model-%05d\" % last_epoch)\n model.load_weights(saved_path)\n model.fit(train_batches,\n epochs=train_steps // eval_frequency,\n steps_per_epoch=eval_frequency,\n validation_data=eval_batches,\n validation_steps=eval_steps,\n initial_epoch=last_epoch,\n callbacks=callbacks)\n\n\ndef t2t_train(model_name, dataset_name,\n data_dir=None, output_dir=None, config_file=None, config=None):\n \"\"\"Main function to train the given model on the given dataset.\n\n Args:\n model_name: The name of the model to train.\n dataset_name: The name of the dataset to train on.\n data_dir: Directory where the data is located.\n output_dir: Directory where to put the logs and checkpoints.\n config_file: the gin configuration file to use.\n config: string (in gin format) to override gin parameters.\n \"\"\"\n if model_name not in _MODEL_REGISTRY:\n raise ValueError(\"Model %s not in registry. Available models:\\n * %s.\" %\n (model_name, \"\\n * \".join(_MODEL_REGISTRY.keys())))\n model_class = _MODEL_REGISTRY[model_name]()\n gin.bind_parameter(\"train_fn.model_class\", model_class)\n gin.bind_parameter(\"train_fn.dataset\", dataset_name)\n gin.parse_config_files_and_bindings(config_file, config)\n # TODO(lukaszkaiser): save gin config in output_dir if provided?\n train_fn(data_dir, output_dir=output_dir)\n",
"# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Data extraction/preprocessing for processing wiki history dumps for GEC.\n\nWe use a set of heuristics to distill prose from the wikipedia xml. We produce\nsource-target pairs of text reflecting wikipedia edits.\n\nWikiRevision problem - fragment of older revision -> fragment of newer revision.\n\nThis implements data extraction from wikipedia as desribed in the paper,\nWeakly Supervised Grammatical Error Correction using Iterative Decoding\n(https://arxiv.org/pdf/1811.01710.pdf).\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport random\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.data_generators import text_problems\nfrom tensor2tensor.data_generators import wiki_revision_utils\nfrom tensor2tensor.utils import metrics\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer(\"wiki_revision_num_train_shards\", 50,\n \"Set the number of training shards to be output.\")\nflags.DEFINE_integer(\"wiki_revision_num_dev_shards\", 1,\n \"Set the number of dev shards to be output.\")\n\nflags.DEFINE_string(\n \"wiki_revision_data_prefix\", \"\",\n \"Specify the prefix for input data. Expects 7z compressed Wikipedia XML \"\n \"files, available at https://dumps.wikimedia.org/enwiki/latest/.\")\nflags.DEFINE_string(\n \"wiki_revision_vocab_file\", \"\",\n \"Specify a wordpieces vocabulary with which to encode the text. Will \"\n \"generate one from data if not specified.\")\n\nflags.DEFINE_integer(\n \"wiki_revision_max_examples_per_shard\", 0,\n \"Use this to set a cap on examples per shard. \"\n \"0 is no cap.\")\n\n# Data filtration heuristics:\nflags.DEFINE_integer(\"wiki_revision_max_page_size_exp\", 26,\n \"Exponent for 2**X byte cap on page size.\")\nflags.DEFINE_float(\n \"wiki_revision_max_equal_to_diff_ratio\", 0,\n \"Max ratio between count of equal, diff chars for generated \"\n \"examples. Ratio of 1 means examples with more diff chars \"\n \"than equal chars will be tossed out.\")\nflags.DEFINE_float(\n \"wiki_revision_revision_skip_factor\", 1.5,\n \"If >1, process only logarithmically many revisions. \"\n \"This avoids blowup in runtime due to many-revision pages. 
\"\n \"See wiki_revision_utils.include_revision for details.\")\nflags.DEFINE_float(\"wiki_revision_percent_identical_examples\", 0.04,\n \"Percent of generated examples for which source == target.\")\nflags.DEFINE_bool(\n \"wiki_revision_introduce_errors\", True, \"Add errors to the data.\"\n \"See wiki_revision_utils.introduce_errors for details.\")\n\n\[email protected]_problem\nclass WikiRevision(text_problems.Text2TextProblem):\n \"\"\"Old segment -> revised segment.\n\n Data filtration heuristics:\n wiki_revision_max_page_size_exp:\n pages above this # of bytes are thrown out\n\n wiki_revision_revision_skip_factor:\n rate of logarithmic downsampling of revision history list\n\n wiki_revision_percent_identical_examples:\n how many identitcal examples to admit, as percent of total examples\n\n wiki_revision_introduce_errors:\n whether or not to introduce spelling-type errors on the source side\n\n wiki_revision_max_equal_to_diff_ratio:\n whether or not to introduce spelling-type errors on the source side\n\n\n Vocab size=32k\n Maximum input/target length = 1024 wordpiece tokens\n \"\"\"\n num_identity_examples = 0\n num_total_examples = 0\n num_identity_examples = 0\n num_pages = 0\n num_revisions_total = 0\n num_revisions_admitted = 0\n num_examples_thrown_out_identity = 0\n num_examples_thrown_out_too_long = 0\n num_examples_thrown_out_edit_distance = 0\n num_examples_with_introduced_error = 0\n num_introduced_errors = 0\n num_source_tokens = 0\n num_target_tokens = 0\n corpus_files = None\n\n @property\n def approx_vocab_size(self):\n return 2**15 # 32K\n\n @property\n def strip(self):\n \"\"\"Whether to strip wikipedia-stuff to get plain text.\"\"\"\n return True\n\n @property\n def wiki_revision_skip_factor(self):\n \"\"\"If this value is >1.0, process only logarithmically many revisions.\"\"\"\n return FLAGS.wiki_revision_revision_skip_factor\n\n @property\n def max_segment_length(self):\n \"\"\"Maximum number of input/target wordpiece tokens.\"\"\"\n return 256\n\n @property\n def max_examples_per_shard(self):\n \"\"\"Maximum number of examples to generate per shard. 
0=unlimited.\"\"\"\n return FLAGS.wiki_revision_max_examples_per_shard\n\n def aggregate_job_stats(self):\n # Aggregate job stats for output.\n stat = []\n # Run stats.\n stat.append(\"Flags for job:\\n\"\n \"Dev shards: {}\\n\"\n \"Train shards: {}\\n\"\n \"Revision skip factor: {}\\n\"\n \"Max page size: 2**{}\\n\"\n \"Introduce errors: {}\\n\"\n \"Max edit ratio: {}\\n\"\n \"Percent Identical Examples: {}\\n\"\n \"\".format(FLAGS.wiki_revision_num_dev_shards,\n FLAGS.wiki_revision_num_train_shards,\n FLAGS.wiki_revision_revision_skip_factor,\n FLAGS.wiki_revision_max_page_size_exp,\n FLAGS.wiki_revision_introduce_errors,\n FLAGS.wiki_revision_max_equal_to_diff_ratio,\n FLAGS.wiki_revision_percent_identical_examples))\n\n # File stats.\n stat.append(\"corpus files: {}\\n\"\n \"\\tnames: {}\\n\"\n \"\\tpages per input file: {:.1f}\\n\"\n \"\".format(\n len(self.corpus_files), self.corpus_files,\n (0 if not self.corpus_files else\n self.num_pages / len(self.corpus_files))))\n # Page stats.\n stat.append(\n \"pages processed: {}\\n\"\n \"\\trevisions per page: {:.2f}, total: {}\\n\"\n \"\\trevisions admitted per page: {:.2f}, percent of total: {:.2f}\\n\"\n \"\".format(\n self.num_pages, (0 if not self.num_pages else\n self.num_revisions_total / self.num_pages),\n self.num_revisions_total,\n (0 if not self.num_pages else\n self.num_revisions_admitted / self.num_pages),\n (0 if not self.num_revisions_total else\n 100 * self.num_revisions_admitted / self.num_revisions_total)))\n # Revision stats.\n stat.append(\n \"revisions admitted: {}\\n\"\n \"\\texamples generated per revision: {:.2f}\\n\"\n \"\".format(self.num_revisions_admitted,\n (0 if not self.num_revisions_admitted else\n self.num_total_examples / self.num_revisions_admitted)))\n # Example stats.\n stat.append(\n \"examples generated: {}\\n\"\n \"\\twith error introduced: {}, percent of total: {:.2f}\\n\"\n \"\\ttotal errors introduced: {}, errors per errorred example: {:.2f}\\n\"\n \"\\texamples thrown out: {}\\n\"\n \"\\t\\ttoo long: {}\\n\"\n \"\\t\\tidentity: {}\\n\"\n \"\\t\\tedit distance: {}\\n\"\n \"\\tremaining identity examples: {}\\n\"\n \"\\tratio identity (actual, desired): {:.3f}, {}\\n\"\n \"\".format(\n self.num_total_examples, self.num_examples_with_introduced_error,\n (0 if not self.num_total_examples else 100 *\n self.num_examples_with_introduced_error / self.num_total_examples),\n self.num_introduced_errors,\n (0 if not self.num_examples_with_introduced_error else\n self.num_introduced_errors /\n self.num_examples_with_introduced_error),\n self.num_examples_thrown_out_too_long +\n self.num_examples_thrown_out_identity +\n self.num_examples_thrown_out_edit_distance,\n self.num_examples_thrown_out_too_long,\n self.num_examples_thrown_out_identity,\n self.num_examples_thrown_out_edit_distance,\n self.num_identity_examples,\n (0 if not self.num_total_examples else\n self.num_identity_examples / self.num_total_examples),\n FLAGS.wiki_revision_percent_identical_examples))\n # Token stats.\n stat.append(\"tokens generated: {}\\n\"\n \"\\tsource: {}\\n\"\n \"\\ttarget: {}\\n\"\n \"\\tper example: {:.2f}\\n\"\n \"\\t\\tsource: {:.2f}\\n\"\n \"\\t\\ttarget: {:.2f}\\n\"\n \"\".format(self.num_source_tokens + self.num_target_tokens,\n self.num_source_tokens, self.num_target_tokens,\n (0 if not self.num_total_examples else\n (self.num_source_tokens + self.num_target_tokens) /\n self.num_total_examples),\n (0 if not self.num_total_examples else\n self.num_source_tokens / self.num_total_examples),\n (0 if not 
self.num_total_examples else\n self.num_target_tokens / self.num_total_examples)))\n return \"\\n\".join(stat)\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n\n if task_id == -1 or task_id is None:\n for i in range(FLAGS.wiki_revision_num_train_shards +\n FLAGS.wiki_revision_num_dev_shards):\n self.generate_data(data_dir, tmp_dir, i)\n return\n\n tf.logging.info(\n \"Flags for job (task_id {}): \"\n \"Dev shards: {}, Train shards: {}, \"\n \"Revision skip factor: {}, Max page size: 2**{}, Introduce errors: {},\"\n \"Percent Identical Examples: {}\"\n \"\".format(task_id, FLAGS.wiki_revision_num_dev_shards,\n FLAGS.wiki_revision_num_train_shards,\n FLAGS.wiki_revision_revision_skip_factor,\n FLAGS.wiki_revision_max_page_size_exp,\n FLAGS.wiki_revision_introduce_errors,\n FLAGS.wiki_revision_percent_identical_examples))\n\n if FLAGS.wiki_revision_vocab_file:\n encoder = wiki_revision_utils.get_encoder_from_vocab(\n FLAGS.wiki_revision_vocab_file)\n else:\n encoder = wiki_revision_utils.get_or_generate_vocabulary(\n data_dir, tmp_dir, FLAGS.wiki_revision_data_prefix,\n FLAGS.wiki_revision_max_page_size_exp, self.approx_vocab_size,\n self.strip)\n\n random.seed(123)\n if task_id < FLAGS.wiki_revision_num_train_shards:\n out_file = self.training_filepaths(\n data_dir, FLAGS.wiki_revision_num_train_shards,\n shuffled=False)[task_id]\n else:\n out_file = self.dev_filepaths(\n data_dir, FLAGS.wiki_revision_num_dev_shards,\n shuffled=False)[task_id - FLAGS.wiki_revision_num_train_shards]\n\n tf.logging.info(\"Generating files for path: %s\", out_file)\n self.corpus_files = wiki_revision_utils.corpus_files_for_shard(\n task_id, FLAGS.wiki_revision_num_train_shards,\n FLAGS.wiki_revision_num_dev_shards, FLAGS.wiki_revision_data_prefix)\n example_generator = self.generator(encoder, self.corpus_files, tmp_dir)\n\n packed_example_generator = self._maybe_pack_examples(example_generator)\n generator_utils.generate_files(packed_example_generator, [out_file])\n generator_utils.shuffle_dataset([out_file])\n\n tf.logging.info(\n \"Job stats: identity examples: {}, total examples {}, ratio: {}\".format(\n self.num_identity_examples, self.num_total_examples,\n (1 + self.num_identity_examples) / (1 + self.num_total_examples)))\n\n job_stats_string = self.aggregate_job_stats()\n out_dir, filename = out_file.replace(\"-unshuffled\", \"\").rsplit(\"/\", 1)\n stats_prefix = \"/stats_\"\n stats_file_path = \"\".join([out_dir, stats_prefix, filename])\n if tf.gfile.Exists(\n stats_file_path) and tf.gfile.Open(stats_file_path).size() != 0:\n tf.logging.info(\"Skipping writing stats because output file exists.\")\n else:\n with tf.gfile.Open(stats_file_path, \"w\") as out:\n tf.logging.info(\"Writing job stats to {}\".format(stats_file_path))\n out.write(job_stats_string)\n\n tf.logging.info(job_stats_string)\n\n def generator(self, encoder, corpus_files, tmp_dir):\n for page in wiki_revision_utils.corpus_page_generator(\n corpus_files, tmp_dir, FLAGS.wiki_revision_max_page_size_exp):\n self.num_pages += 1\n examples = self.page_to_examples(page, encoder)\n for x in examples:\n yield x\n if self.num_total_examples % 100000 == 0:\n tf.logging.info(\n u\"page count={} num_total_examples={} id={} title={}\".format(\n self.num_pages, self.num_total_examples, page[\"id\"],\n page[\"title\"]))\n if (self.max_examples_per_shard and\n self.num_total_examples >= self.max_examples_per_shard):\n tf.logging.info(\n \"Examples per shard {} >= max_examples_per_shard {}. 
Shutting down.\"\n .format(self.num_total_examples, self.max_examples_per_shard))\n break\n tf.logging.info(\n \"Total pages: {}, total examples: {}, examples per page: {}\".format(\n self.num_pages, self.num_total_examples, 0 if not self.num_pages\n else self.num_total_examples / self.num_pages))\n\n def page_to_examples(self, page, encoder):\n revisions = page[\"revisions\"]\n self.num_revisions_total += len(revisions)\n if len(revisions) < 2:\n return []\n revisions = [\n wiki_revision_utils.get_text(r)\n for n, r in enumerate(revisions)\n if wiki_revision_utils.include_revision(\n n, self.wiki_revision_skip_factor) or n + 1 == len(revisions)\n ]\n self.num_revisions_admitted += len(revisions)\n\n ret = []\n for i in range(len(revisions) - 1):\n old_revision = revisions[i]\n new_revision = revisions[i + 1]\n\n if FLAGS.wiki_revision_introduce_errors:\n old_revision_text, num_added_err = wiki_revision_utils.introduce_errors(\n revisions[i])\n if num_added_err:\n self.num_introduced_errors += num_added_err\n self.num_examples_with_introduced_error += 1\n else:\n old_revision_text = revisions[i]\n new_revision_text = revisions[i + 1]\n if encoder:\n # Encode text into list of ids, if a text encoder is present.\n old_revision = encoder.encode(old_revision_text)\n new_revision = encoder.encode(new_revision_text)\n else:\n # Retain text (as list of characters), if a text encoder is not present.\n old_revision = old_revision_text\n new_revision = new_revision_text\n ret.extend(\n self.make_examples(\n encoder,\n old_revision,\n new_revision,\n max_length=self.max_segment_length,\n percent_identical_examples=FLAGS\n .wiki_revision_percent_identical_examples))\n return ret\n\n def make_examples(self,\n encoder,\n old_snapshot,\n new_snapshot,\n max_length=1024,\n percent_identical_examples=0.01,\n max_length_distance=0):\n \"\"\"Produce training examples based on a pair of snapshots.\n\n Aligns the snapshots, then chops at a random subset of the alignment points\n to create (old snippet -> new snippet) examples.\n\n Most negative examples (those with no changes) are discarded, but we\n keep some of them, maintaining a proportion in the final data\n determined by percent_identical_examples.\n\n Args:\n encoder: the subword text encoder\n old_snapshot: a list of ids\n new_snapshot: a list of ids\n max_length: an integer. Maximum length of \"inputs\" and \"targets\".\n percent_identical_examples: a float\n max_length_distance: an integer. Max token edit dist for admitted examples\n\n Returns:\n a list of feature dictionaries. The dictionaries have\n \"inputs\" and \"targets\" populated. 
text_encoder.EOS is appended to both.\n \"\"\"\n ret = []\n eos_sequence = [text_encoder.EOS_ID]\n # Pick a per-token cut probability with a log-uniform distribution between\n # 1/4 and 1/(max_length / 2)\n bound1 = -math.log(4.0)\n bound2 = -math.log(max_length / 2.0)\n cut_prob = math.exp(random.random() * (bound2 - bound1) + bound1)\n opcodes = wiki_revision_utils.fast_match_sequences(old_snapshot,\n new_snapshot)\n cut_points = [(0, 0)]\n for tag, i1, i2, j1, j2 in opcodes:\n if tag == \"equal\":\n for i in range(i1, i2 + 1):\n if random.random() < cut_prob:\n cut_points.append((i, i + j1 - i1))\n cut_points.append((len(old_snapshot), len(new_snapshot)))\n src_tgt_pairs = []\n for cut_number in range(len(cut_points) - 1):\n i1, j1 = cut_points[cut_number]\n i2, j2 = cut_points[cut_number + 1]\n old_segment = old_snapshot[i1:i2]\n new_segment = new_snapshot[j1:j2]\n src_tgt_pairs.append((old_segment, new_segment))\n\n src_tgt_pairs, thrown_edit_count = wiki_revision_utils.edit_distance_filter(\n wiki_revision_utils.throw_empty_pairs(src_tgt_pairs),\n FLAGS.wiki_revision_max_equal_to_diff_ratio)\n\n self.num_examples_thrown_out_edit_distance += thrown_edit_count\n\n for source, target in src_tgt_pairs:\n # Add EOS segment.\n old_segment = source + eos_sequence\n new_segment = target + eos_sequence\n if len(old_segment) <= max_length and len(new_segment) <= max_length:\n if max_length_distance and (abs(len(old_segment) - len(new_segment)) >\n max_length_distance):\n self.num_examples_thrown_out_edit_distance += 1\n continue\n if old_segment == new_segment:\n # If current proportion of identity is below target\n # percent_identical_examples, then roll for a 50% chance to add an\n # identitical example. Random roll preserves nondeterminism.\n # percent_identical_examples, then add identitical example.\n # Random roll preserves nondeterminism in selecting identity examples.\n if (((self.num_identity_examples) / (1 + self.num_total_examples)) >\n percent_identical_examples) or random.random() > 0.5:\n self.num_examples_thrown_out_identity += 1\n continue\n else:\n self.num_identity_examples += 1\n self.num_total_examples += 1\n self.num_source_tokens += len(old_segment) - 1\n self.num_target_tokens += len(new_segment) - 1\n ret.append({\"inputs\": old_segment, \"targets\": new_segment})\n else:\n self.num_examples_thrown_out_too_long += 1\n\n return ret\n\n def eval_metrics(self):\n return [\n metrics.Metrics.ACC,\n metrics.Metrics.ACC_TOP5,\n metrics.Metrics.ACC_PER_SEQ,\n metrics.Metrics.NEG_LOG_PERPLEXITY,\n ]\n\n @property\n def invert_prob(self):\n \"\"\"Ratio of e^2 positive forward to backward examples.\"\"\"\n return 1.0 / (1.0 + math.exp(2.0))\n\n\[email protected]_problem\nclass WikiRevisionPacked1k(WikiRevision):\n \"\"\"Packed version for TPU.\"\"\"\n\n @property\n def packed_length(self):\n return 1024\n\n\[email protected]_problem\nclass WikiRevisionPacked256(WikiRevision):\n \"\"\"Packed version for TPU.\"\"\"\n\n @property\n def packed_length(self):\n return 256\n\n @property\n def max_segment_length(self):\n return 256\n"
] | [
[
"tensorflow.logging.info"
],
[
"tensorflow.test.main"
],
[
"numpy.all",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.test.main",
"tensorflow.Session"
],
[
"tensorflow.train.update_checkpoint_state",
"tensorflow.gfile.DeleteRecursively",
"tensorflow.gfile.MkDir",
"tensorflow.test.main",
"tensorflow.test.get_temp_dir"
],
[
"tensorflow.slice",
"tensorflow.variable_scope",
"tensorflow.logging.info"
],
[
"tensorflow.gfile.Exists",
"tensorflow.gfile.GFile"
],
[
"tensorflow.keras.callbacks.BaseLogger",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.data.experimental.bucket_by_sequence_length",
"tensorflow.maximum",
"tensorflow.minimum",
"tensorflow.keras.backend.sparse_categorical_crossentropy",
"tensorflow.keras.optimizers.Adam",
"tensorflow.contrib.summary.scalar",
"tensorflow.logging.info",
"tensorflow.keras.callbacks.History",
"tensorflow.keras.callbacks.TensorBoard"
],
[
"tensorflow.gfile.Exists",
"tensorflow.logging.info",
"tensorflow.gfile.Open"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
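Editorial note: the shift_and_pad helper in the TransformerAux source above drives the auxiliary losses by shifting the target sequence before computing cross-entropy. A minimal NumPy sketch of the same shift/pad semantics (shift_and_pad_np is a hypothetical name, not part of tensor2tensor):

import numpy as np

def shift_and_pad_np(arr, shift, axis=0):
    # Mirrors the TF shift_and_pad above: shift right for shift > 0,
    # left for shift < 0, zero-padding the vacated positions.
    arr = np.asarray(arr)
    pad = [(0, 0)] * arr.ndim
    idx = [slice(None)] * arr.ndim
    if shift > 0:
        pad[axis] = (shift, 0)
        idx[axis] = slice(0, arr.shape[axis] - shift)
    elif shift < 0:
        pad[axis] = (0, -shift)
        idx[axis] = slice(-shift, None)
    return np.pad(arr[tuple(idx)], pad)

print(shift_and_pad_np([1, 2, 3, 4], 2))   # [0 0 1 2]
print(shift_and_pad_np([1, 2, 3, 4], -2))  # [3 4 0 0]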
Kotwic4/SCOTR | [
"6afabedb672641a9777d8aa9d7b75f998e53c0c9"
] | [
"generator/mnistGenerator.py"
] | [
"import random\n\nfrom sklearn.datasets import fetch_mldata\n\nfrom util import open_file_in_directory\n\nMNIST_DIR = './tmp/mnist'\nMNIST_TRAIN_DIR = './mnist/train'\nMNIST_TEST_DIR = './mnist/test'\nMNIST_SAMPLE_DIR = './mnist/sample'\nTEST_CASES = 60000\n\n\ndef mnist_img_to_file(mnist_img, file):\n for x in range(28):\n for y in range(28):\n file.write(str(mnist_img[x * 28 + y]) + \" \")\n file.write('\\n')\n\n\ndef generate_samples(data, labels, directory='.', filename='results.txt', sampleNumber=100):\n result = open_file_in_directory(directory, filename)\n for i in range(sampleNumber):\n index = random.randrange(data.shape[0])\n label = labels[index]\n img = data[index]\n img_filename = str(index) + \".txt\"\n line = img_filename + ' ' + str(label) + '\\n'\n result.write(line)\n file = open_file_in_directory(directory, img_filename)\n mnist_img_to_file(img, file)\n file.close()\n result.close()\n\n\ndef generate_test_file(data, labels, directory='.', filename='results.txt'):\n result = open_file_in_directory(directory, filename)\n result.write(str(data.shape[0]) + '\\n')\n indexes = [i for i in range(data.shape[0])]\n random.shuffle(indexes)\n for i in indexes:\n label = labels[i]\n img = data[i]\n line = str(label) + '\\n'\n result.write(line)\n mnist_img_to_file(img, result)\n result.close()\n\n\ndef generate_test_data(data, labels):\n test_data = data[TEST_CASES:]\n test_labels = labels[TEST_CASES:]\n generate_test_file(test_data, test_labels, MNIST_TEST_DIR)\n\n\ndef generate_train_data(data, labels):\n train_data = data[:TEST_CASES]\n train_labels = labels[:TEST_CASES]\n generate_test_file(train_data, train_labels, MNIST_TRAIN_DIR)\n\n\ndef main():\n mnist = fetch_mldata('MNIST original', data_home=MNIST_DIR)\n labels = mnist.target.astype(int)\n data = mnist.data\n generate_train_data(data, labels)\n generate_test_data(data, labels)\n generate_samples(data, labels, MNIST_SAMPLE_DIR)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.datasets.fetch_mldata"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
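Editorial note: mnistGenerator.py above serializes each MNIST digit as 28 lines of 28 space-separated integer pixel values. A hedged sketch of a matching reader, assuming that format (read_mnist_txt is hypothetical, not part of SCOTR):

import numpy as np

def read_mnist_txt(path):
    # Parse the 28x28 plain-text image format produced by mnist_img_to_file.
    with open(path) as f:
        rows = [[int(tok) for tok in line.split()] for line in f if line.strip()]
    img = np.array(rows, dtype=np.uint8)
    assert img.shape == (28, 28), img.shape
    return img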
SwagJ/SuperPoint | [
"ecbf1d6e809ea8c7c832078ad26d2a74ed2fae29"
] | [
"superpoint/models/simple_classifier.py"
] | [
"import tensorflow as tf\nfrom tensorflow import layers as tfl\n\nfrom .base_model import BaseModel, Mode\n\n\nclass SimpleClassifier(BaseModel):\n input_spec = {\n 'image': {'shape': [None, None, None, 1], 'type': tf.float32}\n }\n required_config_keys = []\n default_config = {'data_format': 'channels_first'}\n\n def _model(self, inputs, mode, **config):\n x = inputs['image']\n if config['data_format'] == 'channels_first':\n x = tf.transpose(x, [0, 3, 1, 2])\n\n params = {'padding': 'SAME', 'data_format': config['data_format']}\n\n x = tfl.conv2d(x, 32, 5, activation=tf.nn.relu, name='conv1', **params)\n x = tfl.max_pooling2d(x, 2, 2, name='pool1', **params)\n\n x = tfl.conv2d(x, 64, 5, activation=tf.nn.relu, name='conv2', **params)\n x = tfl.max_pooling2d(x, 2, 2, name='pool2', **params)\n\n x = tfl.flatten(x)\n x = tfl.dense(x, 1024, activation=tf.nn.relu, name='fc1')\n x = tfl.dense(x, 10, name='fc2')\n\n if mode == Mode.TRAIN:\n return {'logits': x}\n else:\n return {'logits': x, 'prob': tf.nn.softmax(x), 'pred': tf.argmax(x, axis=-1)}\n\n def _loss(self, outputs, inputs, **config):\n with tf.name_scope('loss'):\n loss = tf.reduce_mean(tf.compat.v1.losses.sparse_softmax_cross_entropy(\n labels=inputs['label'], logits=outputs['logits']))\n return loss\n\n def _metrics(self, outputs, inputs, **config):\n metrics = {}\n with tf.name_scope('metrics'):\n correct_count = tf.equal(outputs['pred'], inputs['label'])\n correct_count = tf.cast(correct_count, tf.float32)\n metrics['accuracy'] = tf.reduce_mean(correct_count)\n return metrics\n"
] | [
[
"tensorflow.layers.conv2d",
"tensorflow.layers.flatten",
"tensorflow.transpose",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.equal",
"tensorflow.cast",
"tensorflow.layers.dense",
"tensorflow.layers.max_pooling2d",
"tensorflow.name_scope",
"tensorflow.argmax",
"tensorflow.compat.v1.losses.sparse_softmax_cross_entropy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
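Editorial note: SimpleClassifier._model above permutes NHWC input to NCHW when data_format is 'channels_first'. A quick NumPy illustration of the [0, 3, 1, 2] transpose it relies on:

import numpy as np

x = np.zeros((2, 28, 28, 1))           # batch, height, width, channels (NHWC)
x_chw = np.transpose(x, (0, 3, 1, 2))  # batch, channels, height, width (NCHW)
print(x_chw.shape)                     # (2, 1, 28, 28)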
robertdstein/flarestack | [
"2ce7e67da336514f6f38f06126a1fbd82131e441",
"2ce7e67da336514f6f38f06126a1fbd82131e441"
] | [
"flarestack/core/astro.py",
"flarestack/analyses/simcube/verify_simcube_sensitivity.py"
] | [
"\"\"\"\nFunction taken from IceCube astro package.\n\"\"\"\nimport numpy as np\n\n\ndef angular_distance(lon1, lat1, lon2, lat2):\n \"\"\"\n calculate the angular distince along the great circle\n on the surface of a shpere between the points\n (`lon1`,`lat1`) and (`lon2`,`lat2`)\n This function Works for equatorial coordinates\n with right ascension as longitude and declination\n as latitude. This function uses the Vincenty formula\n for calculating the distance.\n Parameters\n ----------\n lon1 : array_like\n longitude of first point in radians\n lat1 : array_like\n latitude of the first point in radians\n lon2 : array_like\n longitude of second point in radians\n lat2 : array_like\n latitude of the second point in radians\n \"\"\"\n c1 = np.cos(lat1)\n c2 = np.cos(lat2)\n s1 = np.sin(lat1)\n s2 = np.sin(lat2)\n sd = np.sin(lon2 - lon1)\n cd = np.cos(lon2 - lon1)\n\n return np.arctan2(np.hypot(c2 * sd, c1 * s2 - s1 * c2 * cd), s1 * s2 + c1 * c2 * cd)\n",
"from __future__ import division\nimport numpy as np\nimport os\nfrom flarestack.shared import plot_output_dir\nfrom flarestack.utils.prepare_catalogue import ps_catalogue_name\nimport matplotlib.pyplot as plt\nfrom flarestack.utils.asimov_estimator import AsimovEstimator\nfrom flarestack import analyse, MinimisationHandler\nfrom flarestack.data.simulate.simcube import simcube_dataset\nfrom flarestack.data.public import icecube_ps_3_year\n\nname = \"analyses/simcube/verify_sensitivity\"\n\n# Initialise Injectors/LLHs\n\ninjection_gamma = 2.0\n\ninjection_energy = {\n \"energy_pdf_name\": \"PowerLaw\",\n \"gamma\": injection_gamma,\n}\n\ninjection_time = {\n \"time_pdf_name\": \"steady\",\n}\n\nllh_time = injection_time\n\ninj_dict = {\n \"injection_energy_pdf\": injection_energy,\n \"injection_time_pdf\": injection_time,\n}\n\nllh_energy = injection_energy\n\nllh_dict = {\n \"name\": \"standard\",\n \"llh_energy_pdf\": llh_energy,\n \"llh_time_pdf\": llh_time,\n}\n\nsindecs = np.linspace(0.75, 0.0, 4)\n\ndatasets = [\n (\"IceCube (One Year\", icecube_ps_3_year.get_seasons(\"IC86-2012\")),\n # (\"Simcube (One year)\", simcube_dataset.get_seasons()),\n]\n\n# plt.figure()\n# ax1 = plt.subplot2grid((4, 1), (0, 0), colspan=3, rowspan=3)\n# ax2 = plt.subplot2grid((4, 1), (3, 0), colspan=3, rowspan=1, sharex=ax1)\n# refs = reference_7year_discovery_potential(sindecs, injection_gamma)\n#\n# ax1.plot(sindecs, refs, label=r\"7-year Point Source analysis\", color=\"k\")\n\nfor i, (label, dataset) in enumerate(datasets):\n\n for sindec in sindecs:\n\n cat_path = ps_catalogue_name(sindec)\n\n mh_dict = {\n \"name\": name + \"/sindec=\" + \"{0:.2f}\".format(sindec) + \"/\",\n \"mh_name\": \"fixed_weights\",\n \"datasets\": dataset,\n \"catalogue\": cat_path,\n \"llh_dict\": llh_dict,\n \"inj_dict\": inj_dict,\n \"n_trials\": 10,\n \"n_steps\": 15,\n }\n\n mh_dict[\"scale\"] = 1.0\n\n mh = MinimisationHandler.create(mh_dict)\n # print(mh.simulate_and_run(0.))\n print(mh.guess_scale())\n\n # analyse(mh_dict, n_cpu=2)\n\n input(\"?\")\n\n # disc_pots = np.array(disc_pots)\n #\n # plot_range = np.linspace(-0.99, 0.99, 1000)\n #\n # ax1.plot(sindecs[mask], disc_pots, color=color,\n # label=\"Flarestack Estimation ({0})\".format(sample_name))\n #\n # disc_ratios = np.array(disc_pots)/refs[mask]\n # print(disc_ratios)\n # print(\"Range:\", max(disc_ratios)/min(disc_ratios))\n #\n # ax2.scatter(sindecs[mask], disc_ratios, color=color)\n # ax2.plot(sindecs[mask], disc_ratios, color=color, linestyle=\"--\")\n #\n # ax1.set_xlim(xmin=-1., xmax=1.)\n # # ax1.set_ylim(ymin=1.e-13, ymax=1.e-10)\n # ax1.grid(True, which='both')\n # ax1.semilogy(nonposy='clip')\n # ax1.set_ylabel(r\"Flux Strength [ GeV$^{-1}$ cm$^{-2}$ s$^{-1}$ ]\",\n # fontsize=12)\n #\n # ax2.set_ylabel(r\"ratio\", fontsize=12)\n # ax2.set_xlabel(r\"sin($\\delta$)\", fontsize=12)\n # #\n # ax1.set_xlim(xmin=-1.0, xmax=1.0)\n # # ax2.set_ylim(ymin=0.5, ymax=1.5)\n # ax2.grid(True)\n # xticklabels = ax1.get_xticklabels()\n # plt.setp(xticklabels, visible=False)\n # plt.subplots_adjust(hspace=0.001)\n #\n # plt.suptitle('Point Source Discovery Potential')\n #\n # # ratio_interp = interp1d(sindecs, sens_ratios)\n # #\n # # interp_range = np.linspace(np.min(sindecs),\n # # np.max(sindecs), 1000)\n #\n # # ax1.plot(\n # # interp_range,\n # # reference_sensitivity(interp_range)*ratio_interp(interp_range),\n # # color='red', linestyle=\"--\", label=\"Ratio Interpolation\")\n #\n # ax1.legend(loc='upper right', fancybox=True, framealpha=1.)\n\n # save_dir = 
plot_output_dir(name)\n #\n # try:\n # os.makedirs(save_dir)\n # except OSError:\n # pass\n #\n # title = [\"/PSDisc.pdf\", \"/IC86_1.pdf\"][i]\n #\n # plt.savefig(save_dir + title)\n # plt.close()\n"
] | [
[
"numpy.cos",
"numpy.hypot",
"numpy.sin"
],
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
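Editorial note: a short sanity check for the Vincenty-formula angular_distance in flarestack/core/astro.py above, assuming the module is importable under that path:

import numpy as np
from flarestack.core.astro import angular_distance

# North pole to a point on the equator is a quarter circle.
print(angular_distance(0.0, np.pi / 2, 0.0, 0.0))  # ~pi/2 = 1.5707...
# Coincident points are zero radians apart.
print(angular_distance(1.0, 0.3, 1.0, 0.3))        # 0.0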
aypan17/value_learning | [
"240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe",
"240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe",
"240a67ecf99b178fe0c4ced2bfd1dd50453fbdfe"
] | [
"bgp/simglucose/controller/basal_bolus_ctrller.py",
"pansim/pandemic_simulator/callback.py",
"flow/utils/compute_norms.py"
] | [
"from .base import Controller\nfrom .base import Action\nimport numpy as np\nimport pandas as pd\nimport pkg_resources\nimport logging\nfrom collections import namedtuple\n\nlogger = logging.getLogger(__name__)\nCONTROL_QUEST = '/source/dir/simglucose/params/Quest.csv'\nPATIENT_PARA_FILE = '/source/dir/simglucose/params/vpatient_params.csv'\nParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])\n\nclass BBController(Controller):\n def __init__(self, target=140):\n self.quest = pd.read_csv(CONTROL_QUEST)\n self.patient_params = pd.read_csv(\n PATIENT_PARA_FILE)\n self.target = target\n\n def policy(self, observation, reward, done, **kwargs):\n sample_time = kwargs.get('sample_time', 1)\n pname = kwargs.get('patient_name')\n\n meal = kwargs.get('meal')\n\n action = self._bb_policy(\n pname,\n meal,\n observation.CGM,\n sample_time)\n return action\n\n def _bb_policy(self, name, meal, glucose, env_sample_time):\n if any(self.quest.Name.str.match(name)):\n q = self.quest[self.quest.Name.str.match(name)]\n params = self.patient_params[self.patient_params.Name.str.match(\n name)]\n u2ss = np.asscalar(params.u2ss.values)\n BW = np.asscalar(params.BW.values)\n else:\n q = pd.DataFrame([['Average', 13.5, 23.52, 50, 30]],\n columns=['Name', 'CR', 'CF', 'TDI', 'Age'])\n u2ss = 1.43\n BW = 57.0\n\n basal = u2ss * BW / 6000\n if meal > 0:\n logger.info('Calculating bolus ...')\n logger.debug('glucose = {}'.format(glucose))\n bolus = np.asscalar(meal / q.CR.values + (glucose > 150)\n * (glucose - self.target) / q.CF.values)\n else:\n bolus = 0\n\n bolus = bolus / env_sample_time\n action = Action(basal=basal, bolus=bolus)\n return action\n\n def reset(self):\n pass\n\n\nclass ManualBBController(Controller):\n def __init__(self, target, cr, cf, basal, sample_rate=5, use_cf=True, use_bol=True, cooldown=0,\n corrected=True, use_low_lim=False, low_lim=70):\n super().__init__(self)\n self.target = target\n self.orig_cr = self.cr = cr\n self.orig_cf = self.cf = cf\n self.orig_basal = self.basal = basal\n self.sample_rate = sample_rate\n self.use_cf = use_cf\n self.use_bol = use_bol\n self.cooldown = cooldown\n self.last_cf = np.inf\n self.corrected = corrected\n self.use_low_lim = low_lim\n self.low_lim = low_lim\n\n def increment(self, cr_incr=0, cf_incr=0, basal_incr=0):\n self.cr += cr_incr\n self.cf += cf_incr\n self.basal += basal_incr\n\n def policy(self, observation, reward, done, **kwargs):\n carbs = kwargs.get('carbs')\n glucose = kwargs.get('glucose')\n action = self.manual_bb_policy(carbs, glucose)\n return action\n\n def manual_bb_policy(self, carbs, glucose, log=False):\n if carbs > 0:\n if self.corrected:\n carb_correct = carbs / self.cr\n else:\n # assuming carbs are already multiplied by sampling rate\n carb_correct = (carbs/self.sample_rate) / self.cr # TODO: not sure about this\n hyper_correct = (glucose > self.target) * (glucose - self.target) / self.cf\n hypo_correct = (glucose < self.low_lim) * (self.low_lim - glucose) / self.cf\n bolus = 0\n if self.use_low_lim:\n bolus -= hypo_correct\n if self.use_cf:\n if self.last_cf > self.cooldown and hyper_correct > 0:\n bolus += hyper_correct\n self.last_cf = 0\n if self.use_bol:\n bolus += carb_correct\n bolus = bolus / self.sample_rate\n else:\n bolus = 0\n carb_correct = 0\n hyper_correct = 0\n hypo_correct = 0\n self.last_cf += self.sample_rate\n if log:\n return Action(basal=self.basal, bolus=bolus), hyper_correct, hypo_correct, carb_correct\n else:\n return Action(basal=self.basal, bolus=bolus)\n\n def get_params(self):\n return 
ParamTup(basal=self.basal, cf=self.cf, cr=self.cr)\n\n    def adjust(self, basal_adj, cr_adj):\n        self.basal += self.orig_basal * basal_adj\n        self.cr += self.orig_cr * cr_adj\n\n    def reset(self):\n        self.cr = self.orig_cr\n        self.cf = self.orig_cf\n        self.basal = self.orig_basal\n        self.last_cf = np.inf\n\nclass MyController(Controller):\n    def __init__(self, init_state):\n        self.init_state = init_state\n        self.state = init_state\n\n    def policy(self, observation, reward, done, **info):\n        '''\n        Every controller must have this implementation!\n        ----\n        Inputs:\n        observation - a namedtuple defined in simglucose.simulation.env. For\n                      now, it only has one entry: blood glucose level measured\n                      by CGM sensor.\n        reward      - current reward returned by environment\n        done        - True, game over. False, game continues\n        info        - additional information as keyword arguments,\n                      simglucose.simulation.env.T1DSimEnv returns patient_name\n                      and sample_time\n        ----\n        Output:\n        action - a namedtuple defined at the beginning of this file. The\n                 controller action contains two entries: basal, bolus\n        '''\n        self.state = observation\n        action = Action(basal=0, bolus=0)\n        return action\n\n    def reset(self):\n        '''\n        Reset the controller state to initial state, must be implemented\n        '''\n        self.state = self.init_state\n",
"from stable_baselines3.common.callbacks import BaseCallback\r\nfrom pandemic_simulator.environment.interfaces import sorted_infection_summary\r\n\r\nimport wandb\r\nimport numpy as np\r\n\r\n\r\nclass WandbCallback(BaseCallback):\r\n\t\"\"\"\r\n\tA wandb logging callback that derives from ``BaseCallback``.\r\n\r\n\t:param verbose: (int) Verbosity level 0: not output 1: info 2: debug\r\n\t\"\"\"\r\n\tdef __init__(self, name=\"\", gamma=0.99, viz=None, eval_freq=10, multiprocessing=False, verbose=0):\r\n\t\t\r\n\t\tself.name = name\r\n\t\tself.viz = viz\r\n\t\tself.eval_freq = eval_freq\r\n\t\tself.multi = multiprocessing\r\n\t\tself.gamma = gamma\r\n\t\tself.n_rollouts=0\r\n\t\tself.record = False\r\n\t\tsuper(WandbCallback, self).__init__(verbose)\r\n\t\t# Those variables will be accessible in the callback\r\n\t\t# (they are defined in the base class)\r\n\t\t# The RL model\r\n\t\t# self.model = None # type: BaseAlgorithm\r\n\t\t# An alias for self.model.get_env(), the environment used for training\r\n\t\t# self.training_env = None\t# type: Union[gym.Env, VecEnv, None]\r\n\t\t# Number of time the callback was called\r\n\t\t# self.n_calls = 0\t# type: int\r\n\t\t# self.num_timesteps = 0 # type: int\r\n\t\t# local and global variables\r\n\t\t# self.locals = None # type: Dict[str, Any]\r\n\t\t# self.globals = None # type: Dict[str, Any]\r\n\t\t# The logger object, used to report things in the terminal\r\n\t\t# self.logger = None # stable_baselines3.common.logger\r\n\t\t# # Sometimes, for event callback, it is useful\r\n\t\t# # to have access to the parent object\r\n\t\t# self.parent = None # type: Optional[BaseCallback]\r\n\r\n\tdef _on_training_start(self) -> None:\r\n\t\t\"\"\"\r\n\t\tThis method is called before the first rollout starts.\r\n\t\t\"\"\"\r\n\r\n\tdef _on_rollout_start(self) -> None:\r\n\t\t\"\"\"\r\n\t\tA rollout is the collection of environment interaction\r\n\t\tusing the current policy.\r\n\t\tThis event is triggered before collecting new samples.\r\n\t\t\"\"\"\r\n\t\tself.episode_rewards = []\r\n\t\tself.episode_reward_std = []\r\n\t\tself.episode_true_rewards = []\r\n\t\tself.episode_true_reward_std = []\r\n\t\tself.episode_infection_data = np.array([[0, 0, 0, 0, 0]])\r\n\t\tself.episode_threshold = []\r\n\r\n\t\tself.n_rollouts += 1\r\n\t\tself.record = (self.n_rollouts % self.eval_freq == 0)\r\n\t\tself.counter = 0\r\n\r\n\tdef _on_step(self) -> bool:\r\n\t\t\"\"\"\r\n\t\tThis method will be called by the model after each call to `env.step()`.\r\n\r\n\t\tFor child callback (of an `EventCallback`), this will be called\r\n\t\twhen the event is triggered.\r\n\r\n\t\t:return: (bool) If the callback returns False, training is aborted early.\r\n\t\t\"\"\"\r\n\t\tlist_obs = self.training_env.get_attr(\"observation\")\r\n\t\trew = self.training_env.get_attr(\"last_reward\")\r\n\t\ttrue_rew = self.training_env.get_attr(\"get_true_reward\")\r\n\t\tinfection_data = np.zeros((1, 5))\r\n\t\tthreshold_data = np.zeros(len(list_obs))\r\n\t\tfor obs in list_obs:\r\n\t\t\tinfection_data += obs.global_infection_summary[-1]\r\n\t\t\tthreshold_data += obs.infection_above_threshold[-1].item()\r\n\r\n\t\tself.episode_rewards.append(np.mean(rew))\r\n\t\tself.episode_reward_std.append(np.std(rew))\r\n\t\tself.episode_true_rewards.append(np.mean(true_rew))\r\n\t\tself.episode_true_reward_std.append(np.std(true_rew))\r\n\t\tself.episode_infection_data = np.concatenate([self.episode_infection_data, infection_data / len(list_obs)])\r\n\t\tself.episode_threshold.append(np.sum(threshold_data) / 
len(list_obs))\r\n\t\t\r\n\t\tif self.record and self.counter < 192:\r\n\t\t\tgis = np.array([obs.global_infection_summary[-1] for obs in list_obs]).squeeze(1)\r\n\t\t\tgts = np.array([obs.global_testing_summary[-1] for obs in list_obs]).squeeze(1)\r\n\t\t\tstage = np.array([obs.stage[-1].item() for obs in list_obs])\r\n\t\t\t#self.viz.record((list_obs[0], rew[0], true_rew[0]))\r\n\t\t\tself.viz.record_list(obs, gis, gts, stage, rew, true_rew)\r\n\t\t\tself.counter += 1\r\n\t\treturn True\r\n\r\n\tdef _on_rollout_end(self) -> None:\r\n\t\t\"\"\"\r\n\t\tThis event is triggered before updating the policy.\r\n\t\t\"\"\"\r\n\t\tinfection_summary = np.sum(self.episode_infection_data, axis=0)\r\n\t\thorizon = len(self.episode_rewards)\r\n\t\ttrue_w = np.geomspace(1, 1, num=horizon)\r\n\t\tproxy_w = np.geomspace(1, 1, num=horizon)\r\n\t\tn_ppl = np.sum(self.episode_infection_data[1])\r\n\r\n\t\twandb.log({\"reward\": np.dot(proxy_w, np.array(self.episode_rewards)),\r\n\t\t\t\t \"reward_std\": np.mean(self.episode_reward_std), \r\n\t\t\t\t \"true_reward\": np.dot(true_w, np.array(self.episode_true_rewards)),\r\n\t\t\t\t \"true_reward_std\": np.mean(self.episode_true_reward_std),\r\n\t\t\t\t \"proportion_critical\": infection_summary[0] / n_ppl,\r\n\t\t\t\t \"proportion_dead\": infection_summary[1] / n_ppl,\r\n\t\t\t\t \"proportion_infected\": infection_summary[2] / n_ppl,\r\n\t\t\t\t \"proportion_healthy\": infection_summary[3] / n_ppl,\r\n\t\t\t\t \"proportion_recovered\": infection_summary[4] / n_ppl,\r\n\t\t\t\t \"time_over_threshold\": np.mean(self.episode_threshold),\r\n\t\t\t\t })\r\n\t\tif self.record:\r\n\t\t\tself.viz.plot(name=self.name, epoch=self.n_rollouts)\r\n\t\t\tself.model.save(f\"pandemic_policy/{self.name}\")\r\n\t\t\tself.viz.reset()\r\n\t\t\tself.record = False\r\n\t\tprint(f\"{self.n_rollouts} epochs completed\")\r\n\r\n\r\n\tdef _on_training_end(self) -> None:\r\n\t\t\"\"\"\r\n\t\tThis event is triggered before exiting the `learn()` method.\r\n\t\t\"\"\"\r\n\t\tpass\r\n\r\n\r\nclass SacdCallback:\r\n\t\"\"\"\r\n\tA wandb logging callback that derives from ``BaseCallback``.\r\n\r\n\t:param verbose: (int) Verbosity level 0: not output 1: info 2: debug\r\n\t\"\"\"\r\n\tdef __init__(self, name=\"\", gamma=0.99):\r\n\t\t\r\n\t\tself.name = name\r\n\t\tself.gamma = gamma\r\n\t\tself.multi = False\r\n\t\tself.n_rollouts=0\r\n\r\n\t\tsuper(SacdCallback, self).__init__()\r\n\t\t# Those variables will be accessible in the callback\r\n\t\t# (they are defined in the base class)\r\n\t\t# The RL model\r\n\t\t# self.model = None # type: BaseAlgorithm\r\n\t\t# An alias for self.model.get_env(), the environment used for training\r\n\t\t# self.training_env = None\t# type: Union[gym.Env, VecEnv, None]\r\n\t\t# Number of time the callback was called\r\n\t\t# self.n_calls = 0\t# type: int\r\n\t\t# self.num_timesteps = 0 # type: int\r\n\t\t# local and global variables\r\n\t\t# self.locals = None # type: Dict[str, Any]\r\n\t\t# self.globals = None # type: Dict[str, Any]\r\n\t\t# The logger object, used to report things in the terminal\r\n\t\t# self.logger = None # stable_baselines3.common.logger\r\n\t\t# # Sometimes, for event callback, it is useful\r\n\t\t# # to have access to the parent object\r\n\t\t# self.parent = None # type: Optional[BaseCallback]\r\n\r\n\tdef on_training_start(self) -> None:\r\n\t\t\"\"\"\r\n\t\tThis method is called before the first rollout starts.\r\n\t\t\"\"\"\r\n\r\n\tdef on_rollout_start(self) -> None:\r\n\t\t\"\"\"\r\n\t\tA rollout is the collection of environment 
interaction\r\n\t\tusing the current policy.\r\n\t\tThis event is triggered before collecting new samples.\r\n\t\t\"\"\"\r\n\t\tself.episode_rewards = []\r\n\t\tself.episode_reward_std = []\r\n\t\tself.episode_true_rewards = []\r\n\t\tself.episode_true_reward_std = []\r\n\t\tself.episode_infection_data = np.array([[0, 0, 0, 0, 0]])\r\n\t\tself.episode_threshold = []\r\n\r\n\t\tself.n_rollouts += 1\r\n\t\t\r\n\r\n\tdef on_step(self, env) -> bool:\r\n\t\t\"\"\"\r\n\t\tThis method will be called by the model after each call to `env.step()`.\r\n\r\n\t\tFor child callback (of an `EventCallback`), this will be called\r\n\t\twhen the event is triggered.\r\n\r\n\t\t:return: (bool) If the callback returns False, training is aborted early.\r\n\t\t\"\"\"\r\n\t\tlist_obs = env.get_attr(\"observation\")\r\n\t\trew = env.get_attr(\"last_reward\")\r\n\t\ttrue_rew = env.get_attr(\"get_true_reward\")\r\n\t\tinfection_data = np.zeros((1, 5))\r\n\t\tthreshold_data = np.zeros(len(list_obs))\r\n\t\tfor obs in list_obs:\r\n\t\t\tinfection_data += np.squeeze(obs.global_infection_summary, axis=0) \r\n\t\t\tthreshold_data += np.squeeze(obs.infection_above_threshold)\r\n\r\n\t\tself.episode_rewards.append(np.mean(rew))\r\n\t\tself.episode_reward_std.append(np.std(rew))\r\n\t\tself.episode_true_rewards.append(np.mean(true_rew))\r\n\t\tself.episode_true_reward_std.append(np.std(true_rew))\r\n\t\tself.episode_infection_data = np.concatenate([self.episode_infection_data, infection_data / len(list_obs)])\r\n\t\tself.episode_threshold.append(np.sum(threshold_data) / len(list_obs))\r\n\r\n\t\treturn True\r\n\r\n\tdef on_rollout_end(self) -> None:\r\n\t\t\"\"\"\r\n\t\tThis event is triggered before updating the policy.\r\n\t\t\"\"\"\r\n\t\tinfection_summary = np.sum(self.episode_infection_data, axis=0)\r\n\t\thorizon = len(self.episode_rewards)\r\n\t\t# true_w = np.geomspace(1, 0.99**(horizon-1), num=horizon)\r\n\t\t# proxy_w = np.geomspace(1, self.gamma**(horizon-1), num=horizon)\r\n\t\tproxy_w = np.geomspace(1, 1, num=horizon)\r\n\t\ttrue_w = np.geomspace(1, 1, num=horizon)\r\n\t\t\r\n\t\tn_ppl = np.sum(self.episode_infection_data[1])\r\n\r\n\t\twandb.log({\"reward\": np.dot(proxy_w, np.array(self.episode_rewards)),\r\n\t\t\t\t \"reward_std\": np.mean(self.episode_reward_std), \r\n\t\t\t\t \"true_reward\": np.dot(true_w, np.array(self.episode_true_rewards)),\r\n\t\t\t\t \"true_reward_std\": np.mean(self.episode_true_reward_std),\r\n\t\t\t\t \"proportion_critical\": infection_summary[0] / n_ppl,\r\n\t\t\t\t \"proportion_dead\": infection_summary[1] / n_ppl,\r\n\t\t\t\t \"proportion_infected\": infection_summary[2] / n_ppl,\r\n\t\t\t\t \"proportion_healthy\": infection_summary[3] / n_ppl,\r\n\t\t\t\t \"proportion_recovered\": infection_summary[4] / n_ppl,\r\n\t\t\t\t \"time_over_threshold\": np.mean(self.episode_threshold),\r\n\t\t\t\t })\r\n\t\tprint(f\"{self.n_rollouts} epochs completed\")\r\n\r\n\r\n\tdef on_training_end(self) -> None:\r\n\t\t\"\"\"\r\n\t\tThis event is triggered before exiting the `learn()` method.\r\n\t\t\"\"\"\r\n\t\tpass\r\n",
"\"\"\"Plot rewards vs. norms.\r\n\r\nAttributes\r\n----------\r\nEXAMPLE_USAGE : str\r\n Example call to the function, which is\r\n ::\r\n\r\n python ./visualizer_rllib.py /tmp/ray/result_dir 1\r\n\r\nparser : ArgumentParser\r\n Command-line argument parser\r\n\"\"\"\r\n\r\nimport argparse\r\nimport gym\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport time\r\nfrom copy import deepcopy\r\nimport json\r\nimport pandas as pd\r\n\r\nimport seaborn\r\nimport scipy\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import multivariate_normal as MVN\r\n\r\nimport ray\r\ntry:\r\n from ray.rllib.agents.agent import get_agent_class\r\nexcept ImportError:\r\n from ray.rllib.agents.registry import get_agent_class\r\nfrom ray.tune.registry import register_env\r\n\r\nfrom flow.core.util import emission_to_csv\r\nfrom flow.utils.registry import make_create_env\r\nfrom flow.utils.rllib import get_flow_params\r\nfrom flow.utils.rllib import get_rllib_config\r\nfrom flow.utils.rllib import get_rllib_pkl\r\nfrom flow.core.rewards import REWARD_REGISTRY\r\n\r\nimport tensorflow as tf\r\n\r\n\r\nEXAMPLE_USAGE = \"\"\"\r\nexample usage:\r\n python ./visualizer_rllib.py /ray_results/experiment_dir/result_dir 1\r\n\r\nHere the arguments are:\r\n1 - the path to the simulation results\r\n2 - the number of the checkpoint\r\n\"\"\"\r\n\r\nclass DiagGaussian(object):\r\n \"\"\"Action distribution where each vector element is a gaussian.\r\n\r\n The first half of the input vector defines the gaussian means, and the\r\n second half the gaussian standard deviations.\r\n \"\"\"\r\n\r\n def __init__(self, inputs):\r\n mean, log_std = np.split(inputs, 2)\r\n self.mean = mean\r\n self.log_std = log_std\r\n self.std = np.exp(log_std)\r\n\r\n def kl(self, other):\r\n if other is None:\r\n return 0\r\n assert isinstance(other, DiagGaussian)\r\n if other.mean.shape != self.mean.shape:\r\n return None\r\n return np.sum(\r\n other.log_std - self.log_std +\r\n (np.square(self.std) + np.square(self.mean - other.mean)) /\r\n (2.0 * np.square(other.std)))\r\n\r\n @property\r\n def entropy(self):\r\n return np.sum(\r\n self.log_std + .5 * np.log(2.0 * np.pi * np.e))\r\n\r\ndef distributions_js(distribution_p, distribution_q, n_samples=10 ** 5):\r\n # jensen shannon divergence. 
(Jensen shannon distance is the square root of the divergence)\r\n # all the logarithms are defined as log2 (because of information entrophy)\r\n X = distribution_p.rvs(n_samples)\r\n p_X = distribution_p.pdf(X)\r\n q_X = distribution_q.pdf(X)\r\n log_mix_X = np.log2(p_X + q_X)\r\n\r\n Y = distribution_q.rvs(n_samples)\r\n p_Y = distribution_p.pdf(Y)\r\n q_Y = distribution_q.pdf(Y)\r\n log_mix_Y = np.log2(p_Y + q_Y)\r\n\r\n return (np.log2(p_X).mean() - (log_mix_X.mean() - np.log2(2))\r\n + np.log2(q_Y).mean() - (log_mix_Y.mean() - np.log2(2))) / 2\r\n\r\ndef get_dist_params(agent_logits, base_logits):\r\n mean_agent, std_agent = np.split(agent_logits, 2)\r\n mean_base, std_base = np.split(base_logits, 2)\r\n cars = len(std_agent)\r\n cov_agent = np.zeros((cars, cars), float)\r\n cov_base = np.zeros((cars, cars), float)\r\n np.fill_diagonal(cov_agent, np.exp(std_agent))\r\n np.fill_diagonal(cov_base, np.exp(std_base))\r\n return mean_agent, cov_agent, mean_base, cov_base\r\n\r\ndef hellinger(agent_logits, base_logits):\r\n mu1, sigma1, mu2, sigma2 = get_dist_params(agent_logits, base_logits)\r\n sigma1_plus_sigma2 = sigma1 + sigma2\r\n mu1_minus_mu2 = mu1 - mu2\r\n E = mu1_minus_mu2.T @ np.linalg.inv(sigma1_plus_sigma2/2) @ mu1_minus_mu2\r\n epsilon = -0.125*E\r\n numerator = np.sqrt(np.linalg.det(sigma1 @ sigma2))\r\n denominator = np.linalg.det(sigma1_plus_sigma2/2)\r\n squared_hellinger = 1 - np.sqrt(numerator/denominator)*np.exp(epsilon)\r\n squared_hellinger = squared_hellinger.item()\r\n return np.sqrt(squared_hellinger)\r\n\r\ndef jensen_shannon(agent_logits, base_logits, n_samples=10 ** 5):\r\n mean_agent, cov_agent, mean_base, cov_base = get_dist_params(agent_logits, base_logits)\r\n agent = MVN(mean=mean_agent, cov=cov_agent)\r\n base = MVN(mean=mean_base, cov=cov_base)\r\n return distributions_js(base, agent, n_samples=n_samples)\r\n\r\n\r\ndef rollout(env, args, agent, baseline_agent, true_specification, true2_specification=None):\r\n # Simulate and collect metrics\r\n rets = []\r\n true_rets = []\r\n true_rets2 = []\r\n #actions = []\r\n log_probs = []\r\n base_log_probs = []\r\n vfs = []\r\n base_vfs = []\r\n kls = []\r\n car_kls = []\r\n js = []\r\n car_js = []\r\n h = []\r\n car_h = []\r\n\r\n for i in range(args.num_rollouts):\r\n ret = 0\r\n true_ret = 0\r\n true_ret2 = 0\r\n #action_moments = [] \r\n log_prob = []\r\n base_log_prob = []\r\n vf = []\r\n base_vf = []\r\n kl = []\r\n car_kl = []\r\n js_dist = []\r\n car_js_dist = []\r\n h_dist = []\r\n car_h_dist = []\r\n\r\n state = env.reset()\r\n for j in range(args.horizon):\r\n action = agent.compute_action(state, full_fetch=True)\r\n baseline_action = baseline_agent.compute_action(state, full_fetch=True)\r\n\r\n vf_preds = action[2]['vf_preds']\r\n logp = action[2]['action_logp']\r\n logits = action[2]['behaviour_logits']\r\n base_vf_preds = baseline_action[2]['vf_preds']\r\n base_logp = baseline_action[2]['action_logp']\r\n base_logits = baseline_action[2]['behaviour_logits']\r\n\r\n action = action[0]\r\n\r\n cars = []\r\n car_logits = []\r\n car_base_logits = []\r\n for i, rl_id in enumerate(env.unwrapped.rl_veh):\r\n # get rl vehicles inside the network\r\n if rl_id in env.unwrapped.k.vehicle.get_rl_ids():\r\n cars.append(i)\r\n for c in cars:\r\n car_logits.append(logits[c])\r\n car_base_logits.append(base_logits[c])\r\n for c in cars:\r\n car_logits.append(logits[c + len(logits)//2])\r\n car_base_logits.append(base_logits[c])\r\n car_logits = np.array(car_logits)\r\n car_base_logits = 
np.array(car_base_logits)\r\n \r\n if (j+1) % 20 == 0:\r\n vf.append(vf_preds)\r\n log_prob.append(logp)\r\n #action_moments.append((np.mean(action).item(), np.std(action).item()))\r\n action_dist = DiagGaussian(logits)\r\n base_log_prob.append(base_logp)\r\n base_vf.append(base_vf_preds)\r\n base_action_dist = DiagGaussian(base_logits)\r\n kl.append(base_action_dist.kl(action_dist))\r\n js_dist.append(jensen_shannon(logits, base_logits))\r\n h_dist.append(hellinger(logits, base_logits))\r\n\r\n if len(cars) > 0:\r\n car_action_dist = DiagGaussian(car_logits)\r\n car_base_action_dist = DiagGaussian(car_base_logits)\r\n car_kl.append(car_base_action_dist.kl(car_action_dist))\r\n car_js_dist.append(jensen_shannon(car_logits, car_base_logits))\r\n car_h_dist.append(hellinger(car_logits, car_base_logits))\r\n\r\n state, reward, done, _ = env.step(action)\r\n ret += reward\r\n vels = np.array([env.unwrapped.k.vehicle.get_speed(veh_id) for veh_id in env.unwrapped.k.vehicle.get_ids()])\r\n if all(vels > -100):\r\n true_ret = sum([eta * REWARD_REGISTRY[rew](env, action) for rew, eta in true_specification])\r\n if true2_specification:\r\n true_ret2 = sum([eta * REWARD_REGISTRY[rew](env, action) for rew, eta in true2_specification])\r\n\r\n if done:\r\n break\r\n\r\n if done and (j+1) != args.horizon:\r\n continue\r\n rets.append(ret)\r\n true_rets.append(true_ret)\r\n true_rets2.append(true_ret2)\r\n #actions.append(action_moments)\r\n base_log_probs.append(base_log_prob)\r\n log_probs.append(log_prob)\r\n vfs.append(vf)\r\n base_vfs.append(base_vf)\r\n kls.append(kl)\r\n car_kls.append(car_kl)\r\n js.append(js_dist)\r\n car_js.append(car_js_dist)\r\n h.append(h_dist)\r\n car_h.append(car_h_dist)\r\n\r\n print(f'==== Finished epoch ====')\r\n if len(rets) == 0:\r\n print(\"ERROR\")\r\n return None, None, None, None, None, None, None, None, None, None, None, None, None\r\n return rets, true_rets, true_rets2, \\\r\n np.mean(log_probs, axis=0), np.mean(base_log_probs, axis=0), \\\r\n np.mean(vfs, axis=0), np.mean(base_vfs, axis=0), \\\r\n np.mean(kls, axis=0), np.mean(car_kls, axis=0), \\\r\n np.mean(js, axis=0), np.mean(car_js, axis=0), \\\r\n np.mean(h, axis=0), np.mean(car_h, axis=0)\r\n\r\ndef reward_specification(rewards, weights):\r\n rewards = rewards.split(\",\")\r\n weights = weights.split(\",\")\r\n assert len(rewards) == len(weights)\r\n return [(r, float(w)) for r, w in zip(rewards, weights)]\r\n\r\ndef compute_norms(args):\r\n results = args.results if args.results[-1] != '/' \\\r\n else args.results[:-1]\r\n\r\n params = []\r\n l_1 = []\r\n l_2 = []\r\n lc = []\r\n rew = []\r\n true_rew = []\r\n true_rew2 = []\r\n epochs = None\r\n log_probs = []\r\n base_log_probs = []\r\n vfs = []\r\n base_vfs = []\r\n kls = []\r\n car_kls = []\r\n js = []\r\n car_js = []\r\n h = []\r\n car_h = []\r\n e = []\r\n m = []\r\n not_created = True\r\n\r\n \r\n\r\n proxy_specification = reward_specification(args.proxy, args.proxy_weights)\r\n true_specification = reward_specification(args.true, args.true_weights)\r\n\r\n if args.true2 and args.true2_weights:\r\n true2_specification = reward_specification(args.true2, args.true2_weights)\r\n else:\r\n true2_specification = None\r\n\r\n for directory in os.listdir(results):\r\n # misspecification = float(directory.split(\"_\")[-1])\r\n misspecification = []\r\n for d in os.listdir(os.path.join(results, directory)):\r\n result_dir = os.path.join(results, directory, d)\r\n if not os.path.isdir(result_dir):\r\n continue \r\n try:\r\n config = 
get_rllib_config(result_dir)\r\n except:\r\n print(f\"Loading {result_dir} config failed\")\r\n continue\r\n print(result_dir)\r\n\r\n if args.skip != -1:\r\n epochs = [str(i) for i in range(args.low, args.high+1, args.skip)]\r\n print(f'Epochs: {epochs}')\r\n # Get the proxy reward at all the epochs\r\n else:\r\n data = pd.read_csv(os.path.join(result_dir, 'progress.csv'))\r\n proxy = data['episode_reward_mean'].to_numpy(dtype=float)[49::50]\r\n steps = data['training_iteration'].to_numpy(dtype=int)[49::50]\r\n max_idx = np.argmax(proxy)\r\n last_idx = -1\r\n print(f'Max proxy of {proxy[max_idx]} achieved at epoch {steps[max_idx]}.')\r\n print(f'Last proxy of {proxy[last_idx]} achieved at epoch {steps[last_idx]}.')\r\n if max_idx == -1:\r\n epochs = [str(steps[-1])]\r\n else:\r\n epochs = [str(steps[max_idx]), str(steps[-1])]\r\n\r\n # Run on only one cpu for rendering purposes\r\n config['num_workers'] = 0\r\n\r\n flow_params = get_flow_params(config)\r\n\r\n # hack for old pkl files\r\n sim_params = flow_params['sim']\r\n setattr(sim_params, 'num_clients', 1)\r\n\r\n # for hacks for old pkl files \r\n if not hasattr(sim_params, 'use_ballistic'):\r\n sim_params.use_ballistic = False\r\n\r\n # Determine agent and checkpoint\r\n config_run = config['env_config']['run'] if 'run' in config['env_config'] \\\r\n else None\r\n if args.run and config_run:\r\n if args.run != config_run:\r\n print('visualizer_rllib.py: error: run argument '\r\n + '\\'{}\\' passed in '.format(args.run)\r\n + 'differs from the one stored in params.json '\r\n + '\\'{}\\''.format(config_run))\r\n sys.exit(1)\r\n if args.run:\r\n agent_cls = get_agent_class(args.run)\r\n elif config_run:\r\n agent_cls = get_agent_class(config_run)\r\n else:\r\n print('visualizer_rllib.py: error: could not find flow parameter '\r\n '\\'run\\' in params.json, '\r\n 'add argument --run to provide the algorithm or model used '\r\n 'to train the results\\n e.g. 
'\r\n 'python ./visualizer_rllib.py /tmp/ray/result_dir 1 --run PPO')\r\n sys.exit(1)\r\n\r\n sim_params.restart_instance = True\r\n dir_path = os.path.dirname(os.path.realpath(__file__))\r\n\r\n # Create and register a gym+rllib env\r\n create_env, env_name = make_create_env(params=flow_params, reward_specification=proxy_specification)\r\n register_env(env_name, create_env)\r\n create_env2, env_name2 = make_create_env(params=flow_params, reward_specification=proxy_specification)\r\n register_env(env_name2, create_env2)\r\n\r\n # Start the environment with the gui turned on and a path for the\r\n # emission file\r\n env_params = flow_params['env']\r\n env_params.restart_instance = False\r\n\r\n # lower the horizon if testing\r\n if args.horizon:\r\n config['horizon'] = args.horizon\r\n env_params.horizon = args.horizon\r\n\r\n # create the agent that will be used to compute the actions\r\n del config['callbacks']\r\n\r\n agent = agent_cls(env=env_name, config=config)\r\n if args.baseline:\r\n if not_created:\r\n try:\r\n config2 = get_rllib_config(args.baseline)\r\n except:\r\n print(f\"###### Loading baseline agent config failed ######\")\r\n break\r\n del config2['callbacks']\r\n baseline_agent = agent_cls(env=env_name2, config=config2)\r\n checkpoint = args.baseline + '/checkpoint_5000/checkpoint-5000' \r\n baseline_agent.restore(checkpoint)\r\n not_created = False\r\n print(\"====== Using baseline agent ======\")\r\n else:\r\n assert False\r\n if not not_created:\r\n assert False\r\n baseline_agent = None\r\n\r\n if hasattr(agent, \"local_evaluator\") and os.environ.get(\"TEST_FLAG\") != 'True':\r\n env = agent.local_evaluator.env\r\n else:\r\n env = gym.make(env_name)\r\n\r\n # if restart_instance, don't restart here because env.reset will restart later\r\n if not sim_params.restart_instance:\r\n env.restart_simulation(sim_params=sim_params, render=sim_params.render)\r\n\r\n weights = [w for _, w in agent.get_weights()['default_policy'].items()]\r\n names = [k for k, _ in agent.get_weights()['default_policy'].items()]\r\n sizes = [w.shape for w in weights[::4]]\r\n p = np.sum([np.prod(s) for s in sizes]).item()\r\n print(p, sizes)\r\n\r\n for epoch in epochs:\r\n checkpoint = result_dir + '/checkpoint_' + epoch\r\n checkpoint = checkpoint + '/checkpoint-' + epoch\r\n if not os.path.isfile(checkpoint):\r\n break\r\n agent.restore(checkpoint)\r\n\r\n r, tr, tr2, logp, base_logp, vf, base_vf, kl, car_kl, js_dist, car_js_dist, h_dist, car_h_dist = \\\r\n rollout(env, args, agent, baseline_agent, true_specification, true2_specification=true2_specification)\r\n\r\n if r is None:\r\n continue\r\n \r\n params.append(p)\r\n rew.append(r)\r\n true_rew.append(tr)\r\n true_rew2.append(tr2)\r\n log_probs.append(logp.tolist())\r\n base_log_probs.append(base_logp.tolist())\r\n vfs.append(vf.tolist())\r\n base_vfs.append(vf.tolist())\r\n kls.append(kl.tolist())\r\n car_kls.append(car_kl.tolist())\r\n js.append(js_dist.tolist())\r\n car_js.append(car_js_dist.tolist())\r\n h.append(h_dist.tolist())\r\n car_h.append(car_h_dist.tolist())\r\n e.append(epoch)\r\n #m.append(misspecification)\r\n\r\n # try:\r\n # sv = np.array([scipy.linalg.svd(w, compute_uv=False, lapack_driver='gesvd')[0] for w in weights[::4]])\r\n # kernel_norm1 = [np.linalg.norm(w, ord=1) for w in weights[::4]]\r\n # kernel_norm2 = [np.linalg.norm(w, ord=2) for w in weights[::4]]\r\n # bias_norm1 = [np.linalg.norm(w, ord=1) for w in weights[1::4]]\r\n # bias_norm2 = [np.linalg.norm(w, ord=2) for w in weights[1::4]]\r\n\r\n # 
l_1.append(float(max(np.max(kernel_norm1), np.max(bias_norm1))))\r\n # l_2.append(float(max(np.max(kernel_norm2), np.max(bias_norm2))))\r\n # lc.append(np.prod(sv).item()) \r\n # else:\r\n # base_log_probs.append([])\r\n # base_vfs.append([])\r\n # kls.append([])\r\n # car_kls.append([])\r\n \r\n # except:\r\n # continue\r\n\r\n # terminate the environment\r\n env.unwrapped.terminate()\r\n\r\n\r\n with open(f'{results}.json', 'a', encoding='utf-8') as f:\r\n json.dump({'m': m, 'e': e, 'params': params, \r\n 'rew': rew, 'true_rew': true_rew, 'true_rew2': true_rew2,\r\n 'log_probs': log_probs, 'base_log_probs': base_log_probs, \r\n 'vfs': vfs, 'base_vfs': base_vfs, \r\n 'kls': kls, 'car_kls': car_kls, \r\n 'js': js, 'car_js': car_js, \r\n 'h': h, 'car_h': car_h}, f)\r\n f.close()\r\n\r\n #plot(args, l_1, l_2, lc, p2r, rew, e)\r\n \r\n\r\ndef create_parser():\r\n \"\"\"Create the parser to capture CLI arguments.\"\"\"\r\n parser = argparse.ArgumentParser(\r\n formatter_class=argparse.RawDescriptionHelpFormatter,\r\n description='[Flow] Evaluates a reinforcement learning agent '\r\n 'given a checkpoint.',\r\n epilog=EXAMPLE_USAGE)\r\n\r\n # required input parameters\r\n parser.add_argument(\r\n 'results', type=str, help='File with list of directory containing results')\r\n parser.add_argument(\r\n 'proxy', type=str, help='Proxy reward functions to include'\r\n )\r\n parser.add_argument(\r\n 'proxy_weights', type=str, help='Weights for proxy rewards'\r\n )\r\n parser.add_argument(\r\n 'true', type=str, help='True reward functions to include'\r\n )\r\n parser.add_argument(\r\n 'true_weights', type=str, help='Weights for true rewards'\r\n )\r\n\r\n # Optional inputs\r\n parser.add_argument(\r\n '--true2', type=str, default=None, help='True reward functions to include'\r\n )\r\n parser.add_argument(\r\n '--true2_weights', type=str, default=None, help='Weights for true2 rewards'\r\n )\r\n parser.add_argument(\r\n '--run',\r\n type=str,\r\n help='The algorithm or model to train. This may refer to '\r\n 'the name of a built-in algorithm (e.g. RLLib\\'s DQN '\r\n 'or PPO), or a user-defined trainable function or '\r\n 'class registered in the tune registry. '\r\n 'Required for results trained with flow-0.2.0 and before.')\r\n parser.add_argument(\r\n '--num_rollouts',\r\n type=int,\r\n default=4,\r\n help='The number of rollouts to visualize.')\r\n parser.add_argument(\r\n '--horizon',\r\n default=300,\r\n type=int,\r\n help='Specifies the horizon.')\r\n parser.add_argument('--low', type=int, default=500, help='the epoch to start plotting from')\r\n parser.add_argument('--high', type=int, default=5000, help='the epoch to stop plotting at')\r\n parser.add_argument('--skip', type=int, default=-1, help='the step size between plotted epochs')\r\n parser.add_argument('--baseline', type=str, default=None, help=\"whether or not to use a baseline model for epochs\")\r\n\r\n return parser\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = create_parser()\r\n args = parser.parse_args()\r\n ray.init(num_cpus=1, log_to_driver=False)\r\n compute_norms(args)\r\n"
] | [
[
"pandas.read_csv",
"numpy.asscalar",
"pandas.DataFrame"
],
[
"numpy.squeeze",
"numpy.geomspace",
"numpy.std",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.square",
"numpy.split",
"numpy.log2",
"numpy.log",
"numpy.sqrt",
"numpy.linalg.inv",
"numpy.linalg.det",
"numpy.argmax",
"scipy.stats.multivariate_normal",
"numpy.mean",
"numpy.prod",
"numpy.array",
"numpy.exp",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ai-di/Brancher | [
"01d51137b0e6fc81512994c21cc3a19287353767",
"01d51137b0e6fc81512994c21cc3a19287353767",
"01d51137b0e6fc81512994c21cc3a19287353767",
"01d51137b0e6fc81512994c21cc3a19287353767",
"01d51137b0e6fc81512994c21cc3a19287353767",
"01d51137b0e6fc81512994c21cc3a19287353767"
] | [
"tests/test_autoregressive.py",
"examples/MAP_logistic_regression.py",
"development_playgrounds/WVGD_MNIST_logistic_regression.py",
"development_playgrounds/WVGD_playground.py",
"examples/logNormal_normal.py",
"development_playgrounds/DeepBelief_playground.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom brancher.variables import RootVariable, RandomVariable, ProbabilisticModel\nfrom brancher.standard_variables import NormalVariable, LogNormalVariable, BetaVariable\nfrom brancher import inference\nimport brancher.functions as BF\n\n# Probabilistic model #\nT = 100\n\nnu = LogNormalVariable(0.3, 1., 'nu')\nx0 = NormalVariable(0., 1., 'x0')\nb = BetaVariable(0.5, 1.5, 'b')\n\nx = [x0]\nnames = [\"x0\"]\nfor t in range(1,T):\n names.append(\"x{}\".format(t))\n x.append(NormalVariable(b * x[t - 1], nu, names[t]))\nAR_model = ProbabilisticModel(x)\n\n# Generate data #\ndata = AR_model._get_sample(number_samples=1)\ntime_series = [float(data[xt].cpu().detach().numpy()) for xt in x]\ntrue_b = data[b].cpu().detach().numpy()\ntrue_nu = data[nu].cpu().detach().numpy()\nprint(\"The true coefficient is: {}\".format(float(true_b)))\n\n# Observe data #\n[xt.observe(data[xt][:, 0, :]) for xt in x]\n\n# Variational distribution #\nQnu = LogNormalVariable(0.5, 1., \"nu\", learnable=True)\nQb = BetaVariable(0.5, 0.5, \"b\", learnable=True)\nvariational_posterior = ProbabilisticModel([Qb, Qnu])\nAR_model.set_posterior_model(variational_posterior)\n\n# Inference #\ninference.perform_inference(AR_model,\n number_iterations=200,\n number_samples=300,\n optimizer='Adam',\n lr=0.05)\nloss_list = AR_model.diagnostics[\"loss curve\"]\n\n\n# Statistics\nposterior_samples = AR_model._get_posterior_sample(2000)\nnu_posterior_samples = posterior_samples[nu].cpu().detach().numpy().flatten()\nb_posterior_samples = posterior_samples[b].cpu().detach().numpy().flatten()\nb_mean = np.mean(b_posterior_samples)\nb_sd = np.sqrt(np.var(b_posterior_samples))\nprint(\"The estimated coefficient is: {} +- {}\".format(b_mean, b_sd))\n\n# Two subplots, unpack the axes array immediately\nf, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)\nax1.plot(time_series)\nax1.set_title(\"Time series\")\nax2.plot(np.array(loss_list))\nax2.set_title(\"Convergence\")\nax2.set_xlabel(\"Iteration\")\nax3.hist(b_posterior_samples, 25)\nax3.axvline(x=true_b, lw=2, c=\"r\")\nax3.set_title(\"Posterior samples (b)\")\nax3.set_xlim(0,1)\nax4.hist(nu_posterior_samples, 25)\nax4.axvline(x=true_nu, lw=2, c=\"r\")\nax4.set_title(\"Posterior samples (nu)\")\nplt.show()",
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets\n\nfrom brancher.variables import RootVariable, ProbabilisticModel\nfrom brancher.standard_variables import NormalVariable, CategoricalVariable, EmpiricalVariable, RandomIndices\nimport brancher.functions as BF\n\nfrom brancher import inference\nfrom brancher.inference import MAP\nfrom brancher.visualizations import plot_particles\n\n#TODO: Number of particles interface: Work in progress\n\n# Data\nnumber_regressors = 4\nnumber_output_classes = 3\ndataset_size = 10\ndataset = datasets.load_iris()\nind = list(range(dataset[\"target\"].shape[0]))\nnp.random.shuffle(ind)\ninput_variable = dataset[\"data\"][ind[:dataset_size], :].astype(\"float32\")\noutput_labels = dataset[\"target\"][ind[:dataset_size]].astype(\"int32\")\n\n# Data sampling model\nminibatch_size = dataset_size\nminibatch_indices = RandomIndices(dataset_size=dataset_size, batch_size=minibatch_size, name=\"indices\", is_observed=True)\nx = EmpiricalVariable(input_variable, indices=minibatch_indices, name=\"x\", is_observed=True)\nlabels = EmpiricalVariable(output_labels, indices=minibatch_indices, name=\"labels\", is_observed=True)\n\n# Architecture parameters\nweights = NormalVariable(np.zeros((number_output_classes, number_regressors)),\n 10 * np.ones((number_output_classes, number_regressors)), \"weights\")\n\n# Forward pass\nfinal_activations = BF.matmul(weights, x)\nk = CategoricalVariable(logits=final_activations, name=\"k\")\n\n# Probabilistic model\nmodel = ProbabilisticModel([k])\n\n# Observations\nk.observe(labels)\n\n# Variational model\ninitial_weights = np.random.normal(0., 1., (number_output_classes, number_regressors))\nmodel.set_posterior_model(ProbabilisticModel([RootVariable(initial_weights,\n name=\"weights\",\n learnable=True)]))\n\n# Inference\ninference.perform_inference(model,\n inference_method=MAP(),\n number_iterations=3000,\n number_samples=100,\n optimizer=\"SGD\",\n lr=0.0025)\nloss_list = model.diagnostics[\"loss curve\"]\nplt.show()\n\n# Test accuracy\ntest_size = len(ind[dataset_size:])\nnum_images = test_size*3\ntest_indices = RandomIndices(dataset_size=test_size, batch_size=1, name=\"test_indices\", is_observed=True)\ntest_images = EmpiricalVariable(dataset[\"data\"][ind[dataset_size:], :].astype(\"float32\"),\n indices=test_indices, name=\"x_test\", is_observed=True)\ntest_labels = EmpiricalVariable(dataset[\"target\"][ind[dataset_size:]].astype(\"int32\"),\n indices=test_indices, name=\"labels\", is_observed=True)\ntest_model = ProbabilisticModel([test_images, test_labels])\n\n\n\ns = 0\nscores_0 = []\ntest_image_list = []\ntest_label_list = []\nfor _ in range(num_images):\n test_sample = test_model._get_sample(1)\n test_image, test_label = test_sample[test_images], test_sample[test_labels]\n test_image_list.append(test_image)\n test_label_list.append(test_label)\n\nfor test_image, test_label in zip(test_image_list,test_label_list):\n model_output = np.reshape(np.mean(model._get_posterior_sample(80, input_values={x: test_image})[k].detach().numpy(), axis=0), newshape=(number_output_classes,))\n output_label = int(np.argmax(model_output))\n scores_0.append(1 if output_label == int(test_label.detach().numpy()) else 0)\n s += 1 if output_label == int(test_label.detach().numpy()) else 0\nprint(\"Accuracy: {} %\".format(100*s/float(num_images)))",
"import chainer\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom brancher.variables import RootVariable, ProbabilisticModel\nfrom brancher.standard_variables import NormalVariable, CategoricalVariable, EmpiricalVariable, RandomIndices\nfrom brancher import inference\nimport brancher.functions as BF\n\nfrom brancher.particle_inference_tools import VoronoiSet\nfrom brancher import inference\nfrom brancher.inference import WassersteinVariationalGradientDescent as WVGD\n\n# Data\nnumber_pixels = 28*28\nnumber_output_classes = 10\ntrain, test = chainer.datasets.get_mnist()\n#dataset_size = len(train)\ndataset_size = 50\ninput_variable = np.array([np.reshape(image[0], newshape=(number_pixels, 1)) for image in train][0:dataset_size]).astype(\"float32\")\noutput_labels = np.array([image[1]*np.ones((1, 1)) for image in train][0:dataset_size]).astype(\"int32\")\n\n# Data sampling model\nminibatch_size = 10\nminibatch_indices = RandomIndices(dataset_size=dataset_size, batch_size=minibatch_size, name=\"indices\", is_observed=True)\nx = EmpiricalVariable(input_variable, indices=minibatch_indices, name=\"x\", is_observed=True)\nlabels = EmpiricalVariable(output_labels, indices=minibatch_indices, name=\"labels\", is_observed=True)\n\n# Architecture parameters\nweights = NormalVariable(np.zeros((number_output_classes, number_pixels)),\n 10 * np.ones((number_output_classes, number_pixels)), \"weights\")\n\n# Forward pass\nfinal_activations = BF.matmul(weights, x)\nk = CategoricalVariable(logits=final_activations, name=\"k\")\n\n# Probabilistic model\nmodel = ProbabilisticModel([k])\n\n# Observations\nk.observe(labels)\n\n# Variational model\nnum_particles = 1 #10\ninitial_locations = [np.random.normal(0., 1., (number_output_classes, 28*28))\n for _ in range(num_particles)]\nparticles = [ProbabilisticModel([RootVariable(location, name=\"weights\", learnable=True)])\n for location in initial_locations]\n\n# Importance sampling distributions\nvariational_samplers = [ProbabilisticModel([NormalVariable(loc=location, scale=0.1,\n name=\"weights\", learnable=True)])\n for location in initial_locations]\n\n# Inference\ninference_method = WVGD(variational_samplers=variational_samplers,\n particles=particles,\n biased=False)\ninference.perform_inference(model,\n inference_method=inference_method,\n number_iterations=2000,\n number_samples=20,\n optimizer=\"Adam\",\n lr=0.0025,\n posterior_model=particles,\n pretraining_iterations=0)\nloss_list = model.diagnostics[\"loss curve\"]\n\n\n# ELBO\nprint(model.posterior_model.estimate_log_model_evidence(number_samples=10000))\n\n# # Local variational models\n# plt.plot(loss_list)\n# plt.show()\n#\n# # Test accuracy\n# num_images = 2000\n# test_size = len(test)\n# test_indices = RandomIndices(dataset_size=test_size, batch_size=1, name=\"test_indices\", is_observed=True)\n# test_images = EmpiricalVariable(np.array([np.reshape(image[0], newshape=(number_pixels, 1)) for image in test]).astype(\"float32\"),\n# indices=test_indices, name=\"x_test\", is_observed=True)\n# test_labels = EmpiricalVariable(np.array([image[1] * np.ones((1, 1))\n# for image in test]).astype(\"int32\"), indices=test_indices, name=\"labels\", is_observed=True)\n# test_model = ProbabilisticModel([test_images, test_labels])\n#\n# s = 0\n# model.set_posterior_model(variational_samplers[0])\n# scores_0 = []\n#\n# test_image_list = []\n# test_label_list = []\n# for _ in range(num_images):\n# test_sample = test_model._get_sample(1)\n# test_image, test_label = test_sample[test_images], 
test_sample[test_labels]\n# test_image_list.append(test_image)\n# test_label_list.append(test_label)\n#\n# for test_image, test_label in zip(test_image_list,test_label_list):\n# model_output = np.reshape(np.mean(model._get_posterior_sample(10, input_values={x: test_image})[k].data, axis=0), newshape=(10,))\n# output_label = int(np.argmax(model_output))\n# scores_0.append(1 if output_label == int(test_label.data) else 0)\n# s += 1 if output_label == int(test_label.data) else 0\n# print(\"Accuracy 0: {} %\".format(100*s/float(num_images)))\n#\n# s = 0\n# model.set_posterior_model(variational_samplers[1])\n# scores_1 = []\n# for test_image, test_label in zip(test_image_list,test_label_list):\n# model_output = np.reshape(np.mean(model._get_posterior_sample(10, input_values={x: test_image})[k].data, axis=0), newshape=(10,))\n# output_label = int(np.argmax(model_output))\n# scores_1.append(1 if output_label == int(test_label.data) else 0)\n# s += 1 if output_label == int(test_label.data) else 0\n# print(\"Accuracy 1: {} %\".format(100*s/float(num_images)))\n#\n# s = 0\n# scores_ne = []\n# for test_image, test_label in zip(test_image_list,test_label_list):\n#\n# model.set_posterior_model(variational_samplers[0])\n# model_output0 = np.reshape(np.mean(model._get_posterior_sample(10, input_values={x: test_image})[k].data, axis=0), newshape=(10,))\n#\n# model.set_posterior_model(variational_samplers[1])\n# model_output1 = np.reshape(np.mean(model._get_posterior_sample(10, input_values={x: test_image})[k].data, axis=0), newshape=(10,))\n#\n# model_output = 0.5*(model_output0 + model_output1)\n#\n# output_label = int(np.argmax(model_output))\n# scores_ne.append(1 if output_label == int(test_label.data) else 0)\n# s += 1 if output_label == int(test_label.data) else 0\n# print(\"Accuracy Naive Ensemble: {} %\".format(100*s/float(num_images)))\n#\n# corr = np.corrcoef(scores_0, scores_1)[0,1]\n# print(\"Correlation: {}\".format(corr))\n#\n# print(\"TO DO\")",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport chainer\n\nfrom brancher.variables import RootVariable, ProbabilisticModel\nfrom brancher.particle_inference_tools import VoronoiSet\nfrom brancher.standard_variables import EmpiricalVariable, NormalVariable, LogNormalVariable\nfrom brancher import inference\nfrom brancher.inference import WassersteinVariationalGradientDescent as WVGD\nfrom brancher.visualizations import ensemble_histogram\nfrom brancher.pandas_interface import reformat_sample_to_pandas\n\n# Model\ndimensionality = 1\ntheta = NormalVariable(loc=0., scale=2., name=\"theta\")\nx = NormalVariable(theta ** 2, scale=0.2, name=\"x\")\nmodel = ProbabilisticModel([x, theta])\n\n# Generate data\nN = 3\ntheta_real = 0.1\nx_real = NormalVariable(theta_real ** 2, 0.2, \"x\")\ndata = x_real._get_sample(number_samples=N)\n\n# Observe data\nx.observe(data[x_real][:, 0, :])\n\n# Variational model\nnum_particles = 2\ninitial_locations = [-2, 2]\n\n#initial_locations = [0, 0.1]\nparticles = [ProbabilisticModel([RootVariable(p, name=\"theta\", learnable=True)])\n for p in initial_locations]\n\n# Importance sampling distributions\nvariational_samplers = [ProbabilisticModel([NormalVariable(loc=location, scale=0.2,\n name=\"theta\", learnable=True)])\n for location in initial_locations]\n\n# Inference\ninference_method = WVGD(variational_samplers=variational_samplers,\n particles=particles,\n biased=False,\n number_post_samples=80000)\ninference.perform_inference(model,\n inference_method=inference_method,\n number_iterations=1500,\n number_samples=50,\n optimizer=\"Adam\",\n lr=0.005,\n posterior_model=particles,\n pretraining_iterations=0)\nloss_list = model.diagnostics[\"loss curve\"]\n\n# Local variational models\nplt.plot(loss_list)\nplt.show()\n\n#Plot posterior\n#from brancher.visualizations import plot_density\n#plot_density(model.posterior_model, variables=[\"theta\"])\n#plt.show()\n\n# ELBO\nprint(model.posterior_model.estimate_log_model_evidence(number_samples=10000))",
"import brancher.config as cfg\ncfg.set_device(\"cpu\")\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom brancher.variables import ProbabilisticModel\nfrom brancher.standard_variables import NormalVariable, LogNormalVariable\nfrom brancher import inference\n\n# Normal model\nnu = LogNormalVariable(0., 1., \"nu\")\nmu = NormalVariable(0., 10., \"mu\")\nx = NormalVariable(mu, nu, \"x\")\nmodel = ProbabilisticModel([x])\n\n# # Generate data\nnu_real = 1.\nmu_real = -2.\ndata = model.get_sample(number_samples=20, input_values={mu: mu_real, nu: nu_real})\n\n# Observe data\nx.observe(data)\n\n# Variational model\nQnu = LogNormalVariable(0., 1., \"nu\", learnable=True)\nQmu = NormalVariable(0., 1., \"mu\", learnable=True)\nmodel.set_posterior_model(ProbabilisticModel([Qmu, Qnu]))\n\n# Inference\ninference.perform_inference(model,\n number_iterations=300,\n number_samples=100,\n optimizer='SGD',\n lr=0.0001)\nloss_list = model.diagnostics[\"loss curve\"]\n\nplt.plot(loss_list)\nplt.title(\"Loss (negative ELBO)\")\nplt.show()\n\nfrom brancher.visualizations import plot_posterior\n\nplot_posterior(model, variables=[\"mu\", \"nu\", \"x\"])\nplt.show()",
"import torch\nimport torch.nn as nn\nimport torchvision\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom brancher.variables import RootVariable, ProbabilisticModel\nfrom brancher.standard_variables import NormalVariable, EmpiricalVariable, BinomialVariable, DeterministicVariable, LogNormalVariable\nfrom brancher import inference\nfrom brancher.inference import ReverseKL\nfrom brancher.gradient_estimators import Taylor1Estimator, PathwiseDerivativeEstimator, BlackBoxEstimator\nimport brancher.functions as BF\n\nfrom brancher.config import device\n\n# Data\nimage_size = 28*28\nlatent_size1 = 2\nlatent_size2 = 100\n\ntrain = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=None)\ntest = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=None)\ndataset_size = len(train)\n#dataset = torch.Tensor(np.reshape(train.train_data.numpy(), newshape=(dataset_size, image_size, 1))).double().to(device)\ndataset = np.reshape(train.train_data.numpy(), newshape=(dataset_size, image_size, 1))\ndata_mean = np.mean(dataset)\ndataset = (dataset > data_mean).astype(\"int32\")\n\n## Encoder 1 ##\nclass EncoderArchitecture1(nn.Module):\n\n def __init__(self, image_size, latent_size2, hidden_size=100):\n super(EncoderArchitecture1, self).__init__()\n self.l1 = nn.Linear(image_size, hidden_size)\n self.f1 = nn.ReLU()\n self.l2 = nn.Linear(hidden_size, latent_size2) # Latent mean output\n self.l3 = nn.Linear(hidden_size, latent_size2) # Latent log sd output\n self.softplus = nn.Softplus()\n\n def __call__(self, x):\n h0 = self.f1(self.l1(x.squeeze()))\n output_mean = self.l2(h0)\n output_log_sd = self.l3(h0)\n return {\"mean\": output_mean, \"sd\": self.softplus(output_log_sd) + 0.01}\n\n## Encoder 2 ##\nclass EncoderArchitecture2(nn.Module):\n\n def __init__(self, latent_size1, latent_size2, hidden_size=50):\n super(EncoderArchitecture2, self).__init__()\n self.l1 = nn.Linear(latent_size2, hidden_size)\n self.f1 = nn.ReLU()\n self.l2 = nn.Linear(hidden_size, latent_size1) # Latent mean output\n self.l3 = nn.Linear(hidden_size, latent_size1) # Latent log sd output\n self.softplus = nn.Softplus()\n\n def __call__(self, x):\n h0 = self.f1(self.l1(x.squeeze()))\n output_mean = self.l2(h0)\n output_log_sd = self.l3(h0)\n return {\"mean\": output_mean, \"sd\": self.softplus(output_log_sd) + 0.01}\n\n\n## Decoder ##\nclass DecoderArchitecture1(nn.Module):\n\n def __init__(self, latent_size1, latent_size2, hidden_size=30):\n super(DecoderArchitecture1, self).__init__()\n self.l1 = nn.Linear(latent_size1, hidden_size)\n self.f1 = nn.ReLU()\n self.l2 = nn.Linear(hidden_size, latent_size2) # Latent mean output\n #self.l3 = nn.Linear(hidden_size, latent_size2) # Latent SD output\n #self.softplus = nn.Softplus()\n\n def __call__(self, x):\n h0 = self.f1(self.l1(x))\n output_mean = self.l2(h0)\n #output_log_sd = self.l3(h0)\n return {\"mean\": output_mean}\n\nclass DecoderArchitecture2(nn.Module):\n\n def __init__(self, latent_size2, image_size):\n super(DecoderArchitecture2, self).__init__()\n self.l1 = nn.Linear(latent_size2, image_size)\n\n def __call__(self, x):\n output_mean = self.l1(x)\n return {\"mean\": output_mean}\n\n\n# Initialize encoder and decoders\nencoder1 = BF.BrancherFunction(EncoderArchitecture1(image_size=image_size, latent_size2=latent_size2))\nencoder2 = BF.BrancherFunction(EncoderArchitecture2(latent_size1=latent_size1, latent_size2=latent_size2))\ndecoder1 = BF.BrancherFunction(DecoderArchitecture1(latent_size1=latent_size1, 
latent_size2=latent_size2))\ndecoder2 = BF.BrancherFunction(DecoderArchitecture2(latent_size2=latent_size2, image_size=image_size))\n\n# Generative model\nz1sd = 1.\nz2sd = 0.5 #0.01\nz1 = NormalVariable(np.zeros((latent_size1,)), z1sd*np.ones((latent_size1,)), name=\"z1\")\ndecoder_output1 = DeterministicVariable(decoder1(z1), name=\"decoder_output1\")\nz2 = NormalVariable(BF.relu(decoder_output1[\"mean\"]), z2sd*np.ones((latent_size2,)), name=\"z2\")\ndecoder_output2 = DeterministicVariable(decoder2(z2), name=\"decoder_output2\")\nx = BinomialVariable(total_count=1, logits=decoder_output2[\"mean\"], name=\"x\")\nmodel = ProbabilisticModel([x, z1, z2])\n\n# Amortized variational distribution\nb_size = 10\nQx = EmpiricalVariable(dataset, batch_size=b_size, name=\"x\", is_observed=True)\nencoder_output1 = DeterministicVariable(encoder1(Qx), name=\"encoder_output1\")\nQz2 = NormalVariable(encoder_output1[\"mean\"], encoder_output1[\"sd\"], name=\"z2\")\nencoder_output2 = DeterministicVariable(encoder2(encoder_output1[\"mean\"]), name=\"encoder_output2\")\nQz1 = NormalVariable(encoder_output2[\"mean\"], encoder_output2[\"sd\"], name=\"z1\")\nmodel.set_posterior_model(ProbabilisticModel([Qx, Qz1, Qz2]))\n\nmodel.get_sample(1)\nmodel.posterior_model.get_sample(1)\n\n# Joint-contrastive inference\nnum_itr = 2000\ninference.perform_inference(model,\n inference_method=ReverseKL(gradient_estimator=PathwiseDerivativeEstimator),\n number_iterations=num_itr,\n number_samples=1,\n optimizer=\"Adam\",\n lr=0.0005)\nloss_list1 = model.diagnostics[\"loss curve\"]\n\nN_ELBO = 20\nN_ELBO_ITR = 1\nELBO = 0\nfor n in range(N_ELBO_ITR):\n ELBO += (model.estimate_log_model_evidence(N_ELBO)/float(N_ELBO_ITR)).detach().numpy()\nprint(ELBO)\n\n\n#\n# sigmoid = lambda x: 1/(np.exp(-x) + 1)\n# image_grid = []\n# z_range = np.linspace(-3, 3, 30)\n# for z1a in z_range:\n# image_row = []\n# for z1b in z_range:\n# sample = model.get_sample(1, input_values={z1: np.array([z1a, z1b])})\n# image = sigmoid(np.reshape(sample[\"decoder_output2\"].values[0][\"mean\"], newshape=(28, 28)))\n# image_row += [image]\n# image_grid += [np.concatenate(image_row, axis=0)]\n# image_grid = np.concatenate(image_grid, axis=1)\n# plt.imshow(image_grid)\n# plt.colorbar()\n# plt.show()\n\n# Initialize encoder and decoders\nencoder1 = BF.BrancherFunction(EncoderArchitecture1(image_size=image_size, latent_size2=latent_size2))\nencoder2 = BF.BrancherFunction(EncoderArchitecture2(latent_size1=latent_size1, latent_size2=latent_size2))\ndecoder1 = BF.BrancherFunction(DecoderArchitecture1(latent_size1=latent_size1, latent_size2=latent_size2))\ndecoder2 = BF.BrancherFunction(DecoderArchitecture2(latent_size2=latent_size2, image_size=image_size))\n\n# Generative model\nz1 = NormalVariable(np.zeros((latent_size1,)), z1sd*np.ones((latent_size1,)), name=\"z1\")\ndecoder_output1 = DeterministicVariable(decoder1(z1), name=\"decoder_output1\")\nz2 = NormalVariable(BF.relu(decoder_output1[\"mean\"]), z2sd*np.ones((latent_size2,)), name=\"z2\")\ndecoder_output2 = DeterministicVariable(decoder2(z2), name=\"decoder_output2\")\nx = BinomialVariable(total_count=1, logits=decoder_output2[\"mean\"], name=\"x\")\nmodel = ProbabilisticModel([x, z1, z2])\n\n# Amortized variational distribution\nQx = EmpiricalVariable(dataset, batch_size=b_size, name=\"x\", is_observed=True)\nencoder_output1 = DeterministicVariable(encoder1(Qx), name=\"encoder_output1\")\nencoder_output2 = DeterministicVariable(encoder2(encoder_output1[\"mean\"]), name=\"encoder_output2\")\n\nl0 = 
0\n\nQlambda11 = RootVariable(l0*np.ones((latent_size1,)), 'lambda11', learnable=True)\nQlambda12 = RootVariable(l0*np.ones((latent_size1,)), 'lambda12', learnable=True)\nQz1 = NormalVariable((1 - BF.sigmoid(Qlambda11))*encoder_output2[\"mean\"],\n BF.sigmoid(Qlambda12) * z2sd + (1 - BF.sigmoid(Qlambda12)) * encoder_output2[\"sd\"], name=\"z1\")\n\nQdecoder_output1 = DeterministicVariable(decoder1(Qz1), name=\"Qdecoder_output1\")\n\nQlambda21 = RootVariable(l0*np.ones((latent_size2,)), 'lambda2', learnable=True)\nQlambda22 = RootVariable(l0*np.ones((latent_size2,)), 'lambda3', learnable=True)\nQz2 = NormalVariable(BF.sigmoid(Qlambda21)*BF.relu(Qdecoder_output1[\"mean\"]) + (1 - BF.sigmoid(Qlambda21))*encoder_output1[\"mean\"],\n BF.sigmoid(Qlambda22) * z2sd + (1 - BF.sigmoid(Qlambda22)) * encoder_output1[\"sd\"], name=\"z2\")\n\n\nmodel.set_posterior_model(ProbabilisticModel([Qx, Qz1, Qz2]))\n\nmodel.get_sample(1)\nmodel.posterior_model.get_sample(1)\n\n# Joint-contrastive inference\ninference.perform_inference(model,\n inference_method=ReverseKL(gradient_estimator=PathwiseDerivativeEstimator),\n number_iterations=num_itr,\n number_samples=1,\n optimizer=\"Adam\",\n lr=0.0005)\nloss_list2 = model.diagnostics[\"loss curve\"]\n\nELBO = 0\nfor n in range(N_ELBO_ITR):\n ELBO += (model.estimate_log_model_evidence(N_ELBO)/float(N_ELBO_ITR)).detach().numpy()\nprint(ELBO)\n\n#Plot results\nplt.plot(loss_list1, label=\"MF\")\nplt.plot(loss_list2, label=\"Structured\")\nplt.legend(loc=\"best\")\nplt.show()\n\nsigmoid = lambda x: 1/(np.exp(-x) + 1)\nimage_grid = []\nz_range = np.linspace(-3, 3, 50)\nfor z1a in z_range:\n image_row = []\n for z1b in z_range:\n sample = model.get_sample(1, input_values={z1: np.array([z1a, z1b])})\n image = sigmoid(np.reshape(sample[\"decoder_output2\"].values[0][\"mean\"], newshape=(28, 28)))\n image_row += [image]\n image_grid += [np.concatenate(image_row, axis=0)]\nimage_grid = np.concatenate(image_grid, axis=1)\nplt.imshow(image_grid)\nplt.colorbar()\nplt.show()\n\npass"
] | [
[
"matplotlib.pyplot.subplots",
"numpy.mean",
"numpy.var",
"numpy.array",
"matplotlib.pyplot.show"
],
[
"sklearn.datasets.load_iris",
"numpy.random.shuffle",
"numpy.ones",
"numpy.random.normal",
"numpy.argmax",
"matplotlib.pyplot.show",
"numpy.zeros"
],
[
"numpy.reshape",
"numpy.random.normal",
"numpy.zeros",
"numpy.ones"
],
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.title"
],
[
"numpy.array",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"torch.nn.Softplus",
"numpy.linspace",
"numpy.reshape",
"numpy.ones",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"matplotlib.pyplot.colorbar",
"torch.nn.Linear",
"numpy.mean",
"numpy.exp",
"torch.nn.ReLU",
"matplotlib.pyplot.show",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AyazSaiyed/hub | [
"597c5726fd72d17f562bffec25e114115dadcac5",
"84ac11ac756050a186cc8bddb54e104323fb9dff"
] | [
"tensorflow_hub/tools/module_search/search.py",
"tensorflow_hub/resolver_test.py"
] | [
"# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tool to rank modules to use in a downstream classification task.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_hub.tools.module_search import utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"dataset\", None,\n \"Specification of a dataset. E.g. use `cifar10#1000` to \"\n \"perform search using 1000 examples from tfds `cifar10` \"\n \"dataset.\")\n\nflags.DEFINE_multi_string(\"module\", None, \"Module to consider in the search\")\n\nflags.DEFINE_string(\"module_list\", None,\n \"Path to text file with a module per line to be considered in the search.\"\n \"Empty lines and lines starting with # are ignored\")\n\n\ndef load_data(data_spec):\n return utils.load_data(**data_spec)\n\n\ndef load_raw_features(data_spec):\n data = load_data(data_spec=data_spec)\n return data.map(lambda x: tf.image.resize(x[\"image\"], (224, 224)))\n\n\ndef load_labels(data_spec):\n data = load_data(data_spec=data_spec)\n return np.array([x for x in data.map(lambda x: x[\"label\"])])\n\n\ndef compute_embeddings(module_spec, data_spec):\n raw_features = load_raw_features(data_spec=data_spec)\n embedding_fn = utils.load_embedding_fn(\n module=module_spec)\n outputs = []\n for batch in raw_features.batch(10):\n outputs.extend(embedding_fn(batch))\n return np.array(outputs)\n\n\ndef compute_score(module_spec, data_spec):\n embeddings = compute_embeddings(module_spec=module_spec,\n data_spec=data_spec)\n distances = utils.compute_distance_matrix_loo(embeddings)\n labels = load_labels(data_spec=data_spec)\n error_rate = utils.knn_errorrate_loo(distances, labels, k=1)\n return np.array(error_rate)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n if not FLAGS.dataset:\n raise app.UsageError(\"--dataset is a required argument.\")\n\n module_list = []\n if FLAGS.module:\n module_list.extend(FLAGS.module)\n\n if FLAGS.module_list:\n with tf.io.gfile.GFile(FLAGS.module_list) as f:\n lines = f.read().split(\"\\n\")\n module_list.extend([l for l in lines if l != \"\" and not l.startswith(\"#\")])\n\n ds_sections = FLAGS.dataset.split(\"#\")\n dataset = ds_sections[0]\n train_examples = int(ds_sections[1]) if len(ds_sections) != 0 else None\n data_spec = {\n \"dataset\": dataset,\n \"split\": \"train\",\n \"num_examples\": train_examples,\n }\n\n results = []\n for module in module_list:\n results.append((\n module, data_spec,\n compute_score(module_spec=module, data_spec=data_spec)))\n\n df = pd.DataFrame(results, columns=[\"module\", \"data\", \"1nn\"])\n df = df.filter([\"module\", \"1nn\"])\n df.sort_values([\"1nn\"])\n df.reset_index(drop=True)\n 
df.set_index(\"module\")\n\n with pd.option_context(\n \"display.max_rows\", None,\n \"display.max_columns\", None,\n \"display.precision\", 3,\n \"max_colwidth\", -1, # Don't truncate columns (e.g. module name).\n \"display.expand_frame_repr\", False, # Don't wrap output.\n ):\n print(\"# Module ranking for %s\" % data_spec)\n print(df)\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow_hub.resolver.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint:disable=g-import-not-at-top,g-statement-before-imports\ntry:\n import mock as mock\nexcept ImportError:\n import unittest.mock as mock\n# pylint:disable=g-import-not-at-top,g-statement-before-imports\n\nimport os\nimport re\nimport socket\nimport tempfile\nimport threading\nimport time\nimport uuid\n\nfrom absl import flags\nimport tensorflow as tf\n\nfrom tensorflow_hub import resolver\nfrom tensorflow_hub import tf_utils\nfrom tensorflow_hub import tf_v1\n\n\nFLAGS = flags.FLAGS\n\n\nclass PathResolverTest(tf.test.TestCase):\n\n def setUp(self):\n super(PathResolverTest, self).setUp()\n self.resolver = resolver.PathResolver()\n\n def testHandleSupported(self):\n os.chdir(os.path.join(self.get_temp_dir()))\n self.assertTrue(self.resolver.is_supported(\"/tmp\"))\n tf_v1.gfile.MkDir(\"foo/\")\n self.assertTrue(self.resolver.is_supported(\"./foo/\"))\n self.assertTrue(self.resolver.is_supported(\"foo/\"))\n # Directory doesn't exist.\n self.assertFalse(self.resolver.is_supported(\"bar/\"))\n self.assertFalse(self.resolver.is_supported(\"foo/bar\"))\n\n def testGetModulePath(self):\n tf_v1.gfile.MkDir(\"/tmp/1234\")\n path = self.resolver(\"/tmp/1234\")\n self.assertEqual(path, \"/tmp/1234\")\n\n\nclass FakeResolver(resolver.Resolver):\n \"\"\"Fake Resolver used to test composite Resolvers.\"\"\"\n\n def __init__(self, prefix):\n self.prefix = prefix\n\n def is_supported(self, handle):\n return handle.startswith(self.prefix)\n\n def __call__(self, handle):\n if handle.endswith(\"error\"):\n raise ValueError(\"error for: \" + handle)\n return handle + \"-resolved_by_\" + self.prefix\n\n\nclass ResolverTest(tf.test.TestCase):\n\n def testCacheDir(self):\n # No cache dir set, None is returned.\n cache_dir = resolver.tfhub_cache_dir()\n self.assertEqual(cache_dir, None)\n # Use temp dir.\n cache_dir = resolver.tfhub_cache_dir(use_temp=True)\n self.assertEquals(cache_dir,\n os.path.join(tempfile.gettempdir(), \"tfhub_modules\"))\n # Use override\n cache_dir = resolver.tfhub_cache_dir(default_cache_dir=\"/d\", use_temp=True)\n self.assertEqual(\"/d\", cache_dir)\n # Use a flag\n FLAGS.tfhub_cache_dir = \"/e\"\n cache_dir = resolver.tfhub_cache_dir(default_cache_dir=\"/d\", use_temp=True)\n self.assertEqual(\"/e\", cache_dir)\n FLAGS.tfhub_cache_dir = \"\"\n # Use env variable\n os.environ[resolver._TFHUB_CACHE_DIR] = \"/f\"\n cache_dir = resolver.tfhub_cache_dir(default_cache_dir=\"/d\", use_temp=True)\n self.assertEqual(\"/f\", cache_dir)\n FLAGS.tfhub_cache_dir = \"/e\"\n cache_dir = resolver.tfhub_cache_dir(default_cache_dir=\"/d\", use_temp=True)\n self.assertEqual(\"/f\", cache_dir)\n FLAGS.tfhub_cache_dir = \"\"\n 
os.unsetenv(resolver._TFHUB_CACHE_DIR)\n\n def testDirSize(self):\n fake_task_uid = 1234\n\n # Create a directory with some files and sub-directory and check its size.\n test_dir = resolver._temp_download_dir(self.get_temp_dir(), fake_task_uid)\n tf_v1.gfile.MakeDirs(test_dir)\n tf_utils.atomic_write_string_to_file(\n os.path.join(test_dir, \"file1\"), \"content1\", False)\n tf_utils.atomic_write_string_to_file(\n os.path.join(test_dir, \"file2\"), \"content2\", False)\n test_sub_dir = os.path.join(test_dir, \"sub_dir\")\n tf_v1.gfile.MakeDirs(test_sub_dir)\n tf_utils.atomic_write_string_to_file(\n os.path.join(test_sub_dir, \"file3\"), \"content3\", False)\n self.assertEqual(3 * 8, resolver._dir_size(test_dir))\n self.assertEqual(8, resolver._dir_size(test_sub_dir))\n\n # Treat the directory as a temporary directory used by a module download by\n # referring to that directory from the lock file.\n fake_lock_filename = resolver._lock_filename(self.get_temp_dir())\n tf_utils.atomic_write_string_to_file(\n fake_lock_filename, resolver._lock_file_contents(fake_task_uid), False)\n self.assertEqual(3 * 8, resolver._locked_tmp_dir_size(fake_lock_filename))\n\n # Check that if temp directory doesn't exist, 0 is returned.\n tf_v1.gfile.DeleteRecursively(test_dir)\n self.assertEqual(0, resolver._locked_tmp_dir_size(fake_lock_filename))\n\n def testLockFileName(self):\n self.assertEquals(\"/a/b/c.lock\", resolver._lock_filename(\"/a/b/c/\"))\n\n def testTempDownloadDir(self):\n self.assertEquals(\"/a/b.t.tmp\", resolver._temp_download_dir(\"/a/b/\", \"t\"))\n\n def testReadTaskUidFromLockFile(self):\n module_dir = os.path.join(self.get_temp_dir(), \"module\")\n task_uid = uuid.uuid4().hex\n lock_filename = resolver._lock_filename(module_dir)\n tf_utils.atomic_write_string_to_file(lock_filename,\n resolver._lock_file_contents(task_uid),\n overwrite=False)\n self.assertEqual(task_uid, resolver._task_uid_from_lock_file(lock_filename))\n\n def testWaitForLockToDisappear_DownloadCompletes(self):\n module_dir = os.path.join(self.get_temp_dir(), \"module\")\n task_uid = uuid.uuid4().hex\n lock_filename = resolver._lock_filename(module_dir)\n # Write lock file\n tf_utils.atomic_write_string_to_file(lock_filename,\n resolver._lock_file_contents(task_uid),\n overwrite=False)\n # Wait for the lock file to disappear (in a separate thread)\n thread = threading.Thread(target=resolver._wait_for_lock_to_disappear,\n args=(\"module\", lock_filename, 600,))\n thread.start()\n # Delete the lock file.\n tf_v1.gfile.Remove(lock_filename)\n thread.join(10)\n # The waiting terminates without errors.\n\n def testWaitForLockToDisappear_DownloadOngoing(self):\n module_dir = os.path.join(self.get_temp_dir(), \"module\")\n task_uid = uuid.uuid4().hex\n lock_filename = resolver._lock_filename(module_dir)\n lock_file_content = resolver._lock_file_contents(task_uid)\n tf_utils.atomic_write_string_to_file(\n lock_filename, lock_file_content, overwrite=False)\n\n lock_expiration_wait_time_secs = 10\n thread = threading.Thread(\n target=resolver._wait_for_lock_to_disappear,\n args=(\n \"module\",\n lock_filename,\n lock_expiration_wait_time_secs,\n ))\n thread.start()\n # Simulate download by writing a file every 1 sec. 
While writes are happing\n # the lock file remains in place.\n tmp_dir = resolver._temp_download_dir(self.get_temp_dir(), task_uid)\n tf_v1.gfile.MakeDirs(tmp_dir)\n for x in range(2 * lock_expiration_wait_time_secs):\n tf_utils.atomic_write_string_to_file(\n os.path.join(tmp_dir, \"file_%d\" % x), \"test\", overwrite=False)\n # While writes are happening the original lock file is in place.\n self.assertEqual(lock_file_content,\n tf_utils.read_file_to_string(lock_filename))\n time.sleep(1)\n thread.join(lock_expiration_wait_time_secs)\n # The waiting terminates without errors.\n\n def testWaitForLockToDisappear_DownloadAborted(self):\n module_dir = os.path.join(self.get_temp_dir(), \"module\")\n task_uid = uuid.uuid4().hex\n lock_filename = resolver._lock_filename(module_dir)\n lock_file_content = resolver._lock_file_contents(task_uid)\n tf_utils.atomic_write_string_to_file(\n lock_filename, lock_file_content, overwrite=False)\n tmp_dir = resolver._temp_download_dir(self.get_temp_dir(), task_uid)\n tf_v1.gfile.MakeDirs(tmp_dir)\n\n thread = threading.Thread(target=resolver._wait_for_lock_to_disappear,\n args=(\"module\", lock_filename, 10,))\n thread.start()\n thread.join(30)\n # Because nobody was writing to tmp_dir, the lock file got reclaimed by\n # resolver._wait_for_lock_to_disappear.\n self.assertFalse(tf_v1.gfile.Exists(lock_filename))\n\n def testModuleAlreadyDownloaded(self):\n # Simulate the case when a rogue process finishes downloading a module\n # right before the current process can perform a rename of a temp directory\n # to a permanent module directory.\n module_dir = os.path.join(self.get_temp_dir(), \"module\")\n def fake_download_fn_with_rogue_behavior(handle, tmp_dir):\n del handle, tmp_dir\n # Create module directory\n tf_v1.gfile.MakeDirs(module_dir)\n tf_utils.atomic_write_string_to_file(\n os.path.join(module_dir, \"file\"), \"content\", False)\n\n self.assertEqual(\n module_dir,\n resolver.atomic_download(\"module\", fake_download_fn_with_rogue_behavior,\n module_dir))\n self.assertEqual(tf_v1.gfile.ListDirectory(module_dir), [\"file\"])\n self.assertFalse(tf_v1.gfile.Exists(resolver._lock_filename(module_dir)))\n parent_dir = os.path.abspath(os.path.join(module_dir, \"..\"))\n self.assertEqual(\n sorted(tf_v1.gfile.ListDirectory(parent_dir)),\n [\"module\", \"module.descriptor.txt\"])\n self.assertRegexpMatches(\n tf_utils.read_file_to_string(\n resolver._module_descriptor_file(module_dir)),\n \"Module: module\\n\"\n \"Download Time: .*\\n\"\n \"Downloader Hostname: %s .PID:%d.\" % (re.escape(socket.gethostname()),\n os.getpid()))\n\n def testModuleConcurrentDownload(self):\n module_dir = os.path.join(self.get_temp_dir(), \"module\")\n\n # To simulate one downloading starting while the other is still in progress,\n # call resolver.atomic_download() from download_fn(). The second download\n # is set up with download_fn() that fails. That download_fn() is not\n # expected to be called.\n def second_download_fn(handle, tmp_dir):\n del handle, tmp_dir\n self.fail(\"This should not be called. 
The module should have been \"\n \"downloaded already.\")\n\n second_download_thread = threading.Thread(\n target=resolver.atomic_download,\n args=(\n \"module\",\n second_download_fn,\n module_dir,\n ))\n\n def first_download_fn(handle, tmp_dir):\n del handle, tmp_dir\n tf_v1.gfile.MakeDirs(module_dir)\n tf_utils.atomic_write_string_to_file(\n os.path.join(module_dir, \"file\"), \"content\", False)\n second_download_thread.start()\n\n self.assertEqual(module_dir,\n resolver.atomic_download(\"module\", first_download_fn,\n module_dir))\n second_download_thread.join(30)\n # The waiting terminates without errors.\n\n def testModuleLockLostDownloadKilled(self):\n module_dir = os.path.join(self.get_temp_dir(), \"module\")\n download_aborted_msg = \"Download aborted.\"\n def kill_download(handle, tmp_dir):\n del handle, tmp_dir\n # Simulate lock loss by removing the lock.\n tf_v1.gfile.Remove(resolver._lock_filename(module_dir))\n # Throw an error to simulate aborted download.\n raise OSError(download_aborted_msg)\n\n try:\n resolver.atomic_download(\"module\", kill_download, module_dir)\n self.fail(\"atomic_download() should have thrown an exception.\")\n except OSError as _:\n pass\n parent_dir = os.path.abspath(os.path.join(module_dir, \"..\"))\n # Test that all files got cleaned up.\n self.assertEqual(tf_v1.gfile.ListDirectory(parent_dir), [])\n\n def testMergePath(self):\n self.assertEqual(\n resolver._merge_relative_path(\"gs://module-cache\", \"\"),\n \"gs://module-cache\")\n self.assertEqual(\n resolver._merge_relative_path(\"gs://module-cache\", \"./\"),\n \"gs://module-cache\")\n self.assertEqual(\n resolver._merge_relative_path(\"gs://module-cache\", \"./file\"),\n \"gs://module-cache/file\")\n self.assertEqual(\n resolver._merge_relative_path(\"gs://module-cache\", \"hello/../bla\"),\n \"gs://module-cache/bla\")\n self.assertEqual(\n resolver._merge_relative_path(\"gs://module-cache\", \"/\"),\n \"gs://module-cache\", \"/\")\n with self.assertRaisesRegexp(ValueError, \"is invalid\"):\n resolver._merge_relative_path(\"gs://module-cache\", \"/../\")\n with self.assertRaisesRegexp(ValueError, \"is invalid\"):\n resolver._merge_relative_path(\"gs://module-cache\", \"hello/../../bla\")\n\n def testNotFoundGCSBucket(self):\n # When trying to use not existing GCS bucket, test that\n # tf_util.atomic_write_string_to_file raises tf.error.NotFoundError.\n # Other errors that may arise from bad network connectivity are ignored by\n # resolver.atomic_download and retried infinitely.\n module_dir = \"\"\n def dummy_download_fn(handle, tmp_dir):\n del handle, tmp_dir\n return\n\n # Simulate missing GCS bucket by raising NotFoundError in\n # atomic_write_string_to_file.\n with mock.patch(\n \"tensorflow_hub.tf_utils.atomic_write_string_to_file\") as mock_:\n mock_.side_effect = tf.errors.NotFoundError(None, None, \"Test\")\n try:\n resolver.atomic_download(\"module\", dummy_download_fn, module_dir)\n assert False\n except tf.errors.NotFoundError as e:\n self.assertEqual(\"Test\", e.message)\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.compat.v2.image.resize",
"tensorflow.compat.v2.io.gfile.GFile",
"pandas.option_context",
"pandas.DataFrame",
"numpy.array"
],
[
"tensorflow.errors.NotFoundError",
"tensorflow.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tbose20/D-Ref | [
"eda6170a72838b89637df241dd5619e001f3afdb"
] | [
"captum/captum/_utils/gradient.py"
] | [
"#!/usr/bin/env python3\nimport threading\nimport typing\nimport warnings\nfrom collections import defaultdict\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast\n\nimport torch\nfrom captum._utils.common import (\n    _reduce_list,\n    _run_forward,\n    _sort_key_list,\n    _verify_select_neuron,\n)\nfrom captum._utils.typing import (\n    Literal,\n    ModuleOrModuleList,\n    TargetType,\n    TensorOrTupleOfTensorsGeneric,\n)\nfrom torch import Tensor, device\nfrom torch.nn import Module\n\n\ndef apply_gradient_requirements(\n    inputs: Tuple[Tensor, ...], warn: bool = True\n) -> List[bool]:\n    \"\"\"\n    Iterates through tuple on input tensors and sets requires_grad to be true on\n    each Tensor, and ensures all grads are set to zero. To ensure that the input\n    is returned to its initial state, a list of flags representing whether or not\n    a tensor originally required grad is returned.\n    \"\"\"\n    assert isinstance(\n        inputs, tuple\n    ), \"Inputs should be wrapped in a tuple prior to preparing for gradients\"\n    grad_required = []\n    for index, input in enumerate(inputs):\n        assert isinstance(input, torch.Tensor), \"Given input is not a torch.Tensor\"\n        grad_required.append(input.requires_grad)\n        inputs_dtype = input.dtype\n        # Note: torch 1.2 doesn't support is_complex for dtype, that's why we check\n        # on the existence of the is_complex method.\n        if not inputs_dtype.is_floating_point and not (\n            hasattr(inputs_dtype, \"is_complex\") and inputs_dtype.is_complex\n        ):\n            if warn:\n                warnings.warn(\n                    \"\"\"Input Tensor %d has a dtype of %s.\n                    Gradients cannot be activated\n                    for these data types.\"\"\"\n                    % (index, str(inputs_dtype))\n                )\n        elif not input.requires_grad:\n            if warn:\n                warnings.warn(\n                    \"Input Tensor %d did not already require gradients, \"\n                    \"required_grads has been set automatically.\" % index\n                )\n            input.requires_grad_()\n    return grad_required\n\n\ndef undo_gradient_requirements(\n    inputs: Tuple[Tensor, ...], grad_required: List[bool]\n) -> None:\n    \"\"\"\n    Iterates through list of tensors, zeros each gradient, and sets required\n    grad to false if the corresponding index in grad_required is False.\n    This method is used to undo the effects of prepare_gradient_inputs, making\n    grads not required for any input tensor that did not initially require\n    gradients.\n    \"\"\"\n\n    assert isinstance(\n        inputs, tuple\n    ), \"Inputs should be wrapped in a tuple prior to preparing for gradients.\"\n    assert len(inputs) == len(\n        grad_required\n    ), \"Input tuple length should match gradient mask.\"\n    for index, input in enumerate(inputs):\n        assert isinstance(input, torch.Tensor), \"Given input is not a torch.Tensor\"\n        if not grad_required[index]:\n            input.requires_grad_(False)\n\n\ndef compute_gradients(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    target_ind: TargetType = None,\n    additional_forward_args: Any = None,\n) -> Tuple[Tensor, ...]:\n    r\"\"\"\n    Computes gradients of the output with respect to inputs for an\n    arbitrary forward function.\n\n    Args:\n\n        forward_fn: forward function. This can be for example model's\n                    forward function.\n        input: Input at which gradients are evaluated,\n               will be passed to forward_fn.\n        target_ind: Index of the target class for which gradients\n                    must be computed (classification only).\n        additional_forward_args: Additional input arguments that forward\n                    function requires. It takes an empty tuple (no additional\n                    arguments) if no additional arguments are required\n    \"\"\"\n    with torch.autograd.set_grad_enabled(True):\n        # runs forward pass\n        outputs = _run_forward(forward_fn, inputs, target_ind, additional_forward_args)\n        assert outputs[0].numel() == 1, (\n            \"Target not provided when necessary, cannot\"\n            \" take gradient with respect to multiple outputs.\"\n        )\n        # torch.unbind(forward_out) is a list of scalar tensor tuples and\n        # contains batch_size * #steps elements\n        grads = torch.autograd.grad(torch.unbind(outputs), inputs, create_graph=True, retain_graph=True) #create_graph True, allow_unused is added TB\n    return grads\n\n\ndef _neuron_gradients(\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    saved_layer: Dict[device, Tuple[Tensor, ...]],\n    key_list: List[device],\n    gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],\n) -> Tuple[Tensor, ...]:\n    with torch.autograd.set_grad_enabled(True):\n        gradient_tensors = []\n        for key in key_list:\n            current_out_tensor = _verify_select_neuron(\n                saved_layer[key], gradient_neuron_selector\n            )\n            gradient_tensors.append(\n                torch.autograd.grad(\n                    torch.unbind(current_out_tensor)\n                    if current_out_tensor.numel() > 1\n                    else current_out_tensor,\n                    inputs,\n                )\n            )\n        _total_gradients = _reduce_list(gradient_tensors, sum)\n    return _total_gradients\n\n\[email protected]\ndef _forward_layer_eval(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    layer: Module,\n    additional_forward_args: Any = None,\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n    grad_enabled: bool = False,\n) -> Tuple[Tensor, ...]:\n    ...\n\n\[email protected]\ndef _forward_layer_eval(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    layer: List[Module],\n    additional_forward_args: Any = None,\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n    grad_enabled: bool = False,\n) -> List[Tuple[Tensor, ...]]:\n    ...\n\n\ndef _forward_layer_eval(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    layer: ModuleOrModuleList,\n    additional_forward_args: Any = None,\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n    grad_enabled: bool = False,\n) -> Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]:\n    return _forward_layer_eval_with_neuron_grads(\n        forward_fn,\n        inputs,\n        layer,\n        additional_forward_args=additional_forward_args,\n        gradient_neuron_selector=None,\n        grad_enabled=grad_enabled,\n        device_ids=device_ids,\n        attribute_to_layer_input=attribute_to_layer_input,\n    )\n\n\[email protected]\ndef _forward_layer_distributed_eval(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    layer: ModuleOrModuleList,\n    target_ind: TargetType = None,\n    additional_forward_args: Any = None,\n    attribute_to_layer_input: bool = False,\n    forward_hook_with_return: Literal[False] = False,\n    require_layer_grads: bool = False,\n) -> Dict[Module, Dict[device, Tuple[Tensor, ...]]]:\n    ...\n\n\[email protected]\ndef _forward_layer_distributed_eval(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    layer: ModuleOrModuleList,\n    target_ind: TargetType = None,\n    additional_forward_args: Any = None,\n    attribute_to_layer_input: bool = False,\n    *,\n    forward_hook_with_return: Literal[True],\n    require_layer_grads: bool = False,\n) -> Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor]:\n    ...\n\n\ndef _forward_layer_distributed_eval(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    layer: ModuleOrModuleList,\n    target_ind: TargetType = None,\n    additional_forward_args: Any = None,\n    attribute_to_layer_input: bool = False,\n    forward_hook_with_return: bool = False,\n    require_layer_grads: bool = False,\n) -> Union[\n    Tuple[Dict[Module, Dict[device, Tuple[Tensor, ...]]], Tensor],\n    Dict[Module, Dict[device, Tuple[Tensor, ...]]],\n]:\n    r\"\"\"\n    A helper function that allows to set a hook on model's `layer`, run the forward\n    pass and returns intermediate layer results, stored in a dictionary,\n    and optionally also the output of the forward function. The keys in the\n    dictionary are the device ids and the values are corresponding intermediate layer\n    results, either the inputs or the outputs of the layer depending on whether we set\n    `attribute_to_layer_input` to True or False.\n    This is especially useful when we execute forward pass in a distributed setting,\n    using `DataParallel`s for example.\n    \"\"\"\n    saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]] = defaultdict(dict)\n    lock = threading.Lock()\n    all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer\n\n    # Set a forward hook on specified module and run forward pass to\n    # get layer output tensor(s).\n    # For DataParallel models, each partition adds entry to dictionary\n    # with key as device and value as corresponding Tensor.\n    def hook_wrapper(original_module):\n        def forward_hook(module, inp, out=None):\n            eval_tsrs = inp if attribute_to_layer_input else out\n            is_eval_tuple = isinstance(eval_tsrs, tuple)\n\n            if not is_eval_tuple:\n                eval_tsrs = (eval_tsrs,)\n            if require_layer_grads:\n                apply_gradient_requirements(eval_tsrs, warn=False)\n            with lock:\n                nonlocal saved_layer\n                # Note that cloning behaviour of `eval_tsr` is different\n                # when `forward_hook_with_return` is set to True. This is because\n                # otherwise `backward()` on the last output layer won't execute.\n                if forward_hook_with_return:\n                    saved_layer[original_module][eval_tsrs[0].device] = eval_tsrs\n                    eval_tsrs_to_return = tuple(\n                        eval_tsr.clone() for eval_tsr in eval_tsrs\n                    )\n                    if not is_eval_tuple:\n                        eval_tsrs_to_return = eval_tsrs_to_return[0]\n                    return eval_tsrs_to_return\n                else:\n                    saved_layer[original_module][eval_tsrs[0].device] = tuple(\n                        eval_tsr.clone() for eval_tsr in eval_tsrs\n                    )\n\n        return forward_hook\n\n    all_hooks = []\n    try:\n        for single_layer in all_layers:\n            if attribute_to_layer_input:\n                all_hooks.append(\n                    single_layer.register_forward_pre_hook(hook_wrapper(single_layer))\n                )\n            else:\n                all_hooks.append(\n                    single_layer.register_forward_hook(hook_wrapper(single_layer))\n                )\n        output = _run_forward(\n            forward_fn,\n            inputs,\n            target=target_ind,\n            additional_forward_args=additional_forward_args,\n        )\n    finally:\n        for hook in all_hooks:\n            hook.remove()\n\n    if len(saved_layer) == 0:\n        raise AssertionError(\"Forward hook did not obtain any outputs for given layer\")\n\n    if forward_hook_with_return:\n        return saved_layer, output\n    return saved_layer\n\n\ndef _gather_distributed_tensors(\n    saved_layer: Dict[device, Tuple[Tensor, ...]],\n    device_ids: Union[None, List[int]] = None,\n    key_list: Union[None, List[device]] = None,\n) -> Tuple[Tensor, ...]:\n    r\"\"\"\n    A helper function to concatenate intermediate layer results stored on\n    different devices in `saved_layer`. `saved_layer` is a dictionary that\n    contains `device_id` as a key and intermediate layer results (either\n    the input or the output of the layer) stored on the device corresponding to\n    the key.\n    `key_list` is a list of devices in appropriate ordering for concatenation\n    and if not provided, keys are sorted based on device ids.\n\n    If only one key exists (standard model), key list simply has one element.\n    \"\"\"\n    if key_list is None:\n        key_list = _sort_key_list(list(saved_layer.keys()), device_ids)\n    return _reduce_list([saved_layer[device_id] for device_id in key_list])\n\n\ndef _extract_device_ids(\n    forward_fn: Callable,\n    saved_layer: Dict[Module, Dict[device, Tuple[Tensor, ...]]],\n    device_ids: Union[None, List[int]],\n) -> Union[None, List[int]]:\n    r\"\"\"\n    A helper function to extract device_ids from `forward_function` in case it is\n    provided as part of a `DataParallel` model or if is accessible from\n    `forward_fn`.\n    In case input device_ids is not None, this function returns that value.\n    \"\"\"\n    # Multiple devices / keys implies a DataParallel model, so we look for\n    # device IDs if given or available from forward function\n    # (DataParallel model object).\n    if (\n        max(len(saved_layer[single_layer]) for single_layer in saved_layer) > 1\n        and device_ids is None\n    ):\n        if (\n            hasattr(forward_fn, \"device_ids\")\n            and cast(Any, forward_fn).device_ids is not None\n        ):\n            device_ids = cast(Any, forward_fn).device_ids\n        else:\n            raise AssertionError(\n                \"Layer tensors are saved on multiple devices, however unable to access\"\n                \" device ID list from the `forward_fn`. Device ID list must be\"\n                \" accessible from `forward_fn`. For example, they can be retrieved\"\n                \" if `forward_fn` is a model of type `DataParallel`. It is used\"\n                \" for identifying device batch ordering.\"\n            )\n    return device_ids\n\n\[email protected]\ndef _forward_layer_eval_with_neuron_grads(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    layer: Module,\n    additional_forward_args: Any = None,\n    *,\n    gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],\n    grad_enabled: bool = False,\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:\n    ...\n\n\[email protected]\ndef _forward_layer_eval_with_neuron_grads(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    layer: Module,\n    additional_forward_args: Any = None,\n    gradient_neuron_selector: None = None,\n    grad_enabled: bool = False,\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n) -> Tuple[Tensor, ...]:\n    ...\n\n\[email protected]\ndef _forward_layer_eval_with_neuron_grads(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    layer: List[Module],\n    additional_forward_args: Any = None,\n    gradient_neuron_selector: None = None,\n    grad_enabled: bool = False,\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n) -> List[Tuple[Tensor, ...]]:\n    ...\n\n\ndef _forward_layer_eval_with_neuron_grads(\n    forward_fn: Callable,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    layer: ModuleOrModuleList,\n    additional_forward_args: Any = None,\n    gradient_neuron_selector: Union[\n        None, int, Tuple[Union[int, slice], ...], Callable\n    ] = None,\n    grad_enabled: bool = False,\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n) -> Union[\n    Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],\n    Tuple[Tensor, ...],\n    List[Tuple[Tensor, ...]],\n]:\n    \"\"\"\n    This method computes forward evaluation for a particular layer using a\n    forward hook. If a gradient_neuron_selector is provided, then gradients with\n    respect to that neuron in the layer output are also returned.\n\n    These functionalities are combined due to the behavior of DataParallel models\n    with hooks, in which hooks are executed once per device. We need to internally\n    combine the separated tensors from devices by concatenating based on device_ids.\n    Any necessary gradients must be taken with respect to each independent batched\n    tensor, so the gradients are computed and combined appropriately.\n\n    More information regarding the behavior of forward hooks with DataParallel models\n    can be found in the PyTorch data parallel documentation. We maintain the separate\n    evals in a dictionary protected by a lock, analogous to the gather implementation\n    for the core PyTorch DataParallel implementation.\n    \"\"\"\n    grad_enabled = True if gradient_neuron_selector is not None else grad_enabled\n\n    with torch.autograd.set_grad_enabled(grad_enabled):\n        saved_layer = _forward_layer_distributed_eval(\n            forward_fn,\n            inputs,\n            layer,\n            additional_forward_args=additional_forward_args,\n            attribute_to_layer_input=attribute_to_layer_input,\n        )\n        device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)\n        # Identifies correct device ordering based on device ids.\n        # key_list is a list of devices in appropriate ordering for concatenation.\n        # If only one key exists (standard model), key list simply has one element.\n        key_list = _sort_key_list(list(next(iter(saved_layer.values())).keys()), device_ids)\n        if gradient_neuron_selector is not None:\n            assert isinstance(\n                layer, Module\n            ), \"Cannot compute neuron gradients for multiple layers simultaneously!\"\n            inp_grads = _neuron_gradients(\n                inputs, saved_layer[layer], key_list, gradient_neuron_selector\n            )\n            return (\n                _gather_distributed_tensors(saved_layer[layer], key_list=key_list),\n                inp_grads,\n            )\n        else:\n            if isinstance(layer, Module):\n                return _gather_distributed_tensors(saved_layer[layer], key_list=key_list)\n            else:\n                return [\n                    _gather_distributed_tensors(saved_layer[curr_layer], key_list=key_list)\n                    for curr_layer in layer\n                ]\n\n\[email protected]\ndef compute_layer_gradients_and_eval(\n    forward_fn: Callable,\n    layer: Module,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    target_ind: TargetType = None,\n    additional_forward_args: Any = None,\n    *,\n    gradient_neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n    output_fn: Union[None, Callable] = None,\n) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]]:\n    ...\n\n\[email protected]\ndef compute_layer_gradients_and_eval(\n    forward_fn: Callable,\n    layer: List[Module],\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    target_ind: TargetType = None,\n    additional_forward_args: Any = None,\n    gradient_neuron_selector: None = None,\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n    output_fn: Union[None, Callable] = None,\n) -> Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]]:\n    ...\n\n\[email protected]\ndef compute_layer_gradients_and_eval(\n    forward_fn: Callable,\n    layer: Module,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    target_ind: TargetType = None,\n    additional_forward_args: Any = None,\n    gradient_neuron_selector: None = None,\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n    output_fn: Union[None, Callable] = None,\n) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:\n    ...\n\n\ndef compute_layer_gradients_and_eval(\n    forward_fn: Callable,\n    layer: ModuleOrModuleList,\n    inputs: Union[Tensor, Tuple[Tensor, ...]],\n    target_ind: TargetType = None,\n    additional_forward_args: Any = None,\n    gradient_neuron_selector: Union[\n        None, int, Tuple[Union[int, slice], ...], Callable\n    ] = None,\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_layer_input: bool = False,\n    output_fn: Union[None, Callable] = None,\n) -> Union[\n    Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]],\n    Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...], Tuple[Tensor, ...]],\n    Tuple[List[Tuple[Tensor, ...]], List[Tuple[Tensor, ...]]],\n]:\n    r\"\"\"\n    Computes gradients of the output with respect to a given layer as well\n    as the output evaluation of the layer for an arbitrary forward function\n    and given input.\n\n    For data parallel models, hooks are executed once per device, so we\n    need to internally combine the separated tensors from devices by\n    concatenating based on device_ids. Any necessary gradients must be taken\n    with respect to each independent batched tensor, so the gradients are\n    computed and combined appropriately.\n\n    More information regarding the behavior of forward hooks with DataParallel\n    models can be found in the PyTorch data parallel documentation. We maintain\n    the separate inputs in a dictionary protected by a lock, analogous to the\n    gather implementation for the core PyTorch DataParallel implementation.\n\n    NOTE: To properly handle inplace operations, a clone of the layer output\n    is stored. This structure inhibits execution of a backward hook on the last\n    module for the layer output when computing the gradient with respect to\n    the input, since we store an intermediate clone, as\n    opposed to the true module output. If backward module hooks are necessary\n    for the final module when computing input gradients, utilize\n    _forward_layer_eval_with_neuron_grads instead.\n\n    Args:\n\n        forward_fn: forward function. This can be for example model's\n                    forward function.\n        layer: Layer for which gradients / output will be evaluated.\n        inputs: Input at which gradients are evaluated,\n                will be passed to forward_fn.\n        target_ind: Index of the target class for which gradients\n                    must be computed (classification only).\n        output_fn: An optional function that is applied to the layer inputs or\n                    outputs depending whether the `attribute_to_layer_input` is\n                    set to `True` or `False`\n        args: Additional input arguments that forward function requires.\n              It takes an empty tuple (no additional arguments) if no\n              additional arguments are required\n\n\n    Returns:\n        2-element tuple of **gradients**, **evals**:\n        - **gradients**:\n            Gradients of output with respect to target layer output.\n        - **evals**:\n            Target layer output for given input.\n    \"\"\"\n    with torch.autograd.set_grad_enabled(True):\n        # saved_layer is a dictionary mapping device to a tuple of\n        # layer evaluations on that device.\n        saved_layer, output = _forward_layer_distributed_eval(\n            forward_fn,\n            inputs,\n            layer,\n            target_ind=target_ind,\n            additional_forward_args=additional_forward_args,\n            attribute_to_layer_input=attribute_to_layer_input,\n            forward_hook_with_return=True,\n            require_layer_grads=True,\n        )\n        assert output[0].numel() == 1, (\n            \"Target not provided when necessary, cannot\"\n            \" take gradient with respect to multiple outputs.\"\n        )\n\n        device_ids = _extract_device_ids(forward_fn, saved_layer, device_ids)\n\n        # Identifies correct device ordering based on device ids.\n        # key_list is a list of devices in appropriate ordering for concatenation.\n        # If only one key exists (standard model), key list simply has one element.\n        key_list = _sort_key_list(\n            list(next(iter(saved_layer.values())).keys()), device_ids\n        )\n        all_outputs: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]\n        if isinstance(layer, Module):\n            all_outputs = _reduce_list(\n                [\n                    saved_layer[layer][device_id]\n                    if output_fn is None\n                    else output_fn(saved_layer[layer][device_id])\n                    for device_id in key_list\n                ]\n            )\n        else:\n            all_outputs = [\n                _reduce_list(\n                    [\n                        saved_layer[single_layer][device_id]\n                        if output_fn is None\n                        else output_fn(saved_layer[single_layer][device_id])\n                        for device_id in key_list\n                    ]\n                )\n                for single_layer in layer\n            ]\n        all_layers: List[Module] = [layer] if isinstance(layer, Module) else layer\n        grad_inputs = tuple(\n            layer_tensor\n            for single_layer in all_layers\n            for device_id in key_list\n            for layer_tensor in saved_layer[single_layer][device_id]\n        )\n        saved_grads = torch.autograd.grad(torch.unbind(output), grad_inputs)\n\n        offset = 0\n        all_grads: List[Tuple[Tensor, ...]] = []\n        for single_layer in all_layers:\n            num_tensors = len(next(iter(saved_layer[single_layer].values())))\n            curr_saved_grads = [\n                saved_grads[i : i + num_tensors]\n                for i in range(\n                    offset, offset + len(key_list) * num_tensors, num_tensors\n                )\n            ]\n            offset += len(key_list) * num_tensors\n            if output_fn is not None:\n                curr_saved_grads = [\n                    output_fn(curr_saved_grad) for curr_saved_grad in curr_saved_grads\n                ]\n\n            all_grads.append(_reduce_list(curr_saved_grads))\n\n        layer_grads: Union[Tuple[Tensor, ...], List[Tuple[Tensor, ...]]]\n        layer_grads = all_grads\n        if isinstance(layer, Module):\n            layer_grads = all_grads[0]\n\n        if gradient_neuron_selector is not None:\n            assert isinstance(\n                layer, Module\n            ), \"Cannot compute neuron gradients for multiple layers simultaneously!\"\n            inp_grads = _neuron_gradients(\n                inputs, saved_layer[layer], key_list, gradient_neuron_selector\n            )\n            return (\n                cast(Tuple[Tensor, ...], layer_grads),\n                cast(Tuple[Tensor, ...], all_outputs),\n                inp_grads,\n            )\n    return layer_grads, all_outputs  # type: ignore\n\n\ndef construct_neuron_grad_fn(\n    layer: Module,\n    neuron_selector: Union[int, Tuple[Union[int, slice], ...], Callable],\n    device_ids: Union[None, List[int]] = None,\n    attribute_to_neuron_input: bool = False,\n) -> Callable:\n    def grad_fn(\n        forward_fn: Callable,\n        inputs: TensorOrTupleOfTensorsGeneric,\n        target_ind: TargetType = None,\n        additional_forward_args: Any = None,\n    ) -> Tuple[Tensor, ...]:\n        _, grads = _forward_layer_eval_with_neuron_grads(\n            forward_fn,\n            inputs,\n            layer,\n            additional_forward_args,\n            gradient_neuron_selector=neuron_selector,\n            device_ids=device_ids,\n            attribute_to_layer_input=attribute_to_neuron_input,\n        )\n        return grads\n\n    return grad_fn\n\n\ndef _compute_jacobian_wrt_params(\n    model: Module,\n    inputs: Union[Tuple[Tensor], Tensor],\n    labels: Optional[Tensor] = None,\n    loss_fn: Optional[Union[Module, Callable]] = None,\n) -> Tuple[Tensor, ...]:\n    r\"\"\"\n    Computes the Jacobian of a batch of test examples given a model, and optional\n    loss function and target labels. This method is equivalent to calculating the\n    gradient for every individual example in the minibatch.\n\n    Args:\n        model (torch.nn.Module): The trainable model providing the forward pass\n        inputs (Tensor): The minibatch for which the forward pass is computed.\n            The dimensions of input are (N, *) where N is the batch_size.\n            The input must have a batch dimension, even if batch_size = 1.\n        labels (Tensor or None): Labels for input if computing a loss function.\n        loss_fn (torch.nn.Module or Callable or None): The loss function. If a library\n            defined loss function is provided, it would be expected to be a\n            torch.nn.Module. If a custom loss is provided, it can be either type,\n            but must behave as a library loss function would if `reduction='none'`.\n\n    Returns:\n        grads (Tuple of Tensor): Returns the Jacobian for the minibatch as a\n            tuple of gradients corresponding to the tuple of trainable parameters\n            returned by `model.parameters()`. Each object grads[i] references to the\n            gradients for the parameters in the i-th trainable layer of the model.\n            Each grads[i] object is a tensor with the gradients for the `inputs`\n            batch. For example, grads[i][j] would reference the gradients for the\n            parameters of the i-th layer, for the j-th member of the minibatch.\n    \"\"\"\n    with torch.autograd.set_grad_enabled(True):\n        out = model(inputs)\n        assert out.dim() != 0, \"Please ensure model output has at least one dimension.\"\n\n        if labels is not None and loss_fn is not None:\n            loss = loss_fn(out, labels)\n            if hasattr(loss_fn, \"reduction\"):\n                msg0 = \"Please ensure loss_fn.reduction is set to `none`\"\n                assert loss_fn.reduction == \"none\", msg0  # type: ignore\n            else:\n                msg1 = (\n                    \"Loss function is applying a reduction. Please ensure \"\n                    f\"Output shape: {out.shape} and Loss shape: {loss.shape} \"\n                    \"are matching.\"\n                )\n                assert loss.dim() != 0, msg1\n                assert out.shape[0] == loss.shape[0], msg1\n            out = loss\n\n        grads_list = [\n            torch.autograd.grad(\n                outputs=out[i],\n                inputs=model.parameters(),  # type: ignore\n                grad_outputs=torch.ones_like(out[i]),\n                retain_graph=True,\n            )\n            for i in range(out.shape[0])\n        ]\n\n        grads = tuple([torch.stack(x) for x in zip(*grads_list)])\n\n    return tuple(grads)\n\n\ndef _compute_jacobian_wrt_params_autograd_hacks(\n    model: Module,\n    inputs: Union[Tuple[Tensor], Tensor],\n    labels: Optional[Tensor] = None,\n    loss_fn: Optional[Module] = None,\n    reduction_type: Optional[str] = \"sum\",\n) -> Tuple[Any, ...]:\n    r\"\"\"\n    NOT SUPPORTED FOR OPEN SOURCE. This method uses an internal 'hack' and is currently\n    not supported.\n\n    Computes the Jacobian of a batch of test examples given a model, and optional\n    loss function and target labels. This method uses autograd_hacks to fully vectorize\n    the Jacobian calculation. Currently, only linear and conv2d layers are supported.\n\n    User must `add_hooks(model)` before calling this function.\n\n    Args:\n        model (torch.nn.Module): The trainable model providing the forward pass\n        inputs (Tensor): The minibatch for which the forward pass is computed.\n            The dimensions of input are (N, *) where N is the batch_size.\n            The input must have a batch dimension, even if batch_size = 1.\n        labels (Tensor or None): Labels for input if computing a loss function.\n        loss_fn (torch.nn.Module or Callable or None): The loss function. If a library\n            defined loss function is provided, it would be expected to be a\n            torch.nn.Module. If a custom loss is provided, it can be either type,\n            but must behave as a library loss function would if `reduction='sum'` or\n            `reduction='mean'`.\n        reduction_type (str): The type of reduction applied. If a loss_fn is passed,\n            this should match `loss_fn.reduction`. Else if gradients are being\n            computed on direct model outputs (scores), then 'sum' should be used.\n            Defaults to 'sum'.\n\n    Returns:\n        grads (Tuple of Tensor): Returns the Jacobian for the minibatch as a\n            tuple of gradients corresponding to the tuple of trainable parameters\n            returned by `model.parameters()`. Each object grads[i] references to the\n            gradients for the parameters in the i-th trainable layer of the model.\n            Each grads[i] object is a tensor with the gradients for the `inputs`\n            batch. For example, grads[i][j] would reference the gradients for the\n            parameters of the i-th layer, for the j-th member of the minibatch.\n    \"\"\"\n    from captum._utils.fb import autograd_hacks\n\n    with torch.autograd.set_grad_enabled(True):\n        autograd_hacks.add_hooks(model)\n\n        out = model(inputs)\n        assert out.dim() != 0, \"Please ensure model output has at least one dimension.\"\n\n        if labels is not None and loss_fn is not None:\n            loss = loss_fn(out, labels)\n            if hasattr(loss_fn, \"reduction\"):\n                msg0 = \"Please ensure loss_fn.reduction is set to `sum` or `mean`\"\n                assert loss_fn.reduction != \"none\", msg0\n                msg1 = (\n                    f\"loss_fn.reduction ({loss_fn.reduction}) does not match reduction \"\n                    f\"type ({reduction_type}). Please ensure they are matching.\"\n                )\n                assert loss_fn.reduction == reduction_type, msg1\n            msg2 = (\n                \"Please ensure custom loss function is applying either a \"\n                \"sum or mean reduction.\"\n            )\n            assert out.shape != loss.shape, msg2\n\n            if reduction_type != \"sum\" and reduction_type != \"mean\":\n                raise ValueError(\n                    f\"{reduction_type} is not a valid value for reduction_type. \"\n                    \"Must be either 'sum' or 'mean'.\"\n                )\n            out = loss\n\n        model.zero_grad()\n        out.backward(gradient=torch.ones_like(out))\n        autograd_hacks.compute_grad1(model, loss_type=reduction_type)\n\n        grads = tuple(\n            param.grad1  # type: ignore\n            for param in model.parameters()\n            if hasattr(param, \"grad1\")\n        )\n\n        autograd_hacks.clear_backprops(model)\n        autograd_hacks.remove_hooks(model)\n\n    return grads\n"
] | [
[
"torch.stack",
"torch.autograd.set_grad_enabled",
"torch.ones_like",
"torch.unbind"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BonnerLab/model-tools | [
"ac90617cd79bb70a308e34a1e834971498329fb0"
] | [
"model_tools/activations/hooks.py"
] | [
"from abc import ABC, abstractmethod\nimport logging\nimport os\nfrom typing import Optional, Union, Iterable, Dict\n\nimport h5py\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom brainio.stimuli import StimulusSet\nfrom model_tools.activations import ActivationsModel\nfrom model_tools.activations.core import flatten, change_dict\nfrom model_tools.utils import fullname, s3\nfrom model_tools.utils.pca import IncrementalPCAPytorch, PCAPytorch\nfrom result_caching import store_dict\n\nStimuli = Union[Iterable[str], StimulusSet, Iterable[os.PathLike]]\nBasePCA = Union[IncrementalPCAPytorch, PCAPytorch]\n\n\nclass LayerHookBase(ABC):\n\n    def __init__(self, activations_extractor: ActivationsModel, identifier: Optional[str] = None):\n        self._extractor = activations_extractor\n        self.identifier = identifier\n        self.handle = None\n\n    def __call__(self, batch_activations: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n        self.setup(batch_activations)\n        return change_dict(batch_activations, self.layer_apply, keep_name=True,\n                           multithread=os.getenv('MT_MULTITHREAD', '1') == '1')\n\n    @classmethod\n    def hook(cls, activations_extractor: ActivationsModel, identifier: Optional[str] = None, **kwargs):\n        hook = cls(activations_extractor=activations_extractor, identifier=identifier, **kwargs)\n        assert not cls.is_hooked(activations_extractor), f\"{cls.__name__} is already hooked\"\n        handle = activations_extractor.register_batch_activations_hook(hook)\n        hook.handle = handle\n        return handle\n\n    @classmethod\n    def is_hooked(cls, activations_extractor: ActivationsModel) -> bool:\n        return any(isinstance(hook, cls) for hook in\n                   activations_extractor._extractor._batch_activations_hooks.values())\n\n    def setup(self, batch_activations: Dict[str, np.ndarray]) -> None:\n        pass\n\n    @abstractmethod\n    def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:\n        pass\n\n\nclass LayerGlobalMaxPool2d(LayerHookBase):\n\n    def __init__(self, *args, identifier: Optional[str] = None, **kwargs):\n        if identifier is None:\n            identifier = 'maxpool'\n\n        super(LayerGlobalMaxPool2d, self).__init__(*args, **kwargs, identifier=identifier)\n\n    def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:\n        if activations.ndim != 4:\n            return activations\n        return np.max(activations, axis=(2, 3))\n\n\nclass LayerRandomProjection(LayerHookBase):\n\n    def __init__(self, *args,\n                 n_components: int = 1000,\n                 force: bool = False,\n                 identifier: Optional[str] = None,\n                 **kwargs):\n        if identifier is None:\n            identifier = f'randproj_ncomponents={n_components}_force={force}'\n\n        super(LayerRandomProjection, self).__init__(*args, **kwargs, identifier=identifier)\n        self._n_components = n_components\n        self._force = force\n        self._layer_ws = {}\n\n    def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:\n        activations = flatten(activations)\n        if activations.shape[1] <= self._n_components and not self._force:\n            return activations\n        if layer not in self._layer_ws:\n            w = np.random.normal(size=(activations.shape[-1], self._n_components)) / np.sqrt(self._n_components)\n            self._layer_ws[layer] = w\n        else:\n            w = self._layer_ws[layer]\n        activations = activations @ w\n        return activations\n\n\nclass LayerPCA(LayerHookBase):\n\n    def __init__(self, *args,\n                 n_components: int = 1000,\n                 force: bool = False,\n                 stimuli: Optional[Stimuli] = None,\n                 stimuli_identifier: Optional[str] = None,\n                 identifier: Optional[str] = None,\n                 batch_size: Optional[int] = None,\n                 device: Optional[Union[str, torch.device]] = None,\n                 **kwargs):\n        if stimuli is None:\n            # Default to ImageNet validation with 1 image per class\n            stimuli = _get_imagenet_val(n_components)\n            stimuli_identifier = 'brainscore-imagenetval'\n        if isinstance(stimuli, StimulusSet) and stimuli_identifier is None and hasattr(stimuli, 'identifier'):\n            stimuli_identifier = stimuli.identifier\n        if stimuli_identifier is None:\n            raise ValueError('If passing a list of paths for stimuli '\n                             'or a StimulusSet without an identifier attribute, '\n                             'you must provide a stimuli_identifier')\n\n        if identifier is None:\n            identifier = f'pca_ncomponents={n_components}_force={force}_stimuli_identifier={stimuli_identifier}'\n\n        super(LayerPCA, self).__init__(*args, **kwargs, identifier=identifier)\n        self._n_components = n_components\n        self._force = force\n        self._stimuli_identifier = stimuli_identifier\n        self._stimuli = stimuli\n        self._batch_size = batch_size\n        self._device = device\n        self._logger = logging.getLogger(fullname(self))\n        self._layer_pcas = {}\n\n    def setup(self, batch_activations) -> None:\n        layers = batch_activations.keys()\n        missing_layers = [layer for layer in layers if layer not in self._layer_pcas]\n        if len(missing_layers) == 0:\n            return\n        layer_pcas = self._pcas(identifier=self._extractor.identifier,\n                                layers=missing_layers,\n                                n_components=self._n_components,\n                                force=self._force,\n                                stimuli_identifier=self._stimuli_identifier)\n        self._layer_pcas = {**self._layer_pcas, **layer_pcas}\n\n    def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:\n        pca = self._layer_pcas[layer]\n        activations = flatten(activations)\n        if pca is None:\n            return activations\n        return pca.transform(torch.from_numpy(activations).to(self._device))\n\n    @store_dict(dict_key='layers', identifier_ignore=['layers'])\n    def _pcas(self, identifier, layers, n_components, force, stimuli_identifier) -> Dict[str, BasePCA]:\n        self._logger.debug(f'Retrieving {stimuli_identifier} activations')\n        self.handle.disable()\n        activations = self._extractor(self._stimuli, layers=layers, stimuli_identifier=False)\n        activations = {layer: activations.sel(layer=layer).values\n                       for layer in np.unique(activations['layer'])}\n        assert len(set(layer_activations.shape[0] for layer_activations in activations.values())) == 1, \"stimuli differ\"\n        self.handle.enable()\n\n        self._logger.debug(f'Computing {stimuli_identifier} principal components')\n        progress = tqdm(total=len(activations), desc=\"layer principal components\", leave=False)\n\n        def init_and_progress(layer, activations):\n            activations = flatten(activations)\n            if activations.shape[1] <= n_components and not force:\n                self._logger.debug(f\"Not computing principal components for {layer} \"\n                                   f\"activations {activations.shape} as shape is small enough already\")\n                progress.update(1)\n                return None\n            n_components_ = n_components if activations.shape[1] > n_components else activations.shape[1]\n            if self._batch_size is None:\n                pca = PCAPytorch(n_components_, device=self._device)\n                pca.fit(torch.from_numpy(activations).to(self._device))\n            else:\n                pca = IncrementalPCAPytorch(n_components_, device=self._device)\n                for i in range(0, activations.shape[0], self._batch_size):\n                    activations_batch = torch.from_numpy(activations[i:i + self._batch_size]).to(self._device)\n                    pca.fit_partial(activations_batch)\n            return pca\n\n        layer_pcas = change_dict(activations, init_and_progress, keep_name=True,\n                                 multithread=os.getenv('MT_MULTITHREAD', '1') == '1')\n        progress.close()\n        return layer_pcas\n\n\ndef _get_imagenet_val(num_images):\n    _logger = logging.getLogger(fullname(_get_imagenet_val))\n    num_classes = 1000\n    num_images_per_class = (num_images - 1) // num_classes\n    base_indices = np.arange(num_images_per_class).astype(int)\n    indices = []\n    for i in range(num_classes):\n        indices.extend(50 * i + base_indices)\n    for i in range((num_images - 1) % num_classes + 1):\n        indices.extend(50 * i + np.array([num_images_per_class]).astype(int))\n\n    framework_home = os.path.expanduser(os.getenv('MT_HOME', '~/.model-tools'))\n    imagenet_filepath = os.getenv('MT_IMAGENET_PATH', os.path.join(framework_home, 'imagenet2012.hdf5'))\n    imagenet_dir = f\"{imagenet_filepath}-files\"\n    os.makedirs(imagenet_dir, exist_ok=True)\n\n    if not os.path.isfile(imagenet_filepath):\n        os.makedirs(os.path.dirname(imagenet_filepath), exist_ok=True)\n        _logger.debug(f\"Downloading ImageNet validation to {imagenet_filepath}\")\n        s3.download_file(\"imagenet2012-val.hdf5\", imagenet_filepath)\n\n    filepaths = []\n    with h5py.File(imagenet_filepath, 'r') as f:\n        for index in indices:\n            imagepath = os.path.join(imagenet_dir, f\"{index}.png\")\n            if not os.path.isfile(imagepath):\n                image = np.array(f['val/images'][index])\n                Image.fromarray(image).save(imagepath)\n            filepaths.append(imagepath)\n\n    return filepaths\n"
] | [
[
"numpy.sqrt",
"numpy.unique",
"numpy.arange",
"torch.from_numpy",
"numpy.max",
"numpy.random.normal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hrrsjeong/METEORE | [
"ba8e517c51dbfd3fea5130f297c480c4626c2ff0"
] | [
"combination_model_prediction.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 9 18:03:39 2020\r\n\r\n@author: akanksha\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport joblib\r\nfrom itertools import combinations\r\nimport sklearn\r\nfrom functools import reduce\r\nimport argparse\r\nimport os\r\n\r\n\r\nparser = argparse.ArgumentParser(description = 'Prediction from combined models for the reads.')\r\n\r\nparser.add_argument('--methodsfile','-i', type = str, required = True,\r\n                help = 'TSV file containing name and path of the method output tsv file. The output tsv file from the method should be in the format [ID,Pos,Strand,Score]. Can be compressed in gz.')\r\n\r\nparser.add_argument('--model','-m', choices = [\"default\",\"optimized\"], required = True, type = str,\r\n                help = 'which model to select from default RF or optimized RF with max_depth 3 and n_estimator 10')\r\n\r\nparser.add_argument('--output', '-o',type = str, required = True,\r\n\t\thelp = 'Where to store the outputs')\r\noptions = parser.parse_args()\r\n\r\ndef mod_file(data_file_path):\r\n    data_file=pd.read_csv(data_file_path, header=0, sep=\"\\t\")\r\n    name=data_file_path.split(\"\\\\\")[-1].split(\".\")[0]\r\n    data_file.drop_duplicates(subset=['Chr',\"ID\",\"Pos\",\"Strand\"],inplace=True) # add chr\r\n    data_file.reset_index(inplace=True,drop=True)\r\n    mask=data_file.index[data_file.Strand==\"-\"].tolist()\r\n    # Use .loc to avoid chained-assignment (SettingWithCopyWarning in pandas).\r\n    data_file.loc[mask, \"Pos\"] = data_file[\"Pos\"][mask] - 1\r\n    data_file.drop([\"Strand\"], axis=1, inplace=True)\r\n    data_file.rename(columns={\"Score\":name}, inplace=True)\r\n    data_file.reset_index(inplace=True, drop=True)\r\n    return(data_file)\r\n\r\ndef main(mp,combine_file):\r\n\r\n    loaded_model = joblib.load(open(mp, 'rb'))\r\n    X=combine_file[combine_file.columns[3:]] #2:\r\n    X=sklearn.preprocessing.MinMaxScaler().fit_transform(X)\r\n    prediction=pd.DataFrame(loaded_model.predict(X)) ##\r\n    prediction_prob=pd.DataFrame(loaded_model.predict_proba(X))\r\n    prediction.rename(columns={0:\"Prediction\"}, inplace=True)\r\n    prediction_prob=prediction_prob[[1]]\r\n    prediction_prob.rename(columns={1:\"Prob_methylation\"}, inplace=True)\r\n    final_output=pd.concat([combine_file[combine_file.columns[:3]],prediction,prediction_prob], axis=1) #:2\r\n    #os.makedirs(options.output)\r\n    #final_output.to_csv(options.output+'/predictions_combination_method.tsv', header=True, index=None, sep='\\t')\r\n    dir = (\"combined_model_results\")\r\n    if not os.path.isdir(dir):\r\n        os.makedirs(dir)\r\n        final_output.to_csv(dir+'/'+options.output, header=True, index=None, sep='\\t')\r\n    else:\r\n        final_output.to_csv(dir+'/'+options.output, header=True, index=None, sep='\\t')\r\n\r\nif __name__ == '__main__':\r\n\r\n    df_file=pd.read_csv(options.methodsfile, header=None, sep='\\t')\r\n    if options.model==\"default\":\r\n        fillval=\"default\"\r\n    else:\r\n        fillval=\"max_depth_3_n_estimator_10\"\r\n    modelname='_'.join(df_file[0])\r\n    mp='saved_models/rf_model_'+fillval+'_'+modelname+'.model'\r\n    dfs=[]\r\n    for i in df_file[1]:\r\n        dfs.append(mod_file(i))\r\n    combine_file=reduce(lambda left,right: pd.merge(left, right, how='inner',on=[\"ID\",\"Chr\",\"Pos\"]), dfs) # add chr\r\n    combine_file.drop_duplicates(subset=[\"ID\",\"Chr\",\"Pos\"],inplace=True) # add chr\r\n    combine_file.reset_index(inplace=True, drop=True)\r\n    main(mp,combine_file) ##\r\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"pandas.merge"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
simone-codeluppi/storm-analysis | [
"fa50fb7d670e9e4d712fa6fafb398963b39e209b",
"fa50fb7d670e9e4d712fa6fafb398963b39e209b"
] | [
"storm_analysis/diagnostics/sCMOS/configure.py",
"storm_analysis/test/test_ia_utilities.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nConfigure folder for sCMOS testing.\n\nHazen 09/17\n\"\"\"\nimport numpy\nimport os\n\nimport storm_analysis\nimport storm_analysis.sa_library.parameters as parameters\n\nimport storm_analysis.simulator.emitters_on_grid as emittersOnGrid\nimport storm_analysis.simulator.emitters_uniform_random as emittersUniformRandom\n\nimport storm_analysis.diagnostics.sCMOS.settings as settings\n\n\ndef testingParameters(cal_file):\n \"\"\"\n Create a sCMOS parameters object.\n \"\"\"\n params = parameters.ParametersSCMOS()\n\n params.setAttr(\"max_frame\", \"int\", -1) \n params.setAttr(\"start_frame\", \"int\", -1) \n\n params.setAttr(\"background_sigma\", \"float\", 8.0)\n params.setAttr(\"camera_calibration\", \"filename\", cal_file)\n params.setAttr(\"find_max_radius\", \"int\", 5)\n params.setAttr(\"fit_error_model\", \"string\", settings.fit_error_model)\n params.setAttr(\"foreground_sigma\", \"float\", 1.5)\n params.setAttr(\"iterations\", \"int\", settings.iterations)\n params.setAttr(\"model\", \"string\", settings.model)\n params.setAttr(\"pixel_size\", \"float\", settings.pixel_size)\n params.setAttr(\"roi_size\", \"int\", settings.roi_size)\n params.setAttr(\"sigma\", \"float\", 1.5)\n params.setAttr(\"threshold\", \"float\", settings.threshold)\n\n # Don't do tracking.\n params.setAttr(\"descriptor\", \"string\", \"1\")\n params.setAttr(\"radius\", \"float\", \"0.0\")\n\n # Don't do drift-correction.\n params.setAttr(\"d_scale\", \"int\", 2)\n params.setAttr(\"drift_correction\", \"int\", 0)\n params.setAttr(\"frame_step\", \"int\", 500)\n params.setAttr(\"z_correction\", \"int\", 0)\n\n # Z fitting.\n #\n # These are nonsense values. We test either '2D' of '3D' mode\n # and check how well we do at fitting the localization widths.\n #\n params.setAttr(\"do_zfit\", \"int\", 0)\n\n params.setAttr(\"cutoff\", \"float\", 0.0) \n params.setAttr(\"max_z\", \"float\", 0.5)\n params.setAttr(\"min_z\", \"float\", -0.5)\n params.setAttr(\"z_value\", \"float\", 0.0)\n params.setAttr(\"z_step\", \"float\", 1.0)\n\n params.setAttr(\"wx_wo\", \"float\", 1.0)\n params.setAttr(\"wx_c\", \"float\", 1.0)\n params.setAttr(\"wx_d\", \"float\", 1.0)\n params.setAttr(\"wxA\", \"float\", 0.0)\n params.setAttr(\"wxB\", \"float\", 0.0)\n params.setAttr(\"wxC\", \"float\", 0.0)\n params.setAttr(\"wxD\", \"float\", 0.0)\n\n params.setAttr(\"wy_wo\", \"float\", 1.0)\n params.setAttr(\"wy_c\", \"float\", 1.0)\n params.setAttr(\"wy_d\", \"float\", 1.0)\n params.setAttr(\"wyA\", \"float\", 0.0)\n params.setAttr(\"wyB\", \"float\", 0.0)\n params.setAttr(\"wyC\", \"float\", 0.0)\n params.setAttr(\"wyD\", \"float\", 0.0)\n\n # 'peak_locations' testing.\n if hasattr(settings, \"peak_locations\") and (settings.peak_locations is not None):\n params.setAttr(\"peak_locations\", \"filename\", settings.peak_locations)\n\n return params\n\n \ndef configure(cal_file = None):\n\n # Create sCMOS calibration file if not specified.\n #\n if cal_file is None:\n cal_file = \"calib.npy\"\n offset = numpy.zeros((settings.y_size, settings.x_size)) + settings.camera_offset\n variance = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_variance\n gain = numpy.ones((settings.y_size, settings.x_size)) * settings.camera_gain\n rqe = numpy.ones((settings.y_size, settings.x_size))\n numpy.save(cal_file, [offset, variance, gain, rqe, 2])\n\n # Create parameters file for analysis.\n #\n print(\"Creating XML file.\")\n params = testingParameters(cal_file)\n params.toXMLFile(\"scmos.xml\", pretty = 
True)\n\n # Create localization on a grid file.\n #\n print(\"Creating gridded localization.\")\n emittersOnGrid.emittersOnGrid(\"grid_list.hdf5\",\n settings.nx,\n settings.ny,\n 1.5,\n 20,\n 0.0,\n 0.0)\n\n # Create randomly located localizations file.\n #\n print(\"Creating random localization.\")\n emittersUniformRandom.emittersUniformRandom(\"random_list.hdf5\",\n 1.0,\n 10,\n settings.x_size,\n settings.y_size,\n 0.0)\n\nif (__name__ == \"__main__\"):\n configure()\n \n",
"#!/usr/bin/env python\n\"\"\"\nTests some aspect of ia_utilities.\n\"\"\"\nimport numpy\nimport tifffile\n\nimport storm_analysis.sa_library.dao_fit_c as daoFitC\nimport storm_analysis.sa_library.ia_utilities_c as iaUtilsC\nimport storm_analysis.simulator.draw_gaussians_c as dg\n\n\ndef imagesCopy(images):\n images_copy = []\n for image in images:\n images_copy.append(numpy.copy(image))\n return images_copy\n\n \ndef test_ia_util_1():\n \"\"\"\n Test finding peaks in an empty image.\n \"\"\"\n x_size = 100\n y_size = 80\n images = [numpy.zeros((x_size,y_size), dtype = numpy.float64)]\n z_values = [0.1]\n\n mxf = iaUtilsC.MaximaFinder(margin = 1,\n radius = 2,\n threshold = 1,\n z_values = z_values)\n\n [x, y, z] = mxf.findMaxima(images)\n assert(x.size == 0)\n\ndef test_ia_util_2():\n \"\"\"\n Test finding peaks in an image.\n \"\"\"\n x_size = 100\n y_size = 80\n images = [numpy.zeros((x_size,y_size), dtype = numpy.float64)]\n z_values = [0.1]\n\n # Above threshold, unequal height.\n images[0][10,11] = 1.1\n images[0][10,12] = 1.5\n\n # Above threshold, equal height.\n images[0][15,8] = 1.5\n images[0][15,9] = 1.5\n\n # Below threshold.\n images[0][20,11] = 0.5\n \n mxf = iaUtilsC.MaximaFinder(margin = 1,\n radius = 2,\n threshold = 1,\n z_values = z_values)\n\n [x, y, z, h] = mxf.findMaxima(images, want_height = True)\n\n assert(x.size == 2)\n for i in range(z.size):\n assert (abs(z[i] - z_values[0]) < 1.0e-6)\n assert (abs(h[i] - 1.5) < 1.0e-6)\n\ndef test_ia_util_3():\n \"\"\"\n Test agreement with fitting regarding orientation.\n \"\"\"\n height = 20.0\n sigma = 1.5\n x_size = 100\n y_size = 120\n background = numpy.zeros((x_size, y_size)) + 10.0\n image = dg.drawGaussians((x_size, y_size),\n numpy.array([[20.0, 40.0, height, sigma, sigma]]))\n image += background\n\n # Configure fitter.\n mfit = daoFitC.MultiFitter2D(sigma_range = [1.0, 2.0])\n mfit.initializeC(image)\n mfit.newImage(image)\n mfit.newBackground(background)\n\n # Configure finder.\n z_values = [0.0]\n mxf = iaUtilsC.MaximaFinder(margin = 1,\n radius = 2 * sigma,\n threshold = background[0,0] + 0.5*height,\n z_values = z_values)\n \n # Find peaks.\n [x, y, z] = mxf.findMaxima([image])\n\n sigma = numpy.ones(x.size) * sigma\n peaks = {\"x\" : x,\n \"y\" : y,\n \"z\" : z,\n \"sigma\" : sigma}\n\n # Pass peaks to fitter.\n mfit.newPeaks(peaks, \"finder\")\n\n # Check height.\n h = mfit.getPeakProperty(\"height\")\n for i in range(h.size):\n assert (abs(h[i] - height)/height < 0.1)\n\n # Check background.\n bg = mfit.getPeakProperty(\"background\")\n for i in range(bg.size):\n assert (abs(bg[i] - 10.0) < 1.0e-6)\n\n mfit.cleanup(verbose = False)\n\ndef test_ia_util_4():\n \"\"\"\n Multiple z planes test.\n \"\"\"\n x_size = 100\n y_size = 80\n images = [numpy.zeros((x_size,y_size), dtype = numpy.float64),\n numpy.zeros((x_size,y_size), dtype = numpy.float64),\n numpy.zeros((x_size,y_size), dtype = numpy.float64)]\n z_values = [1.0,2.0,3.0]\n\n images[0][20,10] = 1.3\n images[1][20,10] = 1.2\n images[2][20,10] = 1.5\n\n # Default z range (the entire stack).\n mxf = iaUtilsC.MaximaFinder(margin = 1,\n radius = 2,\n threshold = 1,\n z_values = z_values)\n\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert(x.size == 1)\n assert(abs(z[0] - z_values[2]) < 1.0e-6)\n\n # z range is limited to adjacent slices.\n #\n mxf = iaUtilsC.MaximaFinder(margin = 1,\n radius = 2,\n threshold = 1,\n z_range = 1,\n z_values = z_values)\n\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert(x.size == 2)\n assert(abs(z[0] - 
z_values[0]) < 1.0e-6)\n assert(abs(z[1] - z_values[2]) < 1.0e-6)\n\n # z range is limited to current slice.\n #\n mxf = iaUtilsC.MaximaFinder(margin = 1,\n radius = 2,\n threshold = 1,\n z_range = 0,\n z_values = z_values)\n\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert(x.size == 3)\n for i in range(z.size):\n assert(abs(z[i] - z_values[i]) < 1.0e-6)\n \ndef test_ia_util_5():\n \"\"\"\n Test that limits on the number of peak duplicates are working.\n \"\"\"\n x_size = 100\n y_size = 80\n images = [numpy.zeros((x_size,y_size), dtype = numpy.float64)]\n z_values = [0.1]\n\n # Single peak.\n images[0][10,21] = 1.1\n images[0][10,20] = 1.2\n \n mxf = iaUtilsC.MaximaFinder(margin = 1,\n radius = 2,\n threshold = 1,\n z_values = z_values)\n\n # Find the peak.\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert (x.size == 1)\n\n # This should not find anything, since the peak was\n # already found above.\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert (x.size == 0)\n\n # Reset and now we should find it again.\n mxf.resetTaken()\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert (x.size == 1)\n \ndef test_ia_util_6():\n \"\"\"\n Test radius.\n \"\"\"\n x_size = 100\n y_size = 80\n images = [numpy.zeros((x_size,y_size), dtype = numpy.float64)]\n z_values = [0.1]\n\n images[0][10,23] = 1.2\n images[0][10,22] = 1.1\n images[0][10,21] = 1.1\n images[0][10,20] = 1.2\n\n # Should only find 1 peak. \n mxf = iaUtilsC.MaximaFinder(margin = 1,\n radius = 3,\n threshold = 1,\n z_values = z_values)\n\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert (x.size == 1)\n\n # Should find two peaks.\n mxf = iaUtilsC.MaximaFinder(margin = 1,\n radius = 2,\n threshold = 1,\n z_values = z_values)\n\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert (x.size == 2)\n\ndef test_ia_util_7():\n \"\"\"\n Test margin.\n \"\"\"\n x_size = 100\n y_size = 80\n images = [numpy.zeros((x_size,y_size), dtype = numpy.float64)]\n z_values = [0.1]\n\n images[0][10,5] = 1.2\n images[0][10,6] = 1.1\n images[0][10,7] = 1.1\n images[0][20,10] = 1.1\n images[0][20,11] = 1.2\n\n # Should only find 1 peak.\n mxf = iaUtilsC.MaximaFinder(margin = 6,\n radius = 3,\n threshold = 1,\n z_values = z_values)\n\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert (x.size == 1)\n\n # Should find two peaks.\n mxf = iaUtilsC.MaximaFinder(margin = 4,\n radius = 3,\n threshold = 1,\n z_values = z_values)\n\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert (x.size == 2)\n\ndef test_ia_util_8():\n \"\"\"\n Test allowing multiple duplicates.\n \"\"\"\n x_size = 100\n y_size = 80\n images = [numpy.zeros((x_size,y_size), dtype = numpy.float64)]\n z_values = [0.1]\n\n # Single peak.\n images[0][10,21] = 1.1\n images[0][10,20] = 1.2\n \n mxf = iaUtilsC.MaximaFinder(margin = 1,\n n_duplicates = 2,\n radius = 2,\n threshold = 1,\n z_values = z_values)\n\n # Find the peak.\n np = [1, 1, 0]\n for elt in np:\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert (x.size == elt)\n\n # Reset.\n mxf.resetTaken()\n \n # Test again.\n np = [1, 1, 0]\n for elt in np:\n [x, y, z] = mxf.findMaxima(imagesCopy(images))\n assert (x.size == elt)\n\ndef test_ia_util_9():\n \"\"\"\n Test runningIfHasNeighbors() function.\n \"\"\"\n # Test 4 of 5 with new neighbors, one in error state.\n c_x = numpy.array([1.0, 2.0, 3.0, 4.0, 5.0])\n c_y = numpy.array([1.0, 1.0, 1.0, 1.0, 1.0])\n n_x = numpy.array([1.1, 2.1, 3.1, 4.1])\n n_y = numpy.array([1.1, 1.1, 1.1, 1.1])\n status = numpy.array([0, 1, 2, 1, 1], dtype = 
numpy.int32)\n\n    new_status = iaUtilsC.runningIfHasNeighbors(status, c_x, c_y, n_x, n_y, 0.5)\n    correct = [0, 0, 2, 0, 1]\n    for i in range(new_status.size):\n        assert(new_status[i] == correct[i])\n\n    # Test 2 of 5 with new neighbors, one in error state.\n    n_x = numpy.array([1.9, 2.1])\n    n_y = numpy.array([1.1, 1.1])\n    status = numpy.array([0, 1, 2, 1, 1], dtype = numpy.int32)\n\n    new_status = iaUtilsC.runningIfHasNeighbors(status, c_x, c_y, n_x, n_y, 0.5)\n    correct = [0, 0, 2, 1, 1]\n    for i in range(new_status.size):\n        assert(new_status[i] == correct[i])\n\n    # Test 1 of 2 with new neighbors, but both within radius of each other.\n    c_x = numpy.array([2.0, 3.0])\n    c_y = numpy.array([2.0, 2.0])\n    n_x = numpy.array([1.0])\n    n_y = numpy.array([2.0])\n    status = numpy.array([1, 1], dtype = numpy.int32)\n\n    new_status = iaUtilsC.runningIfHasNeighbors(status, c_x, c_y, n_x, n_y, 1.5)\n    correct = [0, 1]\n    for i in range(new_status.size):\n        assert(new_status[i] == correct[i])\n    \ndef test_ia_util_10():\n    \"\"\"\n    Test markDimmerPeaks() function.\n    \"\"\"\n    n_peaks = 25\n    x = numpy.random.uniform(size = n_peaks)\n    y = numpy.random.uniform(size = n_peaks)\n    h = numpy.random.uniform(size = n_peaks) + 1.0\n    status = numpy.ones(n_peaks, dtype = numpy.int32)*iaUtilsC.CONVERGED\n\n    # Make first peak the tallest.\n    h[0] = 4.0\n\n    # Move last peak outside of the removal radius.\n    x[-1] = 4.0\n\n    # Move second to last peak away from everything.\n    x[-2] = 40.0\n    \n    assert (iaUtilsC.markDimmerPeaks(x, y, h, status, 2.0, 5.0) == (n_peaks - 3))\n\n    for i in range(1,n_peaks-2):\n        assert(status[i] == iaUtilsC.ERROR)\n    assert(status[0] == iaUtilsC.RUNNING)\n    assert(status[-1] == iaUtilsC.RUNNING)\n    assert(status[-2] == iaUtilsC.CONVERGED)\n\ndef test_ia_util_11():\n    \"\"\"\n    Test finding peaks in an image.\n    \"\"\"\n    x_size = 100\n    y_size = 80\n    images = [numpy.zeros((x_size,y_size), dtype = numpy.float64)]\n    z_values = [0.1]\n\n    # A row of peaks greater than radius with decreasing heights, there\n    # should still only be a single maxima.\n    images[0][10,11] = 1.6\n    images[0][10,12] = 1.5\n    images[0][10,13] = 1.4\n    images[0][10,14] = 1.3\n    images[0][10,15] = 1.2\n    images[0][10,16] = 1.1\n    \n    mxf = iaUtilsC.MaximaFinder(margin = 1,\n                                radius = 2,\n                                threshold = 1,\n                                z_values = z_values)\n\n    [x, y, z] = mxf.findMaxima(images)\n    assert(x.size == 1)\n    assert(abs(x[0] - 11.0) < 1.0e-6)\n    assert(abs(y[0] - 10.0) < 1.0e-6)\n\ndef test_ia_util_12():\n    \"\"\"\n    Test removeNeighbors().\n    \"\"\"\n    x = numpy.array([1.0, 2.0, 4.0])\n    y = numpy.ones(x.size)\n\n    [px, py] = iaUtilsC.removeNeighbors(x, y, 0.5)\n    assert(px.size == 3)\n    assert(py.size == 3)\n    \n    [px, py] = iaUtilsC.removeNeighbors(x, y, 1.5)\n    assert(px.size == 1)\n    assert(py.size == 1)\n\ndef test_ia_util_13():\n    \"\"\"\n    Test marking peak status (and neighbors status) based on significance.\n    \"\"\"\n    px = numpy.array([30.0,35.0,40.0])\n    py = numpy.array([30.0,30.0,30.0]) \n    sig = numpy.array([1.0,2.0,3.0])\n\n    # This should not update anything.\n    status = numpy.ones(3, dtype = numpy.int32)*iaUtilsC.CONVERGED\n    n_marked = iaUtilsC.markLowSignificancePeaks(px,\n                                                 py,\n                                                 sig,\n                                                 status,\n                                                 0.0,\n                                                 7.0)\n\n    assert(n_marked == 0)\n    for i in range(3):\n        assert(status[i] == iaUtilsC.CONVERGED)\n\n    # This should only mark the first peak for removal.\n    status = numpy.ones(3, dtype = numpy.int32)*iaUtilsC.CONVERGED\n    n_marked = iaUtilsC.markLowSignificancePeaks(px,\n                                                 py,\n                                                 sig,\n                                                 status,\n                                                 sig[0] + 0.1,\n                                                 3.0)\n    assert(n_marked == 1)\n    assert(status[0] == iaUtilsC.ERROR)\n    
assert(status[1] == iaUtilsC.CONVERGED)\n    assert(status[2] == iaUtilsC.CONVERGED)\n\n    # This should mark the first peak for removal and the second as RUNNING.\n    status = numpy.ones(3, dtype = numpy.int32)*iaUtilsC.CONVERGED\n    n_marked = iaUtilsC.markLowSignificancePeaks(px,\n                                                 py,\n                                                 sig,\n                                                 status,\n                                                 sig[0] + 0.1,\n                                                 7.0)\n    assert(n_marked == 1)\n    assert(status[0] == iaUtilsC.ERROR)\n    assert(status[1] == iaUtilsC.RUNNING)\n    assert(status[2] == iaUtilsC.CONVERGED)\n\n    # This should mark the first peak for removal and both others as RUNNING.\n    status = numpy.ones(3, dtype = numpy.int32)*iaUtilsC.CONVERGED\n    n_marked = iaUtilsC.markLowSignificancePeaks(px,\n                                                 py,\n                                                 sig,\n                                                 status,\n                                                 sig[0] + 0.1,\n                                                 11.0)\n    assert(n_marked == 1)\n    assert(status[0] == iaUtilsC.ERROR)\n    assert(status[1] == iaUtilsC.RUNNING)\n    assert(status[2] == iaUtilsC.RUNNING)\n\n    # This should mark the first & second peaks for removal and the last as RUNNING.\n    status = numpy.ones(3, dtype = numpy.int32)*iaUtilsC.CONVERGED\n    n_marked = iaUtilsC.markLowSignificancePeaks(px,\n                                                 py,\n                                                 sig,\n                                                 status,\n                                                 sig[1] + 0.1,\n                                                 7.0)\n    assert(n_marked == 2)\n    assert(status[0] == iaUtilsC.ERROR)\n    assert(status[1] == iaUtilsC.ERROR)\n    assert(status[2] == iaUtilsC.RUNNING)    \n\ndef test_ia_util_14():\n    \"\"\"\n    Some tests of our KDTree.\n    \"\"\"\n    x1 = numpy.array([1.0, 2.0, 3.0])\n    y1 = numpy.array([1.0, 1.0, 1.0])\n    kd = iaUtilsC.KDTree(x = x1, y = y1)\n\n    x2 = numpy.array([1.1, 4.0])\n    y2 = numpy.array([1.0, 1.0])\n\n    [dist, index] = kd.nearest(x2, y2, 0.2)\n    assert(abs(dist[0] - 0.1) < 1.0e-6)\n    assert(abs(dist[1] + 1.0) < 1.0e-6)\n    assert(index[0] == 0)\n    assert(index[1] == -1)\n    \n    [dist, index] = kd.nearest(x2, y2, 3.1)\n    assert(abs(dist[0] - 0.1) < 1.0e-6)\n    assert(abs(dist[1] - 1.0) < 1.0e-6)\n    assert(index[0] == 0)\n    assert(index[1] == 2)    \n\n    kd.cleanup()\n    \n    \nif (__name__ == \"__main__\"):\n    test_ia_util_1()\n    test_ia_util_2()\n    test_ia_util_3()\n    test_ia_util_4()\n    test_ia_util_5()\n    test_ia_util_6()\n    test_ia_util_7()\n    test_ia_util_8()\n    test_ia_util_9()\n    test_ia_util_10()\n    test_ia_util_11()\n    test_ia_util_12()\n    test_ia_util_13()\n    test_ia_util_14()\n"
] | [
[
"numpy.zeros",
"numpy.save",
"numpy.ones"
],
[
"numpy.ones",
"numpy.copy",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Mic-Tsai/Power-Consumption-Current-Sense-System-V22 | [
"7fe8348171efe53a2985a591ef7cf657bacc5fbd"
] | [
"example/Python_Plot/Battery example/ee_0120_Y_consist.py"
] | [
"import argparse, re, sys, os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\npath = ''\nflname = sys.argv[1]\ntry:\n\tchartType = sys.argv[2]\nexcept:\n\tchartType = 'ch1_vload'\nprint('chartType:'+chartType)\t\n\nfl = flname.split('/')\nfor i in fl[:-1]:\n\tpath = path+i+'/'\n\nfw = open(flname, 'r')\nrawdata = fw.read().strip()\n\nch1_list = []\nch2_list = []\n\nch1_vload = []\nch1_volt = []\nch1_iload = []\nch1_pload = []\n\nch2_vload = []\nch2_volt = []\nch2_iload = []\nch2_pload = []\n\nunit = ''\n\nline = rawdata.split('\\n')\nfor aline in line:\n\t\n\ttmp = aline.split('||')\n\tch1_list.append(tmp[0].lstrip())\n\tch2_list.append(tmp[2].lstrip())\n\nfor item in ch1_list:\n\ttmp = item.split(' | ')\n\tfor sub in tmp:\n\t\tif sub.count(\"V-load\"):\n\t\t\tch1_vload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"Voltage\"):\n\t\t\tch1_volt.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"I-load\"):\n\t\t\tch1_iload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"P-load\"):\n\t\t\tch1_pload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\nfor item in ch2_list:\n\ttmp = item.split(' | ')\n\tfor sub in tmp:\n\t\tif sub.count(\"V-load\"):\n\t\t\tch2_vload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"Voltage\"):\n\t\t\tch2_volt.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"I-load\"):\n\t\t\tch2_iload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\t\telif sub.count(\"P-load\"):\n\t\t\tch2_pload.append(float(re.search('\\d+\\.\\d+', sub).group()))\n\nif chartType.lower().count('vload') or chartType.lower().count('v-load'):\n\tprint('**vload')\n\tunit = 'V'\n\tif chartType.lower().count('ch1'):\n\t\ty = ch1_vload\n\telse:\n\t\ty = ch2_vload\nelif chartType.lower().count('volt'):\n\tprint('**volt')\n\tunit = 'mV'\n\tif chartType.lower().count('ch1'):\n\t\ty = ch1_volt\n\telse:\n\t\ty = ch2_volt\nelif chartType.lower().count('iload') or chartType.lower().count('i-load'):\n\tprint('**iload')\n\tunit = 'mA'\n\tif chartType.lower().count('ch1'):\n\t\ty = ch1_iload\n\telse:\n\t\ty = ch2_iload\nelif chartType.lower().count('pload') or chartType.lower().count('p-load'):\n\tprint('**pload')\n\tunit = 'mW'\n\tif chartType.lower().count('ch1'):\n\t\ty = ch1_pload\n\telse:\n\t\ty = ch2_pload\n\nx = np.linspace(1,len(y),len(y))\nfig = plt.figure(1)\nax = plt.axes()\nplt.xlim([0, len(y)])\nplt.ylim([0,160])\nplt.plot(x,y,ls='-',c='b')\nplt.grid('on')\nplt.title(chartType)\nplt.ylabel('['+unit+']')\nplt.savefig(path+chartType+'.png')\nprint(\"File Path:\"+path+chartType+'.png')\n\n\n\n\n\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andytorrestb/rarefiedPlume | [
"c09234c701c395d16519d8a361eae17540711530"
] | [
"cases/1d/graphCaseValidation.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\n\n# Find path for cases\ncurr_dir_path = os.path.dirname(os.path.realpath(__file__))\n# print(curr_dir_path)\n# cases = os.listdir(curr_dir_path + '/Cases')\n# pop = cases.index('baseCase')\n# cases.pop(pop)\n\n# Label graph with bold characters\nfont_axis_publish = {\n 'color': 'black',\n 'weight': 'bold',\n 'size': 22,\n }\n\n# Read in digitized data\ndigi_n = pd.read_csv(\n curr_dir_path + '/n_nstar_radius.dat',\n header = 0,\n sep = '\\t',\n names = ['r', 'n_nstar']\n )\n\ndigi_T = pd.read_csv(\n curr_dir_path + '/T_Tstar_radius_DAC.dat',\n header = 0,\n sep = '\\t',\n names = ['r', 'T_Tstar']\n )\n\n# Read in simulated data. \nsim = pd.read_csv(\n curr_dir_path + '/postProcessing/sampleDict/0.3/horizontalLine_Ttra_Ar_rhoN_Ar.csv'\n )\n\n# Used to see what the values trend to. \nprint(sim['Ttra_Ar'])\n\nsim = sim[['x', 'rhoN_Ar', 'Ttra_Ar']].dropna()\nsim['rhoN_Ar'] = sim['rhoN_Ar'] / 8.377e20\nsim['Ttra_Ar'] = sim['Ttra_Ar'] / 1000.0\n \n\n# Producde Analytical Data\ndef TTt_Ma(Ma, ga = 1.4):\n return (ga + 1) / (2 + (ga - 1) * Ma ** 2)\n\ndef rrt_Ma(Ma, ga = 1.4):\n rrt = (1 / TTt_Ma(Ma, ga)) ** ((ga + 1) / (ga - 1))\n rrt = np.sqrt(np.sqrt(rrt) / Ma)\n return rrt\n\ndef nnt_Ma(Ma, ga = 1.4):\n return TTt_Ma(Ma, ga) ** (1 / (ga - 1))\n\ndef a(T, ga = 1.4, R = 287):\n return np.sqrt(ga * R * T)\n\nMa_domain = np.linspace(1, 25, 100) \nga = 1.67\nTTt = TTt_Ma(Ma_domain, ga = ga)\nrrt = rrt_Ma(Ma_domain, ga = ga)\nnnt = nnt_Ma(Ma_domain, ga = ga)\n\nprint(\"Printing rrt\")\nprint(rrt)\n\n# Graph Results\nplt.title('OpenFOAM vs DAC', fontdict = font_axis_publish)\nplt.ylabel('n/n*', fontdict = font_axis_publish)\nplt.xlabel('Radial distance, r (m)', fontdict = font_axis_publish)\n\nplt.plot(sim['x'], sim['rhoN_Ar'], label = 'OpenFOAM (Torres, Pitt, Kinzel)')\nplt.plot(digi_n['r'], digi_n['n_nstar'], label = 'DAC (Lumpkin, Stewart)')\nplt.plot(rrt, nnt, label = 'Analytical Solution')\nplt.legend()\nplt.yscale('log')\nplt.ylim(bottom = 1e-4, top = 1)\nplt.savefig(curr_dir_path + '/digitized_vs_analytical_n.png')\nplt.close()\n\nplt.title('OpenFOAM vs DAC', fontdict = font_axis_publish)\nplt.ylabel('T/T*', fontdict = font_axis_publish)\nplt.xlabel('Radial distance, r (m)', fontdict = font_axis_publish)\n\nplt.plot(sim['x'], sim['Ttra_Ar'], label = 'OpenFOAM (Torres, Pitt, Kinzel)')\nplt.plot(digi_T['r'], digi_T['T_Tstar'], label = 'DAC (Lumpkin, Stewart)')\nplt.plot(rrt, TTt, label = 'Analytical Solution')\nplt.legend()\nplt.yscale('log')\nplt.ylim(bottom = 1e-3, top = 1)\nplt.savefig(curr_dir_path + '/digitized_vs_analytical_T.png')\nplt.close()\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"numpy.sqrt",
"matplotlib.pyplot.title",
"numpy.linspace",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
nakashima-kodai/FractalDB_Pretrained_ViT_PyTorch | [
"5d1df4023f05f5a8ff7e8a8810bf95119a0eeb96"
] | [
"pretrain.py"
] | [
"import os, sys\nimport math\n\nimport hydra\nimport torch\nimport timm\nfrom hydra.utils import instantiate\nfrom timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy\nfrom timm.utils import NativeScaler\n\nimport models\nfrom data import create_dataloader\nfrom utils import MetricLogger, SmoothedValue\nfrom utils import fix_random_seed\n\n\[email protected](config_path='./configs', config_name='pretrain')\ndef main(cfg):\n if cfg.seed is not None:\n fix_random_seed(cfg.seed)\n torch.backends.cudnn.benchmark = True\n\n # dataloader\n trainloader, num_classes = create_dataloader(cfg.data)\n\n # additional data augmentation (mixup/cutmix)\n mixup_fn = None\n mixup_enable = (cfg.data.mixup.mixup_alpha > 0.) or (cfg.data.mixup.cutmix_alpha > 0.)\n if mixup_enable:\n mixup_fn = instantiate(cfg.data.mixup, num_classes=num_classes)\n print(f'MixUp/Cutmix was enabled\\n')\n\n # create model\n model = instantiate(cfg.model, num_classes=num_classes)\n print(f'Model[{cfg.model.model_name}] was created')\n\n # wrap model with DP\n model = torch.nn.parallel.DataParallel(model)\n model.cuda()\n model_without_dp = model.module\n\n # optimizer\n scaled_lr = cfg.optim.args.lr * cfg.data.loader.batch_size / 512.0\n cfg.optim.args.lr = scaled_lr\n optimizer = instantiate(cfg.optim, model=model)\n print(f'Optimizer: \\n{optimizer}\\n')\n\n # scheduler\n lr_scheduler, _ = instantiate(cfg.scheduler, optimizer=optimizer)\n print(f'Scheduler: \\n{lr_scheduler}\\n')\n \n # criterion\n if cfg.data.mixup.mixup_alpha > 0.:\n criterion = SoftTargetCrossEntropy().cuda()\n print('SoftTargetCrossEntropy is used for criterion\\n')\n elif cfg.data.mixup.label_smoothing > 0.:\n criterion = LabelSmoothingCrossEntropy(cfg.data.mixup.label_smoothing).cuda()\n print('LabelSmoothingCrossEntropy is used for criterion\\n')\n else:\n criterion = torch.nn.CrossEntropyLoss().cuda()\n print('CrossEntropyLoss is used for criterion\\n')\n loss_scaler = NativeScaler()\n\n # load resume\n start_epoch = 1\n if cfg.resume is not None:\n checkpoint = torch.load(cfg.resume, map_location='cpu')\n model_without_dp.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n loss_scaler.load_state_dict(checkpoint['scaler'])\n start_epoch = checkpoint['epoch'] + 1\n print(f'Resume was loaded from {cfg.resume}\\n')\n\n print(f'Start training for {cfg.epochs} epochs')\n for epoch in range(start_epoch, cfg.epochs + 1):\n # train one epoch\n model.train()\n metric_logger = MetricLogger(delimiter=' ')\n metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = f'Epoch: [{epoch:03}/{cfg.epochs:03}]'\n for data in metric_logger.log_every(trainloader, cfg.print_iter_freq, header):\n images = data[0].cuda(non_blocking=True)\n labels = data[1].cuda(non_blocking=True)\n\n if mixup_fn is not None:\n images, labels = mixup_fn(images, labels)\n \n with torch.cuda.amp.autocast():\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n loss_value = loss.item()\n if not math.isfinite(loss_value):\n print(f'Loss is {loss_value}, stopping training')\n sys.exit(1)\n\n optimizer.zero_grad()\n is_second_order = (hasattr(optimizer, 'is_second_order')) and (optimizer.is_second_order)\n loss_scaler(\n loss=loss,\n optimizer=optimizer,\n parameters=model.parameters(),\n create_graph=is_second_order\n )\n\n torch.cuda.synchronize()\n \n metric_logger.update(loss=loss_value)\n 
metric_logger.update(lr=optimizer.param_groups[0]['lr'])\n    \n        # gather the stats from all processes\n        metric_logger.synchronize_between_processes()\n        print(f'Averaged stats: {metric_logger}')\n\n        lr_scheduler.step(epoch)\n\n        if epoch % cfg.save_epoch_freq == 0:\n            save_path = f'{os.getcwd()}/{cfg.model.model_name}_{cfg.data.name}_{epoch:03}ep.pth'\n            torch.save({\n                'model': model_without_dp.state_dict(),\n                'optimizer': optimizer.state_dict(),\n                'lr_scheduler': lr_scheduler.state_dict(),\n                'scaler': loss_scaler.state_dict(),\n                'epoch': epoch\n            }, save_path)\n\n    save_path = f'{os.getcwd()}/{cfg.model.model_name}_{cfg.data.name}_{epoch:03}ep.pth'\n    torch.save({\n        'model': model_without_dp.state_dict(),\n        'optimizer': optimizer.state_dict(),\n        'lr_scheduler': lr_scheduler.state_dict(),\n        'scaler': loss_scaler.state_dict(),\n        'epoch': epoch\n    }, save_path)\n\n\nif __name__ == '__main__':\n    main()\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.synchronize",
"torch.nn.parallel.DataParallel",
"torch.load",
"torch.cuda.amp.autocast"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chekoduadarsh/deep_autoviml | [
"157fbdc2611dc0fbaee5fc4ebebe3e7c1eeb9b52"
] | [
"deep_autoviml/preprocessing/preprocessing_images.py"
] | [
"#Copyright 2021 Google LLC\r\n\r\n#Licensed under the Apache License, Version 2.0 (the \"License\");\r\n#you may not use this file except in compliance with the License.\r\n#You may obtain a copy of the License at\r\n#\r\n# https://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n#Unless required by applicable law or agreed to in writing, software\r\n#distributed under the License is distributed on an \"AS IS\" BASIS,\r\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n#See the License for the specific language governing permissions and\r\n#limitations under the License.\r\n############################################################################################\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport tempfile\r\nimport pdb\r\nimport copy\r\nimport warnings\r\nwarnings.filterwarnings(action='ignore')\r\nimport functools\r\nfrom itertools import combinations\r\nfrom collections import defaultdict\r\n\r\n# Make numpy values easier to read.\r\nnp.set_printoptions(precision=3, suppress=True)\r\n############################################################################################\r\n# data pipelines and feature engg here\r\n\r\n# pre-defined TF2 Keras models and your own models here\r\nfrom deep_autoviml.data_load.classify_features import check_model_options\r\n\r\n# Utils\r\n\r\n############################################################################################\r\n# TensorFlow ≥2.4 is required\r\nimport tensorflow as tf\r\nnp.random.seed(42)\r\ntf.random.set_seed(42)\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.layers.experimental.preprocessing import Normalization, StringLookup, Hashing\r\nfrom tensorflow.keras.layers.experimental.preprocessing import IntegerLookup, CategoryEncoding, CategoryCrossing\r\nfrom tensorflow.keras.layers.experimental.preprocessing import TextVectorization, Discretization\r\nfrom tensorflow.keras.layers import Embedding, Flatten\r\n\r\nfrom tensorflow.keras.optimizers import SGD, Adam, RMSprop\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras import optimizers\r\nfrom tensorflow.keras.models import Model, load_model\r\nfrom tensorflow.keras import callbacks\r\nfrom tensorflow.keras import backend as K\r\nfrom tensorflow.keras import utils\r\nfrom tensorflow.keras.layers import BatchNormalization\r\nfrom tensorflow.keras.optimizers import SGD\r\nfrom tensorflow.keras import regularizers\r\nimport tensorflow_hub as hub\r\nimport tensorflow_text as text\r\n\r\nfrom sklearn.metrics import roc_auc_score, mean_squared_error, mean_absolute_error\r\nfrom IPython.core.display import Image, display\r\nimport pickle\r\n#############################################################################################\r\n##### Suppress all TF2 and TF1.x warnings ###################\r\ntry:\r\n tf.logging.set_verbosity(tf.logging.ERROR)\r\nexcept:\r\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\r\n############################################################################################\r\nfrom tensorflow.keras.layers import Reshape, MaxPooling1D, MaxPooling2D, AveragePooling2D, AveragePooling1D\r\nfrom tensorflow.keras import Model, Sequential\r\nfrom tensorflow.keras.layers import Activation, Dense, Embedding, GlobalAveragePooling1D, GlobalMaxPooling1D, Dropout, Conv1D\r\nfrom tensorflow.keras.layers.experimental.preprocessing import 
TextVectorization\r\n############################################################################################\r\ndef preprocessing_images(train_ds, model_options):\r\n    \"\"\"\r\n    This produces a preprocessing layer for an incoming tf.data.Dataset. It can be images only.\r\n    You need to just send in a tf.data.DataSet from the training folder and a model_options dictionary.\r\n    It will return a full-model-ready layer that you can add to your Keras Functional model as an image layer!\r\n    ########### Motivation and suggestions for coding for Image processing came from this blog #########\r\n    Greatly indebted to Srivatsan for his Github and notebooks: https://github.com/srivatsan88/YouTubeLI\r\n    ####################################################################################################\r\n    \"\"\"\r\n    try:\r\n        ####### L O A D    F E A T U R E    E X T R A C T O R ################\r\n        url = \"https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4\"\r\n        feature_extractor = check_model_options(model_options, \"tf_hub_model\", url)\r\n        img_height = model_options[\"image_height\"]\r\n        img_width = model_options[\"image_width\"]\r\n        image_channels = model_options[\"image_channels\"]\r\n        num_predicts = model_options[\"num_predicts\"]\r\n        try:\r\n            feature_extractor_layer = hub.KerasLayer(feature_extractor, input_shape=(\r\n                                    img_height,img_width,image_channels))\r\n        except:\r\n            print('Loading model from Tensorflow Hub failed. Check the URL and try again...')\r\n            return\r\n        feature_extractor_layer.trainable = False\r\n        normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)\r\n        tf.random.set_seed(111)\r\n        model = tf.keras.Sequential([\r\n                        normalization_layer,\r\n                        feature_extractor_layer,\r\n                        tf.keras.layers.Dropout(0.3),\r\n                        tf.keras.layers.Dense(num_predicts,activation='softmax')\r\n                    ])\r\n        model.compile(\r\n          optimizer='adam',\r\n          # final Dense layer applies softmax, so the loss receives probabilities, not logits\r\n          loss=tf.losses.SparseCategoricalCrossentropy(from_logits=False),\r\n          metrics=['accuracy'])\r\n    except:\r\n        print('    Error: Failed image preprocessing layer. Returning...')\r\n        return\r\n    return model\r\n"
] | [
[
"numpy.random.seed",
"tensorflow.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"numpy.set_printoptions",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.logging.set_verbosity",
"tensorflow.keras.layers.experimental.preprocessing.Rescaling",
"tensorflow.keras.layers.Dropout",
"tensorflow.random.set_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gdmcbain/quadpy | [
"c083d500027d7c1b2187ae06ff2b7fbdd360ccc7",
"c083d500027d7c1b2187ae06ff2b7fbdd360ccc7",
"c083d500027d7c1b2187ae06ff2b7fbdd360ccc7",
"c083d500027d7c1b2187ae06ff2b7fbdd360ccc7",
"c083d500027d7c1b2187ae06ff2b7fbdd360ccc7",
"c083d500027d7c1b2187ae06ff2b7fbdd360ccc7"
] | [
"quadpy/e3r/tools.py",
"quadpy/tetrahedron/tools.py",
"tools/witherden_vincent/import_witherden_vincent.py",
"quadpy/ncube/stroud.py",
"quadpy/sphere/helpers.py",
"quadpy/e3r2/tools.py"
] | [
"# -*- coding: utf-8 -*-\n#\nimport numpy\n\nfrom .. import helpers\n\n\ndef integrate(f, rule, dot=numpy.dot):\n flt = numpy.vectorize(float)\n return dot(f(flt(rule.points).T), flt(rule.weights))\n\n\ndef show(scheme, backend=\"mpl\"):\n \"\"\"Displays scheme for E_3^r quadrature.\n \"\"\"\n helpers.backend_to_function[backend](\n scheme.points, scheme.weights, volume=8 * numpy.pi, edges=[]\n )\n return\n",
"# -*- coding: utf-8 -*-\n#\nimport numpy\n\nfrom .. import helpers\nfrom ..nsimplex import transform, get_vol\n\n\ndef show(\n scheme,\n tet=numpy.array(\n [\n [+1, 0, -1.0 / numpy.sqrt(2.0)],\n [-1, 0, -1.0 / numpy.sqrt(2.0)],\n [0, +1, +1.0 / numpy.sqrt(2.0)],\n [0, -1, +1.0 / numpy.sqrt(2.0)],\n ]\n ),\n backend=\"mpl\",\n):\n edges = numpy.array([[tet[i], tet[j]] for i in range(4) for j in range(i)])\n edges = numpy.moveaxis(edges, 1, 2)\n helpers.backend_to_function[backend](\n transform(scheme.points.T, tet.T).T, scheme.weights, get_vol(tet), edges\n )\n return\n",
"\"\"\"\nImport data from Witherden/Vincent.\n\nzip file: https://www.sciencedirect.com/science/article/pii/S0898122115001224\n\"\"\"\nimport json\nimport os\nimport re\n\nimport numpy\n\n\ndef read_data_tri(filename):\n data = numpy.loadtxt(filename, dtype=float)\n if len(data.shape) == 1:\n data = numpy.array([data])\n\n points = data[:, :2]\n weights = data[:, 2]\n # The reference triangle is (-1, -1), (1, -1), (-1, 1). Transform the\n # points to barycentric coordinates.\n points += 1.0\n points *= 0.5\n points = numpy.array(\n [points[:, 0], points[:, 1], 1.0 - numpy.sum(points, axis=1)]\n ).T\n return points, weights * 0.5\n\n\ndef read_data_tet(filename):\n data = numpy.loadtxt(filename)\n if len(data.shape) == 1:\n data = numpy.array([data])\n points = data[:, :3]\n weights = data[:, 3]\n # Transform to barycentric coordinates.\n points += 1.0\n points *= 0.5\n points = numpy.array(\n [points[:, 0], points[:, 1], 1.0 - numpy.sum(points, axis=1)]\n ).T\n return points, weights * 0.75\n\n\ndef _grp_start_len(a, tol):\n '''Given a sorted 1D input array `a`, e.g., [0 0, 1, 2, 3, 4, 4, 4], this\n routine returns the indices where the blocks of equal integers start and\n how long the blocks are.\n '''\n # https://stackoverflow.com/a/50394587/353337\n m = numpy.concatenate([[True], numpy.abs((a[:-1] - a[1:])) > tol, [True]])\n idx = numpy.flatnonzero(m)\n return idx[:-1], numpy.diff(idx)\n\n\ndef data_to_json(degree, points, weights):\n d = {\"s1\": [], \"s2\": [], \"s3\": []}\n\n idx = numpy.argsort(weights)\n weights = weights[idx]\n points = points[idx]\n\n # get groups of equal weights\n for s, length in zip(*_grp_start_len(weights, 1.0e-12)):\n weight = weights[s]\n pts = points[s: s + length]\n if length == 1:\n d[\"s3\"].append([weight])\n elif length == 3:\n # Symmetry group [[a, a, b], [a, b, a], [b, a, a]].\n # Find the equal value `a`.\n tol = 1.0e-12\n beta = pts[0] - pts[0][0]\n ct = numpy.count_nonzero(abs(beta) < tol)\n assert ct in [1, 2], beta\n val = pts[0][0] if ct == 2 else pts[0][1]\n d[\"s2\"].append([weight, val])\n else:\n # Symmetry group perm([[a, b, c]]). Deliberately take the two smallest of a,\n # b, c as representatives.\n assert length == 6\n srt = numpy.sort(pts[0])\n d[\"s1\"].append([weight, srt[0], srt[1]])\n\n d[\"degree\"] = degree\n\n if len(d[\"s1\"]) == 0:\n d.pop(\"s1\")\n if len(d[\"s2\"]) == 0:\n d.pop(\"s2\")\n if len(d[\"s3\"]) == 0:\n d.pop(\"s3\")\n\n # Getting floats in scientific notation in python.json is almost impossible, so do\n # some work here. 
Compare with <https://stackoverflow.com/a/1733105/353337>.\n class PrettyFloat(float):\n def __repr__(self):\n return '{:.16e}'.format(self)\n\n def pretty_floats(obj):\n if isinstance(obj, float):\n return PrettyFloat(obj)\n elif isinstance(obj, dict):\n return dict((k, pretty_floats(v)) for k, v in obj.items())\n elif isinstance(obj, (list, tuple)):\n return list(map(pretty_floats, obj))\n return obj\n\n with open('wv{:02d}.json'.format(degree), \"w\") as f:\n string = pretty_floats(d).__repr__() \\\n .replace(\"'\", \"\\\"\") \\\n .replace(\"[[\", \"[\\n [\") \\\n .replace(\"],\", \"],\\n \") \\\n .replace(\"]],\", \"]\\n ],\")\n f.write(string)\n\n return\n\n\ndef import_triangle():\n directory = 'zip/expanded/tri/'\n for k, file in enumerate(os.listdir(directory)):\n filename = os.fsdecode(file)\n out = re.match(\"([0-9]+)-([0-9]+)\\.txt\", filename)\n degree = int(out.group(1))\n x, weights = read_data_tri(os.path.join(directory, filename))\n data_to_json(degree, x, weights)\n return\n\n\ndef import_tet():\n filenames = [\n \"1-1.txt\",\n \"2-4.txt\",\n \"3-8.txt\",\n \"5-14.txt\",\n \"6-24.txt\",\n \"7-35.txt\",\n \"8-46.txt\",\n \"9-59.txt\",\n \"10-81.txt\",\n ]\n for k, filename in enumerate(filenames):\n out = re.match(\"([0-9]+)-([0-9]+)\\.txt\", filename)\n strength = out.group(1)\n print(\"elif degree == {}:\".format(strength))\n print(\" data = [\")\n x, weights = read_data_tet(filename)\n data_to_code(x, weights)\n print(8 * \" \" + \"]\")\n return\n\n\nif __name__ == \"__main__\":\n import_triangle()\n # import_tet()\n",
"# -*- coding: utf-8 -*-\n#\nfrom __future__ import division\n\nimport numpy\nimport sympy\n\nfrom .ewing import Ewing\nfrom .hammer_stroud import HammerStroud\nfrom .mustard_lyness_blatt import MustardLynessBlatt\nfrom .phillips import Phillips\nfrom .stroud1957 import Stroud1957\nfrom .stroud1966 import Stroud1966\nfrom .stroud1968 import Stroud1968\nfrom .thacher import Thacher\nfrom .tyler import Tyler\n\nfrom ..helpers import fsd, pm\n\n\nclass Stroud(object):\n \"\"\"\n Arthur Stroud,\n Approximate Calculation of Multiple Integrals,\n Prentice Hall, 1971.\n \"\"\"\n\n def __init__(self, n, index, symbolic=False):\n self.name = \"Stroud({})\".format(index)\n self.dim = n\n\n scheme = {\n \"Cn 1-1\": lambda: Centroid(n),\n \"Cn 1-2\": lambda: ProductTrapezoidal(n),\n \"Cn 2-1\": lambda: Stroud1957(n, 2, symbolic),\n \"Cn 2-2\": lambda: Thacher(n, symbolic),\n \"Cn 3-1\": lambda: Stroud1957(n, 3, symbolic),\n \"Cn 3-2\": lambda: Cn32(n, symbolic),\n \"Cn 3-3\": lambda: Tyler(n, symbolic),\n \"Cn 3-4\": lambda: ProductGauss3(n, symbolic),\n \"Cn 3-5\": lambda: Ewing(n, symbolic),\n \"Cn 3-6\": lambda: ProductSimpson(n, symbolic),\n # TODO implement Cn 5-1\n # Cn 5-1 is not implemented because it's based on explicit values\n # only given for n=4,5,6.\n \"Cn 5-2\": lambda: HammerStroud(n, \"2-n\", symbolic),\n \"Cn 5-3\": lambda: Stroud1968(n, symbolic),\n \"Cn 5-4\": lambda: Stroud1966(n, \"a\", symbolic),\n \"Cn 5-5\": lambda: MustardLynessBlatt(n, symbolic),\n \"Cn 5-6\": lambda: Stroud1966(n, \"b\", symbolic),\n \"Cn 5-7\": lambda: Stroud1966(n, \"c\", symbolic),\n \"Cn 5-8\": lambda: Stroud1966(n, \"d\", symbolic),\n \"Cn 5-9\": lambda: ProductGauss5(n, symbolic),\n \"Cn 7-1\": lambda: Phillips(n, symbolic),\n }[index]()\n\n self.degree = scheme.degree\n self.weights = scheme.weights\n self.points = scheme.points\n return\n\n\nclass Centroid(object):\n def __init__(self, n):\n reference_volume = 2 ** n\n self.degree = 1\n self.weights = numpy.array([reference_volume])\n self.points = numpy.full((1, n), 0)\n return\n\n\nclass ProductTrapezoidal(object):\n def __init__(self, n):\n self.degree = 1\n self.weights = numpy.full(2 ** n, 1)\n self.points = pm(n, 1)\n return\n\n\nclass Cn32(object):\n def __init__(self, n, symbolic):\n frac = sympy.Rational if symbolic else lambda x, y: x / y\n sqrt = sympy.sqrt if symbolic else numpy.sqrt\n\n reference_volume = 2 ** n\n\n self.degree = 3\n self.weights = numpy.full(2 * n, frac(reference_volume, 2 * n))\n r = sqrt(frac(n, 3))\n self.points = fsd(n, (r, 1))\n return\n\n\nclass ProductGauss3(object):\n def __init__(self, n, symbolic):\n frac = sympy.Rational if symbolic else lambda x, y: x / y\n sqrt = sympy.sqrt if symbolic else numpy.sqrt\n\n self.degree = 3\n reference_volume = 2 ** n\n self.weights = numpy.full(2 ** n, frac(reference_volume, 2 ** n))\n r = sqrt(3) / 3\n self.points = pm(n, r)\n return\n\n\nclass ProductGauss5(object):\n def __init__(self, n, symbolic):\n frac = sympy.Rational if symbolic else lambda x, y: x / y\n sqrt = sympy.sqrt if symbolic else numpy.sqrt\n\n self.degree = 5\n lst = n * [[frac(5, 9), frac(8, 9), frac(5, 9)]]\n self.weights = numpy.product(\n numpy.array(numpy.meshgrid(*lst)).T.reshape(-1, n), axis=-1\n )\n sqrt35 = sqrt(frac(3, 5))\n lst = n * [[-sqrt35, 0, sqrt35]]\n self.points = numpy.array(numpy.meshgrid(*lst)).T.reshape(-1, n)\n return\n\n\nclass ProductSimpson(object):\n def __init__(self, n, symbolic):\n frac = sympy.Rational if symbolic else lambda x, y: x / y\n\n self.degree = 3\n lst = n * 
[[frac(1, 3), frac(4, 3), frac(1, 3)]]\n self.weights = numpy.product(\n numpy.array(numpy.meshgrid(*lst)).T.reshape(-1, n), axis=-1\n )\n lst = n * [[-1, 0, +1]]\n self.points = numpy.array(numpy.meshgrid(*lst)).T.reshape(-1, n)\n return\n",
"# -*- coding: utf-8 -*-\n#\nimport numpy\nimport sympy\n\n\ndef cartesian_to_spherical(X):\n return numpy.stack([numpy.arctan2(X[:, 1], X[:, 0]), numpy.arccos(X[:, 2])], axis=1)\n\n\ndef _atan2_0(X):\n \"\"\"Like sympy.atan2, but return 0 for x=y=0. Mathematically, the value is\n undefined, so sympy returns NaN, but for the sake of the coordinate\n conversion, its value doesn't matter. NaNs, however, produce NaNs down the\n line.\n \"\"\"\n out = numpy.array([sympy.atan2(X[k, 1], X[k, 0]) for k in range(len(X))])\n out[out == sympy.nan] = 0\n return out\n\n\ndef cartesian_to_spherical_sympy(X):\n vacos = numpy.vectorize(sympy.acos)\n return numpy.stack([_atan2_0(X), vacos(X[:, 2])], axis=1)\n\n\ndef _a1():\n return numpy.array(\n [\n [+1.0, 0.0, 0.0],\n [-1.0, 0.0, 0.0],\n [0.0, +1.0, 0.0],\n [0.0, -1.0, 0.0],\n [0.0, 0.0, +1.0],\n [0.0, 0.0, -1.0],\n ]\n )\n\n\ndef _a2():\n return numpy.array(\n [\n [+1.0, +1.0, 0.0],\n [+1.0, -1.0, 0.0],\n [-1.0, +1.0, 0.0],\n [-1.0, -1.0, 0.0],\n #\n [+1.0, 0.0, +1.0],\n [+1.0, 0.0, -1.0],\n [-1.0, 0.0, +1.0],\n [-1.0, 0.0, -1.0],\n #\n [0.0, +1.0, +1.0],\n [0.0, +1.0, -1.0],\n [0.0, -1.0, +1.0],\n [0.0, -1.0, -1.0],\n ]\n ) / numpy.sqrt(2.0)\n\n\ndef _a3():\n return numpy.array(\n [\n [+1.0, +1.0, +1.0],\n [+1.0, +1.0, -1.0],\n [+1.0, -1.0, +1.0],\n [+1.0, -1.0, -1.0],\n [-1.0, +1.0, +1.0],\n [-1.0, +1.0, -1.0],\n [-1.0, -1.0, +1.0],\n [-1.0, -1.0, -1.0],\n ]\n ) / numpy.sqrt(3.0)\n\n\ndef _pq0(alpha):\n a = numpy.sin(alpha * numpy.pi)\n b = numpy.cos(alpha * numpy.pi)\n zero = numpy.zeros_like(alpha)\n return numpy.array(\n [\n [+a, +b, zero],\n [-a, +b, zero],\n [-a, -b, zero],\n [+a, -b, zero],\n #\n [+b, +a, zero],\n [-b, +a, zero],\n [-b, -a, zero],\n [+b, -a, zero],\n #\n [+a, zero, +b],\n [-a, zero, +b],\n [-a, zero, -b],\n [+a, zero, -b],\n #\n [+b, zero, +a],\n [-b, zero, +a],\n [-b, zero, -a],\n [+b, zero, -a],\n #\n [zero, +a, +b],\n [zero, -a, +b],\n [zero, -a, -b],\n [zero, +a, -b],\n #\n [zero, +b, +a],\n [zero, -b, +a],\n [zero, -b, -a],\n [zero, +b, -a],\n ]\n )\n\n\ndef _llm(beta):\n # translate the point into cartesian coords; note that phi=pi/4.\n beta *= numpy.pi\n L = numpy.sin(beta) / numpy.sqrt(2)\n m = numpy.cos(beta)\n return numpy.array(\n [\n [+L, +L, +m],\n [-L, +L, +m],\n [+L, -L, +m],\n [-L, -L, +m],\n [+L, +L, -m],\n [-L, +L, -m],\n [+L, -L, -m],\n [-L, -L, -m],\n #\n [+L, +m, +L],\n [-L, +m, +L],\n [+L, +m, -L],\n [-L, +m, -L],\n [+L, -m, +L],\n [-L, -m, +L],\n [+L, -m, -L],\n [-L, -m, -L],\n #\n [+m, +L, +L],\n [+m, -L, +L],\n [+m, +L, -L],\n [+m, -L, -L],\n [-m, +L, +L],\n [-m, -L, +L],\n [-m, +L, -L],\n [-m, -L, -L],\n ]\n )\n\n\ndef _rsw(azimuthal, polar):\n # translate the point into cartesian coords; note that phi=pi/4.\n azimuthal *= numpy.pi\n polar *= numpy.pi\n\n sin_polar = numpy.sin(polar)\n cos_polar = numpy.cos(polar)\n sin_azimuthal = numpy.sin(azimuthal)\n cos_azimuthal = numpy.cos(azimuthal)\n\n r = sin_polar * cos_azimuthal\n s = sin_polar * sin_azimuthal\n w = cos_polar\n\n return numpy.array(\n [\n [+r, +s, +w],\n [+w, +r, +s],\n [+s, +w, +r],\n [+s, +r, +w],\n [+w, +s, +r],\n [+r, +w, +s],\n #\n [-r, +s, +w],\n [+w, -r, +s],\n [+s, +w, -r],\n [+s, -r, +w],\n [+w, +s, -r],\n [-r, +w, +s],\n #\n [+r, -s, +w],\n [+w, +r, -s],\n [-s, +w, +r],\n [-s, +r, +w],\n [+w, -s, +r],\n [+r, +w, -s],\n #\n [+r, +s, -w],\n [-w, +r, +s],\n [+s, -w, +r],\n [+s, +r, -w],\n [-w, +s, +r],\n [+r, -w, +s],\n #\n [-r, -s, +w],\n [+w, -r, -s],\n [-s, +w, -r],\n [-s, -r, +w],\n [+w, -s, -r],\n [-r, +w, -s],\n #\n [-r, +s, 
-w],\n [-w, -r, +s],\n [+s, -w, -r],\n [+s, -r, -w],\n [-w, +s, -r],\n [-r, -w, +s],\n #\n [+r, -s, -w],\n [-w, +r, -s],\n [-s, -w, +r],\n [-s, +r, -w],\n [-w, -s, +r],\n [+r, -w, -s],\n #\n [-r, -s, -w],\n [-w, -r, -s],\n [-s, -w, -r],\n [-s, -r, -w],\n [-w, -s, -r],\n [-r, -w, -s],\n ]\n )\n\n\ndef untangle2(data):\n points = []\n weights = []\n if \"a1\" in data:\n assert len(data[\"a1\"]) == 1\n points.append(_a1())\n w = data[\"a1\"][0]\n weights.append(numpy.full(6, w))\n\n if \"a2\" in data:\n assert len(data[\"a2\"]) == 1\n points.append(_a2())\n w = data[\"a2\"][0]\n weights.append(numpy.full(12, w))\n\n if \"a3\" in data:\n assert len(data[\"a3\"]) == 1\n points.append(_a3())\n w = data[\"a3\"][0]\n weights.append(numpy.full(8, w))\n\n if \"llm\" in data:\n beta = numpy.array(data[\"llm\"])[:, 1]\n out = _collapse0(numpy.moveaxis(_llm(beta), 0, 1)).T\n points.append(out)\n w = numpy.array(data[\"llm\"])[:, 0]\n weights.append(numpy.tile(w, 24))\n\n if \"pq0\" in data:\n beta = numpy.array(data[\"pq0\"])[:, 1]\n out = _collapse0(numpy.moveaxis(_pq0(beta), 0, 1)).T\n points.append(out)\n w = numpy.array(data[\"pq0\"])[:, 0]\n weights.append(numpy.tile(w, 24))\n\n if \"rsw\" in data:\n beta = numpy.array(data[\"rsw\"])[:, 1:].T\n out = _collapse0(numpy.moveaxis(_rsw(*beta), 0, 1)).T\n points.append(out)\n w = numpy.array(data[\"rsw\"])[:, 0]\n weights.append(numpy.tile(w, 48))\n\n points = numpy.concatenate(points)\n weights = numpy.concatenate(weights)\n return points, weights\n\n\ndef _collapse0(a):\n \"\"\"Collapse all dimensions of `a` except the first.\n \"\"\"\n return a.reshape(a.shape[0], -1)\n",
"# -*- coding: utf-8 -*-\n#\nimport numpy\n\nfrom .. import helpers\n\n\ndef integrate(f, rule, dot=numpy.dot):\n flt = numpy.vectorize(float)\n return dot(f(flt(rule.points).T), flt(rule.weights))\n\n\ndef show(scheme, backend=\"mpl\"):\n \"\"\"Displays scheme for E_3^{r^2} quadrature.\n \"\"\"\n helpers.backend_to_function[backend](\n scheme.points, scheme.weights, volume=numpy.pi ** 1.5, edges=[]\n )\n return\n"
] | [
[
"numpy.vectorize"
],
[
"numpy.sqrt",
"numpy.moveaxis"
],
[
"numpy.abs",
"numpy.sort",
"numpy.flatnonzero",
"numpy.diff",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.loadtxt"
],
[
"numpy.array",
"numpy.meshgrid",
"numpy.full"
],
[
"numpy.sqrt",
"numpy.cos",
"numpy.arccos",
"numpy.sin",
"numpy.concatenate",
"numpy.arctan2",
"numpy.full",
"numpy.vectorize",
"numpy.zeros_like",
"numpy.tile",
"numpy.array"
],
[
"numpy.vectorize"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Stomach-ache/GLaS | [
"253092cce1922711e7d9c9df601f117f3ec56e0c"
] | [
"MIPS.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport hnswlib\nimport numpy as np\n\ndef buildIndex(X):\n dim = X.shape[1]\n num_elements = X.shape[0]\n data_labels = np.arange(num_elements)\n p = hnswlib.Index(space = 'cosine', dim = dim)\n p.init_index(max_elements = num_elements, ef_construction = 200, M = 16)\n p.add_items(X, data_labels)\n p.set_ef(5)\n return p\n\ndef searchIndex(p, X, k=5):\n labels, distances = p.knn_query(X, k = k)\n return labels\n\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ayman3000/keras-preprocessing | [
"845c423e01acfe251d4276e52cf2b86e73f1646a",
"845c423e01acfe251d4276e52cf2b86e73f1646a"
] | [
"tests/image/utils_test.py",
"keras_preprocessing/image/image_data_generator.py"
] | [
"import io\nimport resource\nfrom pathlib import Path\n\nimport numpy as np\nimport PIL\nimport pytest\n\nfrom keras_preprocessing.image import utils\n\n\ndef test_validate_filename(tmpdir):\n valid_extensions = ('png', 'jpg')\n filename = tmpdir.ensure('test.png')\n assert utils.validate_filename(str(filename), valid_extensions)\n\n filename = tmpdir.ensure('test.PnG')\n assert utils.validate_filename(str(filename), valid_extensions)\n\n filename = tmpdir.ensure('test.some_extension')\n assert not utils.validate_filename(str(filename), valid_extensions)\n assert not utils.validate_filename('some_test_file.png', valid_extensions)\n\n\ndef test_load_img(tmpdir):\n filename_rgb = str(tmpdir / 'rgb_utils.png')\n filename_rgba = str(tmpdir / 'rgba_utils.png')\n filename_grayscale_8bit = str(tmpdir / 'grayscale_8bit_utils.png')\n filename_grayscale_16bit = str(tmpdir / 'grayscale_16bit_utils.tiff')\n filename_grayscale_32bit = str(tmpdir / 'grayscale_32bit_utils.tiff')\n\n original_rgb_array = np.array(255 * np.random.rand(100, 100, 3),\n dtype=np.uint8)\n original_rgb = utils.array_to_img(original_rgb_array, scale=False)\n original_rgb.save(filename_rgb)\n\n original_rgba_array = np.array(255 * np.random.rand(100, 100, 4),\n dtype=np.uint8)\n original_rgba = utils.array_to_img(original_rgba_array, scale=False)\n original_rgba.save(filename_rgba)\n\n original_grayscale_8bit_array = np.array(255 * np.random.rand(100, 100, 1),\n dtype=np.uint8)\n original_grayscale_8bit = utils.array_to_img(original_grayscale_8bit_array,\n scale=False)\n original_grayscale_8bit.save(filename_grayscale_8bit)\n\n original_grayscale_16bit_array = np.array(\n np.random.randint(-2147483648, 2147483647, (100, 100, 1)), dtype=np.int16\n )\n original_grayscale_16bit = utils.array_to_img(original_grayscale_16bit_array,\n scale=False, dtype='int16')\n original_grayscale_16bit.save(filename_grayscale_16bit)\n\n original_grayscale_32bit_array = np.array(\n np.random.randint(-2147483648, 2147483647, (100, 100, 1)), dtype=np.int32\n )\n original_grayscale_32bit = utils.array_to_img(original_grayscale_32bit_array,\n scale=False, dtype='int32')\n original_grayscale_32bit.save(filename_grayscale_32bit)\n\n # Test that loaded image is exactly equal to original.\n\n loaded_im = utils.load_img(filename_rgb)\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_rgb_array.shape\n assert np.all(loaded_im_array == original_rgb_array)\n\n loaded_im = utils.load_img(filename_rgba, color_mode='rgba')\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_rgba_array.shape\n assert np.all(loaded_im_array == original_rgba_array)\n\n loaded_im = utils.load_img(filename_rgb, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (original_rgb_array.shape[0],\n original_rgb_array.shape[1], 1)\n\n loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_grayscale_8bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_8bit_array)\n\n loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')\n assert loaded_im_array.shape == original_grayscale_16bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_16bit_array)\n # test casting int16 image to float32\n loaded_im_array = 
utils.img_to_array(loaded_im)\n assert np.allclose(loaded_im_array, original_grayscale_16bit_array)\n\n loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')\n assert loaded_im_array.shape == original_grayscale_32bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n # test casting int32 image to float32\n loaded_im_array = utils.img_to_array(loaded_im)\n assert np.allclose(loaded_im_array, original_grayscale_32bit_array)\n\n # Test that nothing is changed when target size is equal to original.\n\n loaded_im = utils.load_img(filename_rgb, target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_rgb_array.shape\n assert np.all(loaded_im_array == original_rgb_array)\n\n loaded_im = utils.load_img(filename_rgba, color_mode='rgba',\n target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_rgba_array.shape\n assert np.all(loaded_im_array == original_rgba_array)\n\n loaded_im = utils.load_img(filename_rgb, color_mode='grayscale',\n target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (original_rgba_array.shape[0],\n original_rgba_array.shape[1], 1)\n\n loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',\n target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == original_grayscale_8bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_8bit_array)\n\n loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',\n target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')\n assert loaded_im_array.shape == original_grayscale_16bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_16bit_array)\n\n loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',\n target_size=(100, 100))\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')\n assert loaded_im_array.shape == original_grayscale_32bit_array.shape\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n\n # Test down-sampling with bilinear interpolation.\n\n loaded_im = utils.load_img(filename_rgb, target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 3)\n\n loaded_im = utils.load_img(filename_rgba, color_mode='rgba',\n target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 4)\n\n loaded_im = utils.load_img(filename_rgb, color_mode='grayscale',\n target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 1)\n\n loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',\n target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 1)\n\n loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',\n target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')\n assert loaded_im_array.shape == (25, 25, 1)\n\n loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',\n target_size=(25, 25))\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')\n assert loaded_im_array.shape == (25, 25, 1)\n\n # Test down-sampling with nearest neighbor interpolation.\n\n 
loaded_im_nearest = utils.load_img(filename_rgb, target_size=(25, 25),\n interpolation=\"nearest\")\n loaded_im_array_nearest = utils.img_to_array(loaded_im_nearest)\n assert loaded_im_array_nearest.shape == (25, 25, 3)\n assert np.any(loaded_im_array_nearest != loaded_im_array)\n\n loaded_im_nearest = utils.load_img(filename_rgba, color_mode='rgba',\n target_size=(25, 25),\n interpolation=\"nearest\")\n loaded_im_array_nearest = utils.img_to_array(loaded_im_nearest)\n assert loaded_im_array_nearest.shape == (25, 25, 4)\n assert np.any(loaded_im_array_nearest != loaded_im_array)\n\n loaded_im = utils.load_img(filename_grayscale_8bit, color_mode='grayscale',\n target_size=(25, 25), interpolation=\"nearest\")\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 1)\n\n loaded_im = utils.load_img(filename_grayscale_16bit, color_mode='grayscale',\n target_size=(25, 25), interpolation=\"nearest\")\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int16')\n assert loaded_im_array.shape == (25, 25, 1)\n\n loaded_im = utils.load_img(filename_grayscale_32bit, color_mode='grayscale',\n target_size=(25, 25), interpolation=\"nearest\")\n loaded_im_array = utils.img_to_array(loaded_im, dtype='int32')\n assert loaded_im_array.shape == (25, 25, 1)\n\n # Test different path type\n with open(filename_grayscale_32bit, 'rb') as f:\n _path = io.BytesIO(f.read()) # io.Bytesio\n loaded_im = utils.load_img(_path, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n\n _path = filename_grayscale_32bit # str\n loaded_im = utils.load_img(_path, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n\n _path = filename_grayscale_32bit.encode() # bytes\n loaded_im = utils.load_img(_path, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n\n _path = Path(tmpdir / 'grayscale_32bit_utils.tiff') # Path\n loaded_im = utils.load_img(_path, color_mode='grayscale')\n loaded_im_array = utils.img_to_array(loaded_im, dtype=np.int32)\n assert np.all(loaded_im_array == original_grayscale_32bit_array)\n\n # Check that exception is raised if interpolation not supported.\n\n loaded_im = utils.load_img(filename_rgb, interpolation=\"unsupported\")\n with pytest.raises(ValueError):\n loaded_im = utils.load_img(filename_rgb, target_size=(25, 25),\n interpolation=\"unsupported\")\n\n # Check that the aspect ratio of a square is the same\n\n filename_red_square = str(tmpdir / 'red_square_utils.png')\n A = np.zeros((50, 100, 3), dtype=np.uint8) # rectangle image 100x50\n A[20:30, 45:55, 0] = 255 # red square 10x10\n red_square_array = np.array(A)\n red_square = utils.array_to_img(red_square_array, scale=False)\n red_square.save(filename_red_square)\n\n loaded_im = utils.load_img(filename_red_square, target_size=(25, 25),\n keep_aspect_ratio=True)\n loaded_im_array = utils.img_to_array(loaded_im)\n assert loaded_im_array.shape == (25, 25, 3)\n\n red_channel_arr = loaded_im_array[:, :, 0].astype(np.bool)\n square_width = np.sum(np.sum(red_channel_arr, axis=0))\n square_height = np.sum(np.sum(red_channel_arr, axis=1))\n aspect_ratio_result = square_width / square_height\n\n # original square had 1:1 ratio\n assert aspect_ratio_result == pytest.approx(1.0)\n\n\ndef 
test_list_pictures(tmpdir):\n filenames = ['test.png', 'test0.jpg', 'test-1.jpeg', '2test.bmp',\n '2-test.ppm', '3.png', '1.jpeg', 'test.bmp', 'test0.ppm',\n 'test4.tiff', '5-test.tif', 'test.txt', 'foo.csv',\n 'face.gif', 'bar.txt']\n subdirs = ['', 'subdir1', 'subdir2']\n filenames = [tmpdir.ensure(subdir, f) for subdir in subdirs\n for f in filenames]\n\n found_images = utils.list_pictures(str(tmpdir))\n assert len(found_images) == 33\n\n found_images = utils.list_pictures(str(tmpdir), ext='png')\n assert len(found_images) == 6\n\n\ndef test_array_to_img_and_img_to_array():\n height, width = 10, 8\n\n # Test the data format\n # Test RGB 3D\n x = np.random.random((3, height, width))\n img = utils.array_to_img(x, data_format='channels_first')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_first')\n assert x.shape == (3, height, width)\n\n # Test RGBA 3D\n x = np.random.random((4, height, width))\n img = utils.array_to_img(x, data_format='channels_first')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_first')\n assert x.shape == (4, height, width)\n\n # Test 2D\n x = np.random.random((1, height, width))\n img = utils.array_to_img(x, data_format='channels_first')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_first')\n assert x.shape == (1, height, width)\n\n # grayscale 32-bit signed integer\n x = np.array(\n np.random.randint(-2147483648, 2147483647, (1, height, width)),\n dtype=np.int32\n )\n img = utils.array_to_img(x, data_format='channels_first')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_first')\n assert x.shape == (1, height, width)\n\n # Test tf data format\n # Test RGB 3D\n x = np.random.random((height, width, 3))\n img = utils.array_to_img(x, data_format='channels_last')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_last')\n assert x.shape == (height, width, 3)\n\n # Test RGBA 3D\n x = np.random.random((height, width, 4))\n img = utils.array_to_img(x, data_format='channels_last')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_last')\n assert x.shape == (height, width, 4)\n\n # Test 2D\n x = np.random.random((height, width, 1))\n img = utils.array_to_img(x, data_format='channels_last')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_last')\n assert x.shape == (height, width, 1)\n\n # grayscale 16-bit signed integer\n x = np.array(\n np.random.randint(-2147483648, 2147483647, (height, width, 1)),\n dtype=np.int16\n )\n img = utils.array_to_img(x, data_format='channels_last')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_last')\n assert x.shape == (height, width, 1)\n\n # grayscale 32-bit signed integer\n x = np.array(\n np.random.randint(-2147483648, 2147483647, (height, width, 1)),\n dtype=np.int32\n )\n img = utils.array_to_img(x, data_format='channels_last')\n assert img.size == (width, height)\n\n x = utils.img_to_array(img, data_format='channels_last')\n assert x.shape == (height, width, 1)\n\n # Test invalid use case\n with pytest.raises(ValueError):\n x = np.random.random((height, width)) # not 3D\n img = utils.array_to_img(x, data_format='channels_first')\n\n with pytest.raises(ValueError):\n x = np.random.random((height, width, 3))\n # unknown data_format\n img = utils.array_to_img(x, data_format='channels')\n\n with 
pytest.raises(ValueError):\n # neither RGB, RGBA, or gray-scale\n x = np.random.random((height, width, 5))\n img = utils.array_to_img(x, data_format='channels_last')\n\n with pytest.raises(ValueError):\n x = np.random.random((height, width, 3))\n # unknown data_format\n img = utils.img_to_array(x, data_format='channels')\n\n with pytest.raises(ValueError):\n # neither RGB, RGBA, or gray-scale\n x = np.random.random((height, width, 5, 3))\n img = utils.img_to_array(x, data_format='channels_last')\n\n\ndef write_sample_image(tmpdir):\n im = utils.array_to_img(np.random.rand(1, 1, 3))\n path = str(tmpdir / 'sample_image.png')\n utils.save_img(path, im)\n return path\n\n\ndef test_image_file_handlers_close(tmpdir):\n path = write_sample_image(tmpdir)\n max_open_files, _ = resource.getrlimit(resource.RLIMIT_NOFILE)\n for i in range(max_open_files+1):\n utils.load_img(path)\n\n\ndef test_load_img_returns_image(tmpdir):\n path = write_sample_image(tmpdir)\n im = utils.load_img(path)\n assert isinstance(im, PIL.Image.Image)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n",
"\"\"\"Utilities for real-time data augmentation on image data.\n\"\"\"\nimport warnings\n\nimport numpy as np\n\nfrom .affine_transformations import (apply_affine_transform,\n apply_brightness_shift,\n apply_channel_shift, flip_axis)\nfrom .dataframe_iterator import DataFrameIterator\nfrom .directory_iterator import DirectoryIterator\nfrom .numpy_array_iterator import NumpyArrayIterator\n\n\nclass ImageDataGenerator(object):\n \"\"\"Generate batches of tensor image data with real-time data augmentation.\n The data will be looped over (in batches).\n\n # Arguments\n featurewise_center: Boolean.\n Set input mean to 0 over the dataset, feature-wise.\n samplewise_center: Boolean. Set each sample mean to 0.\n featurewise_std_normalization: Boolean.\n Divide inputs by std of the dataset, feature-wise.\n samplewise_std_normalization: Boolean. Divide each input by its std.\n zca_whitening: Boolean. Apply ZCA whitening.\n zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.\n rotation_range: Int. Degree range for random rotations.\n width_shift_range: Float, 1-D array-like or int\n - float: fraction of total width, if < 1, or pixels if >= 1.\n - 1-D array-like: random elements from the array.\n - int: integer number of pixels from interval\n `(-width_shift_range, +width_shift_range)`\n - With `width_shift_range=2` possible values\n are integers `[-1, 0, +1]`,\n same as with `width_shift_range=[-1, 0, +1]`,\n while with `width_shift_range=1.0` possible values are floats\n in the interval `[-1.0, +1.0)`.\n height_shift_range: Float, 1-D array-like or int\n - float: fraction of total height, if < 1, or pixels if >= 1.\n - 1-D array-like: random elements from the array.\n - int: integer number of pixels from interval\n `(-height_shift_range, +height_shift_range)`\n - With `height_shift_range=2` possible values\n are integers `[-1, 0, +1]`,\n same as with `height_shift_range=[-1, 0, +1]`,\n while with `height_shift_range=1.0` possible values are floats\n in the interval `[-1.0, +1.0)`.\n brightness_range: Tuple or list of two floats. Range for picking\n a brightness shift value from.\n shear_range: Float. Shear Intensity\n (Shear angle in counter-clockwise direction in degrees)\n zoom_range: Float or [lower, upper]. Range for random zoom.\n If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.\n channel_shift_range: Float. Range for random channel shifts.\n fill_mode: One of {\"constant\", \"nearest\", \"reflect\" or \"wrap\"}.\n Default is 'nearest'.\n Points outside the boundaries of the input are filled\n according to the given mode:\n - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)\n - 'nearest': aaaaaaaa|abcd|dddddddd\n - 'reflect': abcddcba|abcd|dcbaabcd\n - 'wrap': abcdabcd|abcd|abcdabcd\n cval: Float or Int.\n Value used for points outside the boundaries\n when `fill_mode = \"constant\"`.\n horizontal_flip: Boolean. Randomly flip inputs horizontally.\n vertical_flip: Boolean. Randomly flip inputs vertically.\n rescale: rescaling factor. 
Defaults to None.\n If None or 0, no rescaling is applied,\n otherwise we multiply the data by the value provided\n (after applying all other transformations).\n preprocessing_function: function that will be applied on each input.\n The function will run after the image is resized and augmented.\n The function should take one argument:\n one image (NumPy tensor with rank 3),\n and should output a NumPy tensor with the same shape.\n data_format: Image data format,\n either \"channels_first\" or \"channels_last\".\n \"channels_last\" mode means that the images should have shape\n `(samples, height, width, channels)`,\n \"channels_first\" mode means that the images should have shape\n `(samples, channels, height, width)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n validation_split: Float. Fraction of images reserved for validation\n (strictly between 0 and 1).\n interpolation_order: int, order to use for\n the spline interpolation. Higher is slower.\n dtype: Dtype to use for the generated arrays.\n\n # Examples\n Example of using `.flow(x, y)`:\n\n ```python\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n y_train = np_utils.to_categorical(y_train, num_classes)\n y_test = np_utils.to_categorical(y_test, num_classes)\n\n datagen = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\n # compute quantities required for featurewise normalization\n # (std, mean, and principal components if ZCA whitening is applied)\n datagen.fit(x_train)\n\n # fits the model on batches with real-time data augmentation:\n model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),\n steps_per_epoch=len(x_train) / 32, epochs=epochs)\n\n # here's a more \"manual\" example\n for e in range(epochs):\n print('Epoch', e)\n batches = 0\n for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):\n model.fit(x_batch, y_batch)\n batches += 1\n if batches >= len(x_train) / 32:\n # we need to break the loop by hand because\n # the generator loops indefinitely\n break\n ```\n Example of using `.flow_from_directory(directory)`:\n\n ```python\n train_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n train_generator = train_datagen.flow_from_directory(\n 'data/train',\n target_size=(150, 150),\n batch_size=32,\n class_mode='binary')\n\n validation_generator = test_datagen.flow_from_directory(\n 'data/validation',\n target_size=(150, 150),\n batch_size=32,\n class_mode='binary')\n\n model.fit_generator(\n train_generator,\n steps_per_epoch=2000,\n epochs=50,\n validation_data=validation_generator,\n validation_steps=800)\n ```\n\n Example of transforming images and masks together.\n\n ```python\n # we create two instances with the same arguments\n data_gen_args = dict(featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=90,\n width_shift_range=0.1,\n height_shift_range=0.1,\n zoom_range=0.2)\n image_datagen = ImageDataGenerator(**data_gen_args)\n mask_datagen = ImageDataGenerator(**data_gen_args)\n\n # Provide the same seed and keyword arguments to the fit and flow methods\n seed = 1\n image_datagen.fit(images, augment=True, seed=seed)\n mask_datagen.fit(masks, augment=True, seed=seed)\n\n image_generator = 
image_datagen.flow_from_directory(\n 'data/images',\n class_mode=None,\n seed=seed)\n\n mask_generator = mask_datagen.flow_from_directory(\n 'data/masks',\n class_mode=None,\n seed=seed)\n\n # combine generators into one which yields image and masks\n train_generator = zip(image_generator, mask_generator)\n\n model.fit_generator(\n train_generator,\n steps_per_epoch=2000,\n epochs=50)\n ```\n\n Example of using ```.flow_from_dataframe(dataframe, directory,\n x_col, y_col)```:\n\n ```python\n\n train_df = pandas.read_csv(\"./train.csv\")\n valid_df = pandas.read_csv(\"./valid.csv\")\n\n train_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n train_generator = train_datagen.flow_from_dataframe(\n dataframe=train_df,\n directory='data/train',\n x_col=\"filename\",\n y_col=\"class\",\n target_size=(150, 150),\n batch_size=32,\n class_mode='binary')\n\n validation_generator = test_datagen.flow_from_dataframe(\n dataframe=valid_df,\n directory='data/validation',\n x_col=\"filename\",\n y_col=\"class\",\n target_size=(150, 150),\n batch_size=32,\n class_mode='binary')\n\n model.fit_generator(\n train_generator,\n steps_per_epoch=2000,\n epochs=50,\n validation_data=validation_generator,\n validation_steps=800)\n ```\n \"\"\"\n\n def __init__(self,\n featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n zca_epsilon=1e-6,\n rotation_range=0,\n width_shift_range=0.,\n height_shift_range=0.,\n brightness_range=None,\n shear_range=0.,\n zoom_range=0.,\n channel_shift_range=0.,\n fill_mode='nearest',\n cval=0.,\n horizontal_flip=False,\n vertical_flip=False,\n rescale=None,\n preprocessing_function=None,\n data_format='channels_last',\n validation_split=0.0,\n interpolation_order=1,\n dtype='float32'):\n\n self.featurewise_center = featurewise_center\n self.samplewise_center = samplewise_center\n self.featurewise_std_normalization = featurewise_std_normalization\n self.samplewise_std_normalization = samplewise_std_normalization\n self.zca_whitening = zca_whitening\n self.zca_epsilon = zca_epsilon\n self.rotation_range = rotation_range\n self.width_shift_range = width_shift_range\n self.height_shift_range = height_shift_range\n self.shear_range = shear_range\n self.zoom_range = zoom_range\n self.channel_shift_range = channel_shift_range\n self.fill_mode = fill_mode\n self.cval = cval\n self.horizontal_flip = horizontal_flip\n self.vertical_flip = vertical_flip\n self.rescale = rescale\n self.preprocessing_function = preprocessing_function\n self.dtype = dtype\n self.interpolation_order = interpolation_order\n\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError(\n '`data_format` should be `\"channels_last\"` '\n '(channel after row and column) or '\n '`\"channels_first\"` (channel before row and column). '\n 'Received: %s' % data_format)\n self.data_format = data_format\n if data_format == 'channels_first':\n self.channel_axis = 1\n self.row_axis = 2\n self.col_axis = 3\n if data_format == 'channels_last':\n self.channel_axis = 3\n self.row_axis = 1\n self.col_axis = 2\n if validation_split and not 0 < validation_split < 1:\n raise ValueError(\n '`validation_split` must be strictly between 0 and 1. 
'\n ' Received: %s' % validation_split)\n self._validation_split = validation_split\n\n self.mean = None\n self.std = None\n self.zca_whitening_matrix = None\n\n if isinstance(zoom_range, (float, int)):\n self.zoom_range = [1 - zoom_range, 1 + zoom_range]\n elif (len(zoom_range) == 2 and\n all(isinstance(val, (float, int)) for val in zoom_range)):\n self.zoom_range = [zoom_range[0], zoom_range[1]]\n else:\n raise ValueError('`zoom_range` should be a float or '\n 'a tuple or list of two floats. '\n 'Received: %s' % (zoom_range,))\n if zca_whitening:\n if not featurewise_center:\n self.featurewise_center = True\n warnings.warn('This ImageDataGenerator specifies '\n '`zca_whitening`, which overrides '\n 'setting of `featurewise_center`.')\n if featurewise_std_normalization:\n self.featurewise_std_normalization = False\n warnings.warn('This ImageDataGenerator specifies '\n '`zca_whitening` '\n 'which overrides setting of'\n '`featurewise_std_normalization`.')\n if featurewise_std_normalization:\n if not featurewise_center:\n self.featurewise_center = True\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_std_normalization`, '\n 'which overrides setting of '\n '`featurewise_center`.')\n if samplewise_std_normalization:\n if not samplewise_center:\n self.samplewise_center = True\n warnings.warn('This ImageDataGenerator specifies '\n '`samplewise_std_normalization`, '\n 'which overrides setting of '\n '`samplewise_center`.')\n if brightness_range is not None:\n if (not isinstance(brightness_range, (tuple, list)) or\n len(brightness_range) != 2):\n raise ValueError(\n '`brightness_range should be tuple or list of two floats. '\n 'Received: %s' % (brightness_range,))\n self.brightness_range = brightness_range\n\n def flow(self,\n x,\n y=None,\n bbox=None,\n batch_size=32,\n shuffle=True,\n sample_weight=None,\n seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n ignore_class_split=False,\n subset=None):\n \"\"\"Takes data & label arrays, generates batches of augmented data.\n\n # Arguments\n x: Input data. NumPy array of rank 4 or a tuple.\n If tuple, the first element\n should contain the images and the second element\n another NumPy array or a list of NumPy arrays\n that gets passed to the output\n without any modifications.\n Can be used to feed the model miscellaneous data\n along with the images.\n In case of grayscale data, the channels axis of the image array\n should have value 1, in case\n of RGB data, it should have value 3, and in case\n of RGBA data, it should have value 4.\n y: Labels.\n batch_size: Int (default: 32).\n shuffle: Boolean (default: True).\n sample_weight: Sample weights.\n seed: Int (default: None).\n save_to_dir: None or str (default: None).\n This allows you to optionally specify a directory\n to which to save the augmented pictures being generated\n (useful for visualizing what you are doing).\n save_prefix: Str (default: `''`).\n Prefix to use for filenames of saved pictures\n (only relevant if `save_to_dir` is set).\n save_format: one of \"png\", \"jpeg\"\n (only relevant if `save_to_dir` is set). 
Default: \"png\".\n ignore_class_split: Boolean (default: False), ignore difference\n in number of classes in labels across train and validation\n split (useful for non-classification tasks)\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n `validation_split` is set in `ImageDataGenerator`.\n\n # Returns\n An `Iterator` yielding tuples of `(x, y)`\n where `x` is a NumPy array of image data\n (in the case of a single image input) or a list\n of NumPy arrays (in the case with\n additional inputs) and `y` is a NumPy array\n of corresponding labels. If 'sample_weight' is not None,\n the yielded tuples are of the form `(x, y, sample_weight)`.\n If `y` is None, only the NumPy array `x` is returned.\n \"\"\"\n return NumpyArrayIterator(\n x,\n y,\n bbox,\n self,\n batch_size=batch_size,\n shuffle=shuffle,\n sample_weight=sample_weight,\n seed=seed,\n data_format=self.data_format,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n ignore_class_split=ignore_class_split,\n subset=subset,\n dtype=self.dtype\n )\n\n def flow_from_directory(self,\n directory,\n target_size=(256, 256),\n color_mode='rgb',\n classes=None,\n class_mode='categorical',\n batch_size=32,\n shuffle=True,\n seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n follow_links=False,\n subset=None,\n interpolation='nearest',\n keep_aspect_ratio=False):\n \"\"\"Takes the path to a directory & generates batches of augmented data.\n\n # Arguments\n directory: string, path to the target directory.\n It should contain one subdirectory per class.\n Any PNG, JPG, BMP, PPM or TIF images\n inside each of the subdirectories directory tree\n will be included in the generator.\n See [this script](\n https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)\n for more details.\n target_size: Tuple of integers `(height, width)`,\n default: `(256, 256)`.\n The dimensions to which all images found will be resized.\n color_mode: One of \"grayscale\", \"rgb\", \"rgba\". Default: \"rgb\".\n Whether the images will be converted to\n have 1, 3, or 4 channels.\n classes: Optional list of class subdirectories\n (e.g. `['dogs', 'cats']`). Default: None.\n If not provided, the list of classes will be automatically\n inferred from the subdirectory names/structure\n under `directory`, where each subdirectory will\n be treated as a different class\n (and the order of the classes, which will map to the label\n indices, will be alphanumeric).\n The dictionary containing the mapping from class names to class\n indices can be obtained via the attribute `class_indices`.\n class_mode: One of \"categorical\", \"binary\", \"sparse\",\n \"input\", or None. 
Default: \"categorical\".\n Determines the type of label arrays that are returned:\n - \"categorical\" will be 2D one-hot encoded labels,\n - \"binary\" will be 1D binary labels,\n \"sparse\" will be 1D integer labels,\n - \"input\" will be images identical\n to input images (mainly used to work with autoencoders).\n - If None, no labels are returned\n (the generator will only yield batches of image data,\n which is useful to use with `model.predict_generator()`).\n Please note that in case of class_mode None,\n the data still needs to reside in a subdirectory\n of `directory` for it to work correctly.\n batch_size: Size of the batches of data (default: 32).\n shuffle: Whether to shuffle the data (default: True)\n If set to False, sorts the data in alphanumeric order.\n seed: Optional random seed for shuffling and transformations.\n save_to_dir: None or str (default: None).\n This allows you to optionally specify\n a directory to which to save\n the augmented pictures being generated\n (useful for visualizing what you are doing).\n save_prefix: Str. Prefix to use for filenames of saved pictures\n (only relevant if `save_to_dir` is set).\n save_format: One of \"png\", \"jpeg\"\n (only relevant if `save_to_dir` is set). Default: \"png\".\n follow_links: Whether to follow symlinks inside\n class subdirectories (default: False).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n `validation_split` is set in `ImageDataGenerator`.\n interpolation: Interpolation method used to\n resample the image if the\n target size is different from that of the loaded image.\n Supported methods are `\"nearest\"`, `\"bilinear\"`,\n and `\"bicubic\"`.\n If PIL version 1.1.3 or newer is installed, `\"lanczos\"` is also\n supported. If PIL version 3.4.0 or newer is installed,\n `\"box\"` and `\"hamming\"` are also supported.\n By default, `\"nearest\"` is used.\n keep_aspect_ratio: Boolean, whether to resize images to a target\n size without aspect ratio distortion. 
The image is cropped in\n the center with target aspect ratio before resizing.\n\n # Returns\n A `DirectoryIterator` yielding tuples of `(x, y)`\n where `x` is a NumPy array containing a batch\n of images with shape `(batch_size, *target_size, channels)`\n and `y` is a NumPy array of corresponding labels.\n \"\"\"\n return DirectoryIterator(\n directory,\n self,\n target_size=target_size,\n keep_aspect_ratio=keep_aspect_ratio,\n color_mode=color_mode,\n classes=classes,\n class_mode=class_mode,\n data_format=self.data_format,\n batch_size=batch_size,\n shuffle=shuffle,\n seed=seed,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n follow_links=follow_links,\n subset=subset,\n interpolation=interpolation,\n dtype=self.dtype\n )\n\n def flow_from_dataframe(self,\n dataframe,\n directory=None,\n x_col=\"filename\",\n y_col=\"class\",\n bbox_col=\"bbox\",\n weight_col=None,\n target_size=(256, 256),\n color_mode='rgb',\n classes=None,\n class_mode='categorical',\n batch_size=32,\n shuffle=True,\n seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n subset=None,\n interpolation='nearest',\n validate_filenames=True,\n **kwargs):\n \"\"\"Takes the dataframe and the path to a directory\n and generates batches of augmented/normalized data.\n\n **A simple tutorial can be found **[here](\n http://bit.ly/keras_flow_from_dataframe).\n\n # Arguments\n dataframe: Pandas dataframe containing the filepaths relative to\n `directory` (or absolute paths if `directory` is None) of the\n images in a string column. It should include other column/s\n depending on the `class_mode`:\n - if `class_mode` is `\"categorical\"` (default value) it must\n include the `y_col` column with the class/es of each image.\n Values in column can be string/list/tuple if a single class\n or list/tuple if multiple classes.\n - if `class_mode` is `\"binary\"` or `\"sparse\"` it must include\n the given `y_col` column with class values as strings.\n - if `class_mode` is `\"raw\"` or `\"multi_output\"` it should contain\n the columns specified in `y_col`.\n - if `class_mode` is `\"input\"` or `None` no extra column is needed.\n directory: string, path to the directory to read images from. If `None`,\n data in `x_col` column should be absolute paths.\n x_col: string, column in `dataframe` that contains the filenames (or\n absolute paths if `directory` is `None`).\n y_col: string or list, column/s in `dataframe` that has the target data.\n weight_col: string, column in `dataframe` that contains the sample\n weights. Default: `None`.\n target_size: tuple of integers `(height, width)`, default: `(256, 256)`.\n The dimensions to which all images found will be resized.\n color_mode: one of \"grayscale\", \"rgb\", \"rgba\". Default: \"rgb\".\n Whether the images will be converted to have 1 or 3 color channels.\n classes: optional list of classes (e.g. `['dogs', 'cats']`).\n Default: None. If not provided, the list of classes will be\n automatically inferred from the `y_col`,\n which will map to the label indices, will be alphanumeric).\n The dictionary containing the mapping from class names to class\n indices can be obtained via the attribute `class_indices`.\n class_mode: one of \"binary\", \"categorical\", \"input\", \"multi_output\",\n \"raw\", sparse\" or None. 
Default: \"categorical\".\n Mode for yielding the targets:\n - `\"binary\"`: 1D NumPy array of binary labels,\n - `\"categorical\"`: 2D NumPy array of one-hot encoded labels.\n Supports multi-label output.\n - `\"input\"`: images identical to input images (mainly used to\n work with autoencoders),\n - `\"multi_output\"`: list with the values of the different columns,\n - `\"raw\"`: NumPy array of values in `y_col` column(s),\n - `\"sparse\"`: 1D NumPy array of integer labels,\n - `None`, no targets are returned (the generator will only yield\n batches of image data, which is useful to use in\n `model.predict_generator()`).\n batch_size: size of the batches of data (default: 32).\n shuffle: whether to shuffle the data (default: True)\n seed: optional random seed for shuffling and transformations.\n save_to_dir: None or str (default: None).\n This allows you to optionally specify a directory\n to which to save the augmented pictures being generated\n (useful for visualizing what you are doing).\n save_prefix: str. Prefix to use for filenames of saved pictures\n (only relevant if `save_to_dir` is set).\n save_format: one of \"png\", \"jpeg\"\n (only relevant if `save_to_dir` is set). Default: \"png\".\n follow_links: whether to follow symlinks inside class subdirectories\n (default: False).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n `validation_split` is set in `ImageDataGenerator`.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image.\n Supported methods are `\"nearest\"`, `\"bilinear\"`, and `\"bicubic\"`.\n If PIL version 1.1.3 or newer is installed, `\"lanczos\"` is also\n supported. If PIL version 3.4.0 or newer is installed, `\"box\"` and\n `\"hamming\"` are also supported. By default, `\"nearest\"` is used.\n validate_filenames: Boolean, whether to validate image filenames in\n `x_col`. If `True`, invalid images will be ignored. 
Disabling this\n option can lead to speed-up in the execution of this function.\n Default: `True`.\n\n # Returns\n A `DataFrameIterator` yielding tuples of `(x, y)`\n where `x` is a NumPy array containing a batch\n of images with shape `(batch_size, *target_size, channels)`\n and `y` is a NumPy array of corresponding labels.\n \"\"\"\n if 'has_ext' in kwargs:\n warnings.warn('has_ext is deprecated, filenames in the dataframe have '\n 'to match the exact filenames on disk.',\n DeprecationWarning)\n if 'sort' in kwargs:\n warnings.warn('sort is deprecated, batches will be created in the '\n 'same order as the filenames provided if shuffle '\n 'is set to False.', DeprecationWarning)\n if class_mode == 'other':\n warnings.warn('`class_mode` \"other\" is deprecated, please use '\n '`class_mode` \"raw\".', DeprecationWarning)\n class_mode = 'raw'\n if 'drop_duplicates' in kwargs:\n warnings.warn('drop_duplicates is deprecated, you can drop duplicates '\n 'by using the pandas.DataFrame.drop_duplicates method.',\n DeprecationWarning)\n\n return DataFrameIterator(\n dataframe,\n directory,\n self,\n x_col=x_col,\n y_col=y_col,\n bbox_col=bbox_col,\n weight_col=weight_col,\n target_size=target_size,\n color_mode=color_mode,\n classes=classes,\n class_mode=class_mode,\n data_format=self.data_format,\n batch_size=batch_size,\n shuffle=shuffle,\n seed=seed,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n subset=subset,\n interpolation=interpolation,\n validate_filenames=validate_filenames,\n dtype=self.dtype\n )\n\n def standardize(self, x):\n \"\"\"Applies the normalization configuration in-place to a batch of inputs.\n\n `x` is changed in-place since the function is mainly used internally\n to standardize images and feed them to your network. If a copy of `x`\n would be created instead it would have a significant performance cost.\n If you want to apply this method without changing the input in-place\n you can call the method creating a copy before:\n\n standardize(np.copy(x))\n\n # Arguments\n x: Batch of inputs to be normalized.\n\n # Returns\n The inputs, normalized.\n \"\"\"\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.rescale:\n x *= self.rescale\n if self.samplewise_center:\n x -= np.mean(x, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, keepdims=True) + 1e-6)\n\n if self.featurewise_center:\n if self.mean is not None:\n x -= self.mean\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_center`, but it hasn\'t '\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.featurewise_std_normalization:\n if self.std is not None:\n x /= (self.std + 1e-6)\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`featurewise_std_normalization`, '\n 'but it hasn\'t '\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.zca_whitening:\n if self.zca_whitening_matrix is not None:\n flat_x = x.reshape(-1, np.prod(x.shape[-3:]))\n white_x = flat_x @ self.zca_whitening_matrix\n x = np.reshape(white_x, x.shape)\n else:\n warnings.warn('This ImageDataGenerator specifies '\n '`zca_whitening`, but it hasn\'t '\n 'been fit on any training data. 
Fit it '\n 'first by calling `.fit(numpy_data)`.')\n return x\n\n def get_random_transform(self, img_shape, seed=None):\n \"\"\"Generates random parameters for a transformation.\n\n # Arguments\n seed: Random seed.\n img_shape: Tuple of integers.\n Shape of the image that is transformed.\n\n # Returns\n A dictionary containing randomly chosen parameters describing the\n transformation.\n \"\"\"\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n if self.rotation_range:\n theta = np.random.uniform(\n -self.rotation_range,\n self.rotation_range)\n else:\n theta = 0\n\n if self.height_shift_range:\n try: # 1-D array-like or int\n tx = np.random.choice(self.height_shift_range)\n tx *= np.random.choice([-1, 1])\n except ValueError: # floating point\n tx = np.random.uniform(-self.height_shift_range,\n self.height_shift_range)\n if np.max(self.height_shift_range) < 1:\n tx *= img_shape[img_row_axis]\n else:\n tx = 0\n\n if self.width_shift_range:\n try: # 1-D array-like or int\n ty = np.random.choice(self.width_shift_range)\n ty *= np.random.choice([-1, 1])\n except ValueError: # floating point\n ty = np.random.uniform(-self.width_shift_range,\n self.width_shift_range)\n if np.max(self.width_shift_range) < 1:\n ty *= img_shape[img_col_axis]\n else:\n ty = 0\n\n if self.shear_range:\n shear = np.random.uniform(\n -self.shear_range,\n self.shear_range)\n else:\n shear = 0\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(\n self.zoom_range[0],\n self.zoom_range[1],\n 2)\n\n flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip\n flip_vertical = (np.random.random() < 0.5) * self.vertical_flip\n\n channel_shift_intensity = None\n if self.channel_shift_range != 0:\n channel_shift_intensity = np.random.uniform(-self.channel_shift_range,\n self.channel_shift_range)\n\n brightness = None\n if self.brightness_range is not None:\n brightness = np.random.uniform(self.brightness_range[0],\n self.brightness_range[1])\n\n transform_parameters = {'theta': theta,\n 'tx': tx,\n 'ty': ty,\n 'shear': shear,\n 'zx': zx,\n 'zy': zy,\n 'flip_horizontal': flip_horizontal,\n 'flip_vertical': flip_vertical,\n 'channel_shift_intensity': channel_shift_intensity,\n 'brightness': brightness}\n\n return transform_parameters\n\n def apply_transform(self, x, transform_parameters):\n \"\"\"Applies a transformation to an image according to given parameters.\n\n # Arguments\n x: 3D tensor, single image.\n transform_parameters: Dictionary with string - parameter pairs\n describing the transformation.\n Currently, the following parameters\n from the dictionary are used:\n - `'theta'`: Float. Rotation angle in degrees.\n - `'tx'`: Float. Shift in the x direction.\n - `'ty'`: Float. Shift in the y direction.\n - `'shear'`: Float. Shear angle in degrees.\n - `'zx'`: Float. Zoom in the x direction.\n - `'zy'`: Float. Zoom in the y direction.\n - `'flip_horizontal'`: Boolean. Horizontal flip.\n - `'flip_vertical'`: Boolean. Vertical flip.\n - `'channel_shift_intensity'`: Float. Channel shift intensity.\n - `'brightness'`: Float. 
Brightness shift intensity.\n\n # Returns\n A transformed version of the input (same shape).\n \"\"\"\n # x is a single image, so it doesn't have image number at index 0\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n img_channel_axis = self.channel_axis - 1\n\n x = apply_affine_transform(x, transform_parameters.get('theta', 0),\n transform_parameters.get('tx', 0),\n transform_parameters.get('ty', 0),\n transform_parameters.get('shear', 0),\n transform_parameters.get('zx', 1),\n transform_parameters.get('zy', 1),\n row_axis=img_row_axis,\n col_axis=img_col_axis,\n channel_axis=img_channel_axis,\n fill_mode=self.fill_mode,\n cval=self.cval,\n order=self.interpolation_order)\n\n if transform_parameters.get('channel_shift_intensity') is not None:\n x = apply_channel_shift(x,\n transform_parameters['channel_shift_intensity'],\n img_channel_axis)\n\n if transform_parameters.get('flip_horizontal', False):\n x = flip_axis(x, img_col_axis)\n\n if transform_parameters.get('flip_vertical', False):\n x = flip_axis(x, img_row_axis)\n\n if transform_parameters.get('brightness') is not None:\n x = apply_brightness_shift(x, transform_parameters['brightness'], False)\n\n return x\n\n def random_transform(self, x, seed=None):\n \"\"\"Applies a random transformation to an image.\n\n # Arguments\n x: 3D tensor, single image.\n seed: Random seed.\n\n # Returns\n A randomly transformed version of the input (same shape).\n \"\"\"\n params = self.get_random_transform(x.shape, seed)\n return self.apply_transform(x, params)\n\n def fit(self, x,\n augment=False,\n rounds=1,\n seed=None):\n \"\"\"Fits the data generator to some sample data.\n\n This computes the internal data stats related to the\n data-dependent transformations, based on an array of sample data.\n\n Only required if `featurewise_center` or\n `featurewise_std_normalization` or `zca_whitening` are set to True.\n\n When `rescale` is set to a value, rescaling is applied to\n sample data before computing the internal data stats.\n\n # Arguments\n x: Sample data. Should have rank 4.\n In case of grayscale data,\n the channels axis should have value 1, in case\n of RGB data, it should have value 3, and in case\n of RGBA data, it should have value 4.\n augment: Boolean (default: False).\n Whether to fit on randomly augmented samples.\n rounds: Int (default: 1).\n If using data augmentation (`augment=True`),\n this is how many augmentation passes over the data to use.\n seed: Int (default: None). Random seed.\n \"\"\"\n x = np.asarray(x, dtype=self.dtype)\n if x.ndim != 4:\n raise ValueError('Input to `.fit()` should have rank 4. '\n 'Got array with shape: ' + str(x.shape))\n if x.shape[self.channel_axis] not in {1, 3, 4}:\n warnings.warn(\n 'Expected input to be images (as Numpy array) '\n 'following the data format convention \"' +\n self.data_format + '\" (channels on axis ' +\n str(self.channel_axis) + '), i.e. expected '\n 'either 1, 3 or 4 channels on axis ' +\n str(self.channel_axis) + '. 
'\n 'However, it was passed an array with shape ' +\n str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +\n ' channels).')\n\n if seed is not None:\n np.random.seed(seed)\n\n x = np.copy(x)\n if self.rescale:\n x *= self.rescale\n\n if augment:\n ax = np.zeros(\n tuple([rounds * x.shape[0]] + list(x.shape)[1:]),\n dtype=self.dtype)\n for r in range(rounds):\n for i in range(x.shape[0]):\n ax[i + r * x.shape[0]] = self.random_transform(x[i])\n x = ax\n\n if self.featurewise_center:\n self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.mean = np.reshape(self.mean, broadcast_shape)\n x -= self.mean\n\n if self.featurewise_std_normalization:\n self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))\n broadcast_shape = [1, 1, 1]\n broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]\n self.std = np.reshape(self.std, broadcast_shape)\n x /= (self.std + 1e-6)\n\n if self.zca_whitening:\n n = len(x)\n flat_x = np.reshape(x, (n, -1))\n\n u, s, _ = np.linalg.svd(flat_x.T, full_matrices=False)\n s_inv = np.sqrt(n) / (s + self.zca_epsilon)\n self.zca_whitening_matrix = (u * s_inv).dot(u.T)\n"
] | [
[
"numpy.random.random",
"numpy.allclose",
"numpy.all",
"numpy.any",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
],
[
"numpy.linalg.svd",
"numpy.random.random",
"numpy.sqrt",
"numpy.random.seed",
"numpy.random.choice",
"numpy.asarray",
"numpy.reshape",
"numpy.max",
"numpy.copy",
"numpy.std",
"numpy.mean",
"numpy.prod",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chence17/fcaf3d | [
"636aaa0410430deedd7bd4979e8c1bc307424a84",
"636aaa0410430deedd7bd4979e8c1bc307424a84",
"636aaa0410430deedd7bd4979e8c1bc307424a84"
] | [
"mmdet3d/datasets/s3dis_dataset.py",
"mmdet3d/core/bbox/structures/lidar_box3d.py",
"mmdet3d/models/dense_heads/fcaf3d_neck_with_head.py"
] | [
"import numpy as np\nfrom os import path as osp\n\nfrom mmdet3d.core import show_result, show_seg_result\nfrom mmdet3d.core.bbox import DepthInstance3DBoxes\nfrom mmdet.datasets import DATASETS\nfrom mmseg.datasets import DATASETS as SEG_DATASETS\nfrom .custom_3d import Custom3DDataset\nfrom .custom_3d_seg import Custom3DSegDataset\nfrom .pipelines import Compose\n\n\[email protected]_module()\nclass S3DISDataset(Custom3DDataset):\n \"\"\"S3DIS Dataset for Detection Task.\n\n This class is the inner dataset for S3DIS. Since S3DIS has 6 areas, we\n often train on 5 of them and test on the remaining one. The one for\n test is Area_5 as suggested in `GSDN <https://arxiv.org/abs/2006.12356>`_.\n To concatenate 5 areas during training\n `mmdet.datasets.dataset_wrappers.ConcatDataset` should be used.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n box_type_3d (str, optional): Type of 3D box of this dataset.\n Based on the `box_type_3d`, the dataset will encapsulate the box\n to its original format then converted them to `box_type_3d`.\n Defaults to 'Depth' in this dataset. Available options includes\n\n - 'LiDAR': Box in LiDAR coordinates.\n - 'Depth': Box in depth coordinates, usually for indoor dataset.\n - 'Camera': Box in camera coordinates.\n filter_empty_gt (bool, optional): Whether to filter empty GT.\n Defaults to True.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n \"\"\"\n CLASSES = ('table', 'chair', 'sofa', 'bookcase', 'board')\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n modality=None,\n box_type_3d='Depth',\n filter_empty_gt=True,\n test_mode=False):\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n modality=modality,\n box_type_3d=box_type_3d,\n filter_empty_gt=filter_empty_gt,\n test_mode=test_mode)\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - gt_bboxes_3d (:obj:`DepthInstance3DBoxes`): \\\n 3D ground truth bboxes\n - gt_labels_3d (np.ndarray): Labels of ground truths.\n - pts_instance_mask_path (str): Path of instance masks.\n - pts_semantic_mask_path (str): Path of semantic masks.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n if info['annos']['gt_num'] != 0:\n gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(\n np.float32) # k, 6\n gt_labels_3d = info['annos']['class'].astype(np.long)\n else:\n gt_bboxes_3d = np.zeros((0, 6), dtype=np.float32)\n gt_labels_3d = np.zeros((0, ), dtype=np.long)\n\n # to target box structure\n gt_bboxes_3d = DepthInstance3DBoxes(\n gt_bboxes_3d,\n box_dim=gt_bboxes_3d.shape[-1],\n with_yaw=False,\n origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)\n\n pts_instance_mask_path = osp.join(self.data_root,\n info['pts_instance_mask_path'])\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n anns_results = dict(\n gt_bboxes_3d=gt_bboxes_3d,\n gt_labels_3d=gt_labels_3d,\n 
pts_instance_mask_path=pts_instance_mask_path,\n pts_semantic_mask_path=pts_semantic_mask_path)\n return anns_results\n\n def get_data_info(self, index):\n \"\"\"Get data info according to the given index.\n\n Args:\n index (int): Index of the sample data to get.\n\n Returns:\n dict: Data information that will be passed to the data \\\n preprocessing pipelines. It includes the following keys:\n\n - pts_filename (str): Filename of point clouds.\n - file_name (str): Filename of point clouds.\n - ann_info (dict): Annotation info.\n \"\"\"\n info = self.data_infos[index]\n pts_filename = osp.join(self.data_root, info['pts_path'])\n input_dict = dict(pts_filename=pts_filename)\n\n if not self.test_mode:\n annos = self.get_ann_info(index)\n input_dict['ann_info'] = annos\n if self.filter_empty_gt and ~(annos['gt_labels_3d'] != -1).any():\n return None\n return input_dict\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='DefaultFormatBundle3D',\n class_names=self.CLASSES,\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=True, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._get_pipeline(pipeline)\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points = self._extract_data(i, pipeline, 'points').numpy()\n gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d']\n gt_bboxes = gt_bboxes.corners.numpy() if len(gt_bboxes) else None\n gt_labels = self.get_ann_info(i)['gt_labels_3d']\n pred_bboxes = result['boxes_3d']\n pred_bboxes = pred_bboxes.corners.numpy() if len(pred_bboxes) else None\n pred_labels = result['labels_3d']\n show_result(points, gt_bboxes, gt_labels,\n pred_bboxes, pred_labels, out_dir, file_name, False)\n\n\nclass _S3DISSegDataset(Custom3DSegDataset):\n r\"\"\"S3DIS Dataset for Semantic Segmentation Task.\n\n This class is the inner dataset for S3DIS. Since S3DIS has 6 areas, we\n often train on 5 of them and test on the remaining one.\n However, there is not a fixed train-test split of S3DIS. People often test\n on Area_5 as suggested by `SEGCloud <https://arxiv.org/abs/1710.07563>`_.\n But many papers also report the average results of 6-fold cross validation\n over the 6 areas (e.g. `DGCNN <https://arxiv.org/abs/1801.07829>`_).\n Therefore, we use an inner dataset for one area, and further use a dataset\n wrapper to concat all the provided data in different areas.\n\n Args:\n data_root (str): Path of dataset root.\n ann_file (str): Path of annotation file.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n palette (list[list[int]], optional): The palette of segmentation map.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. 
Defaults to None.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n ignore_index (int, optional): The label index to be ignored, e.g. \\\n unannotated points. If None is given, set to len(self.CLASSES).\n Defaults to None.\n scene_idxs (np.ndarray | str, optional): Precomputed index to load\n data. For scenes with many points, we may sample it several times.\n Defaults to None.\n \"\"\"\n CLASSES = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',\n 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter')\n\n VALID_CLASS_IDS = tuple(range(13))\n\n ALL_CLASS_IDS = tuple(range(14)) # possibly with 'stair' class\n\n PALETTE = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],\n [255, 0, 255], [100, 100, 255], [200, 200, 100],\n [170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],\n [200, 200, 200], [50, 50, 50]]\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n palette=None,\n modality=None,\n test_mode=False,\n ignore_index=None,\n scene_idxs=None):\n\n super().__init__(\n data_root=data_root,\n ann_file=ann_file,\n pipeline=pipeline,\n classes=classes,\n palette=palette,\n modality=modality,\n test_mode=test_mode,\n ignore_index=ignore_index,\n scene_idxs=scene_idxs)\n\n def get_ann_info(self, index):\n \"\"\"Get annotation info according to the given index.\n\n Args:\n index (int): Index of the annotation data to get.\n\n Returns:\n dict: annotation information consists of the following keys:\n\n - pts_semantic_mask_path (str): Path of semantic masks.\n \"\"\"\n # Use index to get the annos, thus the evalhook could also use this api\n info = self.data_infos[index]\n\n pts_semantic_mask_path = osp.join(self.data_root,\n info['pts_semantic_mask_path'])\n\n anns_results = dict(pts_semantic_mask_path=pts_semantic_mask_path)\n return anns_results\n\n def _build_default_pipeline(self):\n \"\"\"Build the default pipeline for this dataset.\"\"\"\n pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n use_color=True,\n load_dim=6,\n use_dim=[0, 1, 2, 3, 4, 5]),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=False,\n with_label_3d=False,\n with_mask_3d=False,\n with_seg_3d=True),\n dict(\n type='PointSegClassMapping',\n valid_cat_ids=self.VALID_CLASS_IDS,\n max_cat_id=np.max(self.ALL_CLASS_IDS)),\n dict(\n type='DefaultFormatBundle3D',\n with_label=False,\n class_names=self.CLASSES),\n dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])\n ]\n return Compose(pipeline)\n\n def show(self, results, out_dir, show=True, pipeline=None):\n \"\"\"Results visualization.\n\n Args:\n results (list[dict]): List of bounding boxes results.\n out_dir (str): Output directory of visualization result.\n show (bool): Visualize the results online.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n \"\"\"\n assert out_dir is not None, 'Expect out_dir, got none.'\n pipeline = self._get_pipeline(pipeline)\n for i, result in enumerate(results):\n data_info = self.data_infos[i]\n pts_path = data_info['pts_path']\n file_name = osp.split(pts_path)[-1].split('.')[0]\n points, gt_sem_mask = self._extract_data(\n i, pipeline, ['points', 'pts_semantic_mask'], load_annos=True)\n points = points.numpy()\n pred_sem_mask = result['semantic_mask'].numpy()\n show_seg_result(points, gt_sem_mask,\n pred_sem_mask, out_dir, file_name,\n np.array(self.PALETTE), self.ignore_index, show)\n\n def get_scene_idxs(self, scene_idxs):\n \"\"\"Compute 
scene_idxs for data sampling.\n\n We sample more times for scenes with more points.\n \"\"\"\n # when testing, we load one whole scene every time\n if not self.test_mode and scene_idxs is None:\n raise NotImplementedError(\n 'please provide re-sampled scene indexes for training')\n\n return super().get_scene_idxs(scene_idxs)\n\n\n@DATASETS.register_module()\n@SEG_DATASETS.register_module()\nclass S3DISSegDataset(_S3DISSegDataset):\n r\"\"\"S3DIS Dataset for Semantic Segmentation Task.\n\n This class serves as the API for experiments on the S3DIS Dataset.\n It wraps the provided datasets of different areas.\n We don't use `mmdet.datasets.dataset_wrappers.ConcatDataset` because we\n need to concat the `scene_idxs` of different areas.\n\n Please refer to the `google form <https://docs.google.com/forms/d/e/1FAIpQL\n ScDimvNMCGhy_rmBA2gHfDu3naktRm6A8BPwAWWDv-Uhm6Shw/viewform?c=0&w=1>`_ for\n data downloading.\n\n Args:\n data_root (str): Path of dataset root.\n ann_files (list[str]): Path of several annotation files.\n pipeline (list[dict], optional): Pipeline used for data processing.\n Defaults to None.\n classes (tuple[str], optional): Classes used in the dataset.\n Defaults to None.\n palette (list[list[int]], optional): The palette of segmentation map.\n Defaults to None.\n modality (dict, optional): Modality to specify the sensor data used\n as input. Defaults to None.\n test_mode (bool, optional): Whether the dataset is in test mode.\n Defaults to False.\n ignore_index (int, optional): The label index to be ignored, e.g. \\\n unannotated points. If None is given, set to len(self.CLASSES).\n Defaults to None.\n scene_idxs (list[np.ndarray] | list[str], optional): Precomputed index\n to load data. For scenes with many points, we may sample it several\n times. 
Defaults to None.\n \"\"\"\n\n def __init__(self,\n data_root,\n ann_files,\n pipeline=None,\n classes=None,\n palette=None,\n modality=None,\n test_mode=False,\n ignore_index=None,\n scene_idxs=None):\n\n # make sure that ann_files and scene_idxs have same length\n ann_files = self._check_ann_files(ann_files)\n scene_idxs = self._check_scene_idxs(scene_idxs, len(ann_files))\n\n # initialize some attributes as datasets[0]\n super().__init__(\n data_root=data_root,\n ann_file=ann_files[0],\n pipeline=pipeline,\n classes=classes,\n palette=palette,\n modality=modality,\n test_mode=test_mode,\n ignore_index=ignore_index,\n scene_idxs=scene_idxs[0])\n\n datasets = [\n _S3DISSegDataset(\n data_root=data_root,\n ann_file=ann_files[i],\n pipeline=pipeline,\n classes=classes,\n palette=palette,\n modality=modality,\n test_mode=test_mode,\n ignore_index=ignore_index,\n scene_idxs=scene_idxs[i]) for i in range(len(ann_files))\n ]\n\n # data_infos and scene_idxs need to be concat\n self.concat_data_infos([dst.data_infos for dst in datasets])\n self.concat_scene_idxs([dst.scene_idxs for dst in datasets])\n\n # set group flag for the sampler\n if not self.test_mode:\n self._set_group_flag()\n\n def concat_data_infos(self, data_infos):\n \"\"\"Concat data_infos from several datasets to form self.data_infos.\n\n Args:\n data_infos (list[list[dict]])\n \"\"\"\n self.data_infos = [\n info for one_data_infos in data_infos for info in one_data_infos\n ]\n\n def concat_scene_idxs(self, scene_idxs):\n \"\"\"Concat scene_idxs from several datasets to form self.scene_idxs.\n\n Needs to manually add offset to scene_idxs[1, 2, ...].\n\n Args:\n scene_idxs (list[np.ndarray])\n \"\"\"\n self.scene_idxs = np.array([], dtype=np.int32)\n offset = 0\n for one_scene_idxs in scene_idxs:\n self.scene_idxs = np.concatenate(\n [self.scene_idxs, one_scene_idxs + offset]).astype(np.int32)\n offset = np.unique(self.scene_idxs).max() + 1\n\n @staticmethod\n def _duplicate_to_list(x, num):\n \"\"\"Repeat x `num` times to form a list.\"\"\"\n return [x for _ in range(num)]\n\n def _check_ann_files(self, ann_file):\n \"\"\"Make ann_files as list/tuple.\"\"\"\n # ann_file could be str\n if not isinstance(ann_file, (list, tuple)):\n ann_file = self._duplicate_to_list(ann_file, 1)\n return ann_file\n\n def _check_scene_idxs(self, scene_idx, num):\n \"\"\"Make scene_idxs as list/tuple.\"\"\"\n if scene_idx is None:\n return self._duplicate_to_list(scene_idx, num)\n # scene_idx could be str, np.ndarray, list or tuple\n if isinstance(scene_idx, str): # str\n return self._duplicate_to_list(scene_idx, num)\n if isinstance(scene_idx[0], str): # list of str\n return scene_idx\n if isinstance(scene_idx[0], (list, tuple, np.ndarray)): # list of idx\n return scene_idx\n # single idx\n return self._duplicate_to_list(scene_idx, num)\n",
"import numpy as np\nimport torch\n\nfrom mmdet3d.core.points import BasePoints\nfrom mmdet3d.ops.roiaware_pool3d import points_in_boxes_gpu\nfrom .base_box3d import BaseInstance3DBoxes\nfrom .utils import limit_period, rotation_3d_in_axis\n\n\nclass LiDARInstance3DBoxes(BaseInstance3DBoxes):\n \"\"\"3D boxes of instances in LIDAR coordinates.\n\n Coordinates in LiDAR:\n\n .. code-block:: none\n\n up z x front (yaw=-0.5*pi)\n ^ ^\n | /\n | /\n (yaw=-pi) left y <------ 0 -------- (yaw=0)\n\n The relative coordinate of bottom center in a LiDAR box is (0.5, 0.5, 0),\n and the yaw is around the z axis, thus the rotation axis=2.\n The yaw is 0 at the negative direction of y axis, and decreases from\n the negative direction of y to the positive direction of x.\n\n A refactor is ongoing to make the three coordinate systems\n easier to understand and convert between each other.\n\n Attributes:\n tensor (torch.Tensor): Float matrix of N x box_dim.\n box_dim (int): Integer indicating the dimension of a box.\n Each row is (x, y, z, x_size, y_size, z_size, yaw, ...).\n with_yaw (bool): If True, the value of yaw will be set to 0 as minmax\n boxes.\n \"\"\"\n\n @property\n def gravity_center(self):\n \"\"\"torch.Tensor: A tensor with center of each box.\"\"\"\n bottom_center = self.bottom_center\n gravity_center = torch.zeros_like(bottom_center)\n gravity_center[:, :2] = bottom_center[:, :2]\n gravity_center[:, 2] = bottom_center[:, 2] + self.tensor[:, 5] * 0.5\n return gravity_center\n\n @property\n def corners(self):\n \"\"\"torch.Tensor: Coordinates of corners of all the boxes\n in shape (N, 8, 3).\n\n Convert the boxes to corners in clockwise order, in form of\n ``(x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0)``\n\n .. code-block:: none\n\n up z\n front x ^\n / |\n / |\n (x1, y0, z1) + ----------- + (x1, y1, z1)\n /| / |\n / | / |\n (x0, y0, z1) + ----------- + + (x1, y1, z0)\n | / . 
| /\n | / origin | /\n left y<-------- + ----------- + (x0, y1, z0)\n (x0, y0, z0)\n \"\"\"\n # TODO: rotation_3d_in_axis function does not support\n # empty tensor currently.\n assert len(self.tensor) != 0\n dims = self.dims\n corners_norm = torch.from_numpy(\n np.stack(np.unravel_index(np.arange(8), [2] * 3), axis=1)).to(\n device=dims.device, dtype=dims.dtype)\n\n corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]\n # use relative origin [0.5, 0.5, 0]\n corners_norm = corners_norm - dims.new_tensor([0.5, 0.5, 0])\n corners = dims.view([-1, 1, 3]) * corners_norm.reshape([1, 8, 3])\n\n # rotate around z axis\n corners = rotation_3d_in_axis(corners, self.tensor[:, 6], axis=2)\n corners += self.tensor[:, :3].view(-1, 1, 3)\n return corners\n\n @property\n def bev(self):\n \"\"\"torch.Tensor: 2D BEV box of each box with rotation\n in XYWHR format.\"\"\"\n return self.tensor[:, [0, 1, 3, 4, 6]]\n\n @property\n def nearest_bev(self):\n \"\"\"torch.Tensor: A tensor of 2D BEV box of each box\n without rotation.\"\"\"\n # Obtain BEV boxes with rotation in XYWHR format\n bev_rotated_boxes = self.bev\n # convert the rotation to a valid range\n rotations = bev_rotated_boxes[:, -1]\n normed_rotations = torch.abs(limit_period(rotations, 0.5, np.pi))\n\n # find the center of boxes\n conditions = (normed_rotations > np.pi / 4)[..., None]\n bboxes_xywh = torch.where(conditions, bev_rotated_boxes[:,\n [0, 1, 3, 2]],\n bev_rotated_boxes[:, :4])\n\n centers = bboxes_xywh[:, :2]\n dims = bboxes_xywh[:, 2:]\n bev_boxes = torch.cat([centers - dims / 2, centers + dims / 2], dim=-1)\n return bev_boxes\n\n def rotate(self, angle, points=None):\n \"\"\"Rotate boxes with points (optional) with the given angle or \\\n rotation matrix.\n\n Args:\n angle (float | torch.Tensor | np.ndarray):\n Rotation angle or rotation matrix.\n points (torch.Tensor, numpy.ndarray, :obj:`BasePoints`, optional):\n Points to rotate. Defaults to None.\n\n Returns:\n tuple or None: When ``points`` is None, the function returns \\\n None, otherwise it returns the rotated points and the \\\n rotation matrix ``rot_mat_T``.\n \"\"\"\n if not isinstance(angle, torch.Tensor):\n angle = self.tensor.new_tensor(angle)\n assert angle.shape == torch.Size([3, 3]) or angle.numel() == 1, \\\n f'invalid rotation angle shape {angle.shape}'\n\n if angle.numel() == 1:\n rot_sin = torch.sin(angle)\n rot_cos = torch.cos(angle)\n rot_mat_T = self.tensor.new_tensor([[rot_cos, -rot_sin, 0],\n [rot_sin, rot_cos, 0],\n [0, 0, 1]])\n else:\n rot_mat_T = angle\n rot_sin = rot_mat_T[1, 0]\n rot_cos = rot_mat_T[0, 0]\n angle = np.arctan2(rot_sin, rot_cos)\n\n self.tensor[:, :3] = self.tensor[:, :3] @ rot_mat_T\n self.tensor[:, 6] += angle\n\n if self.tensor.shape[1] == 9:\n # rotate velo vector\n self.tensor[:, 7:9] = self.tensor[:, 7:9] @ rot_mat_T[:2, :2]\n\n if points is not None:\n if isinstance(points, torch.Tensor):\n points[:, :3] = points[:, :3] @ rot_mat_T\n elif isinstance(points, np.ndarray):\n rot_mat_T = rot_mat_T.numpy()\n points[:, :3] = np.dot(points[:, :3], rot_mat_T)\n elif isinstance(points, BasePoints):\n # clockwise\n points.rotate(-angle)\n else:\n raise ValueError\n return points, rot_mat_T\n\n def flip(self, bev_direction='horizontal', points=None):\n \"\"\"Flip the boxes in BEV along given BEV direction.\n\n In LIDAR coordinates, it flips the y (horizontal) or x (vertical) axis.\n\n Args:\n bev_direction (str): Flip direction (horizontal or vertical).\n points (torch.Tensor, numpy.ndarray, :obj:`BasePoints`, None):\n Points to flip. 
Defaults to None.\n\n Returns:\n torch.Tensor, numpy.ndarray or None: Flipped points.\n \"\"\"\n assert bev_direction in ('horizontal', 'vertical')\n if bev_direction == 'horizontal':\n self.tensor[:, 1::7] = -self.tensor[:, 1::7]\n if self.with_yaw:\n self.tensor[:, 6] = -self.tensor[:, 6] + np.pi\n elif bev_direction == 'vertical':\n self.tensor[:, 0::7] = -self.tensor[:, 0::7]\n if self.with_yaw:\n self.tensor[:, 6] = -self.tensor[:, 6]\n\n if points is not None:\n assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints))\n if isinstance(points, (torch.Tensor, np.ndarray)):\n if bev_direction == 'horizontal':\n points[:, 1] = -points[:, 1]\n elif bev_direction == 'vertical':\n points[:, 0] = -points[:, 0]\n elif isinstance(points, BasePoints):\n points.flip(bev_direction)\n return points\n\n def in_range_bev(self, box_range):\n \"\"\"Check whether the boxes are in the given range.\n\n Args:\n box_range (list | torch.Tensor): the range of box\n (x_min, y_min, x_max, y_max)\n\n Note:\n The original implementation of SECOND checks whether boxes in\n a range by checking whether the points are in a convex\n polygon, we reduce the burden for simpler cases.\n\n Returns:\n torch.Tensor: Whether each box is inside the reference range.\n \"\"\"\n in_range_flags = ((self.tensor[:, 0] > box_range[0])\n & (self.tensor[:, 1] > box_range[1])\n & (self.tensor[:, 0] < box_range[2])\n & (self.tensor[:, 1] < box_range[3]))\n return in_range_flags\n\n def convert_to(self, dst, rt_mat=None):\n \"\"\"Convert self to ``dst`` mode.\n\n Args:\n dst (:obj:`BoxMode`): the target Box mode\n rt_mat (np.ndarray | torch.Tensor): The rotation and translation\n matrix between different coordinates. Defaults to None.\n The conversion from ``src`` coordinates to ``dst`` coordinates\n usually comes along the change of sensors, e.g., from camera\n to LiDAR. This requires a transformation matrix.\n\n Returns:\n :obj:`BaseInstance3DBoxes`: \\\n The converted box of the same type in the ``dst`` mode.\n \"\"\"\n from .box_3d_mode import Box3DMode\n return Box3DMode.convert(\n box=self, src=Box3DMode.LIDAR, dst=dst, rt_mat=rt_mat)\n\n def enlarged_box(self, extra_width):\n \"\"\"Enlarge the length, width and height boxes.\n\n Args:\n extra_width (float | torch.Tensor): Extra width to enlarge the box.\n\n Returns:\n :obj:`LiDARInstance3DBoxes`: Enlarged boxes.\n \"\"\"\n enlarged_boxes = self.tensor.clone()\n enlarged_boxes[:, 3:6] += extra_width * 2\n # bottom center z minus extra_width\n enlarged_boxes[:, 2] -= extra_width\n return self.new_box(enlarged_boxes)\n\n def points_in_boxes(self, points):\n \"\"\"Find the box which the points are in.\n\n Args:\n points (torch.Tensor): Points in shape (N, 3).\n\n Returns:\n torch.Tensor: The index of box where each point are in.\n \"\"\"\n box_idx = points_in_boxes_gpu(\n points.unsqueeze(0),\n self.tensor.unsqueeze(0).to(points.device)).squeeze(0)\n return box_idx\n",
"import torch\nfrom torch import nn\nimport MinkowskiEngine as ME\nfrom mmdet.core import BaseAssigner, reduce_mean, build_assigner\nfrom mmdet.models.builder import HEADS, build_loss\nfrom mmdet.core.bbox.builder import BBOX_ASSIGNERS\nfrom mmcv.cnn import Scale, bias_init_with_prob\n\nfrom mmdet3d.core.bbox.structures import rotation_3d_in_axis\nfrom mmdet3d.ops.pcdet_nms import pcdet_nms_gpu, pcdet_nms_normal_gpu\n\n\[email protected]_module()\nclass Fcaf3DNeckWithHead(nn.Module):\n def __init__(self,\n n_classes,\n in_channels,\n out_channels,\n n_reg_outs,\n voxel_size,\n pts_threshold,\n assigner,\n yaw_parametrization='fcaf3d',\n loss_centerness=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n loss_weight=1.0),\n loss_bbox=dict(type='IoU3DLoss', loss_weight=1.0),\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n train_cfg=None,\n test_cfg=None):\n super(Fcaf3DNeckWithHead, self).__init__()\n self.voxel_size = voxel_size\n self.yaw_parametrization = yaw_parametrization\n self.assigner = build_assigner(assigner)\n self.loss_centerness = build_loss(loss_centerness)\n self.loss_bbox = build_loss(loss_bbox)\n self.loss_cls = build_loss(loss_cls)\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.pts_threshold = pts_threshold\n self._init_layers(in_channels, out_channels, n_reg_outs, n_classes)\n\n @staticmethod\n def _make_block(in_channels, out_channels):\n return nn.Sequential(\n ME.MinkowskiConvolution(in_channels, out_channels, kernel_size=3, dimension=3),\n ME.MinkowskiBatchNorm(out_channels),\n ME.MinkowskiELU()\n )\n\n @staticmethod\n def _make_up_block(in_channels, out_channels):\n return nn.Sequential(\n ME.MinkowskiGenerativeConvolutionTranspose(\n in_channels,\n out_channels,\n kernel_size=2,\n stride=2,\n dimension=3,\n ),\n ME.MinkowskiBatchNorm(out_channels),\n ME.MinkowskiELU(),\n ME.MinkowskiConvolution(out_channels, out_channels, kernel_size=3, dimension=3),\n ME.MinkowskiBatchNorm(out_channels),\n ME.MinkowskiELU()\n )\n\n def _init_layers(self, in_channels, out_channels, n_reg_outs, n_classes):\n # neck layers\n self.pruning = ME.MinkowskiPruning()\n for i in range(len(in_channels)):\n if i > 0:\n self.__setattr__(f'up_block_{i}', self._make_up_block(in_channels[i], in_channels[i - 1]))\n self.__setattr__(f'out_block_{i}', self._make_block(in_channels[i], out_channels))\n\n # head layers\n self.centerness_conv = ME.MinkowskiConvolution(out_channels, 1, kernel_size=1, dimension=3)\n self.reg_conv = ME.MinkowskiConvolution(out_channels, n_reg_outs, kernel_size=1, dimension=3)\n self.cls_conv = ME.MinkowskiConvolution(out_channels, n_classes, kernel_size=1, bias=True, dimension=3)\n self.scales = nn.ModuleList([Scale(1.) 
for _ in range(len(in_channels))])\n\n def init_weights(self):\n nn.init.normal_(self.centerness_conv.kernel, std=.01)\n nn.init.normal_(self.reg_conv.kernel, std=.01)\n nn.init.normal_(self.cls_conv.kernel, std=.01)\n nn.init.constant_(self.cls_conv.bias, bias_init_with_prob(.01))\n\n def forward(self, x):\n outs = []\n inputs = x\n x = inputs[-1]\n for i in range(len(inputs) - 1, -1, -1):\n if i < len(inputs) - 1:\n x = self.__getattr__(f'up_block_{i + 1}')(x)\n x = inputs[i] + x\n x = self._prune(x, scores)\n out = self.__getattr__(f'out_block_{i}')(x)\n out = self.forward_single(out, self.scales[i])\n scores = out[-1]\n outs.append(out[:-1])\n return zip(*outs[::-1])\n\n def _prune(self, x, scores):\n if self.pts_threshold < 0:\n return x\n\n with torch.no_grad():\n coordinates = x.C.float()\n interpolated_scores = scores.features_at_coordinates(coordinates)\n prune_mask = interpolated_scores.new_zeros((len(interpolated_scores)), dtype=torch.bool)\n for permutation in x.decomposition_permutations:\n score = interpolated_scores[permutation]\n mask = score.new_zeros((len(score)), dtype=torch.bool)\n topk = min(len(score), self.pts_threshold)\n ids = torch.topk(score.squeeze(1), topk, sorted=False).indices\n mask[ids] = True\n prune_mask[permutation[mask]] = True\n x = self.pruning(x, prune_mask)\n return x\n\n def loss(self,\n centernesses,\n bbox_preds,\n cls_scores,\n points,\n gt_bboxes,\n gt_labels,\n img_metas):\n assert len(centernesses[0]) == len(bbox_preds[0]) == len(cls_scores[0]) \\\n == len(points[0]) == len(img_metas) == len(gt_bboxes) == len(gt_labels)\n\n loss_centerness, loss_bbox, loss_cls = [], [], []\n for i in range(len(img_metas)):\n img_loss_centerness, img_loss_bbox, img_loss_cls = self._loss_single(\n centernesses=[x[i] for x in centernesses],\n bbox_preds=[x[i] for x in bbox_preds],\n cls_scores=[x[i] for x in cls_scores],\n points=[x[i] for x in points],\n img_meta=img_metas[i],\n gt_bboxes=gt_bboxes[i],\n gt_labels=gt_labels[i]\n )\n loss_centerness.append(img_loss_centerness)\n loss_bbox.append(img_loss_bbox)\n loss_cls.append(img_loss_cls)\n return dict(\n loss_centerness=torch.mean(torch.stack(loss_centerness)),\n loss_bbox=torch.mean(torch.stack(loss_bbox)),\n loss_cls=torch.mean(torch.stack(loss_cls))\n )\n\n # per image\n def _loss_single(self,\n centernesses,\n bbox_preds,\n cls_scores,\n points,\n gt_bboxes,\n gt_labels,\n img_meta):\n with torch.no_grad():\n centerness_targets, bbox_targets, labels = self.assigner.assign(points, gt_bboxes, gt_labels)\n\n centerness = torch.cat(centernesses)\n bbox_preds = torch.cat(bbox_preds)\n cls_scores = torch.cat(cls_scores)\n points = torch.cat(points)\n\n # skip background\n pos_inds = torch.nonzero(labels >= 0).squeeze(1)\n n_pos = torch.tensor(len(pos_inds), dtype=torch.float, device=centerness.device)\n n_pos = max(reduce_mean(n_pos), 1.)\n loss_cls = self.loss_cls(cls_scores, labels, avg_factor=n_pos)\n pos_centerness = centerness[pos_inds]\n pos_bbox_preds = bbox_preds[pos_inds]\n pos_centerness_targets = centerness_targets[pos_inds].unsqueeze(1)\n pos_bbox_targets = bbox_targets[pos_inds]\n # centerness weighted iou loss\n centerness_denorm = max(\n reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)\n\n if len(pos_inds) > 0:\n pos_points = points[pos_inds]\n loss_centerness = self.loss_centerness(\n pos_centerness, pos_centerness_targets, avg_factor=n_pos\n )\n loss_bbox = self.loss_bbox(\n self._bbox_pred_to_bbox(pos_points, pos_bbox_preds),\n pos_bbox_targets,\n 
weight=pos_centerness_targets.squeeze(1),\n avg_factor=centerness_denorm\n )\n else:\n loss_centerness = pos_centerness.sum()\n loss_bbox = pos_bbox_preds.sum()\n return loss_centerness, loss_bbox, loss_cls\n\n def get_bboxes(self,\n centernesses,\n bbox_preds,\n cls_scores,\n points,\n img_metas,\n rescale=False):\n assert len(centernesses[0]) == len(bbox_preds[0]) == len(cls_scores[0]) \\\n == len(points[0]) == len(img_metas)\n results = []\n for i in range(len(img_metas)):\n result = self._get_bboxes_single(\n centernesses=[x[i] for x in centernesses],\n bbox_preds=[x[i] for x in bbox_preds],\n cls_scores=[x[i] for x in cls_scores],\n points=[x[i] for x in points],\n img_meta=img_metas[i]\n )\n results.append(result)\n return results\n\n # per image\n def _get_bboxes_single(self,\n centernesses,\n bbox_preds,\n cls_scores,\n points,\n img_meta):\n mlvl_bboxes, mlvl_scores = [], []\n for centerness, bbox_pred, cls_score, point in zip(\n centernesses, bbox_preds, cls_scores, points\n ):\n scores = cls_score.sigmoid() * centerness.sigmoid()\n max_scores, _ = scores.max(dim=1)\n\n if len(scores) > self.test_cfg.nms_pre > 0:\n _, ids = max_scores.topk(self.test_cfg.nms_pre)\n bbox_pred = bbox_pred[ids]\n scores = scores[ids]\n point = point[ids]\n\n bboxes = self._bbox_pred_to_bbox(point, bbox_pred)\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n\n bboxes = torch.cat(mlvl_bboxes)\n scores = torch.cat(mlvl_scores)\n bboxes, scores, labels = self._nms(bboxes, scores, img_meta)\n return bboxes, scores, labels\n\n # per scale\n def forward_single(self, x, scale):\n centerness = self.centerness_conv(x).features\n scores = self.cls_conv(x)\n cls_score = scores.features\n prune_scores = ME.SparseTensor(\n scores.features.max(dim=1, keepdim=True).values,\n coordinate_map_key=scores.coordinate_map_key,\n coordinate_manager=scores.coordinate_manager)\n reg_final = self.reg_conv(x).features\n reg_distance = torch.exp(scale(reg_final[:, :6]))\n reg_angle = reg_final[:, 6:]\n bbox_pred = torch.cat((reg_distance, reg_angle), dim=1)\n\n centernesses, bbox_preds, cls_scores, points = [], [], [], []\n for permutation in x.decomposition_permutations:\n centernesses.append(centerness[permutation])\n bbox_preds.append(bbox_pred[permutation])\n cls_scores.append(cls_score[permutation])\n\n points = x.decomposed_coordinates\n for i in range(len(points)):\n points[i] = points[i] * self.voxel_size\n\n return centernesses, bbox_preds, cls_scores, points, prune_scores\n\n def _bbox_pred_to_bbox(self, points, bbox_pred):\n if bbox_pred.shape[0] == 0:\n return bbox_pred\n\n x_center = points[:, 0] + (bbox_pred[:, 1] - bbox_pred[:, 0]) / 2\n y_center = points[:, 1] + (bbox_pred[:, 3] - bbox_pred[:, 2]) / 2\n z_center = points[:, 2] + (bbox_pred[:, 5] - bbox_pred[:, 4]) / 2\n\n # dx_min, dx_max, dy_min, dy_max, dz_min, dz_max -> x, y, z, w, l, h\n base_bbox = torch.stack([\n x_center,\n y_center,\n z_center,\n bbox_pred[:, 0] + bbox_pred[:, 1],\n bbox_pred[:, 2] + bbox_pred[:, 3],\n bbox_pred[:, 4] + bbox_pred[:, 5],\n ], -1)\n\n if bbox_pred.shape[1] == 6:\n return base_bbox\n\n if self.yaw_parametrization == 'naive':\n # ..., alpha\n return torch.cat((\n base_bbox,\n bbox_pred[:, 6:7]\n ), -1)\n elif self.yaw_parametrization == 'sin-cos':\n # ..., sin(a), cos(a)\n norm = torch.pow(torch.pow(bbox_pred[:, 6:7], 2) + torch.pow(bbox_pred[:, 7:8], 2), 0.5)\n sin = bbox_pred[:, 6:7] / norm\n cos = bbox_pred[:, 7:8] / norm\n return torch.cat((\n base_bbox,\n torch.atan2(sin, cos)\n ), -1)\n else: # 
self.yaw_parametrization == 'fcaf3d'\n # ..., sin(2a)ln(q), cos(2a)ln(q)\n scale = bbox_pred[:, 0] + bbox_pred[:, 1] + bbox_pred[:, 2] + bbox_pred[:, 3]\n q = torch.exp(torch.sqrt(torch.pow(bbox_pred[:, 6], 2) + torch.pow(bbox_pred[:, 7], 2)))\n alpha = 0.5 * torch.atan2(bbox_pred[:, 6], bbox_pred[:, 7])\n return torch.stack((\n x_center,\n y_center,\n z_center,\n scale / (1 + q),\n scale / (1 + q) * q,\n bbox_pred[:, 5] + bbox_pred[:, 4],\n alpha\n ), dim=-1)\n\n def _nms(self, bboxes, scores, img_meta):\n n_classes = scores.shape[1]\n yaw_flag = bboxes.shape[1] == 7\n nms_bboxes, nms_scores, nms_labels = [], [], []\n for i in range(n_classes):\n ids = scores[:, i] > self.test_cfg.score_thr\n if not ids.any():\n continue\n\n class_scores = scores[ids, i]\n class_bboxes = bboxes[ids]\n if yaw_flag:\n nms_function = pcdet_nms_gpu\n else:\n class_bboxes = torch.cat((\n class_bboxes, torch.zeros_like(class_bboxes[:, :1])), dim=1)\n nms_function = pcdet_nms_normal_gpu\n\n nms_ids, _ = nms_function(class_bboxes, class_scores, self.test_cfg.iou_thr)\n nms_bboxes.append(class_bboxes[nms_ids])\n nms_scores.append(class_scores[nms_ids])\n nms_labels.append(bboxes.new_full(class_scores[nms_ids].shape, i, dtype=torch.long))\n\n if len(nms_bboxes):\n nms_bboxes = torch.cat(nms_bboxes, dim=0)\n nms_scores = torch.cat(nms_scores, dim=0)\n nms_labels = torch.cat(nms_labels, dim=0)\n else:\n nms_bboxes = bboxes.new_zeros((0, bboxes.shape[1]))\n nms_scores = bboxes.new_zeros((0,))\n nms_labels = bboxes.new_zeros((0,))\n\n if yaw_flag:\n box_dim = 7\n with_yaw = True\n else:\n box_dim = 6\n with_yaw = False\n nms_bboxes = nms_bboxes[:, :6]\n nms_bboxes = img_meta['box_type_3d'](\n nms_bboxes, box_dim=box_dim, with_yaw=with_yaw, origin=(.5, .5, .5))\n\n return nms_bboxes, nms_scores, nms_labels\n\n\ndef compute_centerness(bbox_targets):\n x_dims = bbox_targets[..., [0, 1]]\n y_dims = bbox_targets[..., [2, 3]]\n z_dims = bbox_targets[..., [4, 5]]\n centerness_targets = x_dims.min(dim=-1)[0] / x_dims.max(dim=-1)[0] * \\\n y_dims.min(dim=-1)[0] / y_dims.max(dim=-1)[0] * \\\n z_dims.min(dim=-1)[0] / z_dims.max(dim=-1)[0]\n return torch.sqrt(centerness_targets)\n\n\n@BBOX_ASSIGNERS.register_module()\nclass Fcaf3DAssigner(BaseAssigner):\n def __init__(self, limit, topk, n_scales):\n self.limit = limit\n self.topk = topk\n self.n_scales = n_scales\n\n def assign(self, points, gt_bboxes, gt_labels):\n float_max = 1e8\n # expand scales to align with points\n expanded_scales = [\n points[i].new_tensor(i).expand(len(points[i]))\n for i in range(len(points))\n ]\n points = torch.cat(points, dim=0)\n scales = torch.cat(expanded_scales, dim=0)\n\n # below is based on FCOSHead._get_target_single\n n_points = len(points)\n n_boxes = len(gt_bboxes)\n volumes = gt_bboxes.volume.to(points.device)\n volumes = volumes.expand(n_points, n_boxes).contiguous()\n gt_bboxes = torch.cat((gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), dim=1)\n gt_bboxes = gt_bboxes.to(points.device).expand(n_points, n_boxes, 7)\n expanded_points = points.unsqueeze(1).expand(n_points, n_boxes, 3)\n shift = torch.stack((\n expanded_points[..., 0] - gt_bboxes[..., 0],\n expanded_points[..., 1] - gt_bboxes[..., 1],\n expanded_points[..., 2] - gt_bboxes[..., 2]\n ), dim=-1).permute(1, 0, 2)\n shift = rotation_3d_in_axis(shift, -gt_bboxes[0, :, 6], axis=2).permute(1, 0, 2)\n centers = gt_bboxes[..., :3] + shift\n dx_min = centers[..., 0] - gt_bboxes[..., 0] + gt_bboxes[..., 3] / 2\n dx_max = gt_bboxes[..., 0] + gt_bboxes[..., 3] / 2 - centers[..., 0]\n 
dy_min = centers[..., 1] - gt_bboxes[..., 1] + gt_bboxes[..., 4] / 2\n dy_max = gt_bboxes[..., 1] + gt_bboxes[..., 4] / 2 - centers[..., 1]\n dz_min = centers[..., 2] - gt_bboxes[..., 2] + gt_bboxes[..., 5] / 2\n dz_max = gt_bboxes[..., 2] + gt_bboxes[..., 5] / 2 - centers[..., 2]\n bbox_targets = torch.stack((dx_min, dx_max, dy_min, dy_max, dz_min, dz_max, gt_bboxes[..., 6]), dim=-1)\n\n # condition1: inside a gt bbox\n inside_gt_bbox_mask = bbox_targets[..., :6].min(-1)[0] > 0 # skip angle\n\n # condition2: positive points per scale >= limit\n # calculate positive points per scale\n n_pos_points_per_scale = []\n for i in range(self.n_scales):\n n_pos_points_per_scale.append(torch.sum(inside_gt_bbox_mask[scales == i], dim=0))\n # find best scale\n n_pos_points_per_scale = torch.stack(n_pos_points_per_scale, dim=0)\n lower_limit_mask = n_pos_points_per_scale < self.limit\n lower_index = torch.argmax(lower_limit_mask.int(), dim=0) - 1\n lower_index = torch.where(lower_index < 0, 0, lower_index)\n all_upper_limit_mask = torch.all(torch.logical_not(lower_limit_mask), dim=0)\n best_scale = torch.where(all_upper_limit_mask, self.n_scales - 1, lower_index)\n # keep only points with best scale\n best_scale = torch.unsqueeze(best_scale, 0).expand(n_points, n_boxes)\n scales = torch.unsqueeze(scales, 1).expand(n_points, n_boxes)\n inside_best_scale_mask = best_scale == scales\n\n # condition3: limit topk locations per box by centerness\n centerness = compute_centerness(bbox_targets)\n centerness = torch.where(inside_gt_bbox_mask, centerness, torch.ones_like(centerness) * -1)\n centerness = torch.where(inside_best_scale_mask, centerness, torch.ones_like(centerness) * -1)\n top_centerness = torch.topk(centerness, min(self.topk + 1, len(centerness)), dim=0).values[-1]\n inside_top_centerness_mask = centerness > top_centerness.unsqueeze(0)\n\n # if there are still more than one objects for a location,\n # we choose the one with minimal area\n volumes = torch.where(inside_gt_bbox_mask, volumes, torch.ones_like(volumes) * float_max)\n volumes = torch.where(inside_best_scale_mask, volumes, torch.ones_like(volumes) * float_max)\n volumes = torch.where(inside_top_centerness_mask, volumes, torch.ones_like(volumes) * float_max)\n min_area, min_area_inds = volumes.min(dim=1)\n\n labels = gt_labels[min_area_inds]\n labels = torch.where(min_area == float_max, -1, labels)\n bbox_targets = bbox_targets[range(n_points), min_area_inds]\n centerness_targets = compute_centerness(bbox_targets)\n\n return centerness_targets, gt_bboxes[range(n_points), min_area_inds], labels\n"
] | [
[
"numpy.unique",
"numpy.concatenate",
"numpy.max",
"numpy.array",
"numpy.zeros"
],
[
"numpy.dot",
"torch.Size",
"torch.sin",
"torch.cat",
"numpy.arange",
"torch.zeros_like",
"numpy.arctan2",
"torch.where",
"torch.cos"
],
[
"torch.cat",
"torch.sqrt",
"torch.sum",
"torch.zeros_like",
"torch.unsqueeze",
"torch.pow",
"torch.nn.init.normal_",
"torch.no_grad",
"torch.where",
"torch.nonzero",
"torch.stack",
"torch.ones_like",
"torch.atan2",
"torch.logical_not"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PaulTHong/STDA-inf | [
"3d87a7843f879d17a343ba4838caa1f58f1e8e65"
] | [
"data/cal_mean_std.py"
] | [
"import numpy as np\nimport cv2\nimport os\n\nmean = []\nstd = []\nimg_list = []\n\ndir_path = './STL10-data/train'\nclass_paths = os.listdir(dir_path)\nprint(class_paths)\nfor cls in class_paths:\n img_paths = os.listdir(dir_path + os.sep + cls)\n print(len(img_paths))\n for img_path in img_paths:\n print(img_path)\n img_path = dir_path + os.sep + cls + os.sep + img_path\n img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\n img = img[::, np.newaxis]\n img_list.append(img)\n\n# dir_path = './STL10-data/test'\n# class_paths = os.listdir(dir_path)\n# print(class_paths)\n# for cls in class_paths:\n # img_paths = os.listdir(dir_path + os.sep + cls)\n # print(len(img_paths))\n # for img_path in img_paths:\n # print(img_path)\n # img_path = dir_path + os.sep + cls + os.sep + img_path\n # img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\n # img = img[::, np.newaxis]\n # img_list.append(img)\n\nimgs = np.concatenate(img_list, axis=3)\nimgs = imgs.astype(np.float32) / 255.0\n\nfor i in range(3):\n channel = imgs[:, :, i, :].ravel()\n mean.append(np.mean(channel))\n std.append(np.std(channel))\n\nmean.reverse()\nstd.reverse()\n\nprint(mean)\nprint(std)\n\n\n\n\n\n"
] | [
[
"numpy.concatenate",
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vrushank-agrawal/opencv-x64-cmake | [
"3f9486510d706c8ac579ac82f5d58f667f948124",
"3f9486510d706c8ac579ac82f5d58f667f948124",
"3f9486510d706c8ac579ac82f5d58f667f948124"
] | [
"opencv/sources/modules/dnn/test/cityscapes_semsegm_test_enet.py",
"opencv/sources/samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py",
"opencv/sources/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py"
] | [
"import numpy as np\r\nimport sys\r\nimport os\r\nimport fnmatch\r\nimport argparse\r\n\r\ntry:\r\n import cv2 as cv\r\nexcept ImportError:\r\n raise ImportError('Can\\'t find OpenCV Python module. If you\\'ve built it from sources without installation, '\r\n 'configure environment variable PYTHONPATH to \"opencv_build_dir/lib\" directory (with \"python3\" subdirectory if required)')\r\ntry:\r\n import torch\r\nexcept ImportError:\r\n raise ImportError('Can\\'t find pytorch. Please install it by following instructions on the official site')\r\n\r\nfrom torch.utils.serialization import load_lua\r\nfrom pascal_semsegm_test_fcn import eval_segm_result, get_conf_mat, get_metrics, DatasetImageFetch, SemSegmEvaluation\r\nfrom imagenet_cls_test_alexnet import Framework, DnnCaffeModel\r\n\r\n\r\nclass NormalizePreproc:\r\n def __init__(self):\r\n pass\r\n\r\n @staticmethod\r\n def process(img):\r\n image_data = np.array(img).transpose(2, 0, 1).astype(np.float32)\r\n image_data = np.expand_dims(image_data, 0)\r\n image_data /= 255.0\r\n return image_data\r\n\r\n\r\nclass CityscapesDataFetch(DatasetImageFetch):\r\n img_dir = ''\r\n segm_dir = ''\r\n segm_files = []\r\n colors = []\r\n i = 0\r\n\r\n def __init__(self, img_dir, segm_dir, preproc):\r\n self.img_dir = img_dir\r\n self.segm_dir = segm_dir\r\n self.segm_files = sorted([img for img in self.locate('*_color.png', segm_dir)])\r\n self.colors = self.get_colors()\r\n self.data_prepoc = preproc\r\n self.i = 0\r\n\r\n @staticmethod\r\n def get_colors():\r\n result = []\r\n colors_list = (\r\n (0, 0, 0), (128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156), (190, 153, 153), (153, 153, 153),\r\n (250, 170, 30), (220, 220, 0), (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0),\r\n (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32))\r\n\r\n for c in colors_list:\r\n result.append(DatasetImageFetch.pix_to_c(c))\r\n return result\r\n\r\n def __iter__(self):\r\n return self\r\n\r\n def next(self):\r\n if self.i < len(self.segm_files):\r\n segm_file = self.segm_files[self.i]\r\n segm = cv.imread(segm_file, cv.IMREAD_COLOR)[:, :, ::-1]\r\n segm = cv.resize(segm, (1024, 512), interpolation=cv.INTER_NEAREST)\r\n\r\n img_file = self.rreplace(self.img_dir + segm_file[len(self.segm_dir):], 'gtFine_color', 'leftImg8bit')\r\n assert os.path.exists(img_file)\r\n img = cv.imread(img_file, cv.IMREAD_COLOR)[:, :, ::-1]\r\n img = cv.resize(img, (1024, 512))\r\n\r\n self.i += 1\r\n gt = self.color_to_gt(segm, self.colors)\r\n img = self.data_prepoc.process(img)\r\n return img, gt\r\n else:\r\n self.i = 0\r\n raise StopIteration\r\n\r\n def get_num_classes(self):\r\n return len(self.colors)\r\n\r\n @staticmethod\r\n def locate(pattern, root_path):\r\n for path, dirs, files in os.walk(os.path.abspath(root_path)):\r\n for filename in fnmatch.filter(files, pattern):\r\n yield os.path.join(path, filename)\r\n\r\n @staticmethod\r\n def rreplace(s, old, new, occurrence=1):\r\n li = s.rsplit(old, occurrence)\r\n return new.join(li)\r\n\r\n\r\nclass TorchModel(Framework):\r\n net = object\r\n\r\n def __init__(self, model_file):\r\n self.net = load_lua(model_file)\r\n\r\n def get_name(self):\r\n return 'Torch'\r\n\r\n def get_output(self, input_blob):\r\n tensor = torch.FloatTensor(input_blob)\r\n out = self.net.forward(tensor).numpy()\r\n return out\r\n\r\n\r\nclass DnnTorchModel(DnnCaffeModel):\r\n net = cv.dnn.Net()\r\n\r\n def __init__(self, model_file):\r\n self.net = 
cv.dnn.readNetFromTorch(model_file)\r\n\r\n def get_output(self, input_blob):\r\n self.net.setBlob(\"\", input_blob)\r\n self.net.forward()\r\n return self.net.getBlob(self.net.getLayerNames()[-1])\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--imgs_dir\", help=\"path to Cityscapes validation images dir, imgsfine/leftImg8bit/val\")\r\n parser.add_argument(\"--segm_dir\", help=\"path to Cityscapes dir with segmentation, gtfine/gtFine/val\")\r\n parser.add_argument(\"--model\", help=\"path to torch model, download it here: \"\r\n \"https://www.dropbox.com/sh/dywzk3gyb12hpe5/AAD5YkUa8XgMpHs2gCRgmCVCa\")\r\n parser.add_argument(\"--log\", help=\"path to logging file\")\r\n args = parser.parse_args()\r\n\r\n prep = NormalizePreproc()\r\n df = CityscapesDataFetch(args.imgs_dir, args.segm_dir, prep)\r\n\r\n fw = [TorchModel(args.model),\r\n DnnTorchModel(args.model)]\r\n\r\n segm_eval = SemSegmEvaluation(args.log)\r\n segm_eval.process(fw, df)\r\n",
"\r\nimport cv2 as cv\r\nimport numpy as np\r\nimport argparse\r\n\r\nW = 52 # window size is WxW\r\nC_Thr = 0.43 # threshold for coherency\r\nLowThr = 35 # threshold1 for orientation, it ranges from 0 to 180\r\nHighThr = 57 # threshold2 for orientation, it ranges from 0 to 180\r\n\r\n## [calcGST]\r\n## [calcJ_header]\r\n## [calcGST_proto]\r\ndef calcGST(inputIMG, w):\r\n## [calcGST_proto]\r\n img = inputIMG.astype(np.float32)\r\n\r\n # GST components calculation (start)\r\n # J = (J11 J12; J12 J22) - GST\r\n imgDiffX = cv.Sobel(img, cv.CV_32F, 1, 0, 3)\r\n imgDiffY = cv.Sobel(img, cv.CV_32F, 0, 1, 3)\r\n imgDiffXY = cv.multiply(imgDiffX, imgDiffY)\r\n ## [calcJ_header]\r\n\r\n imgDiffXX = cv.multiply(imgDiffX, imgDiffX)\r\n imgDiffYY = cv.multiply(imgDiffY, imgDiffY)\r\n\r\n J11 = cv.boxFilter(imgDiffXX, cv.CV_32F, (w,w))\r\n J22 = cv.boxFilter(imgDiffYY, cv.CV_32F, (w,w))\r\n J12 = cv.boxFilter(imgDiffXY, cv.CV_32F, (w,w))\r\n # GST components calculations (stop)\r\n\r\n # eigenvalue calculation (start)\r\n # lambda1 = 0.5*(J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2))\r\n # lambda2 = 0.5*(J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2))\r\n tmp1 = J11 + J22\r\n tmp2 = J11 - J22\r\n tmp2 = cv.multiply(tmp2, tmp2)\r\n tmp3 = cv.multiply(J12, J12)\r\n tmp4 = np.sqrt(tmp2 + 4.0 * tmp3)\r\n\r\n lambda1 = 0.5*(tmp1 + tmp4) # biggest eigenvalue\r\n lambda2 = 0.5*(tmp1 - tmp4) # smallest eigenvalue\r\n # eigenvalue calculation (stop)\r\n\r\n # Coherency calculation (start)\r\n # Coherency = (lambda1 - lambda2)/(lambda1 + lambda2)) - measure of anisotropism\r\n # Coherency is anisotropy degree (consistency of local orientation)\r\n imgCoherencyOut = cv.divide(lambda1 - lambda2, lambda1 + lambda2)\r\n # Coherency calculation (stop)\r\n\r\n # orientation angle calculation (start)\r\n # tan(2*Alpha) = 2*J12/(J22 - J11)\r\n # Alpha = 0.5 atan2(2*J12/(J22 - J11))\r\n imgOrientationOut = cv.phase(J22 - J11, 2.0 * J12, angleInDegrees = True)\r\n imgOrientationOut = 0.5 * imgOrientationOut\r\n # orientation angle calculation (stop)\r\n\r\n return imgCoherencyOut, imgOrientationOut\r\n## [calcGST]\r\n\r\nparser = argparse.ArgumentParser(description='Code for Anisotropic image segmentation tutorial.')\r\nparser.add_argument('-i', '--input', help='Path to input image.', required=True)\r\nargs = parser.parse_args()\r\n\r\nimgIn = cv.imread(args.input, cv.IMREAD_GRAYSCALE)\r\nif imgIn is None:\r\n print('Could not open or find the image: {}'.format(args.input))\r\n exit(0)\r\n\r\n## [main_extra]\r\n## [main]\r\nimgCoherency, imgOrientation = calcGST(imgIn, W)\r\n\r\n## [thresholding]\r\n_, imgCoherencyBin = cv.threshold(imgCoherency, C_Thr, 255, cv.THRESH_BINARY)\r\n_, imgOrientationBin = cv.threshold(imgOrientation, LowThr, HighThr, cv.THRESH_BINARY)\r\n## [thresholding]\r\n\r\n## [combining]\r\nimgBin = cv.bitwise_and(imgCoherencyBin, imgOrientationBin)\r\n## [combining]\r\n## [main]\r\n\r\nimgCoherency = cv.normalize(imgCoherency, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)\r\nimgOrientation = cv.normalize(imgOrientation, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)\r\n\r\ncv.imshow('result.jpg', np.uint8(0.5*(imgIn + imgBin)))\r\ncv.imshow('Coherency.jpg', imgCoherency)\r\ncv.imshow('Orientation.jpg', imgOrientation)\r\ncv.waitKey(0)\r\n## [main_extra]\r\n",
"from __future__ import print_function\r\nimport cv2 as cv\r\nimport numpy as np\r\nimport argparse\r\nimport random as rng\r\n\r\nsource_window = 'Image'\r\nmaxTrackbar = 100\r\nrng.seed(12345)\r\n\r\ndef goodFeaturesToTrack_Demo(val):\r\n maxCorners = max(val, 1)\r\n\r\n # Parameters for Shi-Tomasi algorithm\r\n qualityLevel = 0.01\r\n minDistance = 10\r\n blockSize = 3\r\n gradientSize = 3\r\n useHarrisDetector = False\r\n k = 0.04\r\n\r\n # Copy the source image\r\n copy = np.copy(src)\r\n\r\n # Apply corner detection\r\n corners = cv.goodFeaturesToTrack(src_gray, maxCorners, qualityLevel, minDistance, None, \\\r\n blockSize=blockSize, gradientSize=gradientSize, useHarrisDetector=useHarrisDetector, k=k)\r\n\r\n # Draw corners detected\r\n print('** Number of corners detected:', corners.shape[0])\r\n radius = 4\r\n for i in range(corners.shape[0]):\r\n cv.circle(copy, (int(corners[i,0,0]), int(corners[i,0,1])), radius, (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)), cv.FILLED)\r\n\r\n # Show what you got\r\n cv.namedWindow(source_window)\r\n cv.imshow(source_window, copy)\r\n\r\n# Load source image and convert it to gray\r\nparser = argparse.ArgumentParser(description='Code for Shi-Tomasi corner detector tutorial.')\r\nparser.add_argument('--input', help='Path to input image.', default='pic3.png')\r\nargs = parser.parse_args()\r\n\r\nsrc = cv.imread(cv.samples.findFile(args.input))\r\nif src is None:\r\n print('Could not open or find the image:', args.input)\r\n exit(0)\r\n\r\nsrc_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)\r\n\r\n# Create a window and a trackbar\r\ncv.namedWindow(source_window)\r\nmaxCorners = 23 # initial threshold\r\ncv.createTrackbar('Threshold: ', source_window, maxCorners, maxTrackbar, goodFeaturesToTrack_Demo)\r\ncv.imshow(source_window, src)\r\ngoodFeaturesToTrack_Demo(maxCorners)\r\n\r\ncv.waitKey()\r\n"
] | [
[
"torch.FloatTensor",
"numpy.array",
"numpy.expand_dims",
"torch.utils.serialization.load_lua"
],
[
"numpy.uint8",
"numpy.sqrt"
],
[
"numpy.copy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jmetz/momanalysis | [
"8d71490c99127568b184784890258e9a6ef876ef"
] | [
"mmhelper/output.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 09 09:59:13 2017\n\n@author: as624\n\"\"\"\nimport csv\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef output_detection_figures(\n image, wells, bacteria, timeindex, output_dir):\n \"\"\"\n Produces and saves figures showing the output from the detection\n\n Parameters\n ------\n image : ndarray (2D)\n The initial image that detection was run on\n wells : ndarray (2D) of dtype int\n A labelled image showing the detected wells\n bacteria : ndarray (2D) of dtype int\n A labelled image showing the detected bacteria\n timeindex : int\n The timepoint that has been analysed\n output_dir : str (path)\n Where to save the images\n \"\"\"\n # For detection figures, labels not needed (I think)?\n plt.figure(figsize=(16, 12))\n plt.imshow(image, cmap='gray')\n plt.contour(wells > 0, levels=[0.5], colors=['y'])\n #plt.contour(channel>0, levels=[0.5], colors=['r'])\n for lab_bac in range(1, bacteria.max() + 1):\n col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)\n plt.contour(bacteria == lab_bac, levels=[0.5], colors=[col])\n plt.savefig(os.path.join(\n output_dir, \"detection_frame_{:06d}\".format(timeindex)))\n plt.close()\n\n\ndef output_tracking_figures(\n data,\n fullwellimages,\n wellcoords,\n allbacteria,\n output_dir,\n bacteria_lineage):\n \"\"\"\n Produces and saves figures showing the output after tracking\n\n Parameters\n ------\n data : list of ndarrays\n List of initial image that detection was run on\n fullwellimages : list of ndarrays\n List of labelled images showing the detected wells\n wellcoords : list of arrays\n Each entry contains a further list where each entry contains well coordinates\n allbacteria : list of arrays\n List of labelled images showing the detected bacteria\n output_dir : str (path)\n Where to save the images\n bacteria_lineage : dictionary\n A dictionary that links the physical unique label of a bacteria\n to one which shows information on its lineage\n \"\"\"\n for tpoint, (image, fullwells, bacteria, coords) in enumerate(\n zip(data, fullwellimages, allbacteria, wellcoords)):\n # For detection figures, labels not needed (I think)?\n plt.figure(figsize=(16, 12))\n plt.imshow(image, cmap='gray')\n if len(np.unique(fullwells)) == 1:\n plt.savefig(os.path.join(\n output_dir, \"tracking_frame_{:06d}\".format(tpoint)))\n plt.close()\n continue\n plt.contour(fullwells > 0, levels=[0.5], colors=['y'])\n bacteriaim = np.zeros_like(fullwells)\n for welllabel in coords:\n bacteriaim[coords[welllabel]] = bacteria[welllabel]\n # Add in well labels top left(?) 
of well contour\n #bw = fullwells == welllabel\n # if not np.any(bw):\n # continue\n #pos0 = bw.nonzero()\n pos = (np.min(coords[welllabel][0]), np.max(coords[welllabel][1]))\n plt.text(pos[1], pos[0], \"%d\" % welllabel, color=\"y\")\n\n for lab_bac in range(1, bacteriaim.max() + 1):\n col = plt.cm.gist_rainbow((lab_bac / 9.1) % 1)\n bw0 = bacteriaim == lab_bac\n if not np.any(bw0):\n continue\n plt.contour(bw0, levels=[0.5], colors=[col])\n pos0 = bw0.nonzero()\n if len(pos0[0]) == 0 or len(pos0[1]) == 0:\n continue\n #lab_string = label_dict_string[lab_bac]\n pos = (np.min(pos0[0]), np.max(pos0[1]))\n plt.text(pos[1], pos[0], str(bacteria_lineage[lab_bac]), color=col)\n plt.savefig(os.path.join(\n output_dir, \"tracking_frame_{:06d}\".format(tpoint)))\n plt.close()\n\n\ndef final_output(measurements, output_dir):\n \"\"\"outputs a final csv with information on the bacteria detected\n\n Parameters\n ------\n measurements : Custom class instance\n Its attribute \"bacteria\" is a dictionary containing information on\n each individual bacteria\n output_dir : str (path)\n Where to write the csv\n \"\"\"\n output_csv_file = os.path.join(output_dir, 'Results.csv')\n with open(output_csv_file, \"w\", newline='') as file0:\n writer = csv.writer(file0)\n for numbac, (bac) in enumerate(measurements.bacteria.values()):\n if numbac == 0:\n writer.writerow(bac.headings_line)\n writer.writerow(bac.measurements_output)\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.unique",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.contour",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"matplotlib.pyplot.cm.gist_rainbow",
"numpy.any",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iurteaga/menstrual_cycle_analysis | [
"799c7cb59d759e0c3929164bccdc5c7ce80324d0",
"799c7cb59d759e0c3929164bccdc5c7ce80324d0"
] | [
"src/characterization/cycle_period_length_analysis.py",
"src/prediction/aux_functions.py"
] | [
"#!/usr/bin/python\n\n# Imports\nimport sys, os, re, time\nimport argparse\nimport pdb\nimport pickle\nfrom itertools import *\n# Science\nimport numpy as np\nimport scipy.stats as stats\nimport pandas as pd\n# Plotting\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom mpl_toolkits.mplot3d import Axes3D\n\n################################## FUNCTIONS ############################\n# Population time-series\ndef population_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, sample_style, save_dir):\n '''\n Function that plots a population level time series embedding of cycle and period lengths\n In plot:\n x axis is length_attribute for cycle 1,\n y axis is length attribute for cycle 2,\n z is for cycle 3\n Input:\n cycle_stats_df: pandas dataframe, with information about user's cycle statistics\n attribute: whether to consider 'cycle_lengths' or 'period_lengths'\n cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)\n cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)\n sample_style: whether to pick 3 consecutive 'random' or 'first' cycles per-user\n save_dir: path where to save plot\n Output:\n None\n '''\n #get users with color by attribute > cutoff, and <= cutoff\n cycle_stats_df_greater_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]\n cycle_stats_df_less_than = cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]\n cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]\n cycle_lengths_less_than = cycle_stats_df_less_than[attribute]\n \n # Filename\n if sample_style == 'first':\n filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_first_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)\n if sample_style == 'random':\n filename = '{}/population_time_series_embedding_for_{}_split_by_{}_{}_sample_3.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)\n \n # Plot\n colors = ['orange', 'c']\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n for index, cycle_lengths in enumerate([cycle_lengths_greater_than, cycle_lengths_less_than]):\n print('Start selecting cycles for one group')\n if sample_style=='first':\n sample_cycle_lengths = [cycle_length[:3] for cycle_length in cycle_lengths if len(cycle_length) >= 3]\n if sample_style=='random':\n sample_cycle_lengths = []\n for cycle_length in cycle_lengths:\n if len(cycle_length) >= 3:\n num_cycles_array = np.linspace(0, len(cycle_length)-3, len(cycle_length)-2)\n start_index = np.random.choice(num_cycles_array, size=1).astype(int)[0]\n sample_cycle_lengths.append(cycle_length[start_index:start_index+3])\n print('Finished selecting cycles for one group')\n \n print('Start plotting one group')\n for i in range(len(sample_cycle_lengths)):\n xs = sample_cycle_lengths[i][0]\n ys = sample_cycle_lengths[i][1]\n zs = sample_cycle_lengths[i][2]\n # Plot this point\n ax.scatter(xs, ys, zs, color = colors[index], s=1, alpha=0.3)\n print('Finished plotting one group')\n\n ax.set_xlabel(attribute+ '[i]')\n ax.set_ylabel(attribute+ '[i+1]')\n ax.set_zlabel(attribute+ '[i+2]')\n if attribute == 'cycle_lengths':\n #ref_line_points = np.linspace(10, 90, 10)\n #ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)\n ax.set_xlim3d(10,90)\n ax.set_ylim3d(10,90)\n ax.set_zlim3d(10,90)\n elif attribute == 'period_lengths':\n 
max_period_days=28\n #ref_line_points = np.linspace(1, max_period_days, 4)\n #ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)\n ax.set_xlim3d(1,max_period_days)\n ax.set_ylim3d(1,max_period_days)\n ax.set_zlim3d(1,max_period_days)\n ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n \n plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')\n print('Finished one view')\n # With angles\n for angle in [30, 60, 90, 180]:\n print('Start one view')\n filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'\n ax.view_init(elev=None, azim=angle)\n # Add (a)/(b) labels for paper\n ax.text2D(12, 7,'(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)\n plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')\n print('Finished one view')\n\n plt.close()\n\n# Time series embedding for a randomly chosen user\ndef random_time_series_embedding_lengths(cycle_stats_df, attribute, cutoff_criteria, cutoff, save_dir):\n '''\n Function that plots a time series embedding of cycle and period lengths for a randomly chosen user per group\n In plot:\n x axis is length_attribute for cycle i,\n y axis is length attribute for cycle i+1,\n z is for cycle i+2\n Input:\n cycle_stats_df: pandas dataframe, with information about user's cycle statistics\n attribute: whether to consider 'cycle_lengths' or 'period_lengths'\n cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)\n cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)\n save_dir: path where to save plot\n Output:\n None\n '''\n # Select users with median number of cycles tracked\n cycle_stats_df_median = cycle_stats_df[cycle_stats_df['num_cycles_tracked'] == 11]\n filename = '{}/random_time_series_embedding_for_{}_split_by_{}_{}.pdf'.format(save_dir, attribute, cutoff_criteria, cutoff)\n\n #get users with color by attribute > cutoff, and <= cutoff\n cycle_stats_df_greater_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] > cutoff]\n cycle_stats_df_less_than = cycle_stats_df_median[cycle_stats_df_median[cutoff_criteria] <= cutoff]\n cycle_lengths_greater_than = cycle_stats_df_greater_than[attribute]\n cycle_lengths_less_than = cycle_stats_df_less_than[attribute]\n \n # Randomly pick a user from each group\n cycle_lengths_greater_than_user = np.random.choice(cycle_lengths_greater_than, size=1, replace=False)\n cycle_lengths_less_than_user = np.random.choice(cycle_lengths_less_than, size=1, replace=False)\n\n # Plot\n colors = ['orange', 'c']\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n #plot each user, color by median intercycle length\n xs = list(cycle_lengths_greater_than_user[0][0:-2])\n ys = list(cycle_lengths_greater_than_user[0][1:-1])\n zs = list(cycle_lengths_greater_than_user[0][2:])\n ax.scatter(xs, ys, zs, color = 'orange')\n ax.plot(xs, ys, zs, color='orange', linestyle='dashed', alpha=0.8)\n\n xs = list(cycle_lengths_less_than_user[0][0:-2])\n ys = list(cycle_lengths_less_than_user[0][1:-1])\n zs = list(cycle_lengths_less_than_user[0][2:])\n ax.scatter(xs, ys, zs, color = 'c')\n ax.plot(xs, ys, zs, color='c', linestyle='dashed', alpha=0.8)\n \n ax.set_xlabel(attribute+ '[i]')\n ax.set_ylabel(attribute+ 
'[i+1]')\n ax.set_zlabel(attribute+ '[i+2]')\n if attribute == 'cycle_lengths':\n #ref_line_points = np.linspace(10, 90, 10)\n #ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)\n ax.set_xlim3d(10,90)\n ax.set_ylim3d(10,90)\n ax.set_zlim3d(10,90)\n elif attribute == 'period_lengths':\n max_period_days=28\n #ref_line_points = np.linspace(1, max_period_days, 4)\n #ax.plot(ref_line_points, ref_line_points, ref_line_points, color='red', linestyle='dashed', linewidth=4, markersize=4)#, alpha=0.8)\n ax.set_xlim3d(1,max_period_days)\n ax.set_ylim3d(1,max_period_days)\n ax.set_zlim3d(1,max_period_days)\n ax.set_xticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n ax.set_yticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n ax.set_zticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n \n plt.savefig(filename.format(save_dir), format='pdf', bbox_inches='tight')\n print('Finished one view')\n # With angles\n for angle in [30, 60, 90, 180]:\n print('Start one view')\n filename_angle = filename[:-4]+'_'+str(angle)+'.pdf'\n ax.view_init(elev=None, azim=angle)\n plt.savefig(filename_angle.format(save_dir), format='pdf', bbox_inches='tight')\n print('Finished one view')\n\n plt.close()\n\n# Plot period and cycle length distributions per group\ndef plot_lengths_hist_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, pdf_or_cdf, save_dir):\n '''\n Function that plots cycle and period length distributions across groups\n Input:\n cycle_stats_df: pandas dataframe, with information about user's cycle statistics\n cycle_df: pandas dataframe, with information about each user's cycle\n attribute: whether to consider 'cycle_lengths' or 'period_lengths'\n cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)\n cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)\n pdf_or_cdf: whether to plot 'pdf's or 'cdf's\n save_dir: path where to save plot\n Output:\n None\n '''\n # Identify groups per cutoff criteria\n users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])\n users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])\n cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]\n cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]\n\n colors = ['orange', 'c']\n labels=['Highly variable', 'NOT highly variable']\n\n if attribute == 'cycle_length':\n # Compute histogram\n # Bins based on integer range of values\n my_bins=np.arange(\n np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),\n np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)\n all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)\n counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)\n counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)\n\n # Separate PDF/CDF plots\n if pdf_or_cdf=='pdf':\n # PDF\n hist_type='stepfilled'\n cumulative=False\n y_label='P(Cycle length = n)'\n cohort_filename = 
'{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)\n per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)\n elif pdf_or_cdf=='cdf':\n # CDF\n hist_type='step'\n cumulative=True\n y_label='P(Cycle length $\\leq$ n)'\n cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)\n per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)\n else:\n raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))\n \n # Population\n plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)\n plt.autoscale(enable=True, tight=True)\n plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))\n plt.xlabel('Cycle length in days')\n plt.ylabel(y_label)\n plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')\n plt.close()\n \n # Per-group\n plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)\n plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)\n plt.autoscale(enable=True, tight=True)\n plt.xticks(np.arange(my_bins.min(), my_bins.max()+1, 10))\n plt.xlabel('Cycle length in days')\n plt.ylabel(y_label)\n # Add (a)/(b) labels for paper\n plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)\n plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')\n plt.close()\n \n elif attribute == 'period_length':\n # Compute histogram\n # Bins based on integer range of values\n my_bins=np.arange(\n np.min([cycles_users_greater_than_cutoff[attribute].dropna().min(), cycles_users_less_than_cutoff[attribute].dropna().min()]),\n np.max([cycles_users_greater_than_cutoff[attribute].dropna().max(), cycles_users_less_than_cutoff[attribute].dropna().max()])+1)\n all_counts, all_bins = np.histogram(cycle_df[attribute].dropna(), bins=my_bins, density=True)\n counts_greater_than_cutoff, bins_greater_than_cutoff = np.histogram(cycles_users_greater_than_cutoff[attribute].dropna(), bins=my_bins, density=True)\n counts_less_than_cutoff, bins_less_than_cutoff = np.histogram(cycles_users_less_than_cutoff[attribute].dropna(), bins=my_bins, density=True)\n \n # Separate PDF/CDF plots\n max_period_days=28\n if pdf_or_cdf=='pdf':\n # PDF\n hist_type='stepfilled'\n cumulative=False\n y_label='P(Period length = n)'\n cohort_filename = '{}/{}_pdf_cohort.pdf'.format(save_dir, attribute)\n per_group_filename = '{}/{}_pdf_per_group.pdf'.format(save_dir, attribute)\n elif pdf_or_cdf=='cdf':\n # CDF\n hist_type='step'\n cumulative=True\n y_label='P(Period length $\\leq$ n)'\n cohort_filename = '{}/{}_cdf_cohort.pdf'.format(save_dir, attribute)\n per_group_filename = '{}/{}_cdf_per_group.pdf'.format(save_dir, attribute)\n else:\n raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))\n \n # Population\n plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)\n plt.autoscale(enable=True, tight=True)\n plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n plt.xlim(1,max_period_days)\n plt.xlabel('Period length in days')\n plt.ylabel(y_label)\n plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')\n 
plt.close()\n \n # Per-group\n plt.hist(bins_greater_than_cutoff[:-1], bins_greater_than_cutoff, weights=counts_greater_than_cutoff, density=True, cumulative=cumulative, color=colors[0], alpha=0.5, label=labels[0], histtype=hist_type)\n plt.hist(bins_less_than_cutoff[:-1], bins_less_than_cutoff, weights=counts_less_than_cutoff, density=True, cumulative=cumulative, color=colors[1], alpha=0.5, label=labels[1], histtype=hist_type)\n plt.autoscale(enable=True, tight=True)\n plt.xticks(np.append([1],np.arange(4,max_period_days+1, 4)))\n plt.xlim(1,max_period_days)\n plt.xlabel('Period length in days')\n plt.ylabel(y_label)\n # Add (a)/(b) labels for paper\n plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)\n plt.savefig(per_group_filename, format='pdf', bbox_inches='tight')\n plt.close()\n \n else:\n raise ValueError('Unknown attribute {}'.format(attribute))\n\n# Bootstrapped-KS for cycle and period length\ndef bootstrapped_cycle_period_lengths_KS(cycle_stats_df, cycle_df, cutoff_criteria, cutoff, n_bootstrapping, results_dir):\n '''\n Function that computes cycle and period length Kolmogorov-Smirnov tests between group distributions, based on bootstrapping\n Input:\n cycle_stats_df: pandas dataframe, with information about user's cycle statistics\n cycle_df: pandas dataframe, with information about user's cycle\n cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)\n cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)\n n_bootstrapping: Number of bootstrapped samples to use for the analysis\n save_dir: path where to save plot\n Output:\n None\n '''\n # True separation of users into groups\n true_users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > cutoff]['user_id'])\n true_users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])\n n_users_greater_than_cutoff=true_users_greater_than_cutoff.size\n n_users_less_than_cutoff=true_users_less_than_cutoff.size\n \n ########### TRUE OBSERVERD STATISTICS ##########\n # Cycles per-group\n true_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_greater_than_cutoff)]\n true_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(true_users_less_than_cutoff)]\n # KS cycle_length\n true_KS_cycle_length, true_p_val_cycle_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['cycle_length'].dropna(), true_cycles_users_less_than_cutoff['cycle_length'].dropna())\n # KS period_length\n true_KS_period_length, true_p_val_period_length = stats.ks_2samp(true_cycles_users_greater_than_cutoff['period_length'].dropna(), true_cycles_users_less_than_cutoff['period_length'].dropna())\n \n ########### BOOTSTRAP BASED STATISTICS ##########\n # Computed suff statistics\n bootstrapped_KS_cycle_length=np.zeros(n_bootstrapping)\n bootstrapped_p_val_cycle_length=np.zeros(n_bootstrapping)\n bootstrapped_KS_period_length=np.zeros(n_bootstrapping)\n bootstrapped_p_val_period_length=np.zeros(n_bootstrapping)\n\n for n_bootstrap in np.arange(n_bootstrapping):\n #print('Sample={}/{}'.format(n_bootstrap,n_bootstrapping))\n # Bootstrapped sample indicators\n bootstrapped_users_greater_than_cutoff=np.random.choice(true_users_greater_than_cutoff,n_bootstrapping)\n bootstrapped_users_less_than_cutoff=np.random.choice(true_users_less_than_cutoff,n_bootstrapping)\n # Cycles per-group\n 
bootstrapped_cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_greater_than_cutoff)]\n bootstrapped_cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(bootstrapped_users_less_than_cutoff)]\n # KS cycle_length\n bootstrapped_KS_cycle_length[n_bootstrap], bootstrapped_p_val_cycle_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['cycle_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['cycle_length'].dropna())\n # KS period_length\n bootstrapped_KS_period_length[n_bootstrap], bootstrapped_p_val_period_length[n_bootstrap] = stats.ks_2samp(bootstrapped_cycles_users_greater_than_cutoff['period_length'].dropna(), bootstrapped_cycles_users_less_than_cutoff['period_length'].dropna())\n\n # Print bootstrap results\n print('*************************************************************************')\n print('******** Cycle-length KS={} (p={}) ***********'.format(true_KS_cycle_length, true_p_val_cycle_length))\n print('******** Cycle-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(\n bootstrapped_KS_cycle_length.mean(), bootstrapped_KS_cycle_length.std(), bootstrapped_p_val_cycle_length.mean(), bootstrapped_p_val_cycle_length.std()\n ))\n print('******** Cycle-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(\n bootstrapped_KS_cycle_length.mean(), np.percentile(bootstrapped_KS_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_KS_cycle_length, 97.5, axis=0),\n bootstrapped_p_val_cycle_length.mean(), np.percentile(bootstrapped_p_val_cycle_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_cycle_length, 97.5, axis=0)\n ))\n print('*************************************************************************')\n print('******** Period-length KS={} (p={}) ***********'.format(true_KS_period_length, true_p_val_period_length))\n print('******** Period-length Bootstrapped KS={}+/-{} (p={} (+/-{}))***********'.format(\n bootstrapped_KS_period_length.mean(), bootstrapped_KS_period_length.std(), bootstrapped_p_val_period_length.mean(), bootstrapped_p_val_period_length.std()\n ))\n print('******** Period-length Bootstrapped KS={}({},{}) p={} ({},{}))***********'.format(\n bootstrapped_KS_period_length.mean(), np.percentile(bootstrapped_KS_period_length, 2.5, axis=0), np.percentile(bootstrapped_KS_period_length, 97.5, axis=0),\n bootstrapped_p_val_period_length.mean(), np.percentile(bootstrapped_p_val_period_length, 2.5, axis=0), np.percentile(bootstrapped_p_val_period_length, 97.5, axis=0)\n ))\n print('*************************************************************************')\n\n# Average statistics over cycle-id\ndef plot_avg_lengths_by_attribute_cutoff(cycle_stats_df, cycle_df, attribute, cutoff_criteria, cutoff, save_dir):\n '''\n Function that plots cycle and period length average and standard deviation across user's timeline (i.e., by cycle-id) across groups\n Input:\n cycle_stats_df: pandas dataframe, with information about user's cycle statistics\n cycle_df: pandas dataframe, with information about each user's cycle\n attribute: whether to consider 'cycle_lengths' or 'period_lengths'\n cutoff_criteria: what statistic to use for separating users into groups ('cycle_lengths' for paper)\n cutoff: what statistic cutoff value to use for separating users into groups (9 for paper)\n save_dir: path where to save plot\n Output:\n None\n '''\n # Identify groups per cutoff criteria\n users_greater_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] > 
cutoff]['user_id'])\n users_less_than_cutoff = np.unique(cycle_stats_df[cycle_stats_df[cutoff_criteria] <= cutoff]['user_id'])\n cycles_users_greater_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_greater_than_cutoff)]\n cycles_users_less_than_cutoff = cycle_df[cycle_df['user_id'].isin(users_less_than_cutoff)]\n \n # Plotting\n colors = ['slateblue', 'c', 'orange']\n max_cycle_id=20\n \n if attribute == 'cycle_length':\n fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))\n \n for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):\n means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]\n std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]\n # Plot\n axes[index].plot(np.unique(dataset['cycle_id'])[:20], means, color = colors[index])\n axes[index].autoscale(enable=True, tight=True, axis='x')\n axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])\n axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))\n axes[index].set_xlabel('Cycle ID')\n axes[index].set_ylabel('Cycle length')\n axes[index].set_ylim(20,55)\n \n # Add (a)/(b) labels for paper\n plt.text(12, 7, '(a)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)\n # Save and close\n plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')\n plt.close()\n \n elif attribute == 'period_length':\n fig, axes = plt.subplots(3, 1, sharex='all', sharey='all', figsize = (15,15))\n \n for index, dataset in enumerate([cycle_df, cycles_users_less_than_cutoff, cycles_users_greater_than_cutoff]):\n means = dataset.groupby(['cycle_id'])[attribute].mean()[:max_cycle_id]\n std = dataset.groupby(['cycle_id'])[attribute].std()[:max_cycle_id]\n # Plot\n axes[index].plot(np.unique(dataset['cycle_id'])[:20], means, color = colors[index])\n axes[index].autoscale(enable=True, tight=True, axis='x')\n axes[index].fill_between(np.unique(dataset['cycle_id'])[:max_cycle_id], means - std, means + std, alpha=0.4, color=colors[index])\n axes[index].set_xticks(np.append([1],np.arange(2,max_cycle_id+1,2)))\n axes[index].set_xlabel('Cycle ID')\n axes[index].set_ylabel('Period length')\n axes[index].set_ylim(1,9)\n \n # Add (a)/(b) labels for paper\n plt.text(12, 7, '(b)', fontsize=14, fontweight='bold', horizontalalignment='center', verticalalignment='center', transform=None)\n # Save and close\n plt.savefig('{}/avg_{}_per_cycle_id.pdf'.format(save_dir,attribute), format='pdf', bbox_inches='tight')\n plt.close()\n\n else:\n raise ValueError('Unknown attribute {}'.format(attribute))\n\n# Plot for max intercycle length (i.e., CLD) histogram \ndef plot_max_intercycle_length_hists(cycle_stats, cycle_stats_exclude_flagged, save_dir):\n '''\n Function that plots max inter cycle length (max CLD) histograms with and without excluded cycles\n Input:\n cycle_stats: pandas dataframe, with information about user's cycle statistics\n cycle_stats_exclude_flagged: pandas dataframe for users after removing excluded flags, with information about user's cycle statistics \n save_dir: path where to save plot\n Output:\n None\n '''\n my_bins=np.arange(min(cycle_stats['max_inter_cycle_length']), max(cycle_stats['max_inter_cycle_length']) + 1)\n plt.hist(cycle_stats['max_inter_cycle_length'], bins=my_bins, label='With behaviorally-tainted cycles', color='blue', histtype='step')\n 
plt.hist(cycle_stats_exclude_flagged['max_inter_cycle_length'], bins=my_bins, label='Excluding behaviorally-tainted cycles', color='red', histtype='step')\n    plt.autoscale(enable=True, tight=True, axis='x')\n    plt.ylim(0,38000)\n    plt.xlabel('Maximum CLD in days')\n    plt.ylabel('User count with maximum CLD')\n    plt.legend()\n    plt.savefig('{}/hist_max_inter_cycle_length_with_and_without_excluded_flags.pdf'.format(save_dir), format='pdf', bbox_inches='tight')\n    plt.close()\n\n# Plot for median Vs max intercycle length (i.e., CLD) histogram \ndef plot_median_vs_max_intercycle_length(cycle_stats, save_dir):\n    '''\n    Function that plots median Vs max inter cycle length (CLD) 2D scatter histogram\n    Input:\n        cycle_stats: pandas dataframe, with information about user's cycle statistics\n        save_dir: path where to save plot\n    Output:\n        None\n    '''\n    plt.hist2d(cycle_stats['median_inter_cycle_length'], cycle_stats['max_inter_cycle_length'], bins=(75, 75), cmap='jet', norm=colors.LogNorm())\n    plt.autoscale(enable=True, tight=True)\n    range_vals_median = np.linspace(min(cycle_stats['median_inter_cycle_length']), max(cycle_stats['median_inter_cycle_length']), 100)\n    plt.plot(range_vals_median, range_vals_median+10, label='Median CLD + 10', color='red')\n    plt.xlabel('Median CLD')\n    plt.ylabel('Maximum CLD')\n    plt.xlim((0,75))\n    plt.ylim((0, 75))\n    plt.colorbar()\n    plt.legend()\n    plt.savefig('{}/median_vs_max_scatter_2d_hist.pdf'.format(save_dir), format='pdf', bbox_inches='tight')\n    plt.close()\n\n# Plot for median intercycle length (i.e., CLD) histogram \ndef plot_median_CLD_hist(cycle_stats, pdf_or_cdf, save_dir):\n    '''\n    Function that plots median CLD histograms \n    Input:\n        cycle_stats: pandas dataframe, with information about user's cycle statistics\n        pdf_or_cdf: whether to plot 'pdf's or 'cdf's\n        save_dir: path where to save plot\n    Output:\n        None\n    '''\n    \n    # Median CLD histogram\n    my_bins=np.arange(cycle_stats['median_inter_cycle_length'].dropna().min(),cycle_stats['median_inter_cycle_length'].dropna().max()+1)\n    all_counts, all_bins = np.histogram(cycle_stats['median_inter_cycle_length'].dropna(), bins=my_bins, density=True) \n    \n    # Separate PDF/CDF plots\n    if pdf_or_cdf=='pdf':\n        # PDF\n        hist_type='stepfilled'\n        cumulative=False\n        y_label='P(Median CLD = n)'\n        cohort_filename = '{}/median_CLD_pdf_cohort.pdf'.format(save_dir)\n    elif pdf_or_cdf=='cdf':\n        # CDF\n        hist_type='step'\n        cumulative=True\n        y_label='P(Median CLD $\leq$ n)'\n        cohort_filename = '{}/median_CLD_cdf_cohort.pdf'.format(save_dir)\n    else:\n        raise ValueError('Can only plot pdf or cdf, not {}'.format(pdf_or_cdf))\n    \n    # Actual plot: use the per-mode label and filename selected above\n    plt.hist(all_bins[:-1], all_bins, weights=all_counts, density=True, cumulative=cumulative, color='slateblue', alpha=0.5, histtype=hist_type)\n    plt.autoscale(enable=True, tight=True)\n    plt.xlabel('Median CLD in days')\n    plt.ylabel(y_label)\n    plt.grid(True)\n    plt.savefig(cohort_filename, format='pdf', bbox_inches='tight')\n    plt.close()\n\n################################## MAIN ############################\ndef main():\n    '''\n    Main function of the script that runs the cycle and period length related analysis\n\n    Input:\n        None\n    Output:\n        None\n    '''\n    \n    ### Directories\n    data_dir='../data'\n    preprocessed_data_dir='../preprocessed_data'\n    results_dir = '../results/characterizing_cycle_and_symptoms/cycle_period_length_analysis'\n    os.makedirs(results_dir, exist_ok = True)\n    \n    ################# SYMPTOMS TRACKED #################\n    # Tracking\n    with open('{}/tracking_enriched.pickle'.format(data_dir), 
'rb') as f:\n        tracking = pickle.load(f)\n\n    print('Tracking-data loaded')\n\n    ################# CYCLES #################\n    with open('{}/cohort_cycle_stats.pickle'.format(preprocessed_data_dir), 'rb') as f:\n        cohort_cycle_stats = pickle.load(f)\n\n    # Cycles flagged\n    with open('{}/cohort_cycles_flagged.pickle'.format(preprocessed_data_dir), 'rb') as f:\n        cohort_cycles_flagged = pickle.load(f)\n\n    # Exclude cycles flagged as badly tracked \n    cohort_cycles = cohort_cycles_flagged[cohort_cycles_flagged['badly_tracked_cycle'] == 'f']\n    \n    # Cycles stats\n    with open('{}/cohort_clean_cycle_stats.pickle'.format(preprocessed_data_dir), 'rb') as f:\n        cohort_clean_cycle_stats = pickle.load(f)\n\n    print('Cycles-data loaded')\n    \n    ################# PLOTTING #################\n    #### PLOT histogram of max intercycle length, with and without excluding flagged cycles\n    plot_max_intercycle_length_hists(cohort_cycle_stats, cohort_clean_cycle_stats, results_dir)\n    #### PLOT Median Vs Max CLD 2D histogram \n    plot_median_vs_max_intercycle_length(cohort_clean_cycle_stats, results_dir)\n    #### PLOT Median CLD histogram \n    plot_median_CLD_hist(cohort_clean_cycle_stats, 'cdf', results_dir)\n    \n    #### PLOT cycle and period length histograms: pdf\n    plot_lengths_hist_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'cycle_length', 'median_inter_cycle_length', 9, 'pdf', results_dir)\n    plot_lengths_hist_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'period_length', 'median_inter_cycle_length', 9, 'pdf', results_dir)\n    \n    #### Bootstrapped-KS cycle and period length\n    bootstrapped_cycle_period_lengths_KS(cohort_clean_cycle_stats, cohort_cycles, 'median_inter_cycle_length', 9, 100000, results_dir)\n    \n    #### PLOT average cycle and period length over cycle-id\n    plot_avg_lengths_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'cycle_length', 'median_inter_cycle_length', 9, results_dir)\n    plot_avg_lengths_by_attribute_cutoff(cohort_clean_cycle_stats, cohort_cycles, 'period_length', 'median_inter_cycle_length', 9, results_dir)\n\n    #### PLOT random cycle length time-series\n    random_time_series_embedding_lengths(cohort_clean_cycle_stats, 'cycle_lengths', 'median_inter_cycle_length', 9, results_dir)\n    \n    #### PLOT population level cycle and period length time-series\n    population_time_series_embedding_lengths(cohort_clean_cycle_stats, 'cycle_lengths', 'median_inter_cycle_length', 9, 'random', results_dir)\n    population_time_series_embedding_lengths(cohort_clean_cycle_stats, 'period_lengths', 'median_inter_cycle_length', 9, 'random', results_dir)\n    \n# Making sure the main program is not executed when the module is imported\nif __name__ == '__main__':\n    # Just run the main\n    main()\n",
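A minimal, self-contained sketch of the user-level bootstrap around the two-sample KS statistic used in bootstrapped_cycle_period_lengths_KS above. The toy dataframe, its column names, and the highly_variable flag are illustrative assumptions, not objects from the original pipeline:

import numpy as np
import pandas as pd
from scipy import stats

rng = np.random.default_rng(0)
users = np.arange(200)
# Toy per-cycle data: 5 cycles per user (assumed layout, for illustration only)
df = pd.DataFrame({
    'user_id': np.repeat(users, 5),
    'cycle_length': rng.normal(29, 4, users.size * 5),
})
# Hypothetical per-user group flag, standing in for the cutoff-based split
highly_variable = pd.Series(rng.integers(0, 2, users.size).astype(bool), index=users)

n_bootstrap = 500
ks = np.zeros(n_bootstrap)
for b in range(n_bootstrap):
    # Resample users (not cycles) with replacement, as the original code does
    resampled = rng.choice(users, size=users.size, replace=True)
    in_group = highly_variable[resampled].values
    cycles_a = df[df['user_id'].isin(resampled[in_group])]['cycle_length']
    cycles_b = df[df['user_id'].isin(resampled[~in_group])]['cycle_length']
    ks[b], _ = stats.ks_2samp(cycles_a.dropna(), cycles_b.dropna())

# Report the mean and a 95% percentile interval, as in the prints above
print(ks.mean(), np.percentile(ks, 2.5), np.percentile(ks, 97.5))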
"#!/usr/bin/python\n\nimport numpy as np\nimport torch\nimport pdb\nimport sys, os, re, time\nimport pickle\n \n# Compute variance of MC samples\ndef mc_variance(samples, mean, weights, max_memory_in_bytes=None):\n '''\n Input:\n samples: I by M matrix of MC samples\n mean: I by 1 matrix of samples' empirical mean\n weights: I by M matrix of MC weights\n max_memory_in_bytes: maximum memory size to use in bytes\n Output:\n mc_variance: I by I by M matrix of MC sample variance\n '''\n # Figure out key dimensionalities\n # I can be equal to 1\n I=np.maximum(samples.shape[0], weights.shape[0])\n # M has to be given and equal in both inputs\n assert samples.shape[1] == weights.shape[1]\n M=samples.shape[1]\n \n # For Numpy objects\n if not torch.is_tensor(weights):\n # Memory requirement of mc-variance computation in bytes\n tmp_bytes_needed=np.prod([I, I, M]) * weights.itemsize\n \n # If no memory constraints are given or apply\n if max_memory_in_bytes is None or tmp_bytes_needed<max_memory_in_bytes:\n # Vectorized operation via einsum\n tmp=(samples-mean)\n mc_variance = np.sum(np.einsum('im,jm->ijm', tmp, tmp) * weights, axis=2, keepdims=True)\n \n # If computation can not be done once \n else:\n print('MC variance computation requires {} bytes, while maximum {} bytes are given'.format(tmp_bytes_needed,max_memory_in_bytes))\n # Pre-allocate mc_variance\n mc_variance=np.zeros((I,I,1))\n # Number of \"splits\" needed\n n_splits=np.ceil(tmp_bytes_needed/max_memory_in_bytes).astype(np.uint)\n # Resulting split sizes\n split_size=M/n_splits\n if split_size>=1:\n # Split arguments, across sample axis\n samples_splitted=np.array_split(samples, n_splits, 1)\n weights_splitted=np.array_split(weights, n_splits, 1)\n # Iterate over splits, sum across splits\n for n_split in np.arange(n_splits, dtype=np.uint):\n #print('Split {}/{} with {}/{}'.format(n_split,n_splits, samples_splitted[n_split].size, samples.size))\n tmp=(samples_splitted[n_split]-mean)\n mc_variance += np.sum(np.einsum('im,jm->ijm', tmp, tmp) * weights_splitted[n_split], axis=2, keepdims=True)\n else:\n raise ValueError('We can not split {} samples in {} splits needed for {} maximum bytes per split'.format(M, n_splits, max_memory_in_bytes))\n \n # For torch objects\n else:\n # Memory requirement of mc-variance computation in bytes\n tmp_bytes_needed=np.prod([I, I, M]) * weights.element_size()\n \n # If no memory constraints are given or apply\n if max_memory_in_bytes is None or tmp_bytes_needed<max_memory_in_bytes:\n # Vectorized operation via einsum\n tmp=(samples-mean)\n mc_variance = torch.sum(torch.einsum('im,jm->ijm', tmp, tmp) * weights, dim=2, keepdim=True)\n \n # If computation can not be done once \n else:\n print('MC variance computation requires {} bytes, while maximum {} bytes are given'.format(tmp_bytes_needed,max_memory_in_bytes))\n # Pre-allocate mc_variance\n mc_variance=torch.zeros((I,I,1), dtype=torch.double)\n # Number of \"splits\" needed\n n_splits=np.ceil(tmp_bytes_needed/max_memory_in_bytes).astype(np.uint)\n # Resulting split sizes\n split_size=int(M/n_splits)\n if split_size>0:\n # Split arguments, across sample axis\n samples_splitted=torch.split(samples, split_size, 1)\n weights_splitted=torch.split(weights, split_size, 1)\n # Iterate over splits (as determined by torch.split), sum across splits\n for n_split in torch.arange(len(samples_splitted), dtype=torch.int):\n #print('Split {}/{} with {}/{} elements'.format(n_split,n_splits,samples_splitted[n_split].nelement(), samples.nelement()))\n 
tmp=(samples_splitted[n_split]-mean)\n mc_variance += torch.sum(\n torch.einsum('im,jm->ijm', tmp, tmp) * weights_splitted[n_split],\n dim=2,\n keepdim=True,\n dtype=torch.double)\n else:\n raise ValueError('We can not split {} samples in {} splits needed for {} maximum bytes per split'.format(M, n_splits, max_memory_in_bytes))\n \n return mc_variance\n\n"
] | [
[
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.plot",
"numpy.unique",
"numpy.arange",
"matplotlib.pyplot.close",
"matplotlib.pyplot.text",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.random.choice",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.colors.LogNorm",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.percentile",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel"
],
[
"numpy.maximum",
"numpy.einsum",
"torch.zeros",
"numpy.arange",
"torch.einsum",
"torch.is_tensor",
"numpy.ceil",
"numpy.prod",
"torch.split",
"numpy.array_split",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
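The splitting strategy in mc_variance above is exact, not approximate: the weighted outer products are summed over the sample axis, so summing over column chunks must reproduce the one-shot einsum. A small NumPy self-check of that invariant (sizes and the number of chunks are arbitrary choices for the demo):

import numpy as np

rng = np.random.default_rng(0)
I, M = 4, 1000                       # arbitrary sizes for the check
samples = rng.normal(size=(I, M))
weights = np.full((I, M), 1.0 / M)   # uniform MC weights
mean = samples.mean(axis=1, keepdims=True)

# One-shot computation, as in the unconstrained branch
tmp = samples - mean
full = np.sum(np.einsum('im,jm->ijm', tmp, tmp) * weights, axis=2, keepdims=True)

# Chunked computation over column splits, as in the memory-capped branch
chunked = np.zeros((I, I, 1))
for s, w in zip(np.array_split(samples, 10, axis=1),
                np.array_split(weights, 10, axis=1)):
    t = s - mean
    chunked += np.sum(np.einsum('im,jm->ijm', t, t) * w, axis=2, keepdims=True)

assert np.allclose(full, chunked)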
dylanljones/cmpy | [
"21adcf4dd9f873ae29d47aeaef4fbcd914bfce2c",
"21adcf4dd9f873ae29d47aeaef4fbcd914bfce2c"
] | [
"cmpy/disorder.py",
"scripts/gf_tevo.py"
] | [
"# coding: utf-8\n#\n# This code is part of cmpy.\n#\n# Copyright (c) 2022, Dylan Jones\n\n\"\"\"This module contains methods for modeling disorder.\"\"\"\n\nimport numpy as np\nfrom typing import Union, Sequence\n\n\ndef create_subst_array(\n size: int, values: Sequence[float], conc: Union[float, Sequence[float]]\n) -> np.ndarray:\n \"\"\"Creates an (ordered) array of values.\n\n Parameters\n ----------\n size : int\n The size of the output array.\n values : Sequence of float\n The values for filling the array. The size must match the size of the\n concentrations. If one concentration is given the value-array must be of size 2.\n conc : float or Sequence of float\n The concentrations of the values. If a single concentration is given\n it is interpreted as the concentration of the first of two values.\n\n Returns\n -------\n array : np.ndarray\n The (ordered) array filled with the given values.\n \"\"\"\n # Get sizes of sub-arrays\n if isinstance(conc, float):\n conc = [conc, 1 - conc]\n if sum(conc) != 1:\n raise ValueError(\"Fractions have to add up to 1!\")\n sizes = (size * np.array(conc)).astype(np.int64)\n sizes[-1] += size - sum(sizes)\n\n # create sub-arrays\n arrays = [np.full(size, val) for size, val in zip(sizes, values)]\n return np.concatenate(arrays)\n\n\ndef random_permutations(\n arr: Sequence[float], size: int, replace: bool = False, seed: int = None\n):\n \"\"\"Creates (optionally unique) permutations of a given array.\n\n Parameters\n ----------\n arr : (N) np.ndarray\n The input array to permute.\n size : int\n The number of permutations to generate.\n replace : bool, optional\n If `True`, only unique permutations are returned. The default is `True`.\n seed : int, optional\n A optional seed to initialize the random number generator.\n\n Yields\n ------\n perm : (N) np.ndarray\n The permuted array.\n\n Examples\n --------\n >>> a = [0, 0, 1, 1, 1]\n >>> perm = random_permutations(a, size=2, seed=0)\n >>> next(perm)\n array([1, 1, 1, 0, 0])\n >>> next(perm)\n array([0, 1, 1, 1, 0])\n \"\"\"\n rng = np.random.default_rng(seed)\n\n p = np.array(arr)\n seen = set()\n count = 0\n while True:\n if count >= size:\n break\n rng.shuffle(p)\n if not replace:\n phash = hash(p.data.tobytes())\n if phash not in seen:\n seen.add(phash)\n yield p\n count += 1\n else:\n yield p\n count += 1\n\n\ndef disorder_generator(\n size: int,\n values: Sequence[float],\n conc: Union[float, Sequence[float]],\n samples: int,\n replace: bool = False,\n seed=None,\n):\n \"\"\"Generates (optionally unique) random samples from a given 1-D array.\n\n See Also\n --------\n random_permutations\n\n Parameters\n ----------\n size : int\n The size of the output array.\n values : Sequence of float\n The values for filling the array. The size must match the size of the\n concentrations. If one concentration is given the value-array must be of size 2.\n conc : float or Sequence of float\n The concentrations of the values. If a single concentration is given\n it is interpreted as the concentration of the first of two values.\n samples : int\n The number of random arrays to generate.\n replace : bool, optional\n If `True`, only unique permutations are returned. 
The default is `False`.\n    seed : int, optional\n        An optional seed to initialize the random number generator.\n\n    Yields\n    ------\n    perm : (N) np.ndarray\n        The randomly sampled arrays.\n\n    Examples\n    --------\n    >>> eps = disorder_generator(5, values=[0, +1], conc=[0.4, 0.6], samples=2, seed=0)\n    >>> next(eps)\n    array([1, 1, 1, 0, 0])\n    >>> next(eps)\n    array([0, 1, 1, 1, 0])\n    \"\"\"\n    ordered = create_subst_array(size, values, conc)\n    return random_permutations(ordered, samples, replace, seed)\n",
"# coding: utf-8\n#\n# This code is part of cmpy.\n#\n# Copyright (c) 2022, Dylan Jones\n\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom cmpy.models import SingleImpurityAndersonModel\nfrom cmpy import exactdiag as ed\n\nlogger = logging.getLogger(\"cmpy\")\nlogger.setLevel(logging.DEBUG)\n\n\ndef main():\n num_bath = 3\n u = 4\n eps_imp = 0\n eps_bath = 0 * np.random.uniform(size=num_bath)\n siam = SingleImpurityAndersonModel(u, eps_imp, eps_bath, v=1.0, mu=None)\n # hamop = siam.hamilton_operator()\n # hamop.show()\n\n start, stop, dt = 0, 150, 0.1\n num = int(stop / dt) + 1\n gs = ed.compute_groundstate(siam)\n times, gf_greater = ed.greens_greater(siam, gs, start, stop, num)\n times, gf_lesser = ed.greens_lesser(siam, gs, start, stop, num)\n gf_t = gf_greater - gf_lesser\n\n fig, ax = plt.subplots()\n ax.plot(times, -gf_greater.imag, lw=0.5, label=\"GF$^>$\")\n ax.plot(times, -gf_lesser.imag, lw=0.5, label=\"GF$^<$\")\n ax.plot(times, -gf_t.imag, label=\"GF\", color=\"k\", lw=1.0)\n ax.set_xlim(times[0], times[-1])\n ax.set_ylim(-1.1, +1.1)\n ax.legend()\n ax.grid()\n\n w = np.linspace(-10, +10, 5000)\n z, gf_w = ed.fourier_t2z(times, gf_t, w, delta=1e-4)\n\n fig, ax = plt.subplots()\n ax.plot(z.real, -gf_w.imag)\n ax.set_xlim(min(z.real), max(z.real))\n ax.grid()\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"numpy.random.default_rng",
"numpy.full"
],
[
"numpy.random.uniform",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
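The core of create_subst_array above is the rounding-safe conversion of concentrations into sub-array sizes; a condensed sketch of that idea follows (the values, size, and concentrations are arbitrary examples, not taken from the repo):

import numpy as np

size, values, conc = 10, [0.0, 1.0], [0.3, 0.7]

# Integer sub-array sizes derived from the concentrations; any rounding
# remainder is absorbed by the last value, so the total size is preserved.
sizes = (size * np.array(conc)).astype(np.int64)
sizes[-1] += size - sizes.sum()

arr = np.concatenate([np.full(n, v) for n, v in zip(sizes, values)])
print(arr)  # [0. 0. 0. 1. 1. 1. 1. 1. 1. 1.]

# Shuffling the ordered array yields one disorder realization
rng = np.random.default_rng(0)
rng.shuffle(arr)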
awesome-archive/minigo | [
"188fb197fdafbe9664a32142373b1cbd1459bc67",
"188fb197fdafbe9664a32142373b1cbd1459bc67"
] | [
"tests/test_coords.py",
"tests/test_go.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport numpy\n\nimport coords\nimport go\nfrom tests import test_utils\n\nclass TestCoords(test_utils.MiniGoUnitTest):\n def test_upperleft(self):\n self.assertEqual(coords.parse_sgf_coords('aa'), (0, 0))\n self.assertEqual(coords.unflatten_coords(0), (0, 0))\n self.assertEqual(coords.parse_kgs_coords('A9'), (0, 0))\n self.assertEqual(coords.parse_pygtp_coords((1,9)), (0, 0))\n\n self.assertEqual(coords.unparse_sgf_coords((0, 0)), 'aa')\n self.assertEqual(coords.flatten_coords((0, 0)), 0)\n self.assertEqual(coords.to_human_coord((0, 0)), 'A9')\n self.assertEqual(coords.unparse_pygtp_coords((0, 0)), (1, 9))\n\n def test_topleft(self):\n self.assertEqual(coords.parse_sgf_coords('ia'), (0, 8))\n self.assertEqual(coords.unflatten_coords(8), (0, 8))\n self.assertEqual(coords.parse_kgs_coords('J9'), (0, 8))\n self.assertEqual(coords.parse_pygtp_coords((9,9)), (0, 8))\n\n self.assertEqual(coords.unparse_sgf_coords((0, 8)), 'ia')\n self.assertEqual(coords.flatten_coords((0, 8)), 8)\n self.assertEqual(coords.to_human_coord((0, 8)), 'J9')\n self.assertEqual(coords.unparse_pygtp_coords((0, 8)), (9, 9))\n\n def test_pass(self):\n self.assertEqual(coords.parse_sgf_coords(''), None)\n self.assertEqual(coords.unflatten_coords(81), None)\n self.assertEqual(coords.parse_kgs_coords('pass'), None)\n self.assertEqual(coords.parse_pygtp_coords((0,0)), None)\n\n self.assertEqual(coords.unparse_sgf_coords(None), '')\n self.assertEqual(coords.flatten_coords(None), 81)\n self.assertEqual(coords.to_human_coord(None), 'pass')\n self.assertEqual(coords.unparse_pygtp_coords(None), (0, 0))\n\n def test_parsing_9x9(self):\n self.assertEqual(coords.parse_sgf_coords('aa'), (0, 0))\n self.assertEqual(coords.parse_sgf_coords('ac'), (2, 0))\n self.assertEqual(coords.parse_sgf_coords('ca'), (0, 2))\n self.assertEqual(coords.parse_sgf_coords(''), None)\n self.assertEqual(coords.unparse_sgf_coords(None), '')\n self.assertEqual(\n 'aa',\n coords.unparse_sgf_coords(coords.parse_sgf_coords('aa')))\n self.assertEqual(\n 'sa',\n coords.unparse_sgf_coords(coords.parse_sgf_coords('sa')))\n self.assertEqual(\n (1, 17),\n coords.parse_sgf_coords(coords.unparse_sgf_coords((1, 17))))\n self.assertEqual(coords.parse_kgs_coords('A1'), (8, 0))\n self.assertEqual(coords.parse_kgs_coords('A9'), (0, 0))\n self.assertEqual(coords.parse_kgs_coords('C2'), (7, 2))\n self.assertEqual(coords.parse_kgs_coords('J2'), (7, 8))\n self.assertEqual(coords.parse_pygtp_coords((1, 1)), (8, 0))\n self.assertEqual(coords.parse_pygtp_coords((1, 9)), (0, 0))\n self.assertEqual(coords.parse_pygtp_coords((3, 2)), (7, 2))\n self.assertEqual(coords.unparse_pygtp_coords((8, 0)), (1, 1))\n self.assertEqual(coords.unparse_pygtp_coords((0, 0)), (1, 9))\n self.assertEqual(coords.unparse_pygtp_coords((7, 2)), (3, 2))\n\n self.assertEqual(coords.to_human_coord((0,8)), 'J9')\n self.assertEqual(coords.to_human_coord((8,0)), 'A1')\n\n def test_flatten(self):\n 
self.assertEqual(coords.flatten_coords((0, 0)), 0)\n self.assertEqual(coords.flatten_coords((0, 3)), 3)\n self.assertEqual(coords.flatten_coords((3, 0)), 27)\n self.assertEqual(coords.unflatten_coords(27), (3, 0))\n self.assertEqual(coords.unflatten_coords(10), (1, 1))\n self.assertEqual(coords.unflatten_coords(80), (8, 8))\n self.assertEqual(coords.flatten_coords(coords.unflatten_coords(10)), 10)\n self.assertEqual(coords.unflatten_coords(coords.flatten_coords((5, 4))), (5, 4))\n\n def test_unflatten_coords_ndindex_equivalence(self):\n ndindices = list(numpy.ndindex(go.N, go.N))\n flat_coords = list(range(go.N * go.N))\n self.assertEqual(list(map(coords.unflatten_coords, flat_coords)), ndindices)\n\n",
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport unittest\n\nfrom coords import parse_kgs_coords as parse_kgs_coords, parse_sgf_coords, unflatten_coords\nimport coords\nfrom go import Position, PlayerMove, LibertyTracker, WHITE, BLACK, EMPTY\nimport go\nimport sgf_wrapper\nfrom tests import test_utils\n\nEMPTY_ROW = '.' * go.N + '\\n'\nTEST_BOARD = test_utils.load_board('''\n.X.....OO\nX........\n''' + EMPTY_ROW * 7)\n\nNO_HANDICAP_SGF = \"(;CA[UTF-8]SZ[9]PB[Murakawa Daisuke]PW[Iyama Yuta]KM[6.5]HA[0]RE[W+1.5]GM[1];B[fd];W[cf];B[eg];W[dd];B[dc];W[cc];B[de];W[cd];B[ed];W[he];B[ce];W[be];B[df];W[bf];B[hd];W[ge];B[gd];W[gg];B[db];W[cb];B[cg];W[bg];B[gh];W[fh];B[hh];W[fg];B[eh];W[ei];B[di];W[fi];B[hg];W[dh];B[ch];W[ci];B[bh];W[ff];B[fe];W[hf];B[id];W[bi];B[ah];W[ef];B[dg];W[ee];B[di];W[ig];B[ai];W[ih];B[fb];W[hi];B[ag];W[ab];B[bd];W[bc];B[ae];W[ad];B[af];W[bd];B[ca];W[ba];B[da];W[ie])\"\n\ndef parse_kgs_coords_set(string):\n return frozenset(map(parse_kgs_coords, string.split()))\n\nclass TestBasicFunctions(test_utils.MiniGoUnitTest):\n def test_load_board(self):\n self.assertEqualNPArray(go.EMPTY_BOARD, np.zeros([go.N, go.N]))\n self.assertEqualNPArray(go.EMPTY_BOARD, test_utils.load_board('. \\n' * go.N ** 2))\n\n def test_neighbors(self):\n corner = parse_kgs_coords('A1')\n neighbors = [go.EMPTY_BOARD[c] for c in go.NEIGHBORS[corner]]\n self.assertEqual(len(neighbors), 2)\n\n side = parse_kgs_coords('A2')\n side_neighbors = [go.EMPTY_BOARD[c] for c in go.NEIGHBORS[side]]\n self.assertEqual(len(side_neighbors), 3)\n\n def test_is_koish(self):\n self.assertEqual(go.is_koish(TEST_BOARD, parse_kgs_coords('A9')), BLACK)\n self.assertEqual(go.is_koish(TEST_BOARD, parse_kgs_coords('B8')), None)\n self.assertEqual(go.is_koish(TEST_BOARD, parse_kgs_coords('B9')), None)\n self.assertEqual(go.is_koish(TEST_BOARD, parse_kgs_coords('E5')), None)\n\n def test_is_eyeish(self):\n board = test_utils.load_board('''\n .XX...XXX\n X.X...X.X\n XX.....X.\n ........X\n XXXX.....\n OOOX....O\n X.OXX.OO.\n .XO.X.O.O\n XXO.X.OO.\n ''')\n B_eyes = parse_kgs_coords_set('A2 A9 B8 J7 H8')\n W_eyes = parse_kgs_coords_set('H2 J1 J3')\n not_eyes = parse_kgs_coords_set('B3 E5')\n for be in B_eyes:\n self.assertEqual(go.is_eyeish(board, be), BLACK, str(be))\n for we in W_eyes:\n self.assertEqual(go.is_eyeish(board, we), WHITE, str(we))\n for ne in not_eyes:\n self.assertEqual(go.is_eyeish(board, ne), None, str(ne))\n\nclass TestLibertyTracker(test_utils.MiniGoUnitTest):\n def test_lib_tracker_init(self):\n board = test_utils.load_board('X........' 
+ EMPTY_ROW * 8)\n\n lib_tracker = LibertyTracker.from_board(board)\n self.assertEqual(len(lib_tracker.groups), 1)\n self.assertNotEqual(lib_tracker.group_index[parse_kgs_coords('A9')], go.MISSING_GROUP_ID)\n self.assertEqual(lib_tracker.liberty_cache[parse_kgs_coords('A9')], 2)\n sole_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('A9')]]\n self.assertEqual(sole_group.stones, parse_kgs_coords_set('A9'))\n self.assertEqual(sole_group.liberties, parse_kgs_coords_set('B9 A8'))\n self.assertEqual(sole_group.color, BLACK)\n\n def test_place_stone(self):\n board = test_utils.load_board('X........' + EMPTY_ROW * 8)\n lib_tracker = LibertyTracker.from_board(board)\n lib_tracker.add_stone(BLACK, parse_kgs_coords('B9'))\n self.assertEqual(len(lib_tracker.groups), 1)\n self.assertNotEqual(lib_tracker.group_index[parse_kgs_coords('A9')], go.MISSING_GROUP_ID)\n self.assertEqual(lib_tracker.liberty_cache[parse_kgs_coords('A9')], 3)\n self.assertEqual(lib_tracker.liberty_cache[parse_kgs_coords('B9')], 3)\n sole_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('A9')]]\n self.assertEqual(sole_group.stones, parse_kgs_coords_set('A9 B9'))\n self.assertEqual(sole_group.liberties, parse_kgs_coords_set('C9 A8 B8'))\n self.assertEqual(sole_group.color, BLACK)\n\n def test_place_stone_opposite_color(self):\n board = test_utils.load_board('X........' + EMPTY_ROW * 8)\n lib_tracker = LibertyTracker.from_board(board)\n lib_tracker.add_stone(WHITE, parse_kgs_coords('B9'))\n self.assertEqual(len(lib_tracker.groups), 2)\n self.assertNotEqual(lib_tracker.group_index[parse_kgs_coords('A9')], go.MISSING_GROUP_ID)\n self.assertNotEqual(lib_tracker.group_index[parse_kgs_coords('B9')], go.MISSING_GROUP_ID)\n self.assertEqual(lib_tracker.liberty_cache[parse_kgs_coords('A9')], 1)\n self.assertEqual(lib_tracker.liberty_cache[parse_kgs_coords('B9')], 2)\n black_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('A9')]]\n white_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('B9')]]\n self.assertEqual(black_group.stones, parse_kgs_coords_set('A9'))\n self.assertEqual(black_group.liberties, parse_kgs_coords_set('A8'))\n self.assertEqual(black_group.color, BLACK)\n self.assertEqual(white_group.stones, parse_kgs_coords_set('B9'))\n self.assertEqual(white_group.liberties, parse_kgs_coords_set('C9 B8'))\n self.assertEqual(white_group.color, WHITE)\n\n def test_merge_multiple_groups(self):\n board = test_utils.load_board('''\n .X.......\n X.X......\n .X.......\n ''' + EMPTY_ROW * 6)\n lib_tracker = LibertyTracker.from_board(board)\n lib_tracker.add_stone(BLACK, parse_kgs_coords('B8'))\n self.assertEqual(len(lib_tracker.groups), 1)\n self.assertNotEqual(lib_tracker.group_index[parse_kgs_coords('B8')], go.MISSING_GROUP_ID)\n sole_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('B8')]]\n self.assertEqual(sole_group.stones, parse_kgs_coords_set('B9 A8 B8 C8 B7'))\n self.assertEqual(sole_group.liberties, parse_kgs_coords_set('A9 C9 D8 A7 C7 B6'))\n self.assertEqual(sole_group.color, BLACK)\n\n liberty_cache = lib_tracker.liberty_cache\n for stone in sole_group.stones:\n self.assertEqual(liberty_cache[stone], 6, str(stone))\n\n def test_capture_stone(self):\n board = test_utils.load_board('''\n .X.......\n XO.......\n .X.......\n ''' + EMPTY_ROW * 6)\n lib_tracker = LibertyTracker.from_board(board)\n captured = lib_tracker.add_stone(BLACK, parse_kgs_coords('C8'))\n self.assertEqual(len(lib_tracker.groups), 4)\n 
self.assertEqual(lib_tracker.group_index[parse_kgs_coords('B8')], go.MISSING_GROUP_ID)\n self.assertEqual(captured, parse_kgs_coords_set('B8'))\n\n def test_capture_many(self):\n board = test_utils.load_board('''\n .XX......\n XOO......\n .XX......\n ''' + EMPTY_ROW * 6)\n lib_tracker = LibertyTracker.from_board(board)\n captured = lib_tracker.add_stone(BLACK, parse_kgs_coords('D8'))\n self.assertEqual(len(lib_tracker.groups), 4)\n self.assertEqual(lib_tracker.group_index[parse_kgs_coords('B8')], go.MISSING_GROUP_ID)\n self.assertEqual(captured, parse_kgs_coords_set('B8 C8'))\n\n left_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('A8')]]\n self.assertEqual(left_group.stones, parse_kgs_coords_set('A8'))\n self.assertEqual(left_group.liberties, parse_kgs_coords_set('A9 B8 A7'))\n\n right_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('D8')]]\n self.assertEqual(right_group.stones, parse_kgs_coords_set('D8'))\n self.assertEqual(right_group.liberties, parse_kgs_coords_set('D9 C8 E8 D7'))\n\n top_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('B9')]]\n self.assertEqual(top_group.stones, parse_kgs_coords_set('B9 C9'))\n self.assertEqual(top_group.liberties, parse_kgs_coords_set('A9 D9 B8 C8'))\n\n bottom_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('B7')]]\n self.assertEqual(bottom_group.stones, parse_kgs_coords_set('B7 C7'))\n self.assertEqual(bottom_group.liberties, parse_kgs_coords_set('B8 C8 A7 D7 B6 C6'))\n\n liberty_cache = lib_tracker.liberty_cache\n for stone in top_group.stones:\n self.assertEqual(liberty_cache[stone], 4, str(stone))\n for stone in left_group.stones:\n self.assertEqual(liberty_cache[stone], 3, str(stone))\n for stone in right_group.stones:\n self.assertEqual(liberty_cache[stone], 4, str(stone))\n for stone in bottom_group.stones:\n self.assertEqual(liberty_cache[stone], 6, str(stone))\n for stone in captured:\n self.assertEqual(liberty_cache[stone], 0, str(stone))\n\n def test_capture_multiple_groups(self):\n board = test_utils.load_board('''\n .OX......\n OXX......\n XX.......\n ''' + EMPTY_ROW * 6)\n lib_tracker = LibertyTracker.from_board(board)\n captured = lib_tracker.add_stone(BLACK, parse_kgs_coords('A9'))\n self.assertEqual(len(lib_tracker.groups), 2)\n self.assertEqual(captured, parse_kgs_coords_set('B9 A8'))\n\n corner_stone = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('A9')]]\n self.assertEqual(corner_stone.stones, parse_kgs_coords_set('A9'))\n self.assertEqual(corner_stone.liberties, parse_kgs_coords_set('B9 A8'))\n\n surrounding_stones = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('C9')]]\n self.assertEqual(surrounding_stones.stones, parse_kgs_coords_set('C9 B8 C8 A7 B7'))\n self.assertEqual(surrounding_stones.liberties, parse_kgs_coords_set('B9 D9 A8 D8 C7 A6 B6'))\n\n liberty_cache = lib_tracker.liberty_cache\n for stone in corner_stone.stones:\n self.assertEqual(liberty_cache[stone], 2, str(stone))\n for stone in surrounding_stones.stones:\n self.assertEqual(liberty_cache[stone], 7, str(stone))\n\n\n def test_same_friendly_group_neighboring_twice(self):\n board = test_utils.load_board('''\n XX.......\n X........\n ''' + EMPTY_ROW * 7)\n\n lib_tracker = LibertyTracker.from_board(board)\n captured = lib_tracker.add_stone(BLACK, parse_kgs_coords('B8'))\n self.assertEqual(len(lib_tracker.groups), 1)\n sole_group_id = lib_tracker.group_index[parse_kgs_coords('A9')]\n sole_group = lib_tracker.groups[sole_group_id]\n 
self.assertEqual(sole_group.stones, parse_kgs_coords_set('A9 B9 A8 B8'))\n self.assertEqual(sole_group.liberties, parse_kgs_coords_set('C9 C8 A7 B7'))\n self.assertEqual(captured, set())\n\n def test_same_opponent_group_neighboring_twice(self):\n board = test_utils.load_board('''\n XX.......\n X........\n ''' + EMPTY_ROW * 7)\n\n lib_tracker = LibertyTracker.from_board(board)\n captured = lib_tracker.add_stone(WHITE, parse_kgs_coords('B8'))\n self.assertEqual(len(lib_tracker.groups), 2)\n black_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('A9')]]\n self.assertEqual(black_group.stones, parse_kgs_coords_set('A9 B9 A8'))\n self.assertEqual(black_group.liberties, parse_kgs_coords_set('C9 A7'))\n\n white_group = lib_tracker.groups[lib_tracker.group_index[parse_kgs_coords('B8')]]\n self.assertEqual(white_group.stones, parse_kgs_coords_set('B8'))\n self.assertEqual(white_group.liberties, parse_kgs_coords_set('C8 B7'))\n\n self.assertEqual(captured, set())\n\nclass TestPosition(test_utils.MiniGoUnitTest):\n def test_passing(self):\n start_position = Position(\n board=TEST_BOARD,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=parse_kgs_coords('A1'),\n recent=tuple(),\n to_play=BLACK,\n )\n expected_position = Position(\n board=TEST_BOARD,\n n=1,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=(PlayerMove(BLACK, None),),\n to_play=WHITE,\n )\n pass_position = start_position.pass_move()\n self.assertEqualPositions(pass_position, expected_position)\n\n def test_flipturn(self):\n start_position = Position(\n board=TEST_BOARD,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=parse_kgs_coords('A1'),\n recent=tuple(),\n to_play=BLACK,\n )\n expected_position = Position(\n board=TEST_BOARD,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=tuple(),\n to_play=WHITE,\n )\n flip_position = start_position.flip_playerturn()\n self.assertEqualPositions(flip_position, expected_position)\n\n def test_is_move_suicidal(self):\n board = test_utils.load_board('''\n ...O.O...\n ....O....\n XO.....O.\n OXO...OXO\n O.XO.OX.O\n OXO...OOX\n XO.......\n ......XXO\n .....XOO.\n ''')\n position = Position(\n board=board,\n to_play=BLACK,\n )\n suicidal_moves = parse_kgs_coords_set('E9 H5')\n nonsuicidal_moves = parse_kgs_coords_set('B5 J1 A9')\n for move in suicidal_moves:\n assert(position.board[move] == go.EMPTY) #sanity check my coordinate input\n self.assertTrue(position.is_move_suicidal(move), str(move))\n for move in nonsuicidal_moves:\n assert(position.board[move] == go.EMPTY) #sanity check my coordinate input\n self.assertFalse(position.is_move_suicidal(move), str(move))\n\n def test_legal_moves(self):\n board = test_utils.load_board('''\n .O.O.XOX.\n O..OOOOOX\n ......O.O\n OO.....OX\n XO.....X.\n .O.......\n OX.....OO\n XX...OOOX\n .....O.X.\n ''')\n position = Position(board=board, to_play=BLACK)\n illegal_moves = parse_kgs_coords_set('A9 E9 J9')\n legal_moves = parse_kgs_coords_set('A4 G1 J1 H7') | {None}\n for move in illegal_moves:\n with self.subTest(type='illegal', move=move):\n self.assertFalse(position.is_move_legal(move))\n for move in legal_moves:\n with self.subTest(type='legal', move=move):\n self.assertTrue(position.is_move_legal(move))\n # check that the bulk legal test agrees with move-by-move illegal test.\n bulk_legality = position.all_legal_moves()\n for i, bulk_legal in enumerate(bulk_legality):\n with self.subTest(type='bulk', move=unflatten_coords(i)):\n self.assertEqual(bulk_legal, position.is_move_legal(unflatten_coords(i)))\n\n # flip the colors and check that everything is still 
(il)legal\n position = Position(board=-board, to_play=WHITE)\n for move in illegal_moves:\n with self.subTest(type='illegal', move=move):\n self.assertFalse(position.is_move_legal(move))\n for move in legal_moves:\n with self.subTest(type='legal', move=move):\n self.assertTrue(position.is_move_legal(move))\n bulk_legality = position.all_legal_moves()\n for i, bulk_legal in enumerate(bulk_legality):\n with self.subTest(type='bulk', move=unflatten_coords(i)):\n self.assertEqual(bulk_legal, position.is_move_legal(unflatten_coords(i)))\n\n def test_move(self):\n start_position = Position(\n board=TEST_BOARD,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=tuple(),\n to_play=BLACK,\n )\n expected_board = test_utils.load_board('''\n .XX....OO\n X........\n ''' + EMPTY_ROW * 7)\n expected_position = Position(\n board=expected_board,\n n=1,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=(PlayerMove(BLACK, parse_kgs_coords('C9')),),\n to_play=WHITE,\n )\n actual_position = start_position.play_move(parse_kgs_coords('C9'))\n self.assertEqualPositions(actual_position, expected_position)\n\n expected_board2 = test_utils.load_board('''\n .XX....OO\n X.......O\n ''' + EMPTY_ROW * 7)\n expected_position2 = Position(\n board=expected_board2,\n n=2,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=(PlayerMove(BLACK, parse_kgs_coords('C9')), PlayerMove(WHITE, parse_kgs_coords('J8'))),\n to_play=BLACK,\n )\n actual_position2 = actual_position.play_move(parse_kgs_coords('J8'))\n self.assertEqualPositions(actual_position2, expected_position2)\n\n def test_move_with_capture(self):\n start_board = test_utils.load_board(EMPTY_ROW * 5 + '''\n XXXX.....\n XOOX.....\n O.OX.....\n OOXX.....\n ''')\n start_position = Position(\n board=start_board,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=tuple(),\n to_play=BLACK,\n )\n expected_board = test_utils.load_board(EMPTY_ROW * 5 + '''\n XXXX.....\n X..X.....\n .X.X.....\n ..XX.....\n ''')\n expected_position = Position(\n board=expected_board,\n n=1,\n komi=6.5,\n caps=(7, 2),\n ko=None,\n recent=(PlayerMove(BLACK, parse_kgs_coords('B2')),),\n to_play=WHITE,\n )\n actual_position = start_position.play_move(parse_kgs_coords('B2'))\n self.assertEqualPositions(actual_position, expected_position)\n\n def test_ko_move(self):\n start_board = test_utils.load_board('''\n .OX......\n OX.......\n ''' + EMPTY_ROW * 7)\n start_position = Position(\n board=start_board,\n n=0,\n komi=6.5,\n caps=(1, 2),\n ko=None,\n recent=tuple(),\n to_play=BLACK,\n )\n expected_board = test_utils.load_board('''\n X.X......\n OX.......\n ''' + EMPTY_ROW * 7)\n expected_position = Position(\n board=expected_board,\n n=1,\n komi=6.5,\n caps=(2, 2),\n ko=parse_kgs_coords('B9'),\n recent=(PlayerMove(BLACK, parse_kgs_coords('A9')),),\n to_play=WHITE,\n )\n actual_position = start_position.play_move(parse_kgs_coords('A9'))\n\n self.assertEqualPositions(actual_position, expected_position)\n\n # Check that retaking ko is illegal until two intervening moves\n with self.assertRaises(go.IllegalMove):\n actual_position.play_move(parse_kgs_coords('B9'))\n pass_twice = actual_position.pass_move().pass_move()\n ko_delayed_retake = pass_twice.play_move(parse_kgs_coords('B9'))\n expected_position = Position(\n board=start_board,\n n=4,\n komi=6.5,\n caps=(2, 3),\n ko=parse_kgs_coords('A9'),\n recent=(\n PlayerMove(BLACK, parse_kgs_coords('A9')),\n PlayerMove(WHITE, None),\n PlayerMove(BLACK, None),\n PlayerMove(WHITE, parse_kgs_coords('B9'))),\n to_play=BLACK,\n )\n 
self.assertEqualPositions(ko_delayed_retake, expected_position)\n\n def test_is_game_over(self):\n root = go.Position()\n self.assertFalse(root.is_game_over())\n first_pass = root.play_move(None)\n self.assertFalse(first_pass.is_game_over())\n second_pass = first_pass.play_move(None)\n self.assertTrue(second_pass.is_game_over())\n\n def test_scoring(self):\n board = test_utils.load_board('''\n .XX......\n OOXX.....\n OOOX...X.\n OXX......\n OOXXXXXX.\n OOOXOXOXX\n .O.OOXOOX\n .O.O.OOXX\n ......OOO\n ''')\n position = Position(\n board=board,\n n=54,\n komi=6.5,\n caps=(2, 5),\n ko=None,\n recent=tuple(),\n to_play=BLACK,\n )\n expected_score = 1.5\n self.assertEqual(position.score(), expected_score)\n\n board = test_utils.load_board('''\n XXX......\n OOXX.....\n OOOX...X.\n OXX......\n OOXXXXXX.\n OOOXOXOXX\n .O.OOXOOX\n .O.O.OOXX\n ......OOO\n ''')\n position = Position(\n board=board,\n n=55,\n komi=6.5,\n caps=(2, 5),\n ko=None,\n recent=tuple(),\n to_play=WHITE,\n )\n expected_score = 2.5\n self.assertEqual(position.score(), expected_score)\n\n def test_replay_position(self):\n sgf_positions = list(sgf_wrapper.replay_sgf(NO_HANDICAP_SGF))\n initial = sgf_positions[0]\n self.assertEqual(initial.result, go.WHITE)\n\n final = sgf_positions[-1].position.play_move(sgf_positions[-1].next_move)\n\n # sanity check to ensure we're working with the right position\n final_board = test_utils.load_board('''\n .OXX.....\n O.OX.X...\n .OOX.....\n OOOOXXXXX\n XOXXOXOOO\n XOOXOO.O.\n XOXXXOOXO\n XXX.XOXXO\n X..XOO.O.\n ''')\n expected_final_position = go.Position(\n final_board,\n n=62,\n komi=6.5,\n caps=(3, 2),\n ko=None,\n recent=tuple(),\n to_play=go.BLACK\n )\n self.assertEqualPositions(expected_final_position, final)\n self.assertEqual(final.n, len(final.recent))\n\n replayed_positions = list(go.replay_position(final))\n for sgf_pos, replay_pos in zip(sgf_positions, replayed_positions):\n self.assertEqualPositions(sgf_pos.position, replay_pos.position)\n"
] | [
[
"numpy.ndindex"
],
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
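The coordinate tests above fully pin down the flattening convention: row-major over an N x N board, with index N*N reserved for 'pass'. A minimal stand-in consistent with those expectations (an illustrative sketch, not the project's actual coords module):

N = 9

def flatten(c):
    # None encodes 'pass' and maps to the extra index N*N (81 for N=9)
    return N * N if c is None else N * c[0] + c[1]

def unflatten(f):
    return None if f == N * N else divmod(f, N)

# Spot checks mirroring the assertions in test_coords
assert flatten((0, 0)) == 0 and flatten((3, 0)) == 27 and flatten(None) == 81
assert unflatten(10) == (1, 1) and unflatten(80) == (8, 8) and unflatten(81) is None
assert all(unflatten(flatten((r, c))) == (r, c) for r in range(N) for c in range(N))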
ShizhuZhang/ontask_b | [
"ca4526871f26e7153b724b1e97b922a0b52f75d6",
"acbf05ff9b18dae0a41c67d1e41774e54a890c40"
] | [
"src/plugins/test_plugin_2/__init__.py",
"src/dataops/pandas_db.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\n\nimport pandas as pd\n\n# The field class_name contains the name of the class to load to execute the\n# plugin.\nclass_name = 'OntaskTestPlugin'\n\n\nclass OntaskTestPlugin(object):\n \"\"\"\n Example of a class that implements the OnTask plugin interface. The\n objects of this class have to provide the following elements:\n\n 1. name: Plugin name show to the users.\n\n 2. description_txt: A string with the detailed description of what the\n plugin does\n\n 3. input_column_names: A potentially empty list of column names (strings).\n If the list is empty, the columns are selected by the userat execution\n time.\n\n 4. output_column_names: Non empty list of names (strings) of the columns\n to be used for the output of the transformation.\n\n 5. parameters: an optionally empty list with tuples with the following\n structure:\n\n ('name', type, [list of allowed values], initial value, help_text)\n\n These elements will be requested from the user before executing the\n plugin through a form. The conditions on these values are:\n\n - name must be a string\n - type must be a string equal to \"integer\", \"double\", \"string\", \n \"datetime\" or \"boolean\". \n - The list of values is to restrict the\n possible values\n - The initial value must be of the type specified by the second \n element.\n - Help_text a string to show as help text\n\n 6. method \"run\" that receives:\n - a pandas data frame with the data to process\n - a string with the name of the key column that will be used to merge\n the result.\n - A dictionary of pairs (name, value) with the parameters described in\n the previous element.\n\n and returns a result Pandas data frame. This frame **must** have one\n column with the key column name provided so that it can be properly\n merged with the existing data.\n \"\"\"\n\n def __init__(self):\n self.name = 'Test Plungin 2 Name'\n self.description_txt = 'Test Plugin 2 Description Text'\n self.input_column_names = ['A1', 'A2']\n self.output_column_names = ['RESULT 3', 'RESULT 4']\n self.parameters = [\n ('param string', 'string', ['v1', 'v2'], 'v1', 'help param string'),\n ('param integer', 'integer', [], None, 'help param integer'),\n ('param double', 'double', [1.2, 2.2, 3.2], None,\n 'help param double'),\n ('param boolean', 'boolean', [], True, 'help param boolean'),\n ('param datetime', 'datetime', [], '2018-05-25 18:03:00+09:30',\n 'help param datetime'),\n ('param datetime2', 'datetime', \n [],\n '2018-05-25 18:03:00+09:30',\n 'help param datetime'),\n ]\n\n def run(self, data_frame, merge_key, parameters=dict):\n \"\"\"\n Method to overwrite. 
Receives a data frame wih a number of columns\n stipulated by the num_column_input pair, the name of a key column and a\n dictionary with parameters of the form name, value.\n\n Runs the algorithm and returns a pandas data frame structure that is\n merged with the existing data frame in the workflow using the merge_key.\n\n :param data_frame: Input data for the plugin\n :param merge_key: Name of the column key that will be used for merging\n :param parameters: Dictionary with (name, value) pairs.\n\n :return: a Pandas data_frame to merge with the existing one (must\n contain a column with name merge_key)\n \"\"\"\n\n # Extract the key column from the given data frame\n result = pd.DataFrame(data_frame[merge_key])\n\n # Process the given data and create the result\n result[self.output_column_names[0]] = \\\n data_frame[self.input_column_names[0]] + \\\n \t data_frame[self.input_column_names[1]]\n result[self.output_column_names[1]] = \\\n data_frame[self.input_column_names[0]] - \\\n \t data_frame[self.input_column_names[1]]\n\n return result\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\n\nimport logging\nimport os.path\nimport subprocess\nfrom collections import OrderedDict\nfrom itertools import izip\n\nimport numpy as np\nimport pandas as pd\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.db import connection\nfrom sqlalchemy import create_engine\n\nfrom dataops.formula_evaluation import evaluate_node_sql\nfrom ontask import fix_pctg_in_name\n\nSITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\ntable_prefix = '__ONTASK_WORKFLOW_TABLE_'\ndf_table_prefix = table_prefix + '{0}'\nupload_table_prefix = table_prefix + 'UPLOAD_{0}'\n\n# Query to count the number of rows in a table\nquery_count_rows = 'SELECT count(*) from \"{0}\"'\n\nlogger = logging.getLogger(__name__)\n\n# Translation between pandas data type names, and those handled in OnTask\npandas_datatype_names = {\n 'object': 'string',\n 'int64': 'integer',\n 'float64': 'double',\n 'bool': 'boolean',\n 'datetime64[ns]': 'datetime'\n}\n\n# Translation between SQL data type names, and those handled in OnTask\nsql_datatype_names = {\n 'text': 'string',\n 'bigint': 'integer',\n 'double precision': 'double',\n 'boolean': 'boolean',\n 'timestamp without time zone': 'datetime'\n}\n\n# DB Engine to use with Pandas (required by to_sql, from_sql\nengine = None\n\n\ndef create_db_connection(dialect, driver, username, password, host, dbname):\n \"\"\"\n Function that creates the engine object to connect to the database. The\n object is required by the pandas functions to_sql and from_sql\n\n :param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)\n :param driver: DBAPI driver (psycopg2, ...)\n :param username: Username to connect with the database\n :param password: Password to connect with the database\n :param host: Host to connect with the database\n :param dbname: database name\n :return: the engine\n \"\"\"\n\n # DB engine\n database_url = \\\n '{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(\n dialect=dialect,\n driver=driver,\n user=username,\n password=password,\n host=host,\n database_name=dbname,\n )\n return create_engine(database_url, echo=False, paramstyle='format')\n\n\ndef create_db_engine(dialect, driver, username, password, host, dbname):\n \"\"\"\n Function that creates the engine object to connect to the database. 
The\n object is required by the pandas functions to_sql and from_sql\n\n :param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)\n :param driver: DBAPI driver (psycopg2, ...)\n :param username: Username to connect with the database\n :param password: Password to connect with the database\n :param host: Host to connect with the database\n :param dbname: database name\n :return: the engine\n \"\"\"\n\n # DB engine\n database_url = \\\n '{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(\n dialect=dialect,\n driver=driver,\n user=username,\n password=password,\n host=host,\n database_name=dbname,\n )\n engine = create_db_connection(dialect, driver, username, password, host,\n dbname)\n\n if settings.DEBUG:\n print('Creating engine with ', database_url)\n\n return engine\n\n\ndef destroy_db_engine(db_engine):\n \"\"\"\n Method that disposes of the given engine (to guarantee there are no\n connections available\n :param db_engine: Engine to destroy\n :return: Nothing\n \"\"\"\n db_engine.dispose()\n\n\ndef pg_restore_table(filename):\n \"\"\"\n Function that given a file produced with a pg_dump, it uploads its\n content to the existing database\n\n :param filename: File in pg_dump format to restore\n :return:\n \"\"\"\n process = subprocess.Popen(['psql',\n '-d',\n settings.DATABASES['default']['NAME'],\n '-q',\n '-f',\n filename])\n process.wait()\n\n\ndef delete_all_tables():\n \"\"\"\n Delete all tables related to existing workflows\n :return:\n \"\"\"\n\n cursor = connection.cursor()\n table_list = connection.introspection.get_table_list(cursor)\n for tinfo in table_list:\n if not tinfo.name.startswith(table_prefix):\n continue\n cursor.execute('DROP TABLE \"{0}\";'.format(tinfo.name))\n\n # To make sure the table is dropped.\n connection.commit()\n return\n\n\ndef is_table_in_db(table_name):\n cursor = connection.cursor()\n return next(\n (True for x in connection.introspection.get_table_list(cursor)\n if x.name == table_name),\n False\n )\n\n\ndef is_wf_table_in_db(workflow):\n return is_table_in_db(create_table_name(workflow.id))\n\n\ndef create_table_name(pk):\n \"\"\"\n\n :param pk: Primary Key of a workflow\n :return: The unique table name to use to store a workflow data frame\n \"\"\"\n return df_table_prefix.format(pk)\n\n\ndef create_upload_table_name(pk):\n \"\"\"\n\n :param pk: Primary key of a workflow\n :return: The unique table to use to upload a new data frame\n \"\"\"\n return upload_table_prefix.format(pk)\n\n\ndef load_from_db(pk, columns=None, filter_exp=None):\n \"\"\"\n Load the data frame stored for the workflow with the pk\n :param pk: Primary key of the workflow\n :param columns: Optional list of columns to load (all if NOne is given)\n :param filter_exp: JSON expression to filter a subset of rows\n :return: data frame\n \"\"\"\n return load_table(create_table_name(pk),\n columns=columns,\n filter_exp=filter_exp)\n\n\ndef load_table(table_name, columns=None, filter_exp=None):\n \"\"\"\n Load a data frame from the SQL DB.\n\n FUTURE WORK:\n Consider to store the dataframes in Redis to reduce load/store time.\n The trick is to use a compressed format:\n\n SET: redisConn.set(\"key\", df.to_msgpack(compress='zlib'))\n GET: pd.read_msgpack(redisConn.get(\"key\"))\n\n Need to agree on a sensible item name that does not collide with anything\n else and a policy to detect a cached dataframe and remove it when the data\n changes (difficult to detect? 
Perhaps df_new.equals(df_current))\n\n If feasible, a write-through system could be easily implemented.\n\n :param table_name: Table name to read from the db in to data frame\n :param view: Optional view object to restrict access to the DB\n :return: data_frame or None if it does not exist.\n \"\"\"\n if table_name not in connection.introspection.table_names():\n return None\n\n if settings.DEBUG:\n print('Loading table ', table_name)\n\n if columns or filter_exp:\n # A list of columns or a filter exp is given\n query, params = get_filter_query(table_name, columns, filter_exp)\n result = pd.read_sql_query(query, engine, params=params)\n else:\n # No view given, so simply get the whole table\n result = pd.read_sql(table_name, engine)\n\n # After reading from the DB, turn all None into NaN\n result.fillna(value=np.nan, inplace=True)\n return result\n\n\ndef load_query(query):\n \"\"\"\n Load a data frame from the SQL DB running the given query.\n\n :param query: Query to run in the DB\n :return: data_frame or None if it does not exist.\n \"\"\"\n\n if settings.DEBUG:\n print('Loading query ', query)\n\n result = pd.read_sql_query(query, engine)\n\n # After reading from the DB, turn all None into NaN\n result.fillna(value=np.nan, inplace=True)\n return result\n\n\ndef load_df_from_csvfile(file, skiprows=0, skipfooter=0):\n \"\"\"\n Given a file object, try to read the content as a CSV file and transform\n into a data frame. The skiprows and skipfooter are number of lines to skip\n from the top and bottom of the file (see read_csv in pandas).\n\n It also tries to convert as many columns as possible to date/time format\n (testing the conversion on every string column).\n\n :param filename: File object to read the CSV content\n :param skiprows: Number of lines to skip at the top of the document\n :param skipfooter: Number of lines to skip at the bottom of the document\n :return: Resulting data frame, or an Exception.\n \"\"\"\n data_frame = pd.read_csv(\n file,\n index_col=False,\n infer_datetime_format=True,\n quotechar='\"',\n skiprows=skiprows,\n skipfooter=skipfooter\n )\n\n # Strip white space from all string columns and try to convert to\n # datetime just in case\n for x in list(data_frame.columns):\n if data_frame[x].dtype.name == 'object':\n # Column is a string! Remove the leading and trailing white\n # space\n data_frame[x] = data_frame[x].str.strip().fillna(data_frame[x])\n\n # Try the datetime conversion\n try:\n series = pd.to_datetime(data_frame[x],\n infer_datetime_format=True)\n # Datetime conversion worked! Update the data_frame\n data_frame[x] = series\n except (ValueError, TypeError):\n pass\n return data_frame\n\n\ndef load_df_from_sqlconnection(conn_item, pwd=None):\n \"\"\"\n Load a DF from a SQL connection open with the parameters given in conn_item.\n\n :param conn_item: SQLConnection object with the connection parameters.\n :return: Data frame or raise an exception.\n \"\"\"\n\n # Get the connection\n db_connection = create_db_connection(conn_item.conn_type,\n conn_item.conn_driver,\n conn_item.db_user,\n pwd,\n conn_item.db_host,\n conn_item.db_name)\n\n # Try to fetch the data\n result = pd.read_sql(conn_item.db_table, db_connection)\n\n # After reading from the DB, turn all None into NaN\n result.fillna(value=np.nan, inplace=True)\n return result\n\n\ndef store_table(data_frame, table_name):\n \"\"\"\n Store a data frame in the DB\n :param data_frame: The data frame to store\n :param table_name: The name of the table in the DB\n :return: Nothing. 
Side effect in the DB\n \"\"\"\n\n with cache.lock(table_name):\n # We ovewrite the content and do not create an index\n data_frame.to_sql(table_name,\n engine,\n if_exists='replace',\n index=False)\n\n return\n\n\ndef delete_table(pk):\n \"\"\"Delete the table representing the workflow with the given PK. Due to\n the dual use of the database, the command has to be executed directly on\n the DB.\n \"\"\"\n try:\n cursor = connection.cursor()\n cursor.execute('DROP TABLE \"{0}\";'.format(create_table_name(pk)))\n connection.commit()\n except Exception:\n logger.error(\n 'Error while dropping table {0}'.format(create_table_name(pk))\n )\n\n\ndef delete_upload_table(pk):\n \"\"\"Delete the table used to merge data into the workflow with the given\n PK. Due to the dual use of the database, the command has to be executed\n directly on the DB.\n \"\"\"\n cursor = connection.cursor()\n cursor.execute('DROP TABLE \"{0}\"'.format(create_upload_table_name(pk)))\n connection.commit()\n\n\ndef get_table_column_types(table_name):\n \"\"\"\n :param table_name: Table name\n :return: List of pairs (column name, SQL type)\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(\"\"\"select column_name, data_type from \n INFORMATION_SCHEMA.COLUMNS where table_name = '{0}'\"\"\".format(table_name))\n\n return cursor.fetchall()\n\n\ndef df_column_types_rename(table_name):\n \"\"\"\n \n :param table_name: Primary key of the workflow containing this data frame (table) \n :return: List of data type strings translated to the proper values\n \"\"\"\n column_types = get_table_column_types(table_name)\n\n # result = [table_name[x].dtype.name for x in list(table_name.columns)]\n # for tname, ntname in pandas_datatype_names.items():\n # result[:] = [x if x != tname else ntname for x in result]\n\n return [sql_datatype_names[x] for __, x in\n get_table_column_types(table_name)]\n\n\ndef df_drop_column(pk, column_name):\n \"\"\"\n Drop a column from the DB table storing a data frame\n :param pk: Workflow primary key to obtain table name\n :param column_name: Column name\n :return: Drops the column from the corresponding DB table\n \"\"\"\n\n query = 'ALTER TABLE \"{0}\" DROP COLUMN \"{1}\"'.format(\n create_table_name(pk),\n column_name\n )\n cursor = connection.cursor()\n cursor.execute(query)\n\n\ndef get_subframe(pk, cond_filter, column_names=None):\n \"\"\"\n Execute a select query to extract a subset of the dataframe and turn the\n resulting query set into a data frame.\n :param pk: Workflow primary key\n :param cond_filter: Condition object to filter the data (or None)\n :param column_names: [list of column names], QuerySet with the data rows\n :return:\n \"\"\"\n # Get the cursor\n cursor = get_table_cursor(pk, cond_filter, column_names)\n\n # Create the DataFrame and set the column names\n result = pd.DataFrame.from_records(cursor.fetchall(), coerce_float=True)\n result.columns = [c.name for c in cursor.description]\n\n return result\n\n\ndef get_table_cursor(pk, cond_filter, column_names=None):\n \"\"\"\n Execute a select query in the database with an optional filter obtained\n from the jquery QueryBuilder.\n\n :param pk: Primary key of the workflow storing the data\n :param cond_filter: Condition object to filter the data (or None)\n :param column_names: optional list of columns to select\n :return: ([list of column names], QuerySet with the data rows)\n \"\"\"\n\n # Create the query\n if column_names:\n safe_column_names = [fix_pctg_in_name(x) for x in column_names]\n query = 'SELECT \"{0}\" from 
\"{1}\"'.format(\n '\", \"'.join(safe_column_names),\n create_table_name(pk)\n )\n else:\n query = 'SELECT * from \"{0}\"'.format(create_table_name(pk))\n\n # See if the action has a filter or not\n fields = []\n if cond_filter is not None:\n cond_filter, fields = evaluate_node_sql(cond_filter.formula)\n if cond_filter:\n # The condition may be empty, in which case, nothing is needed.\n query += ' WHERE ' + cond_filter\n\n # Execute the query\n cursor = connection.cursor()\n cursor.execute(query, fields)\n\n return cursor\n\n\ndef get_table_data(pk, cond_filter, column_names=None):\n # Get first the cursor\n cursor = get_table_cursor(pk, cond_filter, column_names)\n\n # Return the data\n return cursor.fetchall()\n\n\ndef execute_select_on_table(pk, fields, values, column_names=None):\n \"\"\"\n Execute a select query in the database with an optional filter obtained\n from the jquery QueryBuilder.\n\n :param pk: Primary key of the workflow storing the data\n :param fields: List of fields to add to the WHERE clause\n :param values: parameters to match the previous fields\n :param column_names: optional list of columns to select\n :return: QuerySet with the data rows\n \"\"\"\n\n # Create the query\n if column_names:\n safe_column_names = ['\"' + fix_pctg_in_name(x) + '\"'\n for x in column_names]\n query = 'SELECT {0}'.format(','.join(safe_column_names))\n else:\n query = 'SELECT *'\n\n # Add the table\n query += ' FROM \"{0}\"'.format(create_table_name(pk))\n\n # See if the action has a filter or not\n cursor = connection.cursor()\n if fields:\n query += ' WHERE ' + \\\n ' AND '.join(['\"{0}\" = %s'.format(fix_pctg_in_name(x))\n for x in fields])\n cursor.execute(query, values)\n else:\n # Execute the query\n cursor.execute(query)\n\n # Get the data\n return cursor.fetchall()\n\n\ndef get_table_queryset(tablename):\n query = 'SELECT * from \"{0}\";'.format(tablename)\n try:\n cursor = connection.cursor()\n cursor.execute(query)\n except Exception:\n return None\n\n return cursor.fetchall()\n\n\ndef query_to_dicts(query_string, *query_args):\n \"\"\"\n Run a simple query and produce a generator that returns the results as\n a bunch of dictionaries with keys for the column values selected.\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(query_string, query_args)\n col_names = [desc[0] for desc in cursor.description]\n while True:\n row = cursor.fetchone()\n if row is None:\n break\n row_dict = OrderedDict(izip(col_names, row))\n yield row_dict\n return\n\n\ndef update_row(pk, set_fields, set_values, where_fields, where_values):\n \"\"\"\n Given a primary key, pairs (set_field, set_value), and pairs (where_field,\n where_value), it updates the row in the table selected with the\n list of (where field = where value) with the values in the assignments in\n the list of (set_fields, set_values)\n\n :param pk: Primary key to detect workflow\n :param set_fields: List of field names to be updated\n :param set_values: List of values to update the fields of the previous list\n :param where_fields: List of fields used to filter the row in the table\n :param where_values: List of values of the previous fields to filter the row\n :return: The table in the workflow pointed by PK is modified.\n \"\"\"\n\n # First part of the query with the table name\n query = 'UPDATE \"{0}\"'.format(create_table_name(pk))\n # Add the SET field = value clauses\n query += ' SET ' + ', '.join(['\"{0}\" = %s'.format(fix_pctg_in_name(x))\n for x in set_fields])\n # And finally add the WHERE clause\n query += ' WHERE ' 
+ ' AND '.join(['\"{0}\" = %s'.format(fix_pctg_in_name(x))\n for x in where_fields])\n\n # Concatenate the values as parameters to the query\n parameters = set_values + where_values\n\n # Execute the query\n cursor = connection.cursor()\n cursor.execute(query, parameters)\n connection.commit()\n\n\ndef increase_row_integer(pk, set_field, where_field, where_value):\n \"\"\"\n Given a primary key, a field set_field, and a pair (where_field,\n where_value), it increases the field in the appropriate row\n\n :param pk: Primary key to detect workflow\n :param set_field: name of the field to be increased\n :param where_field: Field used to filter the row in the table\n :param where_value: Value of the previous field to filter the row\n :return: The table in the workflow pointed by PK is modified.\n \"\"\"\n\n # First part of the query with the table name\n query = 'UPDATE \"{0}\" SET \"{1}\" = \"{1}\" + 1 WHERE \"{2}\" = %s'.format(\n create_table_name(pk),\n set_field,\n where_field\n )\n\n # Execute the query\n cursor = connection.cursor()\n cursor.execute(query, [where_value])\n connection.commit()\n\n\ndef get_table_row_by_key(workflow, cond_filter, kv_pair, column_names=None):\n \"\"\"\n Select the set of elements after filtering and with the key=value pair\n\n :param workflow: workflow object to get to the table\n :param cond_filter: Condition object to filter the data (or None)\n :param kv_pair: A key=value pair to identify the row. Key is supposed to\n be unique.\n :param column_names: Optional list of column names to select\n :return: A dictionary with the (column_name, value) data or None if the\n row has not been found\n \"\"\"\n\n # Create the query\n if column_names:\n safe_column_names = [fix_pctg_in_name(x) for x in column_names]\n query = 'SELECT \"{0}\"'.format('\", \"'.join(safe_column_names))\n else:\n query = 'SELECT *'\n\n # Add the table\n query += ' FROM \"{0}\"'.format(create_table_name(workflow.id))\n\n # Create the second part of the query setting key=value\n query += ' WHERE (\"{0}\" = %s)'.format(fix_pctg_in_name(kv_pair[0]))\n fields = [kv_pair[1]]\n\n # See if the action has a filter or not\n if cond_filter is not None:\n cond_filter, filter_fields = \\\n evaluate_node_sql(cond_filter.formula)\n query += ' AND (' + cond_filter + ')'\n fields = fields + filter_fields\n\n # Execute the query\n cursor = connection.cursor()\n cursor.execute(query, fields)\n\n # Get the data\n qs = cursor.fetchall()\n\n # If there is anything other than one element, return None\n if len(qs) != 1:\n return None\n\n # Get the only element\n qs = qs[0]\n\n # ZIP the values to create a dictionary\n return OrderedDict(zip(workflow.get_column_names(), qs))\n\n\ndef get_column_stats_from_df(df_column):\n \"\"\"\n Given a data frame with a single column, return a set of statistics\n depending on its type.\n\n :param df_column: data frame with a single column\n :return: A dictionary with keys depending on the type of column:\n {'min': minimum value (integer, double and datetime),\n 'q1': Q1 value (0.25) (integer, double),\n 'mean': mean value (integer, double),\n 'median': median value (integer, double),\n 'q3': Q3 value (0.75) (integer, double),\n 'max': maximum value (integer, double and datetime),\n 'std': standard deviation (integer, double),\n 'counts': value counts (integer, double, string, datetime, Boolean),\n 'mode': mode value (integer, double, string, datetime, Boolean)}\n\n or None if the column has all its values set to NaN\n \"\"\"\n\n if
len(df_column.loc[df_column.notnull()]) == 0:\n # The column has no data\n return None\n\n # Dictionary to return\n result = {\n 'min': 0,\n 'q1': 0,\n 'mean': 0,\n 'median': 0,\n 'q3': 0,\n 'max': 0,\n 'std': 0,\n 'mode': None,\n 'counts': {},\n }\n\n data_type = pandas_datatype_names[df_column.dtype.name]\n\n if data_type == 'integer' or data_type == 'double':\n quantiles = df_column.quantile([0, .25, .5, .75, 1])\n result['min'] = '{0:g}'.format(quantiles[0])\n result['q1'] = '{0:g}'.format(quantiles[.25])\n result['mean'] = '{0:g}'.format(df_column.mean())\n result['median'] = '{0:g}'.format(quantiles[.5])\n result['q3'] = '{0:g}'.format(quantiles[.75])\n result['max'] = '{0:g}'.format(quantiles[1])\n result['std'] = '{0:g}'.format(df_column.std())\n\n result['counts'] = df_column.value_counts().to_dict()\n # Guard against an empty mode before indexing into it\n mode = df_column.mode()\n result['mode'] = mode[0] if len(mode) > 0 else '--'\n\n return result\n\n\ndef get_filter_query(table_name, column_names, filter_exp):\n \"\"\"\n Given a set of columns and a filter expression, return a pair of SQL query\n and params to be executed\n :param table_name: Table to query\n :param column_names: list of columns to consider or None to consider all\n :param filter_exp: Text filter expression\n :return: (sql query, sql params)\n \"\"\"\n\n # Create the query\n if column_names:\n safe_column_names = [fix_pctg_in_name(x) for x in column_names]\n query = 'SELECT \"{0}\"'.format('\", \"'.join(safe_column_names))\n else:\n query = 'SELECT *'\n\n # Add the table\n query += ' FROM \"{0}\"'.format(table_name)\n\n # Calculate the first suffix to add to the query\n filter_txt = ''\n filter_fields = []\n if filter_exp:\n filter_txt, filter_fields = evaluate_node_sql(filter_exp)\n\n # Build the query so far appending the filter and/or the cv_tuples\n if filter_txt:\n query += ' WHERE '\n\n fields = []\n # If there has been a suffix from the filter, add it.\n if filter_txt:\n query += filter_txt\n\n if filter_fields:\n fields.extend(filter_fields)\n\n return (query, fields)\n\n\ndef search_table_rows(workflow_id,\n cv_tuples=None,\n any_join=True,\n order_col_name=None,\n order_asc=True,\n column_names=None,\n pre_filter=None):\n \"\"\"\n Select rows where, for every (column, value) pair, the column contains the\n value (as in LIKE %value%). The pairs are combined with OR if any_join is\n True, or with AND otherwise, and the result is ordered by the given column\n and direction (if given).\n\n :param workflow_id: Id of the workflow storing the table\n :param cv_tuples: List of (column, value, type) tuples to search the value\n in the column\n :param any_join: Boolean encoding if values should be combined with OR (or\n AND)\n :param order_col_name: Order results by this column\n :param order_asc: Order results in ascending values (or descending)\n :param column_names: Optional list of column names to select\n :param pre_filter: Optional filter condition to pre filter the query set.\n the query is built with these terms as requirement AND the cv_tuples.\n :return: The resulting query set\n \"\"\"\n\n # Create the query\n if column_names:\n safe_column_names = [fix_pctg_in_name(x) for x in column_names]\n query = 'SELECT \"{0}\"'.format('\", \"'.join(safe_column_names))\n else:\n query = 'SELECT *'\n\n # Add the table\n query += ' FROM \"{0}\"'.format(create_table_name(workflow_id))\n\n # Calculate the first suffix to add to the query\n filter_txt = ''\n filter_fields = []\n if pre_filter:\n filter_txt, filter_fields = evaluate_node_sql(pre_filter)\n\n if cv_tuples:\n likes = []\n
tuple_fields = []\n for name, value, data_type in cv_tuples:\n # Make sure we escape the name and search as text\n name = fix_pctg_in_name(name)\n mod_name = '(CAST(\"{0}\" AS TEXT) LIKE %s)'.format(name)\n\n # Create the second part of the query setting column LIKE '%value%'\n likes.append(mod_name)\n tuple_fields.append('%' + value + '%')\n\n # Combine the search subqueries\n if any_join:\n tuple_txt = '(' + ' OR '.join(likes) + ')'\n else:\n tuple_txt = '(' + ' AND '.join(likes) + ')'\n\n # Build the query so far appending the filter and/or the cv_tuples\n if filter_txt or cv_tuples:\n query += ' WHERE '\n\n fields = []\n # If there has been a suffix from the filter, add it.\n if filter_txt:\n query += filter_txt\n fields.extend(filter_fields)\n\n # If there is a pre-filter, the suffix needs to be \"AND\" with the ones\n # just calculated\n if filter_txt and cv_tuples:\n query += ' AND '\n\n if cv_tuples:\n query += tuple_txt\n fields.extend(tuple_fields)\n\n # Add the order if needed\n if order_col_name:\n query += ' ORDER BY \"{0}\"'.format(fix_pctg_in_name(order_col_name))\n if not order_asc:\n query += ' DESC'\n\n # Execute the query\n cursor = connection.cursor()\n cursor.execute(query, fields)\n\n # Get the data\n return cursor.fetchall()\n\n\ndef delete_table_row_by_key(workflow_id, kv_pair):\n \"\"\"\n Delete the row in the table attached to a workflow with the given key,\n value pairs\n\n :param workflow_id: Id of the workflow attached to the table\n :param kv_pair: A key=value pair to identify the row. Key is supposed to\n be unique.\n :return: Drops that row from the table in the DB\n \"\"\"\n\n # Create the query\n query = 'DELETE FROM \"{0}\"'.format(create_table_name(workflow_id))\n\n # Create the second part of the query setting key=value\n query += ' WHERE (\"{0}\" = %s)'.format(fix_pctg_in_name(kv_pair[0]))\n fields = [kv_pair[1]]\n\n # Execute the query\n cursor = connection.cursor()\n cursor.execute(query, fields)\n\n\ndef num_rows(pk, cond_filter=None):\n \"\"\"\n Obtain the number of rows of the table storing workflow with given pk\n :param pk: Primary key of the table storing the data frame\n :param cond_filter: Condition element to filter the query\n :return: Number of rows (integer)\n \"\"\"\n return num_rows_by_name(create_table_name(pk), cond_filter)\n\n\ndef num_rows_by_name(table_name, cond_filter=None):\n \"\"\"\n Given a table name, get its number of rows\n :param table_name: Table name\n :param cond_filter: Condition element used to filter the query\n :return: integer\n \"\"\"\n\n # Initial query with the table name\n query = query_count_rows.format(table_name)\n\n fields = []\n if cond_filter is not None:\n cond_filter, fields = evaluate_node_sql(cond_filter)\n query += ' WHERE ' + cond_filter\n\n cursor = connection.cursor()\n cursor.execute(query, fields)\n return cursor.fetchone()[0]\n\n\ndef check_wf_df(workflow):\n \"\"\"\n Check the consistency between the information stored in the workflow\n and the structure of the underlying dataframe\n\n :param workflow: Workflow object\n :return: Boolean stating the result of the check.
True: Correct.\n \"\"\"\n # Get the df\n df = load_from_db(workflow.id)\n\n # Set values in case there is no df\n if df is not None:\n dfnrows = df.shape[0]\n dfncols = df.shape[1]\n df_col_names = list(df.columns)\n else:\n dfnrows = 0\n dfncols = 0\n df_col_names = []\n\n # Check 1: Number of rows and columns\n if workflow.nrows != dfnrows:\n return False\n if workflow.ncols != dfncols:\n return False\n\n # Identical sets of columns\n wf_cols = workflow.columns.all()\n if [x.name for x in wf_cols] != df_col_names:\n return False\n\n # Identical data types\n for n1, n2 in zip(wf_cols, df_col_names):\n df_dt = pandas_datatype_names[df[n2].dtype.name]\n if n1.data_type == 'boolean' and df_dt == 'string':\n # This is the case of a column with Boolean and Nulls\n continue\n\n if n1.data_type != df_dt:\n return False\n\n return True\n"
] | [
[
"pandas.DataFrame"
],
[
"pandas.read_sql_query",
"pandas.read_csv",
"pandas.to_datetime",
"pandas.read_sql"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
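A note on the pandas/Django DB helpers in the code field above: store_table() persists a frame with to_sql(if_exists='replace', index=False), and every loader normalizes None to NaN after reading. A minimal offline sketch of that round trip; the in-memory SQLite engine and table name here are illustrative stand-ins for the app's Django-configured engine:

```python
import numpy as np
import pandas as pd
from sqlalchemy import create_engine

# Illustrative stand-in for the app's engine; the code above builds it from
# the Django settings instead.
engine = create_engine("sqlite://")

df = pd.DataFrame({"email": ["a@x.org", "b@x.org"], "score": [1.0, None]})

# Mirrors store_table(): overwrite the table and do not store the index.
df.to_sql("workflow_table_1", engine, if_exists="replace", index=False)

# Mirrors load_query(): read back and turn NULL/None into NaN.
out = pd.read_sql_query('SELECT * FROM "workflow_table_1"', engine)
out.fillna(value=np.nan, inplace=True)
print(out)
```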
bzamecnik/tensorpack | [
"e9a3c2b3cd441e5b288607b44f2fe44fbf3ad4bb"
] | [
"examples/FasterRCNN/train.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: train.py\n\nimport argparse\nimport itertools\nimport numpy as np\nimport os\nimport shutil\nimport cv2\nimport six\nassert six.PY3, \"FasterRCNN requires Python 3!\"\nimport tensorflow as tf\nimport tqdm\n\nimport tensorpack.utils.viz as tpviz\nfrom tensorpack import *\nfrom tensorpack.tfutils import optimizer, collect_env_info\nfrom tensorpack.tfutils.common import get_tf_version_tuple\nfrom tensorpack.tfutils.summary import add_moving_summary\n\nimport model_frcnn\nimport model_mrcnn\nfrom basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone\nfrom dataset import DetectionDataset\nfrom config import finalize_configs, config as cfg\nfrom data import get_all_anchors, get_all_anchors_fpn, get_eval_dataflow, get_train_dataflow\nfrom eval import DetectionResult, predict_image, multithread_predict_dataflow, EvalCallback\nfrom model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align\nfrom model_cascade import CascadeRCNNHead\nfrom model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses\nfrom model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets\nfrom model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head\nfrom model_rpn import generate_rpn_proposals, rpn_head, rpn_losses\nfrom viz import draw_annotation, draw_final_outputs, draw_predictions, draw_proposal_recall\n\ntry:\n import horovod.tensorflow as hvd\nexcept ImportError:\n pass\n\n\nclass DetectionModel(ModelDesc):\n def preprocess(self, image):\n image = tf.expand_dims(image, 0)\n image = image_preprocess(image, bgr=True)\n return tf.transpose(image, [0, 3, 1, 2])\n\n @property\n def training(self):\n return get_current_tower_context().is_training\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)\n tf.summary.scalar('learning_rate-summary', lr)\n\n # The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.\n lr = lr / 8.\n opt = tf.train.MomentumOptimizer(lr, 0.9)\n if cfg.TRAIN.NUM_GPUS < 8:\n opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)\n return opt\n\n def get_inference_tensor_names(self):\n \"\"\"\n Returns two lists of tensor names to be used to create an inference callable.\n\n Returns:\n [str]: input names\n [str]: output names\n \"\"\"\n out = ['output/boxes', 'output/scores', 'output/labels']\n if cfg.MODE_MASK:\n out.append('output/masks')\n return ['image'], out\n\n def build_graph(self, *inputs):\n inputs = dict(zip(self.input_names, inputs))\n\n image = self.preprocess(inputs['image']) # 1CHW\n\n features = self.backbone(image)\n anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}\n proposals, rpn_losses = self.rpn(image, features, anchor_inputs) # inputs?\n\n targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]\n head_losses = self.roi_heads(image, features, proposals, targets)\n\n if self.training:\n wd_cost = regularize_cost(\n '.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')\n total_cost = tf.add_n(\n rpn_losses + head_losses + [wd_cost], 'total_cost')\n add_moving_summary(total_cost, wd_cost)\n return total_cost\n\n\nclass ResNetC4Model(DetectionModel):\n def inputs(self):\n ret = [\n tf.TensorSpec((None, None, 3), tf.float32, 'image'),\n tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR), tf.int32, 'anchor_labels'),\n tf.TensorSpec((None, None, 
cfg.RPN.NUM_ANCHOR, 4), tf.float32, 'anchor_boxes'),\n tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),\n tf.TensorSpec((None,), tf.int64, 'gt_labels')] # all > 0\n if cfg.MODE_MASK:\n ret.append(\n tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')\n ) # NR_GT x height x width\n return ret\n\n def backbone(self, image):\n return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]\n\n def rpn(self, image, features, inputs):\n featuremap = features[0]\n rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)\n anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])\n anchors = anchors.narrow_to(featuremap)\n\n image_shape2d = tf.shape(image)[2:] # h,w\n pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox\n proposal_boxes, proposal_scores = generate_rpn_proposals(\n tf.reshape(pred_boxes_decoded, [-1, 4]),\n tf.reshape(rpn_label_logits, [-1]),\n image_shape2d,\n cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,\n cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)\n\n if self.training:\n losses = rpn_losses(\n anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)\n else:\n losses = []\n\n return BoxProposals(proposal_boxes), losses\n\n def roi_heads(self, image, features, proposals, targets):\n image_shape2d = tf.shape(image)[2:] # h,w\n featuremap = features[0]\n\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n # sample proposal boxes in training\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n # The boxes to be used to crop RoIs.\n # Use all proposal boxes in inference\n\n boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)\n roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)\n\n feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7\n # Keep C5 feature to be shared with mask branch\n feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')\n fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)\n\n fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,\n tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n # In training, mask branch shares the same C5 feature.\n fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())\n mask_logits = maskrcnn_upXconv_head(\n 'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 14,\n pad_border=False) # nfg x 1x14x14\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n\n if cfg.MODE_MASK:\n roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 
14)\n feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])\n mask_logits = maskrcnn_upXconv_head(\n 'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n\nclass ResNetFPNModel(DetectionModel):\n\n def inputs(self):\n ret = [\n tf.TensorSpec((None, None, 3), tf.float32, 'image')]\n num_anchors = len(cfg.RPN.ANCHOR_RATIOS)\n for k in range(len(cfg.FPN.ANCHOR_STRIDES)):\n ret.extend([\n tf.TensorSpec((None, None, num_anchors), tf.int32,\n 'anchor_labels_lvl{}'.format(k + 2)),\n tf.TensorSpec((None, None, num_anchors, 4), tf.float32,\n 'anchor_boxes_lvl{}'.format(k + 2))])\n ret.extend([\n tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),\n tf.TensorSpec((None,), tf.int64, 'gt_labels')]) # all > 0\n if cfg.MODE_MASK:\n ret.append(\n tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')\n ) # NR_GT x height x width\n return ret\n\n def slice_feature_and_anchors(self, p23456, anchors):\n for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):\n with tf.name_scope('FPN_slice_lvl{}'.format(i)):\n anchors[i] = anchors[i].narrow_to(p23456[i])\n\n def backbone(self, image):\n c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)\n p23456 = fpn_model('fpn', c2345)\n return p23456\n\n def rpn(self, image, features, inputs):\n assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)\n\n image_shape2d = tf.shape(image)[2:] # h,w\n all_anchors_fpn = get_all_anchors_fpn()\n multilevel_anchors = [RPNAnchors(\n all_anchors_fpn[i],\n inputs['anchor_labels_lvl{}'.format(i + 2)],\n inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]\n self.slice_feature_and_anchors(features, multilevel_anchors)\n\n # Multi-Level RPN Proposals\n rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))\n for pi in features]\n multilevel_label_logits = [k[0] for k in rpn_outputs]\n multilevel_box_logits = [k[1] for k in rpn_outputs]\n multilevel_pred_boxes = [anchor.decode_logits(logits)\n for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]\n\n proposal_boxes, proposal_scores = generate_fpn_proposals(\n multilevel_pred_boxes, multilevel_label_logits, image_shape2d)\n\n if self.training:\n losses = multilevel_rpn_losses(\n multilevel_anchors, multilevel_label_logits, multilevel_box_logits)\n else:\n losses = []\n\n return BoxProposals(proposal_boxes), losses\n\n def roi_heads(self, image, features, proposals, targets):\n image_shape2d = tf.shape(image)[2:] # h,w\n assert len(features) == 5, \"Features have to be P23456!\"\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n\n fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)\n if not cfg.FPN.CASCADE:\n roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)\n\n head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)\n fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(\n 'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)\n fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,\n gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))\n else:\n def roi_func(boxes):\n return multilevel_roi_align(features[:4], 
boxes, 7)\n\n fastrcnn_head = CascadeRCNNHead(\n proposals, roi_func, fastrcnn_head_func,\n (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n roi_feature_maskrcnn = multilevel_roi_align(\n features[:4], proposals.fg_boxes(), 14,\n name_scope='multilevel_roi_align_mask')\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 28,\n pad_border=False) # fg x 1x28x28\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n if cfg.MODE_MASK:\n # Cascade inference needs roi transform with refined boxes.\n roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n\ndef do_visualize(model, model_path, nr_visualize=100, output_dir='output'):\n \"\"\"\n Visualize some intermediate results (proposals, raw predictions) inside the pipeline.\n \"\"\"\n df = get_train_dataflow() # we don't visualize mask stuff\n df.reset_state()\n\n pred = OfflinePredictor(PredictConfig(\n model=model,\n session_init=get_model_loader(model_path),\n input_names=['image', 'gt_boxes', 'gt_labels'],\n output_names=[\n 'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),\n 'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),\n 'fastrcnn_all_scores',\n 'output/boxes',\n 'output/scores',\n 'output/labels',\n ]))\n\n if os.path.isdir(output_dir):\n shutil.rmtree(output_dir)\n utils.fs.mkdir_p(output_dir)\n with tqdm.tqdm(total=nr_visualize) as pbar:\n for idx, dp in itertools.islice(enumerate(df), nr_visualize):\n img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']\n\n rpn_boxes, rpn_scores, all_scores, \\\n final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)\n\n # draw groundtruth boxes\n gt_viz = draw_annotation(img, gt_boxes, gt_labels)\n # draw best proposals for each groundtruth, to show recall\n proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)\n # draw the scores for the above proposals\n score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])\n\n results = [DetectionResult(*args) for args in\n zip(final_boxes, final_scores, final_labels,\n [None] * len(final_labels))]\n final_viz = draw_final_outputs(img, results)\n\n viz = tpviz.stack_patches([\n gt_viz, 
proposal_viz,\n score_viz, final_viz], 2, 2)\n\n if os.environ.get('DISPLAY', None):\n tpviz.interactive_imshow(viz)\n cv2.imwrite(\"{}/{:03d}.png\".format(output_dir, idx), viz)\n pbar.update()\n\n\ndef do_evaluate(pred_config, output_file):\n num_gpu = cfg.TRAIN.NUM_GPUS\n graph_funcs = MultiTowerOfflinePredictor(\n pred_config, list(range(num_gpu))).get_predictors()\n\n for dataset in cfg.DATA.VAL:\n logger.info(\"Evaluating {} ...\".format(dataset))\n dataflows = [\n get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)\n for k in range(num_gpu)]\n all_results = multithread_predict_dataflow(dataflows, graph_funcs)\n output = output_file + '-' + dataset\n DetectionDataset().eval_or_save_inference_results(all_results, dataset, output)\n\n\ndef do_predict(pred_func, input_file):\n img = cv2.imread(input_file, cv2.IMREAD_COLOR)\n results = predict_image(img, pred_func)\n final = draw_final_outputs(img, results)\n viz = np.concatenate((img, final), axis=1)\n cv2.imwrite(\"output.png\", viz)\n logger.info(\"Inference output for {} written to output.png\".format(input_file))\n tpviz.interactive_imshow(viz)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')\n parser.add_argument('--logdir', help='log directory', default='train_log/maskrcnn')\n parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')\n parser.add_argument('--evaluate', help=\"Run evaluation. \"\n \"This argument is the path to the output json evaluation file\")\n parser.add_argument('--predict', help=\"Run prediction on a given image. \"\n \"This argument is the path to the input image file\", nargs='+')\n parser.add_argument('--config', help=\"A list of KEY=VALUE to overwrite those defined in config.py\",\n nargs='+')\n\n if get_tf_version_tuple() < (1, 6):\n # https://github.com/tensorflow/tensorflow/issues/14657\n logger.warn(\"TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.\")\n\n args = parser.parse_args()\n if args.config:\n cfg.update_args(args.config)\n\n MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()\n DetectionDataset() # initialize the config with information from our dataset\n\n if args.visualize or args.evaluate or args.predict:\n if not tf.test.is_gpu_available():\n from tensorflow.python.framework import test_util\n assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \\\n \"Inference requires either GPU support or MKL support!\"\n assert args.load\n finalize_configs(is_training=False)\n\n if args.predict or args.visualize:\n cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS\n\n if args.visualize:\n do_visualize(MODEL, args.load)\n else:\n predcfg = PredictConfig(\n model=MODEL,\n session_init=get_model_loader(args.load),\n input_names=MODEL.get_inference_tensor_names()[0],\n output_names=MODEL.get_inference_tensor_names()[1])\n if args.predict:\n predictor = OfflinePredictor(predcfg)\n for image_file in args.predict:\n do_predict(predictor, image_file)\n elif args.evaluate:\n assert args.evaluate.endswith('.json'), args.evaluate\n do_evaluate(predcfg, args.evaluate)\n else:\n is_horovod = cfg.TRAINER == 'horovod'\n if is_horovod:\n hvd.init()\n logger.info(\"Horovod Rank={}, Size={}\".format(hvd.rank(), hvd.size()))\n\n if not is_horovod or hvd.rank() == 0:\n logger.set_logger_dir(args.logdir, 'd')\n logger.info(\"Environment Information:\\n\" + collect_env_info())\n\n 
finalize_configs(is_training=True)\n stepnum = cfg.TRAIN.STEPS_PER_EPOCH\n\n # warmup is step based, lr is epoch based\n init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)\n warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]\n warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum\n lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]\n\n factor = 8. / cfg.TRAIN.NUM_GPUS\n for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):\n mult = 0.1 ** (idx + 1)\n lr_schedule.append(\n (steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))\n logger.info(\"Warm Up Schedule (steps, value): \" + str(warmup_schedule))\n logger.info(\"LR Schedule (epochs, value): \" + str(lr_schedule))\n train_dataflow = get_train_dataflow()\n # This is what's commonly referred to as \"epochs\"\n total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()\n logger.info(\"Total passes of the training set is: {:.5g}\".format(total_passes))\n\n callbacks = [\n PeriodicCallback(\n ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),\n every_k_epochs=20),\n # linear warmup\n ScheduledHyperParamSetter(\n 'learning_rate', warmup_schedule, interp='linear', step_based=True),\n ScheduledHyperParamSetter('learning_rate', lr_schedule),\n PeakMemoryTracker(),\n EstimatedTimeLeft(median=True),\n SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout\n ]\n if cfg.TRAIN.EVAL_PERIOD > 0:\n callbacks.extend([\n EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)\n for dataset in cfg.DATA.VAL\n ])\n if not is_horovod:\n callbacks.append(GPUUtilizationTracker())\n\n if is_horovod and hvd.rank() > 0:\n session_init = None\n else:\n if args.load:\n session_init = get_model_loader(args.load)\n else:\n session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None\n\n traincfg = TrainConfig(\n model=MODEL,\n data=QueueInput(train_dataflow),\n callbacks=callbacks,\n steps_per_epoch=stepnum,\n max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,\n session_init=session_init,\n starting_epoch=cfg.TRAIN.STARTING_EPOCH\n )\n if is_horovod:\n trainer = HorovodTrainer(average=False)\n else:\n # nccl mode appears faster than cpu mode\n trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')\n launch_train_with_config(traincfg, trainer)\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.gather_nd",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.size",
"numpy.concatenate",
"tensorflow.cast",
"tensorflow.train.MomentumOptimizer",
"tensorflow.add_n",
"tensorflow.test.is_gpu_available",
"tensorflow.python.framework.test_util.IsMklEnabled",
"tensorflow.summary.scalar",
"tensorflow.TensorSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
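A side note on the schedule arithmetic in train.py above: the warmup is step based while the main schedule is epoch based, and all step counts in the config are defined for 8 GPUs. A standalone sketch of that computation, with illustrative constants standing in for the repo's cfg object:

```python
# Illustrative constants; the real values come from config.py (cfg.TRAIN.*).
NUM_GPUS = 8
STEPS_PER_EPOCH = 500
WARMUP = 1000                            # warmup length, in steps
WARMUP_INIT_LR = 1e-3 / 3
BASE_LR = 1e-2
LR_SCHEDULE = [120000, 160000, 180000]   # step boundaries, defined for 8 GPUs

init_lr = WARMUP_INIT_LR * min(8.0 / NUM_GPUS, 1.0)
warmup_schedule = [(0, init_lr), (WARMUP, BASE_LR)]       # step based
warmup_end_epoch = WARMUP * 1.0 / STEPS_PER_EPOCH
lr_schedule = [(int(warmup_end_epoch + 0.5), BASE_LR)]    # epoch based

factor = 8.0 / NUM_GPUS
for idx, steps in enumerate(LR_SCHEDULE[:-1]):
    mult = 0.1 ** (idx + 1)   # divide the LR by 10 at each boundary
    lr_schedule.append((steps * factor // STEPS_PER_EPOCH, BASE_LR * mult))

print(warmup_schedule)  # [(0, 0.000333...), (1000, 0.01)]
print(lr_schedule)      # [(2, 0.01), (240.0, 0.001), (320.0, 0.0001)]
```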
hmartelb/meme-search | [
"2042678b3a7252ba00699e7a0618aafdf2059465"
] | [
"data/scraper.py"
] | [
"\n\nimport json\nimport time\nimport os\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\ndef process_9gag(args):\n fetched_memes = []\n errors = 0\n # for i in tqdm(range(args.))\n pass\n\ndef process_me_dot_me(args):\n pass\n\ndef templates_imgflip(args):\n args.source_url = \"https://imgflip.com/memetemplates\"\n fetched_templates = []\n errors = 0\n for i in tqdm(range(args.from_page, args.pages + 1)):\n print(f\"Requesting: {args.source_url}?page={i}\")\n response = requests.get(f\"{args.source_url}?page={i}\")\n\n print(response)\n\n if response.status_code != 200:\n print(\"Bad response\")\n break\n \n body = BeautifulSoup(response.text, 'html.parser')\n templates = body.findAll(\"div\", {\"class\": \"mt-box\"})\n \n print(len(templates))\n\n for template in templates:\n try:\n template_url = \"https://\"+template.find('img', {\"class\": \"shadow\"})['src'][2:]\n template_id, template_format = os.path.splitext(template_url.split(\"/\")[-1])\n\n template_title = template.find(\"h3\", {\"class\": \"mt-title\"}).find(\"a\")\n template_title = \"\" if template_title is None else template_title.text\n\n template_data = {\n \"id\": template_id,\n \"format\": template_format,\n \"website\": \"imgflip\",\n \"url\": template_url,\n \"title\": template_title\n }\n fetched_templates.append(template_data)\n except:\n errors += 1\n # time.sleep(args.delay)\n\n print(f\"Fetched: {len(fetched_templates)} templates. Found {errors} error(s).\")\n return fetched_templates\n\n\ndef process_imgflip(args):\n '''\n https://gist.github.com/WalterSimoncini/defca6de456bb168ada303085358bf0a\n '''\n fetched_memes = []\n errors = 0\n for i in tqdm(range(args.from_page, args.pages + 1)):\n # print(f\"Processing page {i}\")\n response = requests.get(f\"{args.source_url}?page={i}\")\n body = BeautifulSoup(response.text, 'html.parser')\n\n if response.status_code != 200:\n # print(\"Something went wrong!\")\n break # Something went wrong (e.g. page limit)\n\n memes = body.findAll(\"div\", {\"class\": \"base-unit clearfix\"})\n for meme in memes:\n if \"not-safe-for-work images\" in str(meme):\n continue # NSFW memes are available only to logged in users\n \n try:\n meme_url = 'https://'+meme.find(\"img\", {\"class\": \"base-img\"})[\"src\"][2:]\n meme_id, meme_format = os.path.splitext(meme_url.split(\"/\")[-1])\n\n # Handle anonymous authors\n meme_author = meme.find(\"a\", {\"class\": \"u-username\"})\n meme_author = \"anonymous\" if meme_author is None else meme_author.text\n \n # Handle empty titles\n meme_title = meme.find(\"h2\", {\"class\": \"base-unit-title\"}).find(\"a\")\n meme_title = \"\" if meme_title is None else meme_title.text\n \n meme_text = meme.find(\"img\", {\"class\": \"base-img\"})[\"alt\"]\n meme_text = meme_text.split(\"|\")[1].strip()\n\n meme_data = {\n \"id\": meme_id,\n \"format\": meme_format,\n \"website\": \"imgflip\",\n \"url\": meme_url,\n \"author\": meme_author,\n \"title\": meme_title,\n \"text\": meme_text.lower()\n }\n fetched_memes.append(meme_data)\n except:\n errors += 1\n\n time.sleep(args.delay)\n\n print(f\"Fetched: {len(fetched_memes)} memes. Found {errors} error(s).\")\n return fetched_memes\n\nif __name__ == '__main__':\n import argparse\n ap = argparse.ArgumentParser()\n # ap.add_argument(\"--source_url\", default=\"https://imgflip.com/tag/programming\", help=\"Memes list url (e.g. 
https://imgflip.com/meme/Bird-Box)\", type=str)\n ap.add_argument(\"--tag\", required=True, type=str)#default=['programming', 'artificial intelligence', 'computer'], type=list)\n ap.add_argument(\"--from_page\", default=1, help=\"Initial page\", type=int)\n ap.add_argument(\"--pages\", default=44, help=\"Maximum page number to be scraped\", type=int)\n ap.add_argument(\"--delay\", default=2, help=\"Delay between page loads (seconds)\", type=int)\n ap.add_argument(\"-o\", \"--output\", default=\"templates.tsv\")\n args = ap.parse_args()\n\n # category = args.source_url.split(\"/\")[-1].replace(\"-\", \" \")\n\n # Get the data\n data = {}\n # for tag in args.tags:\n print(f\"Processing tag: {args.tag}\")\n \n # Get the data\n # args.source_url = f\"https://imgflip.com/tag/{args.tag.replace(' ', '+')}\"\n # data = process_imgflip(args)\n \n # args.source_url = f\"https://ww.9gag.com/search/?query={args.tag.replace(' ', '+')}\"\n # data = process_9gag(args)\n \n data = templates_imgflip(args)\n\n # Create a pd.DataFrame and save (append to existing .tsv)\n df = pd.DataFrame(data)\n print(df.head(20))\n df.to_csv(args.output, sep='\\t', index=False, mode='a')"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
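The imgflip parsers in scraper.py above all follow the same BeautifulSoup pattern: locate the card div, pull the image URL out of a protocol-relative src, and tolerate missing fields. A minimal offline sketch of that pattern on canned HTML (the markup below is an illustrative reduction, not imgflip's live page structure):

```python
from bs4 import BeautifulSoup

# Illustrative snippet shaped like the "mt-box" template cards parsed above.
html = """
<div class="mt-box">
  <h3 class="mt-title"><a>Bird Box</a></h3>
  <img class="shadow" src="//i.imgflip.com/1bij.jpg">
</div>
"""

body = BeautifulSoup(html, "html.parser")
for template in body.findAll("div", {"class": "mt-box"}):
    # src is protocol-relative ("//..."), hence the [2:] slice before "https://"
    url = "https://" + template.find("img", {"class": "shadow"})["src"][2:]
    title_tag = template.find("h3", {"class": "mt-title"}).find("a")
    title = "" if title_tag is None else title_tag.text
    print({"url": url, "title": title})
```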
AnielliRosane/lista-ser347 | [
"61a8ac8f675dc0ec05f45408c54e9d3a0e515ff4"
] | [
"exercicio-4.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Lista de exercicio 06\n\n# Exercicio 4\n\n# importando as bibliotecas\n\nimport matplotlib as plt\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# informacoes da tabela relativas aos dados masculino e feminino (IBGE)\n\nidade = np.array(\n [\"0 a 4 anos\", \"5 a 9 anos\", \"10 a 14 anos\", \"15 a 19 anos\", \"20 a 24 anos\", \"25 a 29 anos\",\n \"30 a 34 anos\", \"35 a 39 anos\", \"40 a 44 anos\", \"45 a 49 anos\", \"50 a 54 anos\", \"55 a 59 anos\",\n \"60 a 64 anos\", \"65 a 69 anos\", \"70 a 74 anos\", \"75 a 79 anos\", \"80 a 84 anos\", \"85 a 89 anos\",\n \"90 a 94 anos\", \"95 a 99 anos\", \"100 anos e mais\"])\n\nfeminino = np.array([6779171, 7345231, 8441348, 8432004, 8614963, 8643419, 8026854, 7121915, 6688796, 6141338, 5305407,\n 4373877, 3468085, 2616745, 2074264, 1472930, 998349, 508724, 211594, 66806, 16989])\n\nmasculino = np.array([7016987, 7624144, 8725413, 8558868, 8630229, 8460995, 7717658, 6766664, 6320568, 5692014, 4834995,\n 3902344, 3041035, 2224065, 1667372, 1090517, 668623, 310759, 114964, 31529, 7247])\n\npop = [x for x in range( len(idade) ) ]\n\n# Configuracao do grafico\n\nplt.figure(figsize=(10, 8))\n\nplt.suptitle('Distribuição da População por sexo segundo os grupos de idade – Brasil – 2010', fontsize=18)\n\nplt.rc('axes.spines', **{'bottom': True, 'left': False, 'right': False, 'top': False}) # remove as linhas da figura\n\n# Subplot masculino\nplt.subplot(221)\nplt.barh(idade, masculino, align='center', color='blue', linewidth=0.5, label='Masculino')\nplt.xticks([0, 2000000, 4000000, 6000000, 8000000], [\"\", \"\", \"4000000\"])\n\nplt.legend(loc='upper left') # legenda\n\nplt.subplots_adjust(left=0.15, wspace=0.4) # coloca espaco entre os graficos\n\nplt.gca().invert_xaxis() # inverte\n\nplt.yticks([]) # remove o eixo y\n\n# colocando linhas\nplt.axvline(8000000, color='grey', alpha=0.15)\nplt.axvline(6000000, color='grey', alpha=0.15)\nplt.axvline(4000000, color='grey', alpha=0.15)\nplt.axvline(6000000, color='grey', alpha=0.15)\nplt.axvline(2000000, color='grey', alpha=0.15)\nplt.axvline(0, color='black', alpha=0.20)\n\n# subplot feminino\nplt.subplot(222)\nplt.barh(idade, feminino, align='center', color='orange', linewidth=0.5, label='Feminino')\nplt.xticks([0, 2000000, 4000000, 6000000, 8000000], [\"0\", \"\", \"4000000\"], )\n\nplt.legend(loc='upper right') # legenda\n\n# colocando linhas\nplt.axvline(8000000, color='grey', alpha=0.15)\nplt.axvline(6000000, color='grey', alpha=0.15)\nplt.axvline(4000000, color='grey', alpha=0.15)\nplt.axvline(6000000, color='grey', alpha=0.15)\nplt.axvline(2000000, color='grey', alpha=0.15)\nplt.axvline(0, color='black', alpha=0.30)\n\nplt.show();\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots_adjust",
"numpy.array",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
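The exercise above builds the pyramid from two subplots and inverts the x-axis of the male panel. A common alternative, sketched below on an illustrative subset of the same IBGE arrays, draws both sexes on one shared axis by negating one side:

```python
import matplotlib.pyplot as plt
import numpy as np

# Illustrative subset of the age groups used above.
idade = ["0 a 4 anos", "5 a 9 anos", "10 a 14 anos"]
masculino = np.array([7016987, 7624144, 8725413])
feminino = np.array([6779171, 7345231, 8441348])

fig, ax = plt.subplots(figsize=(8, 4))
ax.barh(idade, -masculino, color='blue', label='Masculino')  # negate to mirror left
ax.barh(idade, feminino, color='orange', label='Feminino')
ax.set_xticks([-8000000, -4000000, 0, 4000000, 8000000])
ax.set_xticklabels(["8000000", "4000000", "0", "4000000", "8000000"])  # absolute labels
ax.legend()
plt.show()
```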
dukebw/nerfies | [
"b30fe19edb6435e770b35dc07aab44ae62c96278"
] | [
"third_party/pycolmap/pycolmap/scene_manager.py"
] | [
"# Author: True Price <jtprice at cs.unc.edu>\n\nfrom collections import OrderedDict, defaultdict\nfrom io import StringIO\nfrom itertools import combinations\nimport os\nimport struct\n\nfrom .camera import Camera\nfrom .image import Image\nimport numpy as np\nfrom .rotation import Quaternion\n\n# -------------------------------------------------------------------------------\n#\n# SceneManager\n#\n# -------------------------------------------------------------------------------\n\n\nclass SceneManager:\n INVALID_POINT3D = np.uint64(-1)\n\n def __init__(self, colmap_results_folder, image_path=None):\n self.folder = colmap_results_folder\n if not self.folder.endswith(\"/\"):\n self.folder += \"/\"\n\n self.image_path = None\n self.load_colmap_project_file(image_path=image_path)\n\n self.cameras = OrderedDict()\n self.images = OrderedDict()\n self.name_to_image_id = dict()\n\n self.last_camera_id = 0\n self.last_image_id = 0\n\n # Nx3 array of point3D xyz's\n self.points3D = np.zeros((0, 3))\n\n # for each element in points3D, stores the id of the point\n self.point3D_ids = np.empty(0)\n\n # point3D_id => index in self.points3D\n self.point3D_id_to_point3D_idx = dict()\n\n # point3D_id => [(image_id, point2D idx in image)]\n self.point3D_id_to_images = dict()\n\n self.point3D_colors = np.zeros((0, 3), dtype=np.uint8)\n self.point3D_errors = np.zeros(0)\n\n # ---------------------------------------------------------------------------\n\n def load_colmap_project_file(self, project_file=None, image_path=None):\n if project_file is None:\n project_file = self.folder + \"project.ini\"\n\n self.image_path = image_path\n\n if self.image_path is None:\n try:\n with open(project_file, \"r\") as f:\n for line in iter(f.readline, \"\"):\n if line.startswith(\"image_path\"):\n self.image_path = line[11:].strip()\n break\n except:\n pass\n\n if self.image_path is None:\n print(\"Warning: image_path not found for reconstruction\")\n elif not self.image_path.endswith(\"/\"):\n self.image_path += \"/\"\n\n # ---------------------------------------------------------------------------\n\n def load(self):\n self.load_cameras()\n self.load_images()\n self.load_points3D()\n\n # ---------------------------------------------------------------------------\n\n def load_cameras(self, input_file=None):\n if input_file is None:\n input_file = self.folder + \"cameras.bin\"\n if os.path.exists(input_file):\n self._load_cameras_bin(input_file)\n else:\n input_file = self.folder + \"cameras.txt\"\n if os.path.exists(input_file):\n self._load_cameras_txt(input_file)\n else:\n raise IOError(\"no cameras file found\")\n\n def _load_cameras_bin(self, input_file):\n self.cameras = OrderedDict()\n\n with open(input_file, \"rb\") as f:\n num_cameras = struct.unpack(\"L\", f.read(8))[0]\n for _ in range(num_cameras):\n camera_id, camera_type, w, h = struct.unpack(\"IiLL\", f.read(24))\n num_params = Camera.GetNumParams(camera_type)\n params = struct.unpack(\"d\" * num_params, f.read(8 * num_params))\n self.cameras[camera_id] = Camera(camera_type, w, h, params)\n self.last_camera_id = max(self.last_camera_id, camera_id)\n\n def _load_cameras_txt(self, input_file):\n self.cameras = OrderedDict()\n\n with open(input_file, \"r\") as f:\n for line in iter(lambda: f.readline().strip(), \"\"):\n if not line or line.startswith(\"#\"):\n continue\n\n data = line.split()\n camera_id = int(data[0])\n self.cameras[camera_id] = Camera(\n data[1], int(data[2]), int(data[3]), list(map(float, data[4:]))\n )\n self.last_camera_id = 
max(self.last_camera_id, camera_id)\n\n # ---------------------------------------------------------------------------\n\n def load_images(self, input_file=None):\n if input_file is None:\n input_file = self.folder + \"images.bin\"\n if os.path.exists(input_file):\n self._load_images_bin(input_file)\n else:\n input_file = self.folder + \"images.txt\"\n if os.path.exists(input_file):\n self._load_images_txt(input_file)\n else:\n raise IOError(\"no images file found\")\n\n def _load_images_bin(self, input_file):\n self.images = OrderedDict()\n\n with open(input_file, \"rb\") as f:\n num_images = struct.unpack(\"L\", f.read(8))[0]\n for _ in range(num_images):\n image_id = struct.unpack(\"I\", f.read(4))[0]\n q = Quaternion(np.array(struct.unpack(\"dddd\", f.read(32))))\n t = np.array(struct.unpack(\"ddd\", f.read(24)))\n camera_id = struct.unpack(\"I\", f.read(4))[0]\n name = b\"\".join(c for c in iter(lambda: f.read(1), b\"\\x00\")).decode()\n\n image = Image(name, camera_id, q, t)\n\n num_points2D = struct.unpack(\"L\", f.read(8))[0]\n\n image.points2D = np.empty((num_points2D, 2))\n image.point3D_ids = np.empty(num_points2D, dtype=np.uint64)\n for j in range(num_points2D):\n image.points2D[j] = np.array(struct.unpack(\"dd\", f.read(16)))\n image.point3D_ids[j] = np.array(struct.unpack(\"Q\", f.read(8)))\n\n self.images[image_id] = image\n self.name_to_image_id[image.name] = image_id\n\n self.last_image_id = max(self.last_image_id, image_id)\n\n def _load_images_txt(self, input_file):\n self.images = OrderedDict()\n\n with open(input_file, \"r\") as f:\n is_camera_description_line = False\n\n for line in iter(lambda: f.readline().strip(), \"\"):\n if not line or line.startswith(\"#\"):\n continue\n\n is_camera_description_line = not is_camera_description_line\n\n data = line.split()\n\n if is_camera_description_line:\n image_id = int(data[0])\n image = Image(\n data[-1],\n int(data[-2]),\n Quaternion(np.array(list(map(float, data[1:5])))),\n np.array(list(map(float, data[5:8]))),\n )\n else:\n image.points2D = np.array(\n [list(map(float, data[::3])), list(map(float, data[1::3]))]\n ).T\n image.point3D_ids = np.array(list(map(np.uint64, data[2::3])))\n\n # automatically remove points without an associated 3D point\n # mask = (image.point3D_ids != SceneManager.INVALID_POINT3D)\n # image.points2D = image.points2D[mask]\n # image.point3D_ids = image.point3D_ids[mask]\n\n self.images[image_id] = image\n self.name_to_image_id[image.name] = image_id\n\n self.last_image_id = max(self.last_image_id, image_id)\n\n # ---------------------------------------------------------------------------\n\n def load_points3D(self, input_file=None):\n if input_file is None:\n input_file = self.folder + \"points3D.bin\"\n if os.path.exists(input_file):\n self._load_points3D_bin(input_file)\n else:\n input_file = self.folder + \"points3D.txt\"\n if os.path.exists(input_file):\n self._load_points3D_txt(input_file)\n else:\n raise IOError(\"no points3D file found\")\n\n def _load_points3D_bin(self, input_file):\n with open(input_file, \"rb\") as f:\n num_points3D = struct.unpack(\"L\", f.read(8))[0]\n\n self.points3D = np.empty((num_points3D, 3))\n self.point3D_ids = np.empty(num_points3D, dtype=np.uint64)\n self.point3D_colors = np.empty((num_points3D, 3), dtype=np.uint8)\n self.point3D_id_to_point3D_idx = dict()\n self.point3D_id_to_images = dict()\n self.point3D_errors = np.empty(num_points3D)\n\n for i in range(num_points3D):\n self.point3D_ids[i] = struct.unpack(\"L\", f.read(8))[0]\n self.points3D[i] = 
struct.unpack(\"ddd\", f.read(24))\n self.point3D_colors[i] = struct.unpack(\"BBB\", f.read(3))\n self.point3D_errors[i] = struct.unpack(\"d\", f.read(8))[0]\n\n self.point3D_id_to_point3D_idx[self.point3D_ids[i]] = i\n\n # load (image id, point2D idx) pairs\n track_len = struct.unpack(\"L\", f.read(8))[0]\n data = struct.unpack(\"I\" * (2 * track_len), f.read(2 * track_len * 4))\n self.point3D_id_to_images[self.point3D_ids[i]] = np.array(\n data, dtype=np.uint32\n ).reshape(track_len, 2)\n\n def _load_points3D_txt(self, input_file):\n self.points3D = []\n self.point3D_ids = []\n self.point3D_colors = []\n self.point3D_id_to_point3D_idx = dict()\n self.point3D_id_to_images = dict()\n self.point3D_errors = []\n\n with open(input_file, \"r\") as f:\n for line in iter(lambda: f.readline().strip(), \"\"):\n if not line or line.startswith(\"#\"):\n continue\n\n data = line.split()\n point3D_id = np.uint64(data[0])\n\n self.point3D_ids.append(point3D_id)\n self.point3D_id_to_point3D_idx[point3D_id] = len(self.points3D)\n self.points3D.append(list(map(np.float64, data[1:4])))\n self.point3D_colors.append(list(map(np.uint8, data[4:7])))\n self.point3D_errors.append(np.float64(data[7]))\n\n # load (image id, point2D idx) pairs\n self.point3D_id_to_images[point3D_id] = np.array(\n list(map(np.uint32, data[8:]))\n ).reshape(-1, 2)\n\n self.points3D = np.array(self.points3D)\n self.point3D_ids = np.array(self.point3D_ids)\n self.point3D_colors = np.array(self.point3D_colors)\n self.point3D_errors = np.array(self.point3D_errors)\n\n # ---------------------------------------------------------------------------\n\n def save(self, output_folder, binary=True):\n self.save_cameras(output_folder, binary=binary)\n self.save_images(output_folder, binary=binary)\n self.save_points3D(output_folder, binary=binary)\n\n # ---------------------------------------------------------------------------\n\n def save_cameras(self, output_folder, output_file=None, binary=True):\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n if output_file is None:\n output_file = \"cameras.bin\" if binary else \"cameras.txt\"\n\n output_file = os.path.join(output_folder, output_file)\n\n if binary:\n self._save_cameras_bin(output_file)\n else:\n self._save_cameras_txt(output_file)\n\n def _save_cameras_bin(self, output_file):\n with open(output_file, \"wb\") as fid:\n fid.write(struct.pack(\"L\", len(self.cameras)))\n\n camera_struct = struct.Struct(\"IiLL\")\n\n for camera_id, camera in sorted(self.cameras.items()):\n fid.write(\n camera_struct.pack(\n camera_id, camera.camera_type, camera.width, camera.height\n )\n )\n # TODO (True): should move this into the Camera class\n fid.write(camera.get_params().tobytes())\n\n def _save_cameras_txt(self, output_file):\n with open(output_file, \"w\") as fid:\n print(\"# Camera list with one line of data per camera:\", file=fid)\n print(\"# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\", file=fid)\n print(\"# Number of cameras:\", len(self.cameras), file=fid)\n\n for camera_id, camera in sorted(self.cameras.items()):\n print(camera_id, camera, file=fid)\n\n # ---------------------------------------------------------------------------\n\n def save_images(self, output_folder, output_file=None, binary=True):\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n if output_file is None:\n output_file = \"images.bin\" if binary else \"images.txt\"\n\n output_file = os.path.join(output_folder, output_file)\n\n if binary:\n self._save_images_bin(output_file)\n 
else:\n self._save_images_txt(output_file)\n\n def _save_images_bin(self, output_file):\n with open(output_file, \"wb\") as fid:\n fid.write(struct.pack(\"L\", len(self.images)))\n\n for image_id, image in self.images.items():\n fid.write(struct.pack(\"I\", image_id))\n fid.write(image.q.q.tobytes())\n fid.write(image.tvec.tobytes())\n fid.write(struct.pack(\"I\", image.camera_id))\n # the file is opened in binary mode, so encode the name\n fid.write(image.name.encode() + b\"\\x00\")\n fid.write(struct.pack(\"L\", len(image.points2D)))\n data = np.rec.fromarrays(\n (image.points2D[:, 0], image.points2D[:, 1], image.point3D_ids)\n )\n fid.write(data.tobytes())\n\n def _save_images_txt(self, output_file):\n with open(output_file, \"w\") as fid:\n print(\"# Image list with two lines of data per image:\", file=fid)\n print(\"# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\", file=fid)\n print(\"# POINTS2D[] as (X, Y, POINT3D_ID)\", file=fid)\n print(\"# Number of images: {},\".format(len(self.images)), file=fid)\n print(\"# mean observations per image: unknown\", file=fid)\n\n for image_id, image in self.images.items():\n print(image_id, file=fid)\n print(\" \".join(str(qi) for qi in image.q.q), file=fid)\n print(\" \".join(str(ti) for ti in image.tvec), file=fid)\n print(image.camera_id, image.name, file=fid)\n\n data = np.rec.fromarrays(\n (\n image.points2D[:, 0],\n image.points2D[:, 1],\n image.point3D_ids.astype(np.int64),\n )\n )\n # text-mode files cannot seek backwards in Python 3, so build the\n # whole points line before writing it\n print(\" \".join(\"%.2f %.2f %d\" % (p[0], p[1], p[2]) for p in data), file=fid)\n\n # ---------------------------------------------------------------------------\n\n def save_points3D(self, output_folder, output_file=None, binary=True):\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n if output_file is None:\n output_file = \"points3D.bin\" if binary else \"points3D.txt\"\n\n output_file = os.path.join(output_folder, output_file)\n\n if binary:\n self._save_points3D_bin(output_file)\n else:\n self._save_points3D_txt(output_file)\n\n def _save_points3D_bin(self, output_file):\n num_valid_points3D = sum(\n 1\n for point3D_idx in self.point3D_id_to_point3D_idx.values()\n if point3D_idx != SceneManager.INVALID_POINT3D\n )\n\n iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()\n\n with open(output_file, \"wb\") as fid:\n fid.write(struct.pack(\"L\", num_valid_points3D))\n\n for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:\n if point3D_idx == SceneManager.INVALID_POINT3D:\n continue\n\n fid.write(struct.pack(\"L\", point3D_id))\n fid.write(self.points3D[point3D_idx].tobytes())\n fid.write(self.point3D_colors[point3D_idx].tobytes())\n fid.write(self.point3D_errors[point3D_idx].tobytes())\n fid.write(struct.pack(\"L\", len(self.point3D_id_to_images[point3D_id])))\n fid.write(self.point3D_id_to_images[point3D_id].tobytes())\n\n def _save_points3D_txt(self, output_file):\n num_valid_points3D = sum(\n 1\n for point3D_idx in self.point3D_id_to_point3D_idx.values()\n if point3D_idx != SceneManager.INVALID_POINT3D\n )\n\n array_to_string = lambda arr: \" \".join(str(x) for x in arr)\n\n iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()\n\n with open(output_file, \"w\") as fid:\n print(\"# 3D point list with one line of data per point:\", file=fid)\n print(\"# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as \", file=fid)\n print(\"# (IMAGE_ID, POINT2D_IDX)\", file=fid)\n print(\"# Number of points: {},\".format(num_valid_points3D), file=fid)\n print(\"# mean track length: unknown\", file=fid)\n\n for point3D_id,
point3D_idx in iter_point3D_id_to_point3D_idx:\n if point3D_idx == SceneManager.INVALID_POINT3D:\n continue\n\n print(point3D_id, file=fid)\n print(array_to_string(self.points3D[point3D_idx]), file=fid)\n print(array_to_string(self.point3D_colors[point3D_idx]), file=fid)\n print(self.point3D_errors[point3D_idx], file=fid)\n print(\n array_to_string(self.point3D_id_to_images[point3D_id].flat),\n file=fid,\n )\n\n # ---------------------------------------------------------------------------\n\n # return the image id associated with a given image file\n def get_image_from_name(self, image_name):\n image_id = self.name_to_image_id[image_name]\n return image_id, self.images[image_id]\n\n # ---------------------------------------------------------------------------\n\n def get_camera(self, camera_id):\n return self.cameras[camera_id]\n\n # ---------------------------------------------------------------------------\n\n def get_points3D(self, image_id, return_points2D=True, return_colors=False):\n image = self.images[image_id]\n\n mask = image.point3D_ids != SceneManager.INVALID_POINT3D\n\n point3D_idxs = np.array(\n [\n self.point3D_id_to_point3D_idx[point3D_id]\n for point3D_id in image.point3D_ids[mask]\n ]\n )\n # detect filtered points\n filter_mask = point3D_idxs != SceneManager.INVALID_POINT3D\n point3D_idxs = point3D_idxs[filter_mask]\n result = [self.points3D[point3D_idxs, :]]\n\n if return_points2D:\n mask[mask] &= filter_mask\n result += [image.points2D[mask]]\n if return_colors:\n result += [self.point3D_colors[point3D_idxs, :]]\n\n return result if len(result) > 1 else result[0]\n\n # ---------------------------------------------------------------------------\n\n def point3D_valid(self, point3D_id):\n return (\n self.point3D_id_to_point3D_idx[point3D_id] != SceneManager.INVALID_POINT3D\n )\n\n # ---------------------------------------------------------------------------\n\n def get_filtered_points3D(self, return_colors=False):\n point3D_idxs = [\n idx\n for idx in self.point3D_id_to_point3D_idx.values()\n if idx != SceneManager.INVALID_POINT3D\n ]\n result = [self.points3D[point3D_idxs, :]]\n\n if return_colors:\n result += [self.point3D_colors[point3D_idxs, :]]\n\n return result if len(result) > 1 else result[0]\n\n # ---------------------------------------------------------------------------\n\n # return 3D points shared by two images\n def get_shared_points3D(self, image_id1, image_id2):\n point3D_ids = set(self.images[image_id1].point3D_ids) & set(\n self.images[image_id2].point3D_ids\n )\n point3D_ids.discard(SceneManager.INVALID_POINT3D)\n\n point3D_idxs = np.array(\n [self.point3D_id_to_point3D_idx[point3D_id] for point3D_id in point3D_ids]\n )\n\n return self.points3D[point3D_idxs, :]\n\n # ---------------------------------------------------------------------------\n\n # project *all* 3D points into image, return their projection coordinates,\n # as well as their 3D positions\n def get_viewed_points(self, image_id):\n image = self.images[image_id]\n\n # get unfiltered points\n point3D_idxs = set(self.point3D_id_to_point3D_idx.values())\n point3D_idxs.discard(SceneManager.INVALID_POINT3D)\n point3D_idxs = list(point3D_idxs)\n points3D = self.points3D[point3D_idxs, :]\n\n # orient points relative to camera\n R = image.q.ToR()\n points3D = points3D.dot(R.T) + image.tvec[np.newaxis, :]\n points3D = points3D[points3D[:, 2] > 0, :] # keep points with positive z\n\n # put points into image coordinates\n camera = self.cameras[image.camera_id]\n points2D = 
points3D.dot(camera.get_camera_matrix().T)\n points2D = points2D[:, :2] / points2D[:, 2][:, np.newaxis]\n\n # keep points that are within the image\n mask = (\n (points2D[:, 0] >= 0)\n & (points2D[:, 1] >= 0)\n & (points2D[:, 0] < camera.width - 1)\n & (points2D[:, 1] < camera.height - 1)\n )\n\n return points2D[mask, :], points3D[mask, :]\n\n # ---------------------------------------------------------------------------\n\n def add_camera(self, camera):\n self.last_camera_id += 1\n self.cameras[self.last_camera_id] = camera\n return self.last_camera_id\n\n # ---------------------------------------------------------------------------\n\n def add_image(self, image):\n self.last_image_id += 1\n self.images[self.last_image_id] = image\n return self.last_image_id\n\n # ---------------------------------------------------------------------------\n\n def delete_images(self, image_list):\n # delete specified images\n for image_id in image_list:\n if image_id in self.images:\n del self.images[image_id]\n\n keep_set = set(self.images.keys())\n\n # delete references to specified images, and ignore any points that are\n # invalidated\n iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()\n\n for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:\n if point3D_idx == SceneManager.INVALID_POINT3D:\n continue\n\n mask = np.array(\n [\n image_id in keep_set\n for image_id in self.point3D_id_to_images[point3D_id][:, 0]\n ]\n )\n if np.any(mask):\n self.point3D_id_to_images[point3D_id] = self.point3D_id_to_images[\n point3D_id\n ][mask]\n else:\n self.point3D_id_to_point3D_idx[\n point3D_id\n ] = SceneManager.INVALID_POINT3D\n\n # ---------------------------------------------------------------------------\n\n # image_set: set of images whose points we'd like to keep\n # min/max triangulation angle: in degrees\n def filter_points3D(\n self,\n min_track_len=0,\n max_error=np.inf,\n min_tri_angle=0,\n max_tri_angle=180,\n image_set=set(),\n ):\n\n image_set = set(image_set)\n\n check_triangulation_angles = min_tri_angle > 0 or max_tri_angle < 180\n if check_triangulation_angles:\n max_tri_prod = np.cos(np.radians(min_tri_angle))\n min_tri_prod = np.cos(np.radians(max_tri_angle))\n\n iter_point3D_id_to_point3D_idx = self.point3D_id_to_point3D_idx.items()\n\n image_ids = []\n\n for point3D_id, point3D_idx in iter_point3D_id_to_point3D_idx:\n if point3D_idx == SceneManager.INVALID_POINT3D:\n continue\n\n if image_set or min_track_len > 0:\n image_ids = set(self.point3D_id_to_images[point3D_id][:, 0])\n\n # check if error and min track length are sufficient, or if none of\n # the selected cameras see the point\n if (\n len(image_ids) < min_track_len\n or self.point3D_errors[point3D_idx] > max_error\n or image_set\n and image_set.isdisjoint(image_ids)\n ):\n self.point3D_id_to_point3D_idx[\n point3D_id\n ] = SceneManager.INVALID_POINT3D\n\n # find dot product between all camera viewing rays\n elif check_triangulation_angles:\n xyz = self.points3D[point3D_idx, :]\n tvecs = np.array(\n [(self.images[image_id].tvec - xyz) for image_id in image_ids]\n )\n tvecs /= np.linalg.norm(tvecs, axis=-1)[:, np.newaxis]\n\n cos_theta = np.array([u.dot(v) for u, v in combinations(tvecs, 2)])\n\n # min_prod = cos(maximum viewing angle), and vice versa\n # if maximum viewing angle is too small or too large,\n # don't add this point\n if np.min(cos_theta) > max_tri_prod or np.max(cos_theta) < min_tri_prod:\n self.point3D_id_to_point3D_idx[\n point3D_id\n ] = SceneManager.INVALID_POINT3D\n\n # apply the 
filters to the image point3D_ids\n for image in self.images.values():\n mask = np.array(\n [\n self.point3D_id_to_point3D_idx.get(point3D_id, 0)\n == SceneManager.INVALID_POINT3D\n for point3D_id in image.point3D_ids\n ]\n )\n image.point3D_ids[mask] = SceneManager.INVALID_POINT3D\n\n # ---------------------------------------------------------------------------\n\n # scene graph: {image_id: [image_id: #shared points]}\n def build_scene_graph(self):\n self.scene_graph = defaultdict(lambda: defaultdict(int))\n point3D_iter = self.point3D_id_to_images.items()\n\n for i, (point3D_id, images) in enumerate(point3D_iter):\n if not self.point3D_valid(point3D_id):\n continue\n\n for image_id1, image_id2 in combinations(images[:, 0], 2):\n self.scene_graph[image_id1][image_id2] += 1\n self.scene_graph[image_id2][image_id1] += 1\n"
] | [
[
"numpy.radians",
"numpy.min",
"numpy.rec.fromarrays",
"numpy.linalg.norm",
"numpy.max",
"numpy.uint64",
"numpy.any",
"numpy.float64",
"numpy.savetxt",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bjnortier/ai-experiments-1 | [
"aff4496d84b059af6096f8f6b51d0ebcf6ed5c37"
] | [
"conveyor_2.py"
] | [
"import os\nimport glob\nfrom pathlib import Path\nimport numpy as np\nimport random\nimport carb\nfrom PIL import Image\nfrom tensorflow import keras\nfrom pxr import Usd, UsdGeom, Gf, UsdPhysics\nimport omni.kit\nfrom omni.isaac.examples.base_sample import BaseSample\nfrom omni.isaac.core.objects import DynamicCuboid\nfrom omni.isaac.core.utils.prims import create_prim, delete_prim\nfrom omni.usd import get_context\nfrom omni.kit.viewport import get_viewport_interface\nfrom omni.isaac.core.prims.xform_prim import XFormPrim\nfrom omni.isaac.core.materials import PreviewSurface\nfrom omni.isaac.core.utils.rotations import euler_angles_to_quat\nfrom omni.syntheticdata import sensors\nimport omni.syntheticdata._syntheticdata as sd\n\n\ndef setColliderSubtree(prim, approximationShape=\"none\", execute_command_fn=None):\n pit = iter(Usd.PrimRange(prim))\n for p in pit:\n if p.GetMetadata(\"hide_in_stage_window\"):\n pit.PruneChildren()\n continue\n if p.IsA(UsdGeom.Gprim) or p.IsInstanceable():\n if len(p.GetAttribute(\"faceVertexIndices\").Get()) > 0:\n omni.physx.scripts.utils.setCollider(p, approximationShape, execute_command_fn)\n\n\ndef setRigidBody(prim, approximationShape, kinematic, custom_execute_fn=None):\n omni.physx.scripts.utils.setPhysics(prim, kinematic, custom_execute_fn)\n\n if prim.IsA(UsdGeom.Xformable):\n setColliderSubtree(prim, approximationShape, custom_execute_fn)\n else:\n omni.physx.scripts.utils.setCollider(prim, approximationShape, custom_execute_fn)\n\n\ndef create_light():\n create_prim(\n \"/World/SphereLight\",\n \"SphereLight\",\n position=np.array([0, 500, 500]),\n attributes={\n \"radius\": 150,\n \"intensity\": 5e4\n }\n )\n\n\ndef create_classification_camera():\n create_prim(\n \"/World/ClassificationCamera\",\n \"Camera\",\n orientation=np.array([0.33, 0.197, 0.464, 0.794]),\n position=np.array([151, 250, 135])\n )\n\n\ndef find_usd_assets(shapenet_dir, categories, max_asset_size=50):\n \"\"\"Look for USD files under root/category for each category specified.\n For each category, generate a list of all USD files found and select\n assets up to split * len(num_assets) if `train=True`, otherwise select the\n remainder.\n \"\"\"\n from omni.isaac.shapenet.utils import LABEL_TO_SYNSET\n\n references = {}\n for category in categories:\n category_id = LABEL_TO_SYNSET[category]\n all_assets = glob.glob(\n os.path.join(shapenet_dir, category_id, \"*/*.usd\"),\n recursive=True)\n if max_asset_size is None:\n assets_filtered = all_assets\n else:\n assets_filtered = []\n for a in all_assets:\n if os.stat(a).st_size > max_asset_size * 1e6:\n carb.log_warn(\n f\"{a} skipped as it exceeded the max \\\n size {max_asset_size} MB.\")\n else:\n assets_filtered.append(a)\n num_assets = len(assets_filtered)\n if num_assets == 0:\n raise ValueError(\n f\"No USDs found for category {category} \\\n under max size {max_asset_size} MB.\")\n\n references[category] = assets_filtered\n\n return references\n\n\ndef create_conveyor_anchor(plate_size):\n size = 5\n conveyor_anchor = create_prim(\n \"/World/Conveyor/Anchor\",\n \"Cube\",\n position=np.array([0.0, -plate_size/2 - size, 0.0]),\n scale=np.array([plate_size / 2, size, size]))\n conveyor_anchor.GetAttribute(\"visibility\").Set(\"invisible\")\n return conveyor_anchor\n\n\ndef create_conveyor_plate(stage, size, index):\n plate_path = f\"/World/Conveyor/Plates/Plate{index + 1}\"\n plate = DynamicCuboid(\n prim_path=plate_path,\n position=np.array([0, index * 100, 0.0]),\n size=np.array([size - 5, size - 5, 10.0]),\n 
color=np.array([0.28, 0.65, 1.0])\n )\n\n # prismatic joint\n joint_path = f\"/World/Conveyor/Joints/PrismaticJoint{index + 1}\"\n prismatic_joint = UsdPhysics.PrismaticJoint.Define(stage, joint_path)\n prismatic_joint.CreateAxisAttr(\"Y\")\n prismatic_joint.CreateBody0Rel().SetTargets([\"/World/Conveyor/Anchor\"])\n prismatic_joint.CreateBody1Rel().SetTargets([plate_path])\n prismatic_joint.CreateLocalPos0Attr().Set(Gf.Vec3f(0.0, 1.0, 0.0))\n prismatic_joint.CreateLocalPos1Attr().Set(Gf.Vec3f(0.0, -0.5, 0.0))\n\n # add linear drive\n driver = UsdPhysics.DriveAPI.Apply(\n prismatic_joint.GetPrim(),\n \"linear\")\n driver.CreateTypeAttr(\"force\")\n driver.CreateMaxForceAttr(1000)\n driver.CreateTargetVelocityAttr(200.0)\n driver.CreateDampingAttr(1e10)\n driver.CreateStiffnessAttr(0)\n return plate\n\n\ndef create_pusher(stage, plate_size, index):\n actuator_path = f\"/World/Pushers/Actuators/Actuator{index + 1}\"\n anchor_path = f\"/World/Pushers/Anchors/Anchor{index + 1}\"\n depth = 10\n \n anchor = create_prim(\n anchor_path,\n \"Cube\",\n position=np.array([\n -plate_size/2 - depth - 5,\n (index + 2) * plate_size * 2,\n 20.0]),\n scale=np.array([5, 5, 5]))\n anchor.GetAttribute(\"visibility\").Set(\"invisible\")\n\n pusher = DynamicCuboid(\n prim_path=actuator_path,\n position=np.array([\n -plate_size/2 - 5,\n (index + 2) * plate_size * 2,\n 20.0]),\n size=np.array([depth, plate_size * 2, 30]),\n color=np.array([0.1, 0.1, 0.5])\n )\n\n mass_api = UsdPhysics.MassAPI.Apply(pusher.prim)\n mass_api.CreateMassAttr(1)\n\n # Prismatic joint \n joint_path = f\"/World/Pushers/Joints/Joint{index + 1}\"\n joint = UsdPhysics.PrismaticJoint.Define(stage, joint_path)\n joint.CreateAxisAttr(\"X\")\n joint.CreateBody0Rel().SetTargets([anchor_path])\n joint.CreateBody1Rel().SetTargets([actuator_path])\n joint.CreateLocalPos0Attr().Set(Gf.Vec3f(1.0, 0.0, 0.0))\n joint.CreateLocalPos1Attr().Set(Gf.Vec3f(-0.5, 0.0, 0.0))\n\n # Linear drive. No position target is set, only activated when needed. 
\n driver = UsdPhysics.DriveAPI.Apply(joint.GetPrim(), \"linear\")\n driver.CreateTypeAttr(\"force\")\n driver.CreateMaxForceAttr(1000)\n driver.CreateDampingAttr(2e4)\n driver.CreateStiffnessAttr(1e5)\n\n return driver\n\n\ndef create_bucket(stage, plate_size, index):\n bucket_path = f\"/World/Buckets/Bucket{index + 1}\"\n\n width = plate_size * 2\n depth = width\n height = 20\n a = create_prim(\n f\"{bucket_path}/a\",\n \"Cube\",\n position=np.array([\n plate_size/2 + depth/2 - 10,\n (index + 2) * 2 * plate_size - width / 2,\n -height - 5\n ]),\n scale=np.array([depth/2, 5, height]),\n attributes={\n \"primvars:displayColor\": [(1.0, 1.0, 1.0)]\n }\n )\n b = create_prim(\n f\"{bucket_path}/b\",\n \"Cube\",\n position=np.array([\n plate_size/2 + depth/2 - 10,\n (index + 2) * 2 * plate_size + width / 2,\n -height - 5\n ]),\n scale=np.array([depth/2, 5, height]),\n attributes={\n \"primvars:displayColor\": [(1.0, 1.0, 1.0)]\n }\n )\n c = create_prim(\n f\"{bucket_path}/c\",\n \"Cube\",\n position=np.array([\n plate_size/2 + 5 - 10,\n (index + 2) * 2 * plate_size,\n -height - 5\n ]),\n scale=np.array([5, width/2 - 5, height]),\n attributes={\n \"primvars:displayColor\": [(1.0, 1.0, 1.0)]\n }\n )\n d = create_prim(\n f\"{bucket_path}/d\",\n \"Cube\",\n position=np.array([\n plate_size/2 + depth - 5 - 10,\n (index + 2) * 2 * plate_size,\n -height - 5\n ]),\n scale=np.array([5, width/2 - 5, height]),\n attributes={\n \"primvars:displayColor\": [(1.0, 1.0, 1.0)]\n }\n )\n UsdPhysics.CollisionAPI.Apply(a)\n UsdPhysics.CollisionAPI.Apply(b)\n UsdPhysics.CollisionAPI.Apply(c)\n UsdPhysics.CollisionAPI.Apply(d)\n \n\nclass Conveyor2(BaseSample):\n def __init__(self) -> None:\n super().__init__()\n return\n\n def setup_scene(self):\n world = self.get_world()\n\n self.model = keras.models.load_model(\"/home/bjnortier/isaac/sorting/save_at_30-augmented-3.h5\")\n self.categories = [\n \"bus\", \"car\", \"plane\", \"rocket\", \"watercraft\"\n ]\n shapenet_dir = Path(os.environ[\"SHAPENET_LOCAL_DIR\"])\n self.asset_references = find_usd_assets(\n f\"{shapenet_dir}_nomat\",\n self.categories)\n\n self.num_classes = len(self.categories) \n self.num_plates = self.num_classes * 2 + 4\n \n plate_size = 100.0\n self.max_plate_position = plate_size * self.num_plates\n self.widget_index = 0\n self.plate_reset_count = 0\n\n stage = get_context().get_stage()\n world.scene.add_ground_plane(z_position=-45.0)\n create_light()\n create_classification_camera()\n create_conveyor_anchor(plate_size)\n \n self.plates = []\n for i in range(self.num_plates):\n self.plates.append(create_conveyor_plate(stage, plate_size, i))\n\n self.pushers = []\n for i in range(self.num_classes):\n self.pushers.append(create_pusher(stage, plate_size, i))\n\n for i in range(self.num_classes):\n create_bucket(stage, plate_size, i)\n\n viewport_interface = get_viewport_interface()\n viewport_handle = viewport_interface.create_instance()\n vp = viewport_interface.get_viewport_window(viewport_handle)\n vp.set_active_camera(\"/World/ClassificationCamera\")\n vp.set_texture_resolution(299, 299)\n self.classification_viewport = vp\n\n self.sd_interface = sd.acquire_syntheticdata_interface()\n self.is_sensor_initialized = False\n\n # # Create the first widget\n self.drop_widget(y_position=100.0)\n\n return\n\n def drop_widget(self, y_position=0.0):\n category = random.choice(self.categories)\n asset_reference = random.choice(self.asset_references[category])\n widget_path = f\"/World/widget_{self.widget_index}\"\n widget_prim = create_prim(\n 
widget_path,\n \"Xform\",\n scale=np.array([50.0, 50.0, 50.0]),\n orientation=euler_angles_to_quat(\n np.array([90.0, 0.0, 0.0]),\n degrees=True),\n position=np.array([0.0, y_position, 50.0]),\n usd_path=asset_reference,\n semantic_label=category)\n self.current_widget_category = category\n\n widget = XFormPrim(widget_path)\n material = PreviewSurface(\n prim_path=\"/World/Looks/ShapeMaterial\",\n color=np.array([0.1, 0.6, 0.1]))\n widget.apply_visual_material(material)\n\n # Determine bounds and translate to sit on the Z=0 plane\n orientation_on_plane = euler_angles_to_quat(\n np.array([90.0, 0.0, 0.0]),\n degrees=True)\n widget.set_local_pose(\n np.array([0.0, 0.0, 0.0]),\n orientation_on_plane)\n bounds = UsdGeom.Mesh(widget_prim).ComputeWorldBound(0.0, \"default\")\n new_position = np.array([0.0, 0.0, -bounds.GetBox().GetMin()[2] + 5.0])\n widget.set_local_pose(new_position)\n\n mass_api = UsdPhysics.MassAPI.Apply(widget_prim)\n mass_api.CreateMassAttr(1)\n\n setRigidBody(widget_prim, \"convexHull\", False) \n\n self.widget = widget\n self.widget_index += 1\n self.widget_class = None\n self.classification_requested = False\n self.classification_complete = False\n self.arm_activated = False\n for pusher in self.pushers:\n pusher.CreateTargetPositionAttr(0.0)\n\n async def setup_post_load(self):\n self._world = self.get_world()\n self._world.add_physics_callback(\"sim_step\", callback_fn=self.sim_step_callback) \n return\n\n def sim_step_callback(self, step_size):\n if not self.is_sensor_initialized:\n print(\"Waiting for sensor to initialize\")\n sensor = sensors.create_or_retrieve_sensor(\n self.classification_viewport, sd.SensorType.Rgb)\n self.is_sensor_initialized = \\\n self.sd_interface.is_sensor_initialized(sensor)\n if self.is_sensor_initialized:\n print(\"Sensor initialized!\")\n\n for plate in self.plates:\n # When a plate reaches the end of the conveyor belt,\n # reset its position to the start. Drop a widget if it's\n # the first plate\n plate_position, _ = plate.get_world_pose()\n if plate_position[1] > self.max_plate_position:\n plate_position[1] -= self.max_plate_position\n plate.set_world_pose(plate_position)\n self.plate_reset_count += 1\n if self.plate_reset_count == self.num_plates:\n self.plate_reset_count = 0\n self.drop_widget()\n\n # Classify the widget when it passes under the camera\n if not self.classification_requested:\n widget_position, _ = self.widget.get_world_pose()\n if widget_position[1] > 100:\n self.capture_gt()\n self.classification_requested = True\n\n if self.classification_complete and not self.arm_activated:\n widget_position, _ = self.widget.get_world_pose()\n if widget_position[1] > (self.widget_class + 1) * 200 + 100:\n self.arm_activated = True \n self.pushers[self.widget_class].CreateTargetPositionAttr(120.0)\n\n def capture_gt(self):\n rgb = sensors.get_rgb(self.classification_viewport)\n # Discard alpha channel\n rgb = rgb[:, :, :3]\n input = np.expand_dims(rgb, axis=0)\n prediction = self.model.predict(input)\n self.widget_class = np.argmax(prediction)\n \n print(f\"actual:predicted {self.current_widget_category}:{self.categories[self.widget_class]}\")\n image = Image.fromarray(rgb)\n image.save(\"/tmp/rgb.png\")\n self.classification_complete = True\n \n async def setup_pre_reset(self):\n return\n\n async def setup_post_reset(self):\n return\n\n def world_cleanup(self):\n return\n\n"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.array",
"numpy.expand_dims",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
vixadd/tensorflow | [
"8c624204eb686a91779149dc500e6c8c60096074",
"8c624204eb686a91779149dc500e6c8c60096074",
"8c624204eb686a91779149dc500e6c8c60096074",
"8c624204eb686a91779149dc500e6c8c60096074",
"8c624204eb686a91779149dc500e6c8c60096074"
] | [
"tensorflow/python/ops/parallel_for/gradients.py",
"tensorflow/python/ops/ragged/ragged_where_op.py",
"tensorflow/python/data/experimental/ops/iterator_ops.py",
"tensorflow/python/data/kernel_tests/reduce_test.py",
"tensorflow/python/framework/random_seed.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Jacobian ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import gradients_impl as gradient_ops\nfrom tensorflow.python.ops.parallel_for import control_flow_ops\nfrom tensorflow.python.util import nest\n\n\ndef jacobian(output, inputs, use_pfor=True, parallel_iterations=None):\n \"\"\"Computes jacobian of `output` w.r.t. `inputs`.\n\n Args:\n output: A tensor.\n inputs: A tensor or a nested structure of tensor objects.\n use_pfor: If true, uses pfor for computing the jacobian. Else uses\n tf.while_loop.\n parallel_iterations: A knob to control how many iterations and dispatched in\n parallel. This knob can be used to control the total memory usage.\n\n Returns:\n A tensor or a nested structure of tensors with the same structure as\n `inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding\n value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has\n shape [x_1, ..., x_m], the corresponding jacobian has shape\n [y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is\n sparse (IndexedSlices), jacobian function currently makes it dense and\n returns a Tensor instead. This may change in the future.\n \"\"\"\n flat_inputs = nest.flatten(inputs)\n output_tensor_shape = output.shape\n output_shape = array_ops.shape(output)\n output = array_ops.reshape(output, [-1])\n\n def loop_fn(i):\n y = array_ops.gather(output, i)\n return gradient_ops.gradients(y, flat_inputs)\n\n try:\n output_size = int(output.shape[0])\n except TypeError:\n output_size = array_ops.shape(output)[0]\n\n if use_pfor:\n pfor_outputs = control_flow_ops.pfor(\n loop_fn, output_size, parallel_iterations=parallel_iterations)\n else:\n pfor_outputs = control_flow_ops.for_loop(\n loop_fn,\n [output.dtype] * len(flat_inputs),\n output_size,\n parallel_iterations=parallel_iterations)\n\n for i, out in enumerate(pfor_outputs):\n if isinstance(out, ops.Tensor):\n new_shape = array_ops.concat(\n [output_shape, array_ops.shape(out)[1:]], axis=0)\n out = array_ops.reshape(out, new_shape)\n out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))\n pfor_outputs[i] = out\n\n return nest.pack_sequence_as(inputs, pfor_outputs)\n\n\ndef batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):\n \"\"\"Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.\n\n e.g.\n x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n y = x * x\n jacobian = batch_jacobian(y, x)\n # => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]\n\n Args:\n output: A tensor with shape [b, y1, ..., y_n]. 
`output[i,...]` should\n only depend on `inp[i,...]`.\n inp: A tensor with shape [b, x1, ..., x_m]\n use_pfor: If true, uses pfor for computing the Jacobian. Else uses a\n tf.while_loop.\n parallel_iterations: A knob to control how many iterations are vectorized\n and dispatched in parallel. The default value of None, when use_pfor is\n true, corresponds to vectorizing all the iterations. When use_pfor is\n false, the default value of None corresponds to parallel_iterations=10.\n This knob can be used to control the total memory usage.\n\n Returns:\n A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`\n is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked\n per-example jacobians.\n\n Raises:\n ValueError: if first dimension of `output` and `inp` do not match.\n \"\"\"\n output_shape = output.shape\n if not output_shape[0].is_compatible_with(inp.shape[0]):\n raise ValueError(f\"Need first dimension of `output` shape ({output.shape}) \"\n f\"and `inp` shape ({inp.shape}) to match.\")\n if output_shape.is_fully_defined():\n batch_size = int(output_shape[0])\n output_row_size = output_shape.num_elements() // batch_size\n else:\n output_shape = array_ops.shape(output)\n batch_size = output_shape[0]\n output_row_size = array_ops.size(output) // batch_size\n inp_shape = array_ops.shape(inp)\n # Flatten output to 2-D.\n with ops.control_dependencies(\n [check_ops.assert_equal(batch_size, inp_shape[0])]):\n output = array_ops.reshape(output, [batch_size, output_row_size])\n\n def loop_fn(i):\n y = array_ops.gather(output, i, axis=1)\n return gradient_ops.gradients(y, inp)[0]\n\n if use_pfor:\n pfor_output = control_flow_ops.pfor(loop_fn, output_row_size,\n parallel_iterations=parallel_iterations)\n else:\n pfor_output = control_flow_ops.for_loop(\n loop_fn, output.dtype,\n output_row_size,\n parallel_iterations=parallel_iterations)\n if pfor_output is None:\n return None\n pfor_output = array_ops.reshape(pfor_output,\n [output_row_size, batch_size, -1])\n output = array_ops.transpose(pfor_output, [1, 0, 2])\n new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)\n return array_ops.reshape(output, new_shape)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"where operation for RaggedTensors.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport typing\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_concat_ops\nfrom tensorflow.python.ops.ragged import ragged_functional_ops\nfrom tensorflow.python.ops.ragged import ragged_gather_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.ops.ragged import ragged_tensor_shape\nfrom tensorflow.python.util import dispatch\n\n\[email protected]_for_api(array_ops.where_v2)\ndef where_v2(condition: ragged_tensor.RaggedOrDense,\n x: typing.Optional[ragged_tensor.RaggedOrDense] = None,\n y: typing.Optional[ragged_tensor.RaggedOrDense] = None,\n name=None):\n \"\"\"Return the elements where `condition` is `True`.\n\n : If both `x` and `y` are None: Retrieve indices of true elements.\n\n Returns the coordinates of true elements of `condition`. The coordinates\n are returned in a 2-D tensor with shape\n `[num_true_values, dim_size(condition)]`, where `result[i]` is the\n coordinates of the `i`th true value (in row-major order).\n\n : If both `x` and `y` are non-`None`: Multiplex between `x` and `y`.\n\n Choose an output shape from the shapes of `condition`, `x`, and `y` that\n all three shapes are broadcastable to; and then use the broadcasted\n `condition` tensor as a mask that chooses whether the corredsponding element\n in the output should be taken from `x` (if `condition` is true) or `y` (if\n `condition` is false).\n\n >>> # Example: retrieve indices of true elements\n >>> tf.where(tf.ragged.constant([[True, False], [True]]))\n <tf.Tensor: shape=(2, 2), dtype=int64, numpy= array([[0, 0], [1, 0]])>\n\n >>> # Example: multiplex between `x` and `y`\n >>> tf.where(tf.ragged.constant([[True, False], [True, False, True]]),\n ... tf.ragged.constant([['A', 'B'], ['C', 'D', 'E']]),\n ... tf.ragged.constant([['a', 'b'], ['c', 'd', 'e']]))\n <tf.RaggedTensor [[b'A', b'b'], [b'C', b'd', b'E']]>\n\n Args:\n condition: A potentially ragged tensor of type `bool`\n x: A potentially ragged tensor (optional).\n y: A potentially ragged tensor (optional). Must be specified if `x` is\n specified. 
Must have the same rank and type as `x`.\n name: A name of the operation (optional).\n\n Returns:\n : If both `x` and `y` are `None`:\n A `Tensor` with shape `(num_true, rank(condition))`.\n : Otherwise:\n A potentially ragged tensor with the same type as `x` and `y`, and whose\n shape is broadcast-compatible with `x`, `y`, and `condition`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-`None`; or when\n `condition`, `x`, and `y` have incompatible shapes.\n \"\"\"\n if (x is None) != (y is None):\n raise ValueError('x and y must be either both None or both non-None')\n\n with ops.name_scope('RaggedWhere', name, [condition, x, y]):\n condition = ragged_tensor.convert_to_tensor_or_ragged_tensor(\n condition, name='condition')\n if x is None:\n return _coordinate_where(condition)\n else:\n x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')\n y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y')\n condition, x, y = ragged_tensor.match_row_splits_dtypes(condition, x, y)\n return _elementwise_where_v2(condition, x, y)\n\n\[email protected]_for_api(array_ops.where)\ndef where(condition: ragged_tensor.RaggedOrDense,\n x: typing.Optional[ragged_tensor.RaggedOrDense] = None,\n y: typing.Optional[ragged_tensor.RaggedOrDense] = None,\n name=None):\n \"\"\"Return the elements, either from `x` or `y`, depending on the `condition`.\n\n : If both `x` and `y` are `None`:\n Returns the coordinates of true elements of `condition`. The coordinates\n are returned in a 2-D tensor with shape\n `[num_true_values, dim_size(condition)]`, where `result[i]` is the\n coordinates of the `i`th true value (in row-major order).\n\n : If both `x` and `y` are non-`None`:\n Returns a tensor formed by selecting values from `x` where condition is\n true, and from `y` when condition is false. In particular:\n\n : If `condition`, `x`, and `y` all have the same shape:\n\n * `result[i1...iN] = x[i1...iN]` if `condition[i1...iN]` is true.\n * `result[i1...iN] = y[i1...iN]` if `condition[i1...iN]` is false.\n\n : Otherwise:\n\n * `condition` must be a vector.\n * `x` and `y` must have the same number of dimensions.\n * The outermost dimensions of `condition`, `x`, and `y` must all have the\n same size.\n * `result[i] = x[i]` if `condition[i]` is true.\n * `result[i] = y[i]` if `condition[i]` is false.\n\n Args:\n condition: A potentially ragged tensor of type `bool`\n x: A potentially ragged tensor (optional).\n y: A potentially ragged tensor (optional). Must be specified if `x` is\n specified. 
Must have the same rank and type as `x`.\n name: A name of the operation (optional)\n\n Returns:\n : If both `x` and `y` are `None`:\n A `Tensor` with shape `(num_true, dim_size(condition))`.\n : Otherwise:\n A potentially ragged tensor with the same type, rank, and outermost\n dimension size as `x` and `y`.\n `result.ragged_rank = max(x.ragged_rank, y.ragged_rank)`.\n\n Raises:\n ValueError: When exactly one of `x` or `y` is non-`None`; or when\n `condition`, `x`, and `y` have incompatible shapes.\n\n #### Examples:\n\n >>> # Coordinates where condition is true.\n >>> condition = tf.ragged.constant([[True, False, True], [False, True]])\n >>> print(where(condition))\n tf.Tensor( [[0 0] [0 2] [1 1]], shape=(3, 2), dtype=int64)\n\n >>> # Elementwise selection between x and y, based on condition.\n >>> condition = tf.ragged.constant([[True, False, True], [False, True]])\n >>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']])\n >>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']])\n >>> print(where(condition, x, y))\n <tf.RaggedTensor [[b'A', b'b', b'C'], [b'd', b'E']]>\n\n >>> # Row selection between x and y, based on condition.\n >>> condition = [True, False]\n >>> x = tf.ragged.constant([['A', 'B', 'C'], ['D', 'E']])\n >>> y = tf.ragged.constant([['a', 'b', 'c'], ['d', 'e']])\n >>> print(where(condition, x, y))\n <tf.RaggedTensor [[b'A', b'B', b'C'], [b'd', b'e']]>\n \"\"\"\n if (x is None) != (y is None):\n raise ValueError('x and y must be either both None or both non-None')\n with ops.name_scope('RaggedWhere', name, [condition, x, y]):\n condition = ragged_tensor.convert_to_tensor_or_ragged_tensor(\n condition, name='condition')\n if x is None:\n return _coordinate_where(condition)\n else:\n x = ragged_tensor.convert_to_tensor_or_ragged_tensor(x, name='x')\n y = ragged_tensor.convert_to_tensor_or_ragged_tensor(y, name='y')\n condition, x, y = ragged_tensor.match_row_splits_dtypes(condition, x, y)\n return _elementwise_where(condition, x, y)\n\n\ndef _elementwise_where(condition, x, y):\n \"\"\"Ragged version of tf.where(condition, x, y).\"\"\"\n condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)\n x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)\n y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)\n\n if not (condition_is_ragged or x_is_ragged or y_is_ragged):\n return array_ops.where(condition, x, y)\n\n elif condition_is_ragged and x_is_ragged and y_is_ragged:\n return ragged_functional_ops.map_flat_values(array_ops.where, condition, x,\n y)\n elif not condition_is_ragged:\n # Concatenate x and y, and then use `gather` to assemble the selected rows.\n condition.shape.assert_has_rank(1)\n x_and_y = ragged_concat_ops.concat([x, y], axis=0)\n x_nrows = _nrows(x, out_type=x_and_y.row_splits.dtype)\n y_nrows = _nrows(y, out_type=x_and_y.row_splits.dtype)\n indices = array_ops.where(condition, math_ops.range(x_nrows),\n x_nrows + math_ops.range(y_nrows))\n return ragged_gather_ops.gather(x_and_y, indices)\n\n else:\n raise ValueError('Input shapes do not match.')\n\n\ndef _elementwise_where_v2(condition, x, y):\n \"\"\"Ragged version of tf.where_v2(condition, x, y).\"\"\"\n # Broadcast x, y, and condition to have the same shape.\n if not (condition.shape.is_fully_defined() and x.shape.is_fully_defined() and\n y.shape.is_fully_defined() and x.shape == y.shape and\n condition.shape == x.shape):\n shape_c = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(\n condition)\n shape_x = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(x)\n 
shape_y = ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor(y)\n shape = ragged_tensor_shape.broadcast_dynamic_shape(\n shape_c, ragged_tensor_shape.broadcast_dynamic_shape(shape_x, shape_y))\n condition = ragged_tensor_shape.broadcast_to(condition, shape)\n x = ragged_tensor_shape.broadcast_to(x, shape)\n y = ragged_tensor_shape.broadcast_to(y, shape)\n\n condition_is_ragged = isinstance(condition, ragged_tensor.RaggedTensor)\n x_is_ragged = isinstance(x, ragged_tensor.RaggedTensor)\n y_is_ragged = isinstance(y, ragged_tensor.RaggedTensor)\n if not (condition_is_ragged or x_is_ragged or y_is_ragged):\n return array_ops.where_v2(condition, x, y)\n\n return ragged_functional_ops.map_flat_values(array_ops.where_v2, condition, x,\n y)\n\n\ndef _coordinate_where(condition):\n \"\"\"Ragged version of tf.where(condition).\"\"\"\n if not isinstance(condition, ragged_tensor.RaggedTensor):\n return array_ops.where(condition)\n\n # The coordinate for each `true` value in condition.values.\n selected_coords = _coordinate_where(condition.values)\n\n # Convert the first index in each coordinate to a row index and column index.\n condition = condition.with_row_splits_dtype(selected_coords.dtype)\n first_index = selected_coords[:, 0]\n selected_rows = array_ops.gather(condition.value_rowids(), first_index)\n selected_row_starts = array_ops.gather(condition.row_splits, selected_rows)\n selected_cols = first_index - selected_row_starts\n\n # Assemble the row & column index with the indices for inner dimensions.\n return array_ops.concat([\n array_ops.expand_dims(selected_rows, 1),\n array_ops.expand_dims(selected_cols, 1), selected_coords[:, 1:]\n ],\n axis=1)\n\n\ndef _nrows(rt_input, out_type):\n if isinstance(rt_input, ragged_tensor.RaggedTensor):\n return rt_input.nrows(out_type=out_type)\n else:\n return array_ops.shape(rt_input, out_type=out_type)[0]\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Iterator ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.data.ops import options as options_lib\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.training import session_run_hook\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef _convert_external_state_policy_to_enum(external_state_policy):\n if isinstance(external_state_policy, options_lib.ExternalStatePolicy):\n return external_state_policy\n if external_state_policy == \"warn\":\n return options_lib.ExternalStatePolicy.WARN\n if external_state_policy == \"ignore\":\n return options_lib.ExternalStatePolicy.IGNORE\n if external_state_policy == \"fail\":\n return options_lib.ExternalStatePolicy.FAIL\n raise ValueError(\n f\"Invalid `ExternalStatePolicy.` Supported values include 'warn', \"\n f\"'ignore', and 'fail.' Received {external_state_policy}.\"\n )\n\n\n@tf_export(\"data.experimental.make_saveable_from_iterator\")\[email protected](\n None, \"`make_saveable_from_iterator` is intended for use in TF1 with \"\n \"`tf.compat.v1.Saver`. In TF2, use `tf.train.Checkpoint` instead.\")\ndef make_saveable_from_iterator(iterator, external_state_policy=None):\n \"\"\"Returns a SaveableObject for saving/restoring iterator state using Saver.\n\n Args:\n iterator: Iterator.\n external_state_policy: A string that identifies how to handle input\n pipelines that depend on external state. Possible values are\n 'ignore': The external state is silently ignored.\n 'warn': The external state is ignored, logging a warning.\n 'fail': The operation fails upon encountering external state.\n By default we set it to 'fail'.\n\n Returns:\n A SaveableObject for saving/restoring iterator state using Saver.\n\n Raises:\n ValueError: If iterator does not support checkpointing.\n ValueError: If `external_state_policy` is not one of 'warn', 'ignore' or\n 'fail'.\n\n For example:\n\n ```python\n with tf.Graph().as_default():\n ds = tf.data.Dataset.range(10)\n iterator = ds.make_initializable_iterator()\n # Build the iterator SaveableObject.\n saveable_obj = tf.data.experimental.make_saveable_from_iterator(iterator)\n # Add the SaveableObject to the SAVEABLE_OBJECTS collection so\n # it can be automatically saved using Saver.\n tf.compat.v1.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)\n saver = tf.compat.v1.train.Saver()\n\n while continue_training:\n ... 
Perform training ...\n if should_save_checkpoint:\n saver.save()\n ```\n\n Note: When restoring the iterator, the existing iterator state is completely\n discarded. This means that any changes you may have made to the Dataset\n graph will be discarded as well! This includes the new Dataset graph\n that you may have built during validation. So, while running validation,\n make sure to run the initializer for the validation input pipeline after\n restoring the checkpoint.\n\n Note: Not all iterators support checkpointing yet. Attempting to save the\n state of an unsupported iterator will throw an error.\n \"\"\"\n if external_state_policy is None:\n external_state_policy = \"fail\"\n policy_enum = _convert_external_state_policy_to_enum(external_state_policy)\n return iterator_ops._IteratorSaveable( # pylint: disable=protected-access\n iterator._iterator_resource, # pylint: disable=protected-access\n iterator._iterator_resource.name, # pylint: disable=protected-access\n external_state_policy=policy_enum)\n\n\n@tf_export(\"data.experimental.CheckpointInputPipelineHook\")\nclass CheckpointInputPipelineHook(session_run_hook.SessionRunHook):\n \"\"\"Checkpoints input pipeline state every N steps or seconds.\n\n This hook saves the state of the iterators in the `Graph` so that when\n training is resumed the input pipeline continues from where it left off.\n This could potentially avoid overfitting in certain pipelines where the\n number of training steps per eval is small compared to the dataset\n size or if the training pipeline is pre-empted.\n\n Differences from `CheckpointSaverHook`:\n 1. Saves only the input pipelines in the \"iterators\" collection and not the\n global variables or other saveable objects.\n 2. Does not write the `GraphDef` and `MetaGraphDef` to the summary.\n\n Example of checkpointing the training pipeline:\n\n ```python\n est = tf.estimator.Estimator(model_fn)\n while True:\n est.train(\n train_input_fn,\n hooks=[tf.data.experimental.CheckpointInputPipelineHook(est)],\n steps=train_steps_per_eval)\n # Note: We do not pass the hook here.\n metrics = est.evaluate(eval_input_fn)\n if should_stop_the_training(metrics):\n break\n ```\n\n This hook should be used if the input pipeline state needs to be saved\n separate from the model checkpoint. Doing so may be useful for a few reasons:\n 1. The input pipeline checkpoint may be large, if there are large shuffle\n or prefetch buffers for instance, and may bloat the checkpoint size.\n 2. If the input pipeline is shared between training and validation, restoring\n the checkpoint during validation may override the validation input\n pipeline.\n\n For saving the input pipeline checkpoint alongside the model weights use\n `tf.data.experimental.make_saveable_from_iterator` directly to create a\n `SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,\n that you will need to be careful not to restore the training iterator during\n eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS\n collection when building the eval graph.\n \"\"\"\n\n def __init__(self, estimator, external_state_policy=None):\n \"\"\"Initializes a `CheckpointInputPipelineHook`.\n\n If the input pipeline depends on external state (e.g. seeds for\n RandomUniform) beyond the input pipeline, this hook would be unable to\n serialize and deserialize that state. If it's acceptable to ignore that\n state, change the external_state_policy argument to 'warn' or 'ignore'. 
For example:\n\n ```python\n est = tf.estimator.Estimator(model_fn)\n while True:\n est.train(\n train_input_fn,\n hooks=[tf.data.experimental.CheckpointInputPipelineHook(\n est, external_state_policy='warn')],\n steps=train_steps_per_eval)\n # Note: We do not pass the hook here.\n metrics = est.evaluate(eval_input_fn)\n if should_stop_the_training(metrics):\n break\n ```\n\n Args:\n estimator: Estimator.\n external_state_policy: A string that identifies how to handle input\n pipelines that depend on external state. Possible values are\n 'ignore': The external state is silently ignored.\n 'warn': The external state is ignored, logging a warning.\n 'fail': The operation fails upon encountering external state.\n By default we set it to 'fail'.\n\n Raises:\n ValueError: One of `save_steps` or `save_secs` should be set.\n ValueError: At most one of saver or scaffold should be set.\n ValueError: If `external_state_policy` is not one of 'warn', 'ignore' or\n 'fail'.\n \"\"\"\n if external_state_policy is None:\n external_state_policy = \"fail\"\n self._external_state_policy = _convert_external_state_policy_to_enum(\n external_state_policy)\n # `checkpoint_basename` is \"input.ckpt\" for non-distributed pipelines or\n # of the form \"input_<task_type>_<task_id>.ckpt\" for distributed pipelines.\n # Note: The default `checkpoint_basename` used by `CheckpointSaverHook` is\n # \"model.ckpt\". We intentionally choose the input pipeline checkpoint prefix\n # to be different to avoid conflicts with the model checkpoint.\n\n # pylint: disable=protected-access\n checkpoint_prefix = \"input\"\n if estimator._config.num_worker_replicas > 1:\n # Distributed setting.\n suffix = \"_{}_{}\".format(estimator._config.task_type,\n estimator._config.task_id)\n checkpoint_prefix += suffix\n # pylint: enable=protected-access\n\n # We use a composition paradigm instead of inheriting from\n # `CheckpointSaverHook` because `Estimator` does an `isinstance` check\n # to check whether a `CheckpointSaverHook` is already present in the list\n # of hooks and if not, adds one. Inheriting from `CheckpointSaverHook`\n # would thwart this behavior. This hook checkpoints *only the iterators*\n # and not the graph variables.\n self._checkpoint_saver_hook = basic_session_run_hooks.CheckpointSaverHook(\n estimator.model_dir,\n save_secs=estimator._config.save_checkpoints_secs, # pylint: disable=protected-access\n save_steps=estimator._config.save_checkpoints_steps, # pylint: disable=protected-access\n checkpoint_basename=checkpoint_prefix + \".ckpt\")\n\n # Name for the protocol buffer file that will contain the list of most\n # recent checkpoints stored as a `CheckpointState` protocol buffer.\n # This file, kept in the same directory as the checkpoint files, is\n # automatically managed by the `Saver` to keep track of recent checkpoints.\n # The default name used by the `Saver` for this file is \"checkpoint\". 
Here\n # we use the name \"checkpoint_<checkpoint_prefix>\" so that in case the\n # `checkpoint_dir` is the same as the model checkpoint directory, there are\n # no conflicts during restore.\n self._latest_filename = \"checkpoint_\" + checkpoint_prefix\n\n def begin(self):\n # Build a Saver that saves all iterators in the `GLOBAL_ITERATORS`\n # collection if no `Saver` or `Scaffold` is provided.\n # pylint: disable=protected-access\n if (self._checkpoint_saver_hook._saver is None and\n self._checkpoint_saver_hook._scaffold is None):\n iterators = ops.get_collection(iterator_ops.GLOBAL_ITERATORS)\n saveables = [\n iterator_ops._IteratorSaveable(\n i, i.name, external_state_policy=self._external_state_policy)\n for i in iterators\n ]\n self._checkpoint_saver_hook._saver = _CustomSaver(\n saveables, self._latest_filename, sharded=True)\n # pylint: enable=protected-access\n self._checkpoint_saver_hook.begin()\n\n def after_create_session(self, session, coord):\n # If a new session was created, we set _first_run to True so that we can\n # restore if needed.\n self._first_run = True\n\n def _restore_or_save_initial_ckpt(self, session):\n # Ideally this should be run in after_create_session but is not for the\n # following reason:\n # Currently there is no way of enforcing an order of running the\n # `SessionRunHooks`. Hence it is possible that the `_DatasetInitializerHook`\n # is run *after* this hook. That is troublesome because\n # 1. If a checkpoint exists and this hook restores it, the initializer hook\n # will override it.\n # 2. If no checkpoint exists, this hook will try to save an uninitialized\n # iterator which will result in an exception.\n #\n # As a temporary fix we enter the following implicit contract between this\n # hook and the _DatasetInitializerHook.\n # 1. The _DatasetInitializerHook initializes the iterator in the call to\n # after_create_session.\n # 2. This hook saves the iterator on the first call to `before_run()`, which\n # is guaranteed to happen after `after_create_session()` of all hooks\n # have been run.\n\n # Check if there is an existing checkpoint. 
If so, restore from it.\n # pylint: disable=protected-access\n latest_checkpoint_path = checkpoint_management.latest_checkpoint(\n self._checkpoint_saver_hook._checkpoint_dir,\n latest_filename=self._latest_filename)\n if latest_checkpoint_path:\n self._checkpoint_saver_hook._get_saver().restore(session,\n latest_checkpoint_path)\n else:\n # The checkpoint saved here is the state at step \"global_step\".\n # Note: We do not save the GraphDef or MetaGraphDef here.\n global_step = session.run(self._checkpoint_saver_hook._global_step_tensor)\n self._checkpoint_saver_hook._save(session, global_step)\n self._checkpoint_saver_hook._timer.update_last_triggered_step(global_step)\n # pylint: enable=protected-access\n\n def before_run(self, run_context):\n if self._first_run:\n self._restore_or_save_initial_ckpt(run_context.session)\n self._first_run = False\n return self._checkpoint_saver_hook.before_run(run_context)\n\n def after_run(self, run_context, run_values):\n self._checkpoint_saver_hook.after_run(run_context, run_values)\n\n def end(self, session):\n self._checkpoint_saver_hook.end(session)\n\n\nclass _CustomSaver(saver_lib.Saver):\n \"\"\"`Saver` with a different default `latest_filename`.\n\n This is used in the `CheckpointInputPipelineHook` to avoid conflicts with\n the model ckpt saved by the `CheckpointSaverHook`.\n \"\"\"\n\n def __init__(self, var_list, latest_filename, sharded=False):\n super(_CustomSaver, self).__init__(var_list, sharded=sharded)\n self._latest_filename = latest_filename\n\n def save(self,\n sess,\n save_path,\n global_step=None,\n latest_filename=None,\n meta_graph_suffix=\"meta\",\n write_meta_graph=True,\n write_state=True,\n strip_default_attrs=False):\n return super(_CustomSaver, self).save(\n sess, save_path, global_step, latest_filename or self._latest_filename,\n meta_graph_suffix, write_meta_graph, write_state, strip_default_attrs)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.Dataset.reduce()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.experimental.ops import testing\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass ReduceTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n @combinations.generate(test_base.default_test_combinations())\n def testSum(self):\n for i in range(10):\n ds = dataset_ops.Dataset.range(1, i + 1)\n result = ds.reduce(np.int64(0), lambda x, y: x + y)\n self.assertEqual(((i + 1) * i) // 2, self.evaluate(result))\n\n @combinations.generate(test_base.default_test_combinations())\n def testSumTuple(self):\n\n def reduce_fn(state, value):\n v1, v2 = value\n return state + v1 + v2\n\n for i in range(10):\n ds = dataset_ops.Dataset.range(1, i + 1)\n ds = dataset_ops.Dataset.zip((ds, ds))\n result = ds.reduce(constant_op.constant(0, dtype=dtypes.int64), reduce_fn)\n self.assertEqual(((i + 1) * i), self.evaluate(result))\n\n @combinations.generate(test_base.default_test_combinations())\n def testSumAndCount(self):\n\n def reduce_fn(state, value):\n s, c = state\n return s + value, c + 1\n\n for i in range(10):\n ds = dataset_ops.Dataset.range(1, i + 1)\n result = ds.reduce((constant_op.constant(0, dtype=dtypes.int64),\n constant_op.constant(0, dtype=dtypes.int64)),\n reduce_fn)\n s, c = self.evaluate(result)\n self.assertEqual(((i + 1) * i) // 2, s)\n self.assertEqual(i, c)\n\n @combinations.generate(combinations.combine(tf_api_version=1, mode=\"graph\"))\n def testSquareUsingPlaceholder(self):\n delta = array_ops.placeholder(dtype=dtypes.int64)\n\n def reduce_fn(state, _):\n return state + delta\n\n for i in range(10):\n ds = dataset_ops.Dataset.range(1, i + 1)\n result = ds.reduce(np.int64(0), reduce_fn)\n with self.cached_session() as sess:\n square = sess.run(result, feed_dict={delta: i})\n self.assertEqual(i * i, square)\n\n @combinations.generate(test_base.default_test_combinations())\n def testSparse(self):\n\n def reduce_fn(_, value):\n return value\n\n def make_sparse_fn(i):\n return 
sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0]]),\n values=(i * np.array([1])),\n dense_shape=np.array([1, 1]))\n\n for i in range(10):\n ds = dataset_ops.Dataset.from_tensors(make_sparse_fn(i+1))\n result = ds.reduce(make_sparse_fn(0), reduce_fn)\n self.assertValuesEqual(make_sparse_fn(i + 1), self.evaluate(result))\n\n @combinations.generate(test_base.default_test_combinations())\n def testNested(self):\n\n def reduce_fn(state, value):\n state[\"dense\"] += value[\"dense\"]\n state[\"sparse\"] = value[\"sparse\"]\n return state\n\n def make_sparse_fn(i):\n return sparse_tensor.SparseTensorValue(\n indices=np.array([[0, 0]]),\n values=(i * np.array([1])),\n dense_shape=np.array([1, 1]))\n\n def map_fn(i):\n return {\"dense\": math_ops.cast(i, dtype=dtypes.int64),\n \"sparse\": make_sparse_fn(math_ops.cast(i, dtype=dtypes.int64))}\n\n for i in range(10):\n ds = dataset_ops.Dataset.range(1, i + 1).map(map_fn)\n result = ds.reduce(map_fn(0), reduce_fn)\n result = self.evaluate(result)\n self.assertEqual(((i + 1) * i) // 2, result[\"dense\"])\n self.assertValuesEqual(make_sparse_fn(i), result[\"sparse\"])\n\n @combinations.generate(test_base.default_test_combinations())\n def testDatasetSideEffect(self):\n counter_var = variables.Variable(0)\n\n def increment_fn(x):\n counter_var.assign_add(1)\n return x\n\n def dataset_fn():\n return dataset_ops.Dataset.range(10).map(increment_fn)\n\n def reduce_fn(state, value):\n return state + value\n\n @def_function.function\n def fn():\n _ = dataset_fn().reduce(np.int64(0), reduce_fn)\n return \"hello\"\n\n self.evaluate(counter_var.initializer)\n self.assertEqual(self.evaluate(fn()), b\"hello\")\n self.assertEqual(self.evaluate(counter_var), 10)\n\n @combinations.generate(test_base.default_test_combinations())\n def testSideEffect(self):\n counter_var = variables.Variable(0)\n\n def dataset_fn():\n return dataset_ops.Dataset.range(10)\n\n def reduce_fn(state, value):\n counter_var.assign_add(1)\n return state + value\n\n @def_function.function\n def fn():\n _ = dataset_fn().reduce(np.int64(0), reduce_fn)\n return \"hello\"\n\n self.evaluate(counter_var.initializer)\n self.assertEqual(self.evaluate(fn()), b\"hello\")\n self.assertEqual(self.evaluate(counter_var), 10)\n\n @combinations.generate(test_base.default_test_combinations())\n def testAutomaticControlDependencies(self):\n counter_var = variables.Variable(1)\n\n def dataset_fn():\n return dataset_ops.Dataset.range(1)\n\n def reduce1_fn(state, value):\n counter_var.assign(counter_var + 1)\n return state + value\n\n def reduce2_fn(state, value):\n counter_var.assign(counter_var * 2)\n return state + value\n\n @def_function.function\n def fn():\n _ = dataset_fn().reduce(np.int64(0), reduce1_fn)\n _ = dataset_fn().reduce(np.int64(0), reduce2_fn)\n return \"hello\"\n\n self.evaluate(counter_var.initializer)\n self.assertEqual(self.evaluate(fn()), b\"hello\")\n self.assertEqual(self.evaluate(counter_var), 4)\n\n @combinations.generate(test_base.default_test_combinations())\n def testNestedAutomaticControlDependencies(self):\n counter_var = variables.Variable(0)\n\n def map_fn(x):\n counter_var.assign_add(1)\n return x\n\n def dataset_fn():\n return dataset_ops.Dataset.range(10).map(map_fn)\n\n @def_function.function\n def fn():\n for _ in dataset_fn():\n pass\n return counter_var\n\n self.evaluate(counter_var.initializer)\n self.assertEqual(self.evaluate(fn()), 10)\n\n @combinations.generate(test_base.default_test_combinations())\n def testStateOnGPU(self):\n if not 
test_util.is_gpu_available():\n self.skipTest(\"No GPUs available.\")\n\n state = constant_op.constant(0, dtype=dtypes.int64)\n\n def reduce_fn(state, value):\n with ops.device(\"/gpu:0\"):\n return state + value\n\n for i in range(10):\n ds = dataset_ops.Dataset.range(1, i + 1)\n result = ds.reduce(state, reduce_fn)\n self.assertEqual(((i + 1) * i) // 2, self.evaluate(result))\n\n @combinations.generate(combinations.combine(tf_api_version=1, mode=\"graph\"))\n def testCancellation(self):\n ds = dataset_ops.Dataset.from_tensors(1).repeat()\n result = ds.reduce(0, lambda x, y: x + y)\n with self.cached_session() as sess:\n # The `result` op is guaranteed to not complete before cancelled because\n # the dataset that is being reduced is infinite.\n thread = self.checkedThread(self.assert_op_cancelled, args=(result,))\n thread.start()\n time.sleep(0.2)\n sess.close()\n thread.join()\n\n @combinations.generate(test_base.default_test_combinations())\n def testInvalidFunction(self):\n ds = dataset_ops.Dataset.range(5)\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(ds.reduce(0, lambda _, __: ()))\n\n @combinations.generate(test_base.default_test_combinations())\n def testOptions(self):\n dataset = dataset_ops.Dataset.range(5)\n dataset = dataset.apply(testing.assert_next([\"MapAndBatch\"]))\n dataset = dataset.map(lambda x: x * 2).batch(5)\n self.evaluate(dataset.reduce(0, lambda state, value: state))\n\n @combinations.generate(test_base.default_test_combinations())\n def testName(self):\n dataset = dataset_ops.Dataset.from_tensors(42)\n self.assertEqual(\n self.evaluate(\n dataset.reduce(0, lambda state, value: value, name=\"reduce\")), 42)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"For seeding individual ops based on a graph-level seed.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport weakref\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export\n\n\nDEFAULT_GRAPH_SEED = 87654321\n_MAXINT32 = 2**31 - 1\n\n_graph_to_seed_dict = weakref.WeakKeyDictionary()\n\n\ndef _truncate_seed(seed):\n return seed % _MAXINT32 # Truncate to fit into 32-bit integer\n\n\n@tf_export(v1=['random.get_seed', 'get_seed'])\[email protected]_endpoints('get_seed')\ndef get_seed(op_seed):\n \"\"\"Returns the local seeds an operation should use given an op-specific seed.\n\n Given operation-specific seed, `op_seed`, this helper function returns two\n seeds derived from graph-level and op-level seeds. Many random operations\n internally use the two seeds to allow user to change the seed globally for a\n graph, or for only specific operations.\n\n For details on how the graph-level seed interacts with op seeds, see\n `tf.compat.v1.random.set_random_seed`.\n\n Args:\n op_seed: integer.\n\n Returns:\n A tuple of two integers that should be used for the local seed of this\n operation.\n \"\"\"\n eager = context.executing_eagerly()\n\n if eager:\n global_seed = context.global_seed()\n else:\n global_seed = ops.get_default_graph().seed\n\n if global_seed is not None:\n if op_seed is None:\n # pylint: disable=protected-access\n if hasattr(ops.get_default_graph(), '_seed_used'):\n ops.get_default_graph()._seed_used = True\n if eager:\n op_seed = context.internal_operation_seed()\n else:\n op_seed = _graph_to_seed_dict.setdefault(ops.get_default_graph(), 0)\n _graph_to_seed_dict[ops.get_default_graph()] += 1\n\n seeds = _truncate_seed(global_seed), _truncate_seed(op_seed)\n else:\n if op_seed is not None:\n seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)\n else:\n seeds = None, None\n\n if seeds == (None, None) and config.is_op_determinism_enabled():\n raise RuntimeError( # pylint: disable=g-doc-exception\n 'Random ops require a seed to be set when determinism is enabled. '\n 'Please set a seed before running the op, e.g. 
by calling '\n 'tf.random.set_seed(1).')\n\n # Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would\n # be unexpected since Python docs say nondeterminism is (None, None).\n if seeds == (0, 0):\n return (0, _MAXINT32)\n return seeds\n\n\n@tf_export(v1=['random.set_random_seed', 'set_random_seed'])\ndef set_random_seed(seed):\n \"\"\"Sets the graph-level random seed for the default graph.\n\n Operations that rely on a random seed actually derive it from two seeds:\n the graph-level and operation-level seeds. This sets the graph-level seed.\n\n Its interactions with operation-level seeds is as follows:\n\n 1. If neither the graph-level nor the operation seed is set:\n A random seed is used for this op.\n 2. If the graph-level seed is set, but the operation seed is not:\n The system deterministically picks an operation seed in conjunction with\n the graph-level seed so that it gets a unique random sequence. Within the\n same version of tensorflow and user code, this sequence is deterministic.\n However across different versions, this sequence might change. If the\n code depends on particular seeds to work, specify both graph-level\n and operation-level seeds explicitly.\n 3. If the graph-level seed is not set, but the operation seed is set:\n A default graph-level seed and the specified operation seed are used to\n determine the random sequence.\n 4. If both the graph-level and the operation seed are set:\n Both seeds are used in conjunction to determine the random sequence.\n\n To illustrate the user-visible effects, consider these examples:\n\n To generate different sequences across sessions, set neither\n graph-level nor op-level seeds:\n\n ```python\n a = tf.random.uniform([1])\n b = tf.random.normal([1])\n\n print(\"Session 1\")\n with tf.compat.v1.Session() as sess1:\n print(sess1.run(a)) # generates 'A1'\n print(sess1.run(a)) # generates 'A2'\n print(sess1.run(b)) # generates 'B1'\n print(sess1.run(b)) # generates 'B2'\n\n print(\"Session 2\")\n with tf.compat.v1.Session() as sess2:\n print(sess2.run(a)) # generates 'A3'\n print(sess2.run(a)) # generates 'A4'\n print(sess2.run(b)) # generates 'B3'\n print(sess2.run(b)) # generates 'B4'\n ```\n\n To generate the same repeatable sequence for an op across sessions, set the\n seed for the op:\n\n ```python\n a = tf.random.uniform([1], seed=1)\n b = tf.random.normal([1])\n\n # Repeatedly running this block with the same graph will generate the same\n # sequence of values for 'a', but different sequences of values for 'b'.\n print(\"Session 1\")\n with tf.compat.v1.Session() as sess1:\n print(sess1.run(a)) # generates 'A1'\n print(sess1.run(a)) # generates 'A2'\n print(sess1.run(b)) # generates 'B1'\n print(sess1.run(b)) # generates 'B2'\n\n print(\"Session 2\")\n with tf.compat.v1.Session() as sess2:\n print(sess2.run(a)) # generates 'A1'\n print(sess2.run(a)) # generates 'A2'\n print(sess2.run(b)) # generates 'B3'\n print(sess2.run(b)) # generates 'B4'\n ```\n\n To make the random sequences generated by all ops be repeatable across\n sessions, set a graph-level seed:\n\n ```python\n tf.compat.v1.random.set_random_seed(1234)\n a = tf.random.uniform([1])\n b = tf.random.normal([1])\n\n # Repeatedly running this block with the same graph will generate the same\n # sequences of 'a' and 'b'.\n print(\"Session 1\")\n with tf.compat.v1.Session() as sess1:\n print(sess1.run(a)) # generates 'A1'\n print(sess1.run(a)) # generates 'A2'\n print(sess1.run(b)) # generates 'B1'\n print(sess1.run(b)) # generates 'B2'\n\n print(\"Session 
2\")\n with tf.compat.v1.Session() as sess2:\n print(sess2.run(a)) # generates 'A1'\n print(sess2.run(a)) # generates 'A2'\n print(sess2.run(b)) # generates 'B1'\n print(sess2.run(b)) # generates 'B2'\n ```\n\n @compatibility(TF2)\n 'tf.compat.v1.set_random_seed' is compatible with eager mode. However,\n in eager mode this API will set the global seed instead of the\n graph-level seed of the default graph. In TF2 this API is changed to\n [tf.random.set_seed]\n (https://www.tensorflow.org/api_docs/python/tf/random/set_seed).\n @end_compatibility\n\n Args:\n seed: integer.\n \"\"\"\n if context.executing_eagerly():\n context.set_global_seed(seed)\n else:\n ops.get_default_graph().seed = seed\n\n\n@tf_export('random.set_seed', v1=[])\ndef set_seed(seed):\n \"\"\"Sets the global random seed.\n\n Operations that rely on a random seed actually derive it from two seeds:\n the global and operation-level seeds. This sets the global seed.\n\n Its interactions with operation-level seeds is as follows:\n\n 1. If neither the global seed nor the operation seed is set: A randomly\n picked seed is used for this op.\n 2. If the global seed is set, but the operation seed is not:\n The system deterministically picks an operation seed in conjunction with\n the global seed so that it gets a unique random sequence. Within the\n same version of tensorflow and user code, this sequence is deterministic.\n However across different versions, this sequence might change. If the\n code depends on particular seeds to work, specify both global\n and operation-level seeds explicitly.\n 3. If the operation seed is set, but the global seed is not set:\n A default global seed and the specified operation seed are used to\n determine the random sequence.\n 4. If both the global and the operation seed are set:\n Both seeds are used in conjunction to determine the random sequence.\n\n To illustrate the user-visible effects, consider these examples:\n\n If neither the global seed nor the operation seed is set, we get different\n results for every call to the random op and every re-run of the program:\n\n ```python\n print(tf.random.uniform([1])) # generates 'A1'\n print(tf.random.uniform([1])) # generates 'A2'\n ```\n\n (now close the program and run it again)\n\n ```python\n print(tf.random.uniform([1])) # generates 'A3'\n print(tf.random.uniform([1])) # generates 'A4'\n ```\n\n If the global seed is set but the operation seed is not set, we get different\n results for every call to the random op, but the same sequence for every\n re-run of the program:\n\n ```python\n tf.random.set_seed(1234)\n print(tf.random.uniform([1])) # generates 'A1'\n print(tf.random.uniform([1])) # generates 'A2'\n ```\n\n (now close the program and run it again)\n\n ```python\n tf.random.set_seed(1234)\n print(tf.random.uniform([1])) # generates 'A1'\n print(tf.random.uniform([1])) # generates 'A2'\n ```\n\n The reason we get 'A2' instead 'A1' on the second call of `tf.random.uniform`\n above is because the second call uses a different operation seed.\n\n Note that `tf.function` acts like a re-run of a program in this case. When\n the global seed is set but operation seeds are not set, the sequence of random\n numbers are the same for each `tf.function`. 
For example:\n\n ```python\n tf.random.set_seed(1234)\n\n @tf.function\n def f():\n a = tf.random.uniform([1])\n b = tf.random.uniform([1])\n return a, b\n\n @tf.function\n def g():\n a = tf.random.uniform([1])\n b = tf.random.uniform([1])\n return a, b\n\n print(f()) # prints '(A1, A2)'\n print(g()) # prints '(A1, A2)'\n ```\n\n If the operation seed is set, we get different results for every call to the\n random op, but the same sequence for every re-run of the program:\n\n ```python\n print(tf.random.uniform([1], seed=1)) # generates 'A1'\n print(tf.random.uniform([1], seed=1)) # generates 'A2'\n ```\n\n (now close the program and run it again)\n\n ```python\n print(tf.random.uniform([1], seed=1)) # generates 'A1'\n print(tf.random.uniform([1], seed=1)) # generates 'A2'\n ```\n\n The reason we get 'A2' instead 'A1' on the second call of `tf.random.uniform`\n above is because the same `tf.random.uniform` kernel (i.e. internal\n representation) is used by TensorFlow for all calls of it with the same\n arguments, and the kernel maintains an internal counter which is incremented\n every time it is executed, generating different results.\n\n Calling `tf.random.set_seed` will reset any such counters:\n\n ```python\n tf.random.set_seed(1234)\n print(tf.random.uniform([1], seed=1)) # generates 'A1'\n print(tf.random.uniform([1], seed=1)) # generates 'A2'\n tf.random.set_seed(1234)\n print(tf.random.uniform([1], seed=1)) # generates 'A1'\n print(tf.random.uniform([1], seed=1)) # generates 'A2'\n ```\n\n When multiple identical random ops are wrapped in a `tf.function`, their\n behaviors change because the ops no long share the same counter. For example:\n\n ```python\n @tf.function\n def foo():\n a = tf.random.uniform([1], seed=1)\n b = tf.random.uniform([1], seed=1)\n return a, b\n print(foo()) # prints '(A1, A1)'\n print(foo()) # prints '(A2, A2)'\n\n @tf.function\n def bar():\n a = tf.random.uniform([1])\n b = tf.random.uniform([1])\n return a, b\n print(bar()) # prints '(A1, A2)'\n print(bar()) # prints '(A3, A4)'\n ```\n\n The second call of `foo` returns '(A2, A2)' instead of '(A1, A1)' because\n `tf.random.uniform` maintains an internal counter. If you want `foo` to return\n '(A1, A1)' every time, use the stateless random ops such as\n `tf.random.stateless_uniform`. Also see `tf.random.experimental.Generator` for\n a new set of stateful random ops that use external variables to manage their\n states.\n\n Args:\n seed: integer.\n \"\"\"\n set_random_seed(seed)\n"
] | [
[
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.parallel_for.control_flow_ops.pfor",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.parallel_for.control_flow_ops.for_loop",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.util.nest.flatten"
],
[
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.where",
"tensorflow.python.ops.array_ops.where_v2",
"tensorflow.python.ops.ragged.ragged_tensor.match_row_splits_dtypes",
"tensorflow.python.ops.ragged.ragged_gather_ops.gather",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.ragged.ragged_functional_ops.map_flat_values",
"tensorflow.python.ops.ragged.ragged_tensor_shape.broadcast_to",
"tensorflow.python.ops.ragged.ragged_concat_ops.concat",
"tensorflow.python.ops.ragged.ragged_tensor_shape.RaggedTensorDynamicShape.from_tensor",
"tensorflow.python.util.dispatch.dispatch_for_api",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.ragged.ragged_tensor.convert_to_tensor_or_ragged_tensor",
"tensorflow.python.ops.ragged.ragged_tensor_shape.broadcast_dynamic_shape"
],
[
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.data.ops.iterator_ops._IteratorSaveable",
"tensorflow.python.training.checkpoint_management.latest_checkpoint",
"tensorflow.python.training.basic_session_run_hooks.CheckpointSaverHook",
"tensorflow.python.util.deprecation.deprecated"
],
[
"tensorflow.python.framework.ops.device",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.data.kernel_tests.test_base.default_test_combinations",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.data.ops.dataset_ops.Dataset.zip",
"tensorflow.python.data.experimental.ops.testing.assert_next",
"tensorflow.python.platform.test.main",
"numpy.int64",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.ops.math_ops.cast",
"numpy.array",
"tensorflow.python.framework.test_util.is_gpu_available",
"tensorflow.python.framework.combinations.combine",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.eager.context.internal_operation_seed",
"tensorflow.python.framework.config.is_op_determinism_enabled",
"tensorflow.python.eager.context.global_seed",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.util.deprecation.deprecated_endpoints",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.eager.context.set_global_seed",
"tensorflow.python.eager.context.executing_eagerly"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"2.7",
"2.6",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.10",
"2.8",
"2.7",
"2.9"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.12",
"2.6",
"2.7",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
}
] |
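The row above stores tests for `tf.data.Dataset.reduce`; a minimal sketch of the pattern they exercise (assumes TensorFlow 2.x is installed):

```python
import tensorflow as tf

# Fold the elements 1..10 into a running sum; the initial state fixes the
# dtype of the accumulator (Dataset.range yields int64 values).
ds = tf.data.Dataset.range(1, 11)
total = ds.reduce(tf.constant(0, dtype=tf.int64),
                  lambda state, value: state + value)
print(int(total))  # 55 == (10 * 11) // 2, matching the testSum arithmetic
```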
anonymous2submit/Pointsformer | [
"0eaa141b3d79d45cd925976bde6097b51e0d3819"
] | [
"classification/models/model23.py"
] | [
"\"\"\"\nExactly equals to Model21 (the best results so far), but differnt configurations.\nExactly based on Model10, but ReLU to GeLU\nBased on Model8, add dropout and max, avg combine.\nBased on Local model, add residual connections.\nThe extraction is doubled for depth.\n\nLearning Point Cloud with Progressively Local representation.\n[B,3,N] - {[B,G,K,d]-[B,G,d]} - {[B,G',K,d]-[B,G',d]} -cls\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import einsum\nfrom einops import rearrange, repeat\nfrom pointnet2_ops import pointnet2_utils\n\ndef square_distance(src, dst):\n \"\"\"\n Calculate Euclid distance between each two points.\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n \"\"\"\n B, N, _ = src.shape\n _, M, _ = dst.shape\n dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))\n dist += torch.sum(src ** 2, -1).view(B, N, 1)\n dist += torch.sum(dst ** 2, -1).view(B, 1, M)\n return dist\n\n\ndef index_points(points, idx):\n \"\"\"\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n \"\"\"\n device = points.device\n B = points.shape[0]\n view_shape = list(idx.shape)\n view_shape[1:] = [1] * (len(view_shape) - 1)\n repeat_shape = list(idx.shape)\n repeat_shape[0] = 1\n batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)\n new_points = points[batch_indices, idx, :]\n return new_points\n\n\ndef farthest_point_sample(xyz, npoint):\n \"\"\"\n Input:\n xyz: pointcloud data, [B, N, 3]\n npoint: number of samples\n Return:\n centroids: sampled pointcloud index, [B, npoint]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)\n distance = torch.ones(B, N).to(device) * 1e10\n farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)\n batch_indices = torch.arange(B, dtype=torch.long).to(device)\n for i in range(npoint):\n centroids[:, i] = farthest\n centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)\n dist = torch.sum((xyz - centroid) ** 2, -1)\n distance = torch.min(distance, dist)\n farthest = torch.max(distance, -1)[1]\n return centroids\n\n\ndef query_ball_point(radius, nsample, xyz, new_xyz):\n \"\"\"\n Input:\n radius: local region radius\n nsample: max sample number in local region\n xyz: all points, [B, N, 3]\n new_xyz: query points, [B, S, 3]\n Return:\n group_idx: grouped points index, [B, S, nsample]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n _, S, _ = new_xyz.shape\n group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])\n sqrdists = square_distance(new_xyz, xyz)\n group_idx[sqrdists > radius ** 2] = N\n group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]\n group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])\n mask = group_idx == N\n group_idx[mask] = group_first[mask]\n return group_idx\n\n\ndef knn_point(nsample, xyz, new_xyz):\n \"\"\"\n Input:\n nsample: max sample number in local region\n xyz: all points, [B, N, C]\n new_xyz: query points, [B, S, C]\n Return:\n group_idx: grouped points index, [B, S, nsample]\n \"\"\"\n sqrdists = 
square_distance(new_xyz, xyz)\n _, group_idx = torch.topk(sqrdists, nsample, dim=-1, largest=False, sorted=False)\n return group_idx\n\n\nclass LocalGrouper(nn.Module):\n def __init__(self, groups, kneighbors, **kwargs):\n \"\"\"\n Give xyz[b,p,3] and fea[b,p,d], return new_xyz[b,g,3] and new_fea[b,g,k,2d]\n :param groups: groups number\n :param kneighbors: k-neighbors\n :param kwargs: others\n \"\"\"\n super(LocalGrouper, self).__init__()\n self.groups = groups\n self.kneighbors = kneighbors\n\n def forward(self, xyz, points):\n B, N, C = xyz.shape\n S = self.groups\n xyz = xyz.contiguous() # xyz [batch, points, xyz]\n\n # fps_idx = farthest_point_sample(xyz, self.groups).long()\n fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.groups).long() # [B, npoint]\n new_xyz = index_points(xyz, fps_idx)\n new_points = index_points(points, fps_idx)\n\n idx = knn_point(self.kneighbors, xyz, new_xyz)\n # idx = query_ball_point(radius, nsample, xyz, new_xyz)\n # grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]\n grouped_points = index_points(points, idx)\n grouped_points_norm = grouped_points - new_points.view(B, S, 1, -1)\n new_points = torch.cat([grouped_points_norm,\n new_points.view(B, S, 1, -1).repeat(1, 1, self.kneighbors, 1)]\n , dim=-1)\n return new_xyz, new_points\n\n\nclass FCBNReLU1D(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size=1, bias=False):\n super(FCBNReLU1D, self).__init__()\n self.net = nn.Sequential(\n nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias),\n nn.BatchNorm1d(out_channels),\n nn.GELU()\n )\n\n def forward(self, x):\n return self.net(x)\n\nclass FCBNReLU1DRes(nn.Module):\n def __init__(self, channel, kernel_size=1, bias=False):\n super(FCBNReLU1DRes, self).__init__()\n self.net = nn.Sequential(\n nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),\n nn.BatchNorm1d(channel),\n nn.GELU(),\n nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),\n nn.BatchNorm1d(channel)\n )\n\n def forward(self, x):\n return F.gelu(self.net(x)+x)\n\nclass Attention(nn.Module):\n def __init__(self, dim, heads = 8, dim_head = 32, dropout = 0.):\n super().__init__()\n inner_dim = dim_head * heads\n # project_out = not (heads == 1 and dim_head == dim)\n self.heads = heads\n self.scale = dim_head ** -0.5\n\n self.attend = nn.Softmax(dim = -1)\n self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)\n\n self.to_out = nn.Sequential(\n nn.Conv1d(inner_dim, dim,1),\n nn.BatchNorm1d(dim)\n )\n\n def forward(self, x):\n x = x.permute(0,2,1)\n b, n, _, h = *x.shape, self.heads\n qkv = self.to_qkv(x).chunk(3, dim = -1)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)\n\n dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n attn = self.attend(dots)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b (h d) n')\n\n return self.to_out(out)\n\n\n\nclass TransformerBlock(nn.Module):\n def __init__(self, dim, heads=8, dim_head=32, **kwargs):\n \"\"\"\n [b batch, d dimension, k points]\n :param dim: input data dimension\n :param heads: heads number\n :param dim_head: dimension in each head\n :param kwargs:\n \"\"\"\n super(TransformerBlock, self).__init__()\n self.attention = Attention(dim=dim, heads=heads, dim_head=dim_head)\n self.ffn = nn.Sequential(\n nn.Conv1d(dim, dim, 1, bias=False),\n nn.BatchNorm1d(dim)\n )\n\n\n def forward(self, x):\n \"\"\"\n 
:input x: [b batch, d dimension, p points,]\n :return: [b batch, d dimension, p points,]\n \"\"\"\n att = self.attention(x)\n att = F.gelu(att+x)\n out = self.ffn(att)\n out = F.gelu(att+out)\n return out\n\n\n\n\n\n\n\nclass PreExtraction(nn.Module):\n def __init__(self, channels, blocks=1):\n \"\"\"\n input: [b,g,k,d]; output: [b,d,g]\n :param channels:\n :param blocks:\n \"\"\"\n super(PreExtraction, self).__init__()\n operation = []\n for _ in range(blocks):\n operation.append(\n FCBNReLU1DRes(channels)\n )\n self.operation = nn.Sequential(*operation)\n self.transformer = TransformerBlock(channels, heads=4)\n def forward(self, x):\n b, n, s, d = x.size() # torch.Size([32, 512, 32, 6])\n x = x.permute(0, 1, 3, 2)\n x = x.reshape(-1, d, s)\n batch_size, _, N = x.size()\n x = self.operation(x) # [b, d, k]\n x = self.transformer(x)\n x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)\n x = x.reshape(b, n, -1).permute(0, 2, 1)\n return x\n\nclass PosExtraction(nn.Module):\n def __init__(self, channels, blocks=1):\n \"\"\"\n input[b,d,g]; output[b,d,g]\n :param channels:\n :param blocks:\n \"\"\"\n super(PosExtraction, self).__init__()\n operation = []\n for _ in range(blocks):\n operation.append(\n FCBNReLU1DRes(channels)\n )\n self.operation = nn.Sequential(*operation)\n self.transformer = TransformerBlock(channels, heads=4)\n\n def forward(self, x): # [b, d, k]\n return self.transformer(self.operation(x))\n\n\nclass Model23(nn.Module):\n def __init__(self, points=1024, class_num=40, embed_dim=64,\n pre_blocks=[2,2,2,2], pos_blocks=[2,2,2,2], k_neighbors=[32,32,32,32],\n reducers=[2,2,2,2], **kwargs):\n super(Model23, self).__init__()\n self.stages = len(pre_blocks)\n self.class_num = class_num\n self.points=points\n self.embedding = nn.Sequential(\n FCBNReLU1D(3, embed_dim),\n FCBNReLU1D(embed_dim, embed_dim)\n )\n assert len(pre_blocks)==len(k_neighbors)==len(reducers)==len(pos_blocks), \\\n \"Please check that the stage numbers are consistent for pre_blocks, pos_blocks, k_neighbors, reducers.\"\n self.local_grouper_list = nn.ModuleList()\n self.pre_blocks_list = nn.ModuleList()\n self.pos_blocks_list = nn.ModuleList()\n last_channel = embed_dim\n anchor_points = self.points\n for i in range(len(pre_blocks)):\n out_channel = last_channel*2\n pre_block_num=pre_blocks[i]\n pos_block_num = pos_blocks[i]\n kneighbor = k_neighbors[i]\n reduce = reducers[i]\n anchor_points = anchor_points//reduce\n\n # append local_grouper_list\n local_grouper = LocalGrouper(anchor_points, kneighbor) #[b,g,k,d]\n self.local_grouper_list.append(local_grouper)\n # append pre_block_list\n pre_block_module = PreExtraction(out_channel, pre_block_num)\n self.pre_blocks_list.append(pre_block_module)\n # append pos_block_list\n pos_block_module = PosExtraction(out_channel, pos_block_num)\n self.pos_blocks_list.append(pos_block_module)\n\n last_channel = out_channel\n\n self.classifier = nn.Sequential(\n nn.Linear(last_channel*2, 512),\n nn.BatchNorm1d(512),\n nn.GELU(),\n nn.Dropout(0.5),\n nn.Linear(512, 256),\n nn.BatchNorm1d(256),\n nn.GELU(),\n nn.Dropout(0.5),\n nn.Linear(256, self.class_num)\n )\n\n def forward(self, x):\n xyz = x.permute(0, 2, 1)\n batch_size, _, _ = x.size()\n x = self.embedding(x) # B,D,N\n for i in range(self.stages):\n xyz, x = self.local_grouper_list[i](xyz, x.permute(0, 2, 1)) # [b,g,3] [b,g,k,d]\n x = self.pre_blocks_list[i](x) # [b,d,g]\n x = self.pos_blocks_list[i](x) # [b,d,g]\n\n x_max = F.adaptive_max_pool1d(x,1).squeeze(dim=-1)\n x_mean = x.mean(dim=-1,keepdim=False)\n x = torch.cat([x_max, 
x_mean], dim=-1)\n x = self.classifier(x)\n return x\n\n\n\ndef model23A(num_classes=40, **kwargs) -> Model23: # 19201MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\ndef model23B(num_classes=40, **kwargs) -> Model23: # 19185MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[1,1], pos_blocks=[1,1], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\ndef model23C(num_classes=40, **kwargs) -> Model23: # 19537MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[32,32,32],\n reducers=[4,2,2], **kwargs)\n\ndef model23D(num_classes=40, **kwargs) -> Model23: # 31927MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[16,32,32],\n reducers=[2,2,2], **kwargs)\n\ndef model23E(num_classes=40, **kwargs) -> Model23: # 19215MiB # 93.476% on vis server\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[3,3], pos_blocks=[3,3], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\ndef model23F(num_classes=40, **kwargs) -> Model23: # 6437MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[16,16],\n reducers=[4,4], **kwargs)\n\ndef model23G(num_classes=40, **kwargs) -> Model23: # 19201MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[24,24],\n reducers=[4,4], **kwargs)\n\n# don't train H, it is the same as model21H\ndef model23H(num_classes=40, **kwargs) -> Model23:\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[4,4], pos_blocks=[4,4], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\ndef model23I(num_classes=40, **kwargs) -> Model23: # 20283MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=256,\n pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\n# Extremely large model, 101 layers in total.\ndef model23J(num_classes=40, **kwargs) -> Model23: # 24999MiB\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[4,4,4,4], pos_blocks=[4,4,4,4], k_neighbors=[16,16,16,16],\n reducers=[4,2,2,2], **kwargs)\n\n\n# Also an extremely large model, 101 layers in total.\ndef model23K(num_classes=40, **kwargs) -> Model23:\n return Model23(points=1024, class_num=num_classes, embed_dim=128,\n pre_blocks=[10,10], pos_blocks=[10,10], k_neighbors=[32,32],\n reducers=[4,4], **kwargs)\n\n\nif __name__ == '__main__':\n data = torch.rand(2,128,10)\n att = Attention(128)\n out = att(data)\n print(out.shape)\n\n\n\n batch, groups,neighbors,dim=2,512,32,16\n x = torch.rand(batch,groups,neighbors,dim)\n pre_extractor = PreExtraction(dim,3)\n out = pre_extractor(x)\n print(out.shape)\n\n x = torch.rand(batch, dim, groups)\n pos_extractor = PosExtraction(dim, 3)\n out = pos_extractor(x)\n print(out.shape)\n\n\n data = torch.rand(2, 3, 1024)\n print(\"===> testing model ...\")\n model = Model23()\n out = model(data)\n print(out.shape)\n\n print(\"===> testing modelE ...\")\n model = model23E()\n out = model(data)\n print(out.shape)\n"
] | [
[
"torch.nn.Softmax",
"torch.randint",
"torch.max",
"torch.cat",
"torch.zeros",
"torch.nn.functional.adaptive_max_pool1d",
"torch.sum",
"torch.topk",
"torch.nn.Dropout",
"torch.ones",
"torch.einsum",
"torch.rand",
"torch.arange",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.min",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.nn.functional.gelu",
"torch.nn.Conv1d",
"torch.nn.GELU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
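The Pointsformer row above builds local point groups with farthest-point sampling plus k-NN; a minimal plain-PyTorch sketch of the k-NN step that `knn_point`/`LocalGrouper` implement (shapes are illustrative and `pointnet2_ops` is not required here):

```python
import torch

B, N, S, k = 2, 1024, 128, 16
xyz = torch.rand(B, N, 3)            # full point cloud [B, N, 3]
new_xyz = xyz[:, :S, :]              # stand-in for the FPS centroids [B, S, 3]
dists = torch.cdist(new_xyz, xyz)    # pairwise distances [B, S, N]
_, group_idx = torch.topk(dists, k, dim=-1, largest=False)  # k nearest ids [B, S, k]
print(group_idx.shape)               # torch.Size([2, 128, 16])
```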
DigitalBiomarkerDiscoveryPipeline/devicely | [
"9773fead4d3969a32ca2760b8db4ae728c4d5d50"
] | [
"devicely/empatica.py"
] | [
"\"\"\"\nEmpatica E4 is a wearable device that offers real-time physiological data\nacquisition such as blood volume pulse, electrodermal activity (EDA), heart\nrate, interbeat intervals, 3-axis acceleration and skin temperature.\n\"\"\"\n\nimport os\nimport random\n\nimport numpy as np\nimport pandas as pd\n\n\nclass EmpaticaReader:\n \"\"\"\n Read, timeshift and write data generated by Empatica E4.\n\n Attributes\n ----------\n start_times : dict\n Contain the timestamp of the first measurement for all\n measured signals (BVP, ACC, etc.).\n\n sample_freqs : dict ]\n Contain the sampling frequencies of all measured signals\n in Hz.\n\n IBI : pandas.DataFrame\n Contain inter-beat interval data. The column\n \"seconds_since_start\" is the time in seconds between the start of\n measurements and the column \"IBI\" is the duration in seconds between\n consecutive beats.\n\n ACC : pandas.DataFrame\n Contain the data measured with the onboard MEMS type\n 3-axis accelerometer, indexed by time of measurement.\n\n BVP : pandas.DataFrame\n Contain blood volume pulse data, indexed by time of\n measurement.\n\n EDA : pandas.DataFrame\n Contain data captured from the electrodermal activity\n sensor, indexed by time of measurement.\n\n HR : pandas.DataFrame\n Contain heart rate data, indexed by time of\n measurement.\n\n TEMP : pandas.DataFrame\n Contain temperature data, indexed by time of\n measurement.\n\n data : pandas.DataFrame\n Joined dataframe of the ACC, BVP, EDA, HR and TEMP\n dataframes (see above). May contain NaN values because sampling\n frequencies differ across signals.\n \"\"\"\n\n def __init__(self, path):\n \"\"\"\n Parse the csv files located in the specified directory into dataframes.\n\n Parameters\n ----------\n path : str\n Path of the directory that contains the individual signal csv\n files. The files must be named ACC.csv, BVP.csv, EDA.csv, HR.csv,\n IBI.csv and TEMP.csv. If present, the file tags.csv is also read.\n \"\"\"\n\n self.start_times = {}\n self.sample_freqs = {}\n\n files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n\n if files is None:\n print('Empty directory. 
Nothing to read.')\n return None\n\n self.ACC = self._read_signal(os.path.join(path, 'ACC.csv'), 'ACC', col_names=['X', 'Y', 'Z'])\n self.BVP = self._read_signal(os.path.join(path, 'BVP.csv'), 'BVP')\n self.EDA = self._read_signal(os.path.join(path, 'EDA.csv'), 'EDA')\n self.HR = self._read_signal(os.path.join(path, 'HR.csv'), 'HR')\n self.TEMP = self._read_signal(os.path.join(path, 'TEMP.csv'), 'TEMP')\n self.IBI = self._read_ibi(os.path.join(path, 'IBI.csv'))\n\n self.tags = self._read_tags(os.path.join(path, 'tags.csv'))\n\n self.data = self._get_joined_dataframe()\n\n def write(self, dir_path):\n \"\"\"\n Write the signal dataframes back to individual csv files formatted the\n same way as they were read.\n\n Parameters\n ----------\n dir_path : str\n Path of the directory in which the csv files are created.\n\n If the directory exists, the csv files are written using writing mode 'w',\n ignoring other files in the directory.\n\n If the directory does not exist, it will be created.\n \"\"\"\n\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n if self.ACC is not None:\n self._write_signal(os.path.join(dir_path, 'ACC.csv'), self.ACC, 'ACC')\n if self.BVP is not None:\n self._write_signal(os.path.join(dir_path, 'BVP.csv'), self.BVP, 'BVP')\n if self.EDA is not None:\n self._write_signal(os.path.join(dir_path, 'EDA.csv'), self.EDA, 'EDA')\n if self.HR is not None:\n self._write_signal(os.path.join(dir_path, 'HR.csv'), self.HR, 'HR')\n if self.TEMP is not None:\n self._write_signal(os.path.join(dir_path, 'TEMP.csv'), self.TEMP, 'TEMP')\n if self.IBI is not None:\n self._write_ibi(os.path.join(dir_path, 'IBI.csv'))\n if self.tags is not None:\n self._write_tags(os.path.join(dir_path, 'tags.csv'))\n\n def _read_signal(self, path, signal_name, col_names=None):\n try:\n if os.stat(path).st_size > 0:\n with open(path, 'r') as file:\n start_time_str = file.readline().split(', ')[0]\n self.start_times[signal_name] = pd.Timestamp(float(start_time_str), unit='s')\n sample_freq_str = file.readline().split(', ')[0]\n self.sample_freqs[signal_name] = float(sample_freq_str)\n col_names = [signal_name] if col_names is None else col_names\n dataframe = pd.read_csv(file, header=None, names=col_names)\n dataframe.index = pd.date_range(\n start=self.start_times[signal_name],\n freq=f\"{1 / self.sample_freqs[signal_name]}S\",\n periods=len(dataframe))\n if col_names is not None:\n dataframe.rename(dict(enumerate(col_names)), inplace=True)\n else:\n dataframe.rename({0: signal_name}, inplace=True)\n\n return dataframe.squeeze()\n else:\n print(f\"Not reading signal because the file {path} is empty.\")\n except OSError:\n print(f\"Not reading signal because the file {path} does not exist.\")\n\n return None\n\n def _write_signal(self, path, dataframe, signal_name):\n n_cols = len(dataframe.columns) if isinstance(dataframe, pd.DataFrame) else 1\n meta = np.array([[self.start_times[signal_name].value / 1e9] * n_cols,\n [self.sample_freqs[signal_name]] * n_cols])\n with open(path, 'w') as file:\n np.savetxt(file, meta, fmt='%s', delimiter=', ', newline='\\n')\n dataframe.to_csv(file, index=None, header=None, line_terminator='\\n')\n\n def _read_ibi(self, path):\n try:\n if os.stat(path).st_size > 0:\n with open(path, 'r') as file:\n start_time = pd.Timestamp(float(file.readline().split(',')[0]), unit='s')\n self.start_times['IBI'] = start_time\n df = pd.read_csv(file, names=['time', 'IBI'], header=None)\n df['time'] = pd.to_timedelta(df['time'], unit='s')\n df['time'] = start_time + df['time']\n return df.set_index('time')\n else:\n print(f\"Not reading signal because the file {path} is empty.\")\n except OSError:\n print(f\"Not reading signal because the file {path} does not exist.\")\n\n return None\n\n def _write_ibi(self, path):\n with open(path, 'w') as file:\n file.write(f\"{self.start_times['IBI'].value // 1e9}, IBI\\n\")\n write_df = self.IBI.copy()\n write_df.index = (write_df.index - self.start_times['IBI']).values.astype(int) / 1e9\n write_df.to_csv(file, header=None, line_terminator='\\n')\n\n def _read_tags(self, path):\n try:\n if os.stat(path).st_size > 0:\n return pd.read_csv(path, header=None,\n parse_dates=[0],\n date_parser=lambda x : pd.to_datetime(x, unit='s'),\n names=['tags'],\n squeeze=True)\n\n else:\n print(f\"Not reading tags because the file {path} is empty.\")\n except OSError:\n print(f\"Not reading tags because the file {path} does not exist.\")\n\n return None\n\n def _write_tags(self, path):\n if self.tags is not None:\n tags_write_series = self.tags.map(lambda x: x.value / 1e9)\n tags_write_series.to_csv(path, header=None, index=None, line_terminator='\\n')\n\n def timeshift(self, shift='random'):\n \"\"\"\n Timeshift all time related columns as well as the starting_times dict.\n\n Parameters\n ----------\n shift : None/'random', pd.Timestamp or pd.Timedelta\n If shift is not specified, shifts the data by a random time interval\n between one month and two years to the past.\n\n If shift is a timedelta, adds that timedelta to all time-related attributes.\n\n If shift is a timestamp, shifts the data such that the earliest entry\n has that timestamp. The remaining values will maintain the same\n time difference to the first entry.\n \"\"\"\n\n if shift == 'random':\n one_month = pd.Timedelta('- 30 days').value\n two_years = pd.Timedelta('- 730 days').value\n random_timedelta = pd.Timedelta(random.uniform(one_month, two_years))\n self.timeshift(random_timedelta)\n return # the recursive call above already applied the shift\n\n dataframes = []\n variables = [self.ACC, self.BVP, self.EDA,\n self.HR, self.TEMP, self.data]\n for variable in variables:\n if variable is not None:\n dataframes.append(variable)\n\n if isinstance(shift, pd.Timestamp):\n min_start_time = min(self.start_times.values())\n new_start_times = dict()\n for signal_name, start_time in self.start_times.items():\n new_start_times[signal_name] = shift + (start_time - min_start_time)\n self.start_times = new_start_times\n if self.tags is not None:\n timedeltas = self.tags - self.tags.min()\n self.tags = shift + timedeltas\n for dataframe in dataframes:\n timedeltas = dataframe.index - dataframe.index.min()\n dataframe.index = shift + timedeltas\n\n if isinstance(shift, pd.Timedelta):\n for signal_name in self.start_times:\n self.start_times[signal_name] += shift\n if self.tags is not None:\n self.tags += shift\n for dataframe in dataframes:\n dataframe.index += shift\n\n def _get_joined_dataframe(self):\n dataframes = []\n variables = [self.ACC, self.BVP, self.EDA,\n self.HR, self.TEMP]\n for variable in variables:\n if variable is not None:\n dataframes.append(variable)\n\n if not dataframes:\n print('No joined dataframe possible due to lack of data.')\n return None\n\n joined_idx = pd.concat([pd.Series(dataframe.index) for dataframe in dataframes])\n joined_idx = pd.Index(joined_idx.drop_duplicates().sort_values())\n\n joined_dataframe = pd.DataFrame(index=joined_idx)\n if self.ACC is not None:\n joined_dataframe.loc[self.ACC.index, 'ACC_X'] = self.ACC['X']\n joined_dataframe.loc[self.ACC.index, 'ACC_Y'] = self.ACC['Y']\n joined_dataframe.loc[self.ACC.index, 'ACC_Z'] = self.ACC['Z']\n if self.BVP is not None:\n joined_dataframe.loc[self.BVP.index, 'BVP'] = self.BVP\n if self.EDA is not None:\n joined_dataframe.loc[self.EDA.index, 'EDA'] = self.EDA\n if self.HR is not None:\n joined_dataframe.loc[self.HR.index, 'HR'] = self.HR\n if self.TEMP is not None:\n joined_dataframe.loc[self.TEMP.index, 'TEMP'] = self.TEMP\n\n return joined_dataframe\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.Series",
"pandas.Timedelta",
"pandas.DataFrame",
"pandas.to_timedelta",
"numpy.savetxt",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
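A minimal usage sketch for the `EmpaticaReader` stored above (assumes the devicely package is installed and that the hypothetical directory `./E4_data` holds the ACC/BVP/EDA/HR/IBI/TEMP csv files):

```python
import pandas as pd
import devicely

reader = devicely.EmpaticaReader('./E4_data')   # parses the csv files into dataframes
print(reader.sample_freqs)                      # sampling frequencies per signal, in Hz
reader.timeshift(pd.Timedelta('-30 days'))      # shift all timestamps, e.g. for de-identification
reader.write('./E4_data_shifted')               # write back in the original csv format
```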
Near32/Archi | [
"0005713fa4e37c7cd9b34cd257c481d08928db8a"
] | [
"Archi/tests/test_esbn_model.py"
] | [
"import Archi\nimport yaml \n\n\ndef test_model_loading():\n try:\n config = yaml.safe_load(\n open(\"./esbn_model_test_config.yaml\", 'r'),\n )\n except yaml.YANNLError as e:\n print(e)\n\n from Archi import load_model\n\n model = load_model(config)\n \n assert 'KeyValueMemory' in model.modules.keys()\n assert 'key_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()\n assert 'value_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()\n assert 'read_key_plus_conf' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()\n assert 'CoreLSTM' in model.modules.keys()\n assert 'CoreLSTM' in model.stream_handler.placeholders['inputs'].keys()\n assert 'hidden' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()\n assert 'cell' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()\n assert 'iteration' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()\n \n\ndef test_model_forward():\n try:\n config = yaml.safe_load(\n open(\"./esbn_model_test_config.yaml\", 'r'),\n )\n except yaml.YANNLError as e:\n print(e)\n\n from Archi import load_model\n\n model = load_model(config)\n \n import torch \n\n inputs_dict = {\n 'x':torch.rand(4,3,64,64),\n }\n\n output = model(**inputs_dict)\n assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0 \n output1 = model(**inputs_dict)\n\n assert 'lstm_output' in output['modules']['CoreLSTM']\n assert 'processed_input' in output['modules']['Encoder']\n assert 'processed_input' in output['modules']['ToGateFCN']\n assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0 \n assert output1['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() != 0.0 \n assert len(dict(model.named_parameters())) != 0\n for np, p in model.named_parameters():\n print(np)\n\nif __name__ == '__main__':\n test_model_loading()\n test_model_forward()\n\n"
] | [
[
"torch.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
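A minimal sketch of the config-loading pattern the Archi test above uses (the file path comes from the test and must exist for this to run; PyYAML's parse-error class is `yaml.YAMLError`):

```python
import yaml

try:
    with open('./esbn_model_test_config.yaml', 'r') as f:
        config = yaml.safe_load(f)
except yaml.YAMLError as e:
    # Malformed YAML lands here; a missing file raises OSError instead.
    print(e)
```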
RubensZimbres/pytorch-metric-learning | [
"41e06ef5af398c05d238e0a74ee6c42fa7bd574c"
] | [
"tests/utils/test_calculate_accuracies.py"
] | [
"import unittest\r\nfrom pytorch_metric_learning.utils import accuracy_calculator\r\nimport numpy as np\r\n\r\n\r\nclass TestCalculateAccuracies(unittest.TestCase):\r\n def test_accuracy_calculator(self):\r\n query_labels = np.array([1, 1, 2, 3, 4])\r\n\r\n knn_labels1 = np.array(\r\n [\r\n [0, 1, 1, 2, 2],\r\n [1, 0, 1, 1, 3],\r\n [4, 4, 4, 4, 2],\r\n [3, 1, 3, 1, 3],\r\n [0, 0, 4, 2, 2],\r\n ]\r\n )\r\n label_counts1 = {1: 3, 2: 5, 3: 4, 4: 5}\r\n\r\n knn_labels2 = knn_labels1 + 5\r\n label_counts2 = {k + 5: v for k, v in label_counts1.items()}\r\n\r\n for avg_of_avgs in [False, True]:\r\n for i, (knn_labels, label_counts) in enumerate(\r\n [(knn_labels1, label_counts1), (knn_labels2, label_counts2)]\r\n ):\r\n\r\n AC = accuracy_calculator.AccuracyCalculator(\r\n exclude=(\"NMI\", \"AMI\"), avg_of_avgs=avg_of_avgs\r\n )\r\n kwargs = {\r\n \"query_labels\": query_labels,\r\n \"label_counts\": label_counts,\r\n \"knn_labels\": knn_labels,\r\n \"not_lone_query_mask\": np.ones(5).astype(np.bool)\r\n if i == 0\r\n else np.zeros(5).astype(np.bool),\r\n }\r\n\r\n function_dict = AC.get_function_dict()\r\n\r\n for ecfss in [False, True]:\r\n if ecfss:\r\n kwargs[\"knn_labels\"] = kwargs[\"knn_labels\"][:, 1:]\r\n kwargs[\"embeddings_come_from_same_source\"] = ecfss\r\n acc = AC._get_accuracy(function_dict, **kwargs)\r\n if i == 1:\r\n self.assertTrue(acc[\"precision_at_1\"] == 0)\r\n self.assertTrue(acc[\"r_precision\"] == 0)\r\n self.assertTrue(acc[\"mean_average_precision_at_r\"] == 0)\r\n self.assertTrue(acc[\"mean_average_precision\"] == 0)\r\n else:\r\n self.assertTrue(\r\n acc[\"precision_at_1\"]\r\n == self.correct_precision_at_1(ecfss, avg_of_avgs)\r\n )\r\n self.assertTrue(\r\n acc[\"r_precision\"]\r\n == self.correct_r_precision(ecfss, avg_of_avgs)\r\n )\r\n self.assertTrue(\r\n acc[\"mean_average_precision_at_r\"]\r\n == self.correct_mean_average_precision_at_r(\r\n ecfss, avg_of_avgs\r\n )\r\n )\r\n self.assertTrue(\r\n acc[\"mean_average_precision\"]\r\n == self.correct_mean_average_precision(ecfss, avg_of_avgs)\r\n )\r\n\r\n def correct_precision_at_1(self, embeddings_come_from_same_source, avg_of_avgs):\r\n if not embeddings_come_from_same_source:\r\n if not avg_of_avgs:\r\n return 0.4\r\n else:\r\n return (0.5 + 0 + 1 + 0) / 4\r\n else:\r\n if not avg_of_avgs:\r\n return 1.0 / 5\r\n else:\r\n return (0.5 + 0 + 0 + 0) / 4\r\n\r\n def correct_r_precision(self, embeddings_come_from_same_source, avg_of_avgs):\r\n if not embeddings_come_from_same_source:\r\n acc0 = 2.0 / 3\r\n acc1 = 2.0 / 3\r\n acc2 = 1.0 / 5\r\n acc3 = 2.0 / 4\r\n acc4 = 1.0 / 5\r\n else:\r\n acc0 = 1.0 / 1\r\n acc1 = 1.0 / 2\r\n acc2 = 1.0 / 4\r\n acc3 = 1.0 / 3\r\n acc4 = 1.0 / 4\r\n if not avg_of_avgs:\r\n return np.mean([acc0, acc1, acc2, acc3, acc4])\r\n else:\r\n return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])\r\n\r\n def correct_mean_average_precision_at_r(\r\n self, embeddings_come_from_same_source, avg_of_avgs\r\n ):\r\n if not embeddings_come_from_same_source:\r\n acc0 = (1.0 / 2 + 2.0 / 3) / 3\r\n acc1 = (1 + 2.0 / 3) / 3\r\n acc2 = (1.0 / 5) / 5\r\n acc3 = (1 + 2.0 / 3) / 4\r\n acc4 = (1.0 / 3) / 5\r\n else:\r\n acc0 = 1\r\n acc1 = (1.0 / 2) / 2\r\n acc2 = (1.0 / 4) / 4\r\n acc3 = (1.0 / 2) / 3\r\n acc4 = (1.0 / 2) / 4\r\n if not avg_of_avgs:\r\n return np.mean([acc0, acc1, acc2, acc3, acc4])\r\n else:\r\n return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])\r\n\r\n def correct_mean_average_precision(\r\n self, embeddings_come_from_same_source, avg_of_avgs\r\n ):\r\n if not 
embeddings_come_from_same_source:\r\n acc0 = (1.0 / 2 + 2.0 / 3) / 2\r\n acc1 = (1 + 2.0 / 3 + 3.0 / 4) / 3\r\n acc2 = (1.0 / 5) / 1\r\n acc3 = (1 + 2.0 / 3 + 3.0 / 5) / 3\r\n acc4 = (1.0 / 3) / 1\r\n else:\r\n acc0 = 1\r\n acc1 = (1.0 / 2 + 2.0 / 3) / 2\r\n acc2 = 1.0 / 4\r\n acc3 = (1.0 / 2 + 2.0 / 4) / 2\r\n acc4 = 1.0 / 2\r\n if not avg_of_avgs:\r\n return np.mean([acc0, acc1, acc2, acc3, acc4])\r\n else:\r\n return np.mean([(acc0 + acc1) / 2, acc2, acc3, acc4])\r\n\r\n def test_get_label_counts(self):\r\n label_counts, num_k = accuracy_calculator.get_label_counts(\r\n [0, 1, 3, 2, 3, 1, 3, 3, 4, 6, 5, 10, 4, 4, 4, 4, 6, 6, 5]\r\n )\r\n self.assertTrue(\r\n label_counts == {0: 1, 1: 2, 2: 1, 3: 4, 4: 5, 5: 2, 6: 3, 10: 1}\r\n )\r\n self.assertTrue(num_k == 5)\r\n\r\n def test_get_lone_query_labels(self):\r\n query_labels = np.array([0, 1, 2, 3, 4, 5, 6])\r\n reference_labels = np.array([0, 0, 0, 1, 2, 2, 3, 4, 5, 6])\r\n reference_label_counts, _ = accuracy_calculator.get_label_counts(\r\n reference_labels\r\n )\r\n\r\n lone_query_labels = accuracy_calculator.get_lone_query_labels(\r\n query_labels, reference_labels, reference_label_counts, True\r\n )\r\n self.assertTrue(\r\n np.all(np.unique(lone_query_labels) == np.array([1, 3, 4, 5, 6]))\r\n )\r\n\r\n query_labels = np.array([0, 1, 2, 3, 4])\r\n reference_labels = np.array([0, 0, 0, 1, 2, 2, 4, 5, 6])\r\n\r\n lone_query_labels = accuracy_calculator.get_lone_query_labels(\r\n query_labels, reference_labels, reference_label_counts, False\r\n )\r\n self.assertTrue(np.all(np.unique(lone_query_labels) == np.array([3])))\r\n\r\n\r\nclass TestCalculateAccuraciesAndFaiss(unittest.TestCase):\r\n def test_accuracy_calculator_and_faiss(self):\r\n AC = accuracy_calculator.AccuracyCalculator(exclude=(\"NMI\", \"AMI\"))\r\n\r\n query = np.arange(10)[:, None].astype(np.float32)\r\n reference = np.arange(10)[:, None].astype(np.float32)\r\n query_labels = np.arange(10).astype(np.int)\r\n reference_labels = np.arange(10).astype(np.int)\r\n acc = AC.get_accuracy(query, reference, query_labels, reference_labels, False)\r\n self.assertTrue(acc[\"precision_at_1\"] == 1)\r\n self.assertTrue(acc[\"r_precision\"] == 1)\r\n self.assertTrue(acc[\"mean_average_precision_at_r\"] == 1)\r\n\r\n reference = (np.arange(20) / 2.0)[:, None].astype(np.float32)\r\n reference_labels = np.zeros(20).astype(np.int)\r\n reference_labels[::2] = query_labels\r\n reference_labels[1::2] = np.ones(10).astype(np.int)\r\n acc = AC.get_accuracy(query, reference, query_labels, reference_labels, True)\r\n self.assertTrue(acc[\"precision_at_1\"] == 1)\r\n self.assertTrue(acc[\"r_precision\"] == 0.5)\r\n self.assertTrue(\r\n acc[\"mean_average_precision_at_r\"]\r\n == (1 + 2.0 / 2 + 3.0 / 5 + 4.0 / 7 + 5.0 / 9) / 10\r\n )\r\n\r\n def test_accuracy_calculator_and_faiss_avg_of_avgs(self):\r\n AC_global_average = accuracy_calculator.AccuracyCalculator(\r\n exclude=(\"NMI\", \"AMI\"), avg_of_avgs=False\r\n )\r\n AC_per_class_average = accuracy_calculator.AccuracyCalculator(\r\n exclude=(\"NMI\", \"AMI\"), avg_of_avgs=True\r\n )\r\n query = np.arange(10)[:, None].astype(np.float32)\r\n reference = np.arange(10)[:, None].astype(np.float32)\r\n query[-1] = 100\r\n reference[0] = -100\r\n query_labels = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])\r\n reference_labels = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0])\r\n acc = AC_global_average.get_accuracy(\r\n query, reference, query_labels, reference_labels, False\r\n )\r\n self.assertTrue(acc[\"precision_at_1\"] == 0.9)\r\n 
self.assertTrue(acc[\"r_precision\"] == 0.9)\r\n self.assertTrue(acc[\"mean_average_precision_at_r\"] == 0.9)\r\n\r\n acc = AC_per_class_average.get_accuracy(\r\n query, reference, query_labels, reference_labels, False\r\n )\r\n self.assertTrue(acc[\"precision_at_1\"] == 0.5)\r\n self.assertTrue(acc[\"r_precision\"] == 0.5)\r\n self.assertTrue(acc[\"mean_average_precision_at_r\"] == 0.5)\r\n"
] | [
[
"numpy.unique",
"numpy.arange",
"numpy.ones",
"numpy.mean",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
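A minimal usage sketch mirroring `TestCalculateAccuraciesAndFaiss` above (assumes pytorch-metric-learning and its faiss dependency are installed):

```python
import numpy as np
from pytorch_metric_learning.utils import accuracy_calculator

AC = accuracy_calculator.AccuracyCalculator(exclude=("NMI", "AMI"))
query = np.arange(10)[:, None].astype(np.float32)   # 10 one-dimensional embeddings
labels = np.arange(10)                              # one unique label per embedding
# Final argument False: query and reference are treated as separate sources,
# so each query's nearest reference may be its own identical copy.
acc = AC.get_accuracy(query, query, labels, labels, False)
print(acc["precision_at_1"])  # 1.0
```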
piquark6046/tensorflow | [
"57771c5d008f6d16fd147110213855d145a7e0bc"
] | [
"tensorflow/python/eager/backprop.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Code for backpropagation using the tape utilities.\"\"\"\n\n# TODO(b/159343581): Properly support CompositeTensor in all functions in this\n# file.\n\nimport functools\nimport operator\nimport sys\n\nimport six\n\nfrom tensorflow.python import pywrap_tfe\nfrom tensorflow.python.eager import backprop_util\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import imperative_grad\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import composite_tensor_gradient\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import indexed_slices\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import default_gradient\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops.unconnected_gradients import UnconnectedGradients\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util import _pywrap_utils\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.lazy_loader import LazyLoader\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# Note that we need to lazy load the following two modules to avoid creating\n# circular dependencies.\n# TODO(b/119775953): fix the circular dependencies.\npfor_ops = LazyLoader(\n \"pfor_ops\", globals(),\n \"tensorflow.python.ops.parallel_for.control_flow_ops\")\n\nfunction = LazyLoader(\"function\", globals(),\n \"tensorflow.python.eager.function\")\n\n_op_attr_type_cache = {}\n\n\ndef op_attr_type(op_type, attr_name):\n try:\n return _op_attr_type_cache[(op_type, attr_name)]\n except KeyError:\n context.ensure_initialized()\n h = context.context()._handle # pylint: disable=protected-access\n attr_type = pywrap_tfe.TFE_OpNameGetAttrType(h, op_type, attr_name)\n _op_attr_type_cache[(op_type, attr_name)] = attr_type\n return attr_type\n\n\ndef make_attr(attr_type, value):\n # pybind11 enums do not return the raw value like SWIG enums do. 
They are\n # useful when comparing amongst each other but not direct integers as we are\n # doing in most tests.\n # https://pybind11.readthedocs.io/en/stable/classes.html#enumerations-and-internal-types\n # TODO(amitpatankar): After all SWIG transitions, convert the enum comparisons\n # from integer value to class.\n if attr_type == int(pywrap_tfe.TF_ATTR_TYPE):\n return dtypes.as_dtype(value)\n if attr_type == [int(pywrap_tfe.TF_ATTR_TYPE)]:\n return [dtypes.as_dtype(v) for v in value]\n if attr_type == int(pywrap_tfe.TF_ATTR_SHAPE):\n return tensor_shape.as_shape(value).as_proto()\n if attr_type == [int(pywrap_tfe.TF_ATTR_SHAPE)]:\n return [tensor_shape.as_shape(v).as_proto() for v in value]\n if isinstance(value, str):\n return value.encode()\n return value\n\n\nclass _MockOp(object):\n \"\"\"Pretends to be a tf.Operation for the gradient functions.\"\"\"\n\n def __init__(self, attrs, inputs, outputs, typ, skip_input_indices):\n self.attrs = attrs\n self.inputs = inputs\n self.outputs = outputs\n self.type = typ\n self.skip_input_indices = skip_input_indices\n\n def get_attr(self, attr):\n typ = op_attr_type(self.type, attr)\n for i in range(0, len(self.attrs), 2):\n if self.attrs[i] == attr:\n return make_attr(typ, self.attrs[i + 1])\n raise KeyError(attr)\n\n def _get_control_flow_context(self):\n raise NotImplementedError(\n \"tf.GradientTape.gradients() does not support graph control flow \"\n \"operations like tf.cond or tf.while at this time. Use tf.gradients() \"\n \"instead. If you need this feature, please file a feature request at \"\n \"https://github.com/tensorflow/tensorflow/issues/new\"\n )\n\n\ndef _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,\n out_grads, skip_input_indices, forward_pass_name_scope):\n \"\"\"Calls the gradient function of the op.\n\n Args:\n op_name: the name of the op to be differentiated.\n attr_tuple: the attrs, as a tuple.\n num_inputs: the number of inputs to the op.\n inputs: inputs to the original operation.\n outputs: outputs to the original operation.\n out_grads: gradients of the operation wrt its outputs.\n skip_input_indices: a tuple that is passed to the gradient function,\n indicating which inputs to skip calculating the gradient for\n forward_pass_name_scope: the namescope of the op in the forward pass.\n\n Returns:\n The gradients with respect to the inputs of the function, as a list.\n \"\"\"\n mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)\n grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access\n if grad_fn is None:\n return [None] * num_inputs\n\n # This does not work with v1 TensorArrays.\n if ops.executing_eagerly_outside_functions(\n ) or control_flow_util.EnableControlFlowV2(ops.get_default_graph()):\n gradient_name_scope = \"gradient_tape/\"\n if forward_pass_name_scope:\n gradient_name_scope += forward_pass_name_scope + \"/\"\n with ops.name_scope(gradient_name_scope):\n return grad_fn(mock_op, *out_grads)\n else:\n return grad_fn(mock_op, *out_grads)\n\n\npywrap_tfe.TFE_Py_RegisterGradientFunction(_gradient_function)\n\n\ndef _must_record_gradient():\n return not pywrap_tfe.TFE_Py_TapeSetIsEmpty()\n\n\n@tf_export(\"__internal__.record_gradient\", v1=[])\ndef record_gradient(op_name, inputs, attrs, outputs):\n \"\"\"Explicitly record the gradient for a given op.\n\n Args:\n op_name: The op name as listed in the `OpDef` for the op.\n inputs: A list of tensor inputs to the op.\n attrs: The op attributes as a flattened list of alternating attribute 
names\n and attribute values.\n outputs: A list of tensor outputs from the op.\n \"\"\"\n pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs,\n ops.get_name_scope())\n\n\nexecute.must_record_gradient = _must_record_gradient\nexecute.record_gradient = record_gradient\n\n\ndef implicit_val_and_grad(f):\n \"\"\"Returns a function which differentiates f with respect to variables.\n\n The wrapped function returns the value and the gradient of f when called with\n the same arguments. The gradient is with respect to all trainable TFE\n variables accessed by `f`.\n\n This function is useful when the exact set of variables to differentiate with\n is not known ahead of time.\n\n Example:\n\n ```python\n dense_layer = tf.compat.v1.layers.Dense(1)\n def loss(x, y):\n return tf.reduce_sum(tf.square(dense_layer(x) - y))\n\n # Obtain the gradient function.\n val_grad_fn = tfe.implicit_value_and_gradients(loss)\n\n # Invoke the gradient function with concrete values of x and y.\n x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n y = tf.constant([[10.0], [20.0]])\n value, grads_and_vars = val_grad_fn(x, y)\n print('Value of loss: %s' % value)\n\n # Apply the gradients to Variables.\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)\n optimizer.apply_gradients(grads_and_vars)\n ```\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar.\n\n Returns:\n A function which, when called, returns a tuple pair.\n Its first element is the value to which the function evaluates.\n Its second element is list of (gradient, variable) pairs.\n\n Raises:\n ValueError: if `f` returns None.\n \"\"\"\n # TODO(cais): Remove calls to tf.constant() once the gradients functions\n # accept lists and np.ndarrays.\n\n def grad_fn(*args, **kwds):\n \"\"\"Computes the gradient of the wrapped function.\"\"\"\n this_tape = tape.push_new_tape()\n try:\n end_node = f(*args, **kwds)\n if end_node is None:\n raise ValueError(\"Cannot differentiate a function that returns None; \"\n \"did you forget to return a value from {}?\".format(\n f.__name__))\n finally:\n tape.pop_tape(this_tape)\n # Note: variables are returned in construction order. This ensures unique\n # order across executions.\n variables = this_tape.watched_variables()\n if not variables:\n raise ValueError(\"No trainable variables were accessed while the \"\n \"function was being computed.\")\n\n sources = [v.handle for v in variables]\n for s in sources:\n if getattr(s, \"is_packed\", False):\n raise ValueError(\n \"GradientTape.gradient is not supported on packed EagerTensors yet.\"\n )\n grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),\n sources)\n return end_node, list(zip(grad, variables))\n\n return grad_fn\n\n\ndef implicit_grad(f):\n \"\"\"Returns a function which differentiates f with respect to variables.\n\n The wrapped function returns the gradient of f when called with the same\n arguments. 
The gradient is with respect to all trainable TFE variables\n accessed by `f`.\n\n This function is useful when the exact set of variables to differentiate with\n is not known ahead of time.\n\n Example:\n\n ```python\n dense_layer = tf.compat.v1.layers.Dense(1)\n def loss(x, y):\n return tf.reduce_sum(tf.square(dense_layer(x) - y))\n\n # Obtain the gradient function.\n grad_fn = tfe.implicit_gradients(loss)\n\n # Invoke the gradient function with concrete values of x and y.\n x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n y = tf.constant([[10.0], [20.0]])\n grads_and_vars = grad_fn(x, y)\n\n # Apply the gradients to Variables.\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)\n optimizer.apply_gradients(grads_and_vars)\n ```\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar.\n\n Returns:\n A function which, when called, returns a list of (gradient, variable) pairs.\n \"\"\"\n # TODO(cais): Remove calls to tf.constant() once the gradients functions\n # accept lists and np.ndarrays.\n\n def grad_fn(*args, **kwds):\n \"\"\"Computes the gradient of the wrapped function.\"\"\"\n return implicit_val_and_grad(f)(*args, **kwds)[1]\n\n return grad_fn\n\n\ndef _get_arg_spec(f, params, param_args):\n \"\"\"The positions of the parameters of f to be differentiated in param_args.\"\"\"\n try:\n args = tf_inspect.getfullargspec(f).args\n except TypeError as e:\n # TypeError can happen when f is a callable object.\n if params is None:\n return range(len(param_args))\n elif all(isinstance(x, int) for x in params):\n return params\n raise ValueError(\"Either callable provided is not a function or could not \"\n \"inspect its arguments by name: %s. 
Original error: %s\"\n % (f, e))\n if params is None:\n if not args:\n return range(len(param_args))\n if args[0] == \"self\":\n return range(len(args) - 1)\n else:\n return range(len(args))\n elif all(isinstance(x, six.string_types) for x in params):\n return [args.index(n) for n in params]\n elif all(isinstance(x, int) for x in params):\n return params\n else:\n raise ValueError(\n \"params must be all strings or all integers; got %s.\" % params)\n\n\ndef gradients_function(f, params=None):\n \"\"\"Returns a function which differentiates f with respect to params.\n\n Example:\n ```python\n # f(x, y) = (x ^ 3) * y - x * (y ^ 2)\n # Therefore, the 1st order derivatives are:\n # df / dx = 3 * (x ^ 2) * y - y ^ 2\n # df / dy = x ^ 3 - 2 * x * y\n # The 2nd order derivatives with respect to x is:\n # d^2 f / (dx)^2 = 6 * x * y\n def f(x, y):\n return x * x * x * y - x * y * y\n\n # Obtain a function that returns 1st order gradients.\n grad_fn = tfe.gradients_function(f)\n\n x = 2.0\n y = 3.0\n\n # Invoke the 1st order gradient function.\n x_grad, y_grad = grad_fn(x, y)\n assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n\n # Obtain a function that returns the 2nd order gradient with respect to x.\n gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])\n\n # Invoke the 2nd order gradient function.\n x_gradgrad = gradgrad_fn(x, y)[0]\n assert x_gradgrad.numpy() == 6 * 2 * 3\n\n # To obtain a callable that returns the gradient(s) of `f` with respect to a\n # subset of its inputs, use the `params` keyword argument with\n # `gradients_function()`.\n ygrad_fn = tfe.gradients_function(f, params=[1])\n\n (y_grad,) = ygrad_fn(x, y)\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n ```\n\n Note that only tensors with real or complex dtypes are differentiable.\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar. If desired, the tensors can be elementwise multiplied by the\n tensors passed as the `dy` keyword argument to the returned gradient\n function.\n params: list of parameter names of f or list of integers indexing the\n parameters with respect to which we'll differentiate. Passing None\n differentiates with respect to all parameters.\n\n Returns:\n function which, when called, returns the value of f and the gradient\n of `f` with respect to all of `params`. The function takes an extra optional\n keyword argument `dy`. 
Setting it allows computation of vector jacobian\n products for vectors other than the vector of ones.\n\n Raises:\n ValueError: if the params are not all strings or all integers.\n \"\"\"\n\n def decorated(*args, **kwds):\n \"\"\"Computes the gradient of the decorated function.\"\"\"\n\n _, grad = val_and_grad_function(f, params=params)(*args, **kwds)\n return grad\n\n return decorated\n\n\ndef _ensure_unique_tensor_objects(parameter_positions, args):\n \"\"\"Make each of the parameter_positions in args a unique ops.Tensor object.\n\n Ensure that each parameter is treated independently.\n For example:\n\n def f(x, y): return x * y\n g = gradients_function(f)\n one = tf.constant(1.)\n\n g(one, one) should return [1., 1.]\n (even though the two arguments are the same Tensor object).\n\n Args:\n parameter_positions: List of indices into args defining the arguments to\n differentiate against.\n args: A list of arguments to the function to be differentiated.\n\n Returns:\n args, possibly edited in-place.\n \"\"\"\n s = set()\n for (i, t) in enumerate(args):\n if i in parameter_positions:\n tid = ops.tensor_id(t)\n if tid in s:\n args[i] = gen_array_ops.identity(args[i])\n else:\n s.add(tid)\n return args\n\n\ndef val_and_grad_function(f, params=None):\n \"\"\"Returns a function that computes f and its derivative w.r.t. params.\n\n Example:\n ```python\n # f(x, y) = (x ^ 3) * y - x * (y ^ 2)\n # Therefore, the 1st order derivatives are:\n # df / dx = 3 * (x ^ 2) * y - y ^ 2\n # df / dy = x ^ 3 - 2 * x * y\n def f(x, y):\n return x * x * x * y - x * y * y\n\n # Obtain a function that returns the function value and the 1st order\n # gradients.\n val_grads_fn = tfe.value_and_gradients_function(f)\n\n x = 2.0\n y = 3.0\n\n # Invoke the value-and-gradients function.\n f_val, (x_grad, y_grad) = val_grads_fn(x, y)\n assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)\n assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n\n # To obtain a callable that returns the value of `f` and the gradient(s) of\n # `f` with respect to a subset of its inputs, use the `params` keyword\n # argument with `value_and_gradients_function()`.\n val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])\n\n f_val, (y_grad,) = val_ygrad_fn(x, y)\n assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)\n assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3\n ```\n\n Args:\n f: function to be differentiated. If `f` returns a scalar, this scalar will\n be differentiated. If `f` returns a tensor or list of tensors, by default\n a scalar will be computed by adding all their values to produce a single\n scalar. If desired, the tensors can be elementwise multiplied by the\n tensors passed as the `dy` keyword argument to the returned gradient\n function.\n params: list of parameter names of f or list of integers indexing the\n parameters with respect to which we'll differentiate. Passing `None`\n differentiates with respect to all parameters.\n\n Returns:\n function which, when called, returns the value of f and the gradient\n of f with respect to all of `params`. The function takes an extra optional\n keyword argument \"dy\". 
Setting it allows computation of vector jacobian\n products for vectors other than the vector of ones.\n\n Raises:\n ValueError: if the params are not all strings or all integers.\n \"\"\"\n\n def decorated(*args, **kwds):\n \"\"\"Computes the value and gradient of the decorated function.\"\"\"\n dy = kwds.pop(\"dy\", None)\n if kwds:\n raise ValueError(\"Functions to be differentiated cannot \"\n \"receive keyword arguments.\")\n val, vjp = make_vjp(f, params)(*args, **kwds)\n return val, vjp(dy=dy)\n\n return decorated\n\n\ndef make_vjp(f, params=None, persistent=True):\n \"\"\"Returns a function that computes f and its vjp w.r.t.\n\n params.\n\n The term \"vjp\" here is an abbreviation for vector-jacobian product.\n\n Args:\n f: the function to be differentiated.\n params: the parameters (numbers or names) to differentiate with respect to.\n A value of None will differentiate with respect to all parameters.\n persistent: Boolean controlling whether the VJP function can be re-used.\n Must be True or False.\n\n Returns:\n A function, which when called, returns a tuple (value, vjp), where:\n - value is the result of calling f.\n - vjp is a function, which takes a vector as an argument and\n returns the product of that vector with the Jacobian of f.\n Providing no argument to vjp is equivalent to providing a\n vector of ones.\n\n For example,\n ```python\n def f(x):\n return x * x\n\n wrapped_fn = tfe.make_vjp(f)\n result, vjp = wrapped_fn(tf.constant(3.0))\n # result is 9.0\n vjp() # the vjp function returns 6.0\n\n Raises:\n ValueError: if `f` returns None.\n \"\"\"\n\n def decorated(*args, **kwds):\n \"\"\"Computes the value and gradient of the decorated function.\"\"\"\n parameter_positions = _get_arg_spec(f, params, args)\n assert not kwds, \"The gradient function can't take keyword arguments.\"\n this_tape = tape.push_new_tape(persistent=persistent)\n try:\n sources = []\n args = [\n ops.convert_to_tensor(arg) if i in parameter_positions else arg\n for i, arg in enumerate(args)\n ]\n args = _ensure_unique_tensor_objects(parameter_positions, args)\n for i in parameter_positions:\n if getattr(args[i], \"is_packed\", False):\n raise ValueError(\n \"GradientTape.gradient is not supported on packed EagerTensors\"\n \"yet.\")\n sources.append(args[i])\n tape.watch(this_tape, args[i])\n result = f(*args)\n if result is None:\n raise ValueError(\"Cannot differentiate a function that returns None; \"\n \"did you forget to return a value from {}?\".format(\n f.__name__))\n flat_result = nest.flatten(result)\n flat_result = [gen_array_ops.identity(x) for x in flat_result]\n result = nest.pack_sequence_as(result, flat_result)\n finally:\n tape.pop_tape(this_tape)\n def vjp(dy=None):\n if dy is not None:\n dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]\n return imperative_grad.imperative_grad(\n this_tape, nest.flatten(result), sources, output_gradients=dy)\n\n return result, vjp\n\n return decorated\n\n\ndef flatten_nested_indexed_slices(grad):\n assert isinstance(grad, indexed_slices.IndexedSlices)\n if isinstance(grad.values, ops.Tensor):\n return grad\n else:\n assert isinstance(grad.values, indexed_slices.IndexedSlices)\n g = flatten_nested_indexed_slices(grad.values)\n return indexed_slices.IndexedSlices(\n g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape)\n\n\ndef aggregate_indexed_slices_gradients(grads):\n \"\"\"Aggregates gradients containing `IndexedSlices`s.\"\"\"\n if len(grads) < 1:\n return None\n if len(grads) == 1:\n return grads[0]\n grads = [g for g 
in grads if g is not None]\n # If any gradient is a `Tensor`, sum them up and return a dense tensor\n # object.\n if any(isinstance(g, ops.Tensor) for g in grads):\n return math_ops.add_n(grads)\n\n # The following `_as_indexed_slices_list` casts ids of IndexedSlices into\n # int64. It is to make sure the inputs of `concat` all have same the data\n # type.\n grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access\n\n grads = [flatten_nested_indexed_slices(x) for x in grads]\n # Form IndexedSlices out of the concatenated values and indices.\n concat_grad = indexed_slices.IndexedSlices(\n array_ops.concat([x.values for x in grads], axis=0),\n array_ops.concat([x.indices for x in grads], axis=0),\n grads[0].dense_shape)\n\n return concat_grad\n\n\ndef _aggregate_grads(gradients):\n \"\"\"Aggregate gradients from multiple sources.\n\n Args:\n gradients: A list of 'Tensor' or 'IndexedSlices' gradients.\n\n Returns:\n If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.\n Otherwise returns an aggregated 'IndexedSlices'.\n \"\"\"\n assert gradients, \"No gradients to aggregate\"\n\n if len(gradients) == 1:\n return gradients[0]\n if all(isinstance(g, ops.Tensor) for g in gradients):\n return gen_math_ops.add_n(gradients)\n else:\n assert all(\n isinstance(g, (ops.Tensor, indexed_slices.IndexedSlices))\n for g in gradients)\n return aggregate_indexed_slices_gradients(gradients)\n\n\ndef _num_elements(grad):\n \"\"\"The number of elements in the `grad` tensor.\"\"\"\n if isinstance(grad, ops.Tensor):\n shape_tuple = grad._shape_tuple() # pylint: disable=protected-access\n elif isinstance(grad, indexed_slices.IndexedSlices):\n shape_tuple = grad.values._shape_tuple() # pylint: disable=protected-access\n else:\n raise ValueError(\"`grad` not a Tensor or IndexedSlices.\")\n if shape_tuple is None or None in shape_tuple:\n return 0\n return functools.reduce(operator.mul, shape_tuple, 1)\n\n\ndef _fast_fill(value, shape, dtype):\n return array_ops.fill(\n constant_op.constant(shape, dtype=dtypes.int32),\n constant_op.constant(value, dtype=dtype))\n\n\ndef _zeros(shape, dtype):\n \"\"\"Helper to return (possibly cached) zero tensors in eager mode.\"\"\"\n # Note: variants will use _zeros_like\n if dtype == dtypes.string or dtype == dtypes.resource:\n return None\n\n ctx = context.context()\n if not ctx.executing_eagerly():\n return array_ops.zeros(shape, dtype)\n\n device = ctx.device_name\n\n if tensor_util.is_tf_type(shape):\n shape_key = shape.ref()\n else:\n shape_key = shape\n cache_key = shape_key, dtype, device\n cached = ctx.zeros_cache().get(cache_key)\n if cached is None:\n if dtypes.as_dtype(dtype).is_bool:\n value = False\n else:\n value = 0\n cached = _fast_fill(value, shape, dtype)\n ctx.zeros_cache().put(cache_key, cached)\n return cached\n\n\ndef _ones(shape, dtype):\n as_dtype = dtypes.as_dtype(dtype)\n if as_dtype == dtypes.string:\n return None\n\n if not context.executing_eagerly():\n return array_ops.ones(shape, dtype)\n\n if as_dtype.is_bool:\n value = True\n else:\n value = 1\n\n if shape == (): # pylint: disable=g-explicit-bool-comparison\n return constant_op.constant(value, dtype=dtype)\n return _fast_fill(value, shape, dtype)\n\n\n_default_vspace = imperative_grad.VSpace(\n num_elements_fn=_num_elements,\n aggregate_fn=_aggregate_grads,\n zeros_fn=_zeros,\n ones_fn=_ones,\n zeros_like_fn=default_gradient.zeros_like,\n ones_like_fn=default_gradient.ones_like,\n 
graph_shape_fn=gen_array_ops.shape)\npywrap_tfe.TFE_Py_RegisterVSpace(_default_vspace)\n\n\ndef _handle_or_self(x):\n \"\"\"Unwrap resource variable/ndarray to return tensors.\"\"\"\n if resource_variable_ops.is_resource_variable(x):\n return x.handle\n return x\n\n\n@tf_export(\"GradientTape\", \"autodiff.GradientTape\", v1=[\"GradientTape\"])\nclass GradientTape(object):\n \"\"\"Record operations for automatic differentiation.\n\n Operations are recorded if they are executed within this context manager and\n at least one of their inputs is being \"watched\".\n\n Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`,\n where `trainable=True` is default in both cases) are automatically watched.\n Tensors can be manually watched by invoking the `watch` method on this context\n manager.\n\n For example, consider the function `y = x * x`. The gradient at `x = 3.0` can\n be computed as:\n\n >>> x = tf.constant(3.0)\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... y = x * x\n >>> dy_dx = g.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(6.0, shape=(), dtype=float32)\n\n GradientTapes can be nested to compute higher-order derivatives. For example,\n\n >>> x = tf.constant(5.0)\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... with tf.GradientTape() as gg:\n ... gg.watch(x)\n ... y = x * x\n ... dy_dx = gg.gradient(y, x) # dy_dx = 2 * x\n >>> d2y_dx2 = g.gradient(dy_dx, x) # d2y_dx2 = 2\n >>> print(dy_dx)\n tf.Tensor(10.0, shape=(), dtype=float32)\n >>> print(d2y_dx2)\n tf.Tensor(2.0, shape=(), dtype=float32)\n\n By default, the resources held by a GradientTape are released as soon as\n GradientTape.gradient() method is called. To compute multiple gradients over\n the same computation, create a persistent gradient tape. This allows multiple\n calls to the gradient() method as resources are released when the tape object\n is garbage collected. For example:\n\n >>> x = tf.constant(3.0)\n >>> with tf.GradientTape(persistent=True) as g:\n ... g.watch(x)\n ... y = x * x\n ... z = y * y\n >>> dz_dx = g.gradient(z, x) # (4*x^3 at x = 3)\n >>> print(dz_dx)\n tf.Tensor(108.0, shape=(), dtype=float32)\n >>> dy_dx = g.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(6.0, shape=(), dtype=float32)\n\n By default GradientTape will automatically watch any trainable variables that\n are accessed inside the context. If you want fine grained control over which\n variables are watched you can disable automatic tracking by passing\n `watch_accessed_variables=False` to the tape constructor:\n\n >>> x = tf.Variable(2.0)\n >>> w = tf.Variable(5.0)\n >>> with tf.GradientTape(\n ... watch_accessed_variables=False, persistent=True) as tape:\n ... tape.watch(x)\n ... y = x ** 2 # Gradients will be available for `x`.\n ... z = w ** 3 # No gradients will be available as `w` isn't being watched.\n >>> dy_dx = tape.gradient(y, x)\n >>> print(dy_dx)\n tf.Tensor(4.0, shape=(), dtype=float32)\n >>> # No gradients will be available as `w` isn't being watched.\n >>> dz_dw = tape.gradient(z, w)\n >>> print(dz_dw)\n None\n\n Note that when using models you should ensure that your variables exist when\n using `watch_accessed_variables=False`. 
Otherwise it's quite easy to make your\n first iteration not have any gradients:\n\n ```python\n a = tf.keras.layers.Dense(32)\n b = tf.keras.layers.Dense(32)\n\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(a.variables) # Since `a.build` has not been called at this point\n # `a.variables` will return an empty list and the\n # tape will not be watching anything.\n result = b(a(inputs))\n tape.gradient(result, a.variables) # The result of this computation will be\n # a list of `None`s since a's variables\n # are not being watched.\n ```\n\n Note that only tensors with real or complex dtypes are differentiable.\n \"\"\"\n\n def __init__(self, persistent=False, watch_accessed_variables=True):\n \"\"\"Creates a new GradientTape.\n\n Args:\n persistent: Boolean controlling whether a persistent gradient tape\n is created. False by default, which means at most one call can\n be made to the gradient() method on this object.\n watch_accessed_variables: Boolean controlling whether the tape will\n automatically `watch` any (trainable) variables accessed while the tape\n is active. Defaults to True meaning gradients can be requested from any\n result computed in the tape derived from reading a trainable `Variable`.\n If False users must explicitly `watch` any `Variable`s they want to\n request gradients from.\n \"\"\"\n self._tape = None\n self._persistent = persistent\n self._watch_accessed_variables = watch_accessed_variables\n self._watched_variables = ()\n self._recording = False\n\n def __enter__(self):\n \"\"\"Enters a context inside which operations are recorded on this tape.\"\"\"\n self._push_tape()\n return self\n\n def __exit__(self, typ, value, traceback):\n \"\"\"Exits the recording context, no further operations are traced.\"\"\"\n if self._recording:\n self._pop_tape()\n\n def _push_tape(self):\n \"\"\"Pushes a new tape onto the tape stack.\"\"\"\n if self._recording:\n raise ValueError(\"Tape is still recording, This can happen if you try to \"\n \"re-enter an already-active tape.\")\n if self._tape is None:\n self._tape = tape.push_new_tape(\n persistent=self._persistent,\n watch_accessed_variables=self._watch_accessed_variables)\n else:\n tape.push_tape(self._tape)\n self._recording = True\n\n def _pop_tape(self):\n if not self._recording:\n raise ValueError(\"Tape is not recording.\")\n tape.pop_tape(self._tape)\n self._recording = False\n\n @tf_contextlib.contextmanager\n def _ensure_recording(self):\n \"\"\"Ensures that this tape is recording.\"\"\"\n if not self._recording:\n try:\n self._push_tape()\n yield\n finally:\n self._pop_tape()\n else:\n yield\n\n def watch(self, tensor):\n \"\"\"Ensures that `tensor` is being traced by this tape.\n\n Args:\n tensor: a Tensor or list of Tensors.\n\n Raises:\n ValueError: if it encounters something that is not a tensor.\n \"\"\"\n for t in nest.flatten(tensor, expand_composites=True):\n if not (_pywrap_utils.IsTensor(t) or _pywrap_utils.IsVariable(t)):\n raise ValueError(\"Passed in object of type {}, not tf.Tensor\".format(\n type(t)))\n if not backprop_util.IsTrainable(t):\n logging.log_first_n(\n logging.WARN, \"The dtype of the watched tensor must be \"\n \"floating (e.g. tf.float32), got %r\", 5, t.dtype)\n if hasattr(t, \"handle\"):\n # There are many variable-like objects, all of them currently have\n # `handle` attribute that points to a tensor. 
If this changes, internals\n # of watch_variable need to change as well.\n tape.watch_variable(self._tape, t)\n else:\n tape.watch(self._tape, t)\n\n @tf_contextlib.contextmanager\n def stop_recording(self):\n \"\"\"Temporarily stops recording operations on this tape.\n\n Operations executed while this context manager is active will not be\n recorded on the tape. This is useful for reducing the memory used by tracing\n all computations.\n\n For example:\n\n >>> x = tf.constant(4.0)\n >>> with tf.GradientTape() as tape:\n ... with tape.stop_recording():\n ... y = x ** 2\n >>> dy_dx = tape.gradient(y, x)\n >>> print(dy_dx)\n None\n\n Yields:\n None\n Raises:\n RuntimeError: if the tape is not currently recording.\n \"\"\"\n if self._tape is None:\n raise RuntimeError(\n \"Trying to stop recording a tape which is not recording.\")\n self._pop_tape()\n try:\n yield\n finally:\n self._push_tape()\n\n def reset(self):\n \"\"\"Clears all information stored in this tape.\n\n Equivalent to exiting and reentering the tape context manager with a new\n tape. For example, the two following code blocks are equivalent:\n\n ```\n with tf.GradientTape() as t:\n loss = loss_fn()\n with tf.GradientTape() as t:\n loss += other_loss_fn()\n t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn\n\n\n # The following is equivalent to the above\n with tf.GradientTape() as t:\n loss = loss_fn()\n t.reset()\n loss += other_loss_fn()\n t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn\n ```\n\n This is useful if you don't want to exit the context manager for the tape,\n or can't because the desired reset point is inside a control flow construct:\n\n ```\n with tf.GradientTape() as t:\n loss = ...\n if loss > k:\n t.reset()\n ```\n \"\"\"\n self._pop_tape()\n self._tape = None\n self._push_tape()\n\n def watched_variables(self):\n \"\"\"Returns variables watched by this tape in order of construction.\"\"\"\n if self._tape is not None:\n self._watched_variables = self._tape.watched_variables()\n return self._watched_variables\n\n def gradient(self,\n target,\n sources,\n output_gradients=None,\n unconnected_gradients=UnconnectedGradients.NONE):\n \"\"\"Computes the gradient using operations recorded in context of this tape.\n\n Note: Unless you set `persistent=True` a GradientTape can only be used to\n compute one set of gradients (or jacobians).\n\n In addition to Tensors, gradient also supports RaggedTensors. For example,\n\n >>> x = tf.ragged.constant([[1.0, 2.0], [3.0]])\n >>> with tf.GradientTape() as g:\n ... g.watch(x)\n ... y = x * x\n >>> g.gradient(y, x)\n <tf.RaggedTensor [[2.0, 4.0], [6.0]]>\n\n Args:\n target: a list or nested structure of Tensors or Variables or\n CompositeTensors to be differentiated.\n sources: a list or nested structure of Tensors or Variables or\n CompositeTensors. `target` will be differentiated against elements in\n `sources`.\n output_gradients: a list of gradients, one for each differentiable\n element of target. Defaults to None.\n unconnected_gradients: a value which can either hold 'none' or 'zero' and\n alters the value which will be returned if the target and sources are\n unconnected. The possible values and effects are detailed in\n 'UnconnectedGradients' and it defaults to 'none'.\n\n Returns:\n a list or nested structure of Tensors (or IndexedSlices, or None, or\n CompositeTensor), one for each element in `sources`. 
Returned structure\n is the same as the structure of `sources`.\n\n Raises:\n RuntimeError: If called on a used, non-persistent tape.\n RuntimeError: If called inside the context of the tape.\n TypeError: If the target is a None object.\n ValueError: If the target is a variable or if unconnected gradients is\n called with an unknown value.\n \"\"\"\n if self._tape is None:\n raise RuntimeError(\"A non-persistent GradientTape can only be used to \"\n \"compute one set of gradients (or jacobians)\")\n if self._recording:\n if not self._persistent:\n self._pop_tape()\n else:\n logging.log_first_n(\n logging.WARN, \"Calling GradientTape.gradient on a persistent \"\n \"tape inside its context is significantly less \"\n \"efficient than calling it outside the context (it \"\n \"causes the gradient ops to be recorded on the \"\n \"tape, leading to increased CPU and memory usage). \"\n \"Only call GradientTape.gradient inside the \"\n \"context if you actually want to trace the \"\n \"gradient in order to compute higher order \"\n \"derivatives.\", 1)\n\n if target is None:\n raise TypeError(\"Argument `target` should be a list or nested structure\"\n \" of Tensors, Variables or CompositeTensors to be \"\n \"differentiated, but received None.\")\n\n flat_targets = []\n for t in nest.flatten(target):\n if not backprop_util.IsTrainable(t):\n logging.vlog(\n logging.WARN, \"The dtype of the target tensor must be \"\n \"floating (e.g. tf.float32) when calling GradientTape.gradient, \"\n \"got %r\", t.dtype)\n if resource_variable_ops.is_resource_variable(t):\n with self:\n t = ops.convert_to_tensor(t)\n flat_targets.append(t)\n flat_targets = composite_tensor_gradient.get_flat_tensors_for_gradients(\n flat_targets)\n\n flat_sources = nest.flatten(sources)\n for t in flat_sources:\n if not backprop_util.IsTrainable(t):\n logging.vlog(\n logging.WARN, \"The dtype of the source tensor must be \"\n \"floating (e.g. 
tf.float32) when calling GradientTape.gradient, \"\n \"got %r\", t.dtype)\n if getattr(t, \"is_packed\", False):\n raise ValueError(\n \"GradientTape.gradient is not supported on packed EagerTensors yet.\"\n )\n flat_sources_raw = flat_sources\n flat_sources = composite_tensor_gradient.get_flat_tensors_for_gradients(\n flat_sources)\n flat_sources = [_handle_or_self(x) for x in flat_sources]\n\n if output_gradients is not None:\n output_gradients = nest.flatten(output_gradients)\n output_gradients = (\n composite_tensor_gradient.get_flat_tensors_for_gradients(\n output_gradients))\n output_gradients = [None if x is None else ops.convert_to_tensor(x)\n for x in output_gradients]\n\n flat_grad = imperative_grad.imperative_grad(\n self._tape,\n flat_targets,\n flat_sources,\n output_gradients=output_gradients,\n sources_raw=flat_sources_raw,\n unconnected_gradients=unconnected_gradients)\n\n if not self._persistent:\n # Keep track of watched variables before setting tape to None\n self._watched_variables = self._tape.watched_variables()\n self._tape = None\n\n flat_grad = composite_tensor_gradient.replace_flat_tensors_for_gradients(\n flat_sources_raw, flat_grad)\n grad = nest.pack_sequence_as(sources, flat_grad)\n return grad\n\n def jacobian(self,\n target,\n sources,\n unconnected_gradients=UnconnectedGradients.NONE,\n parallel_iterations=None,\n experimental_use_pfor=True):\n \"\"\"Computes the jacobian using operations recorded in context of this tape.\n\n Note: Unless you set `persistent=True` a GradientTape can only be used to\n compute one set of gradients (or jacobians).\n\n Note: By default the jacobian implementation uses parallel for (pfor), which\n creates a tf.function under the hood for each jacobian call. For better\n performance, and to avoid recompilation and vectorization rewrites on each\n call, enclose GradientTape code in @tf.function.\n\n See[wikipedia\n article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant)\n for the definition of a Jacobian.\n\n Example usage:\n\n ```python\n with tf.GradientTape() as g:\n x = tf.constant([1.0, 2.0])\n g.watch(x)\n y = x * x\n jacobian = g.jacobian(y, x)\n # jacobian value is [[2., 0.], [0., 4.]]\n ```\n\n Args:\n target: Tensor to be differentiated.\n sources: a list or nested structure of Tensors or Variables. `target`\n will be differentiated against elements in `sources`.\n unconnected_gradients: a value which can either hold 'none' or 'zero' and\n alters the value which will be returned if the target and sources are\n unconnected. The possible values and effects are detailed in\n 'UnconnectedGradients' and it defaults to 'none'.\n parallel_iterations: A knob to control how many iterations are dispatched\n in parallel. This knob can be used to control the total memory usage.\n experimental_use_pfor: If true, vectorizes the jacobian computation. Else\n falls back to a sequential while_loop. Vectorization can sometimes fail\n or lead to excessive memory usage. This option can be used to disable\n vectorization in such cases.\n\n Returns:\n A list or nested structure of Tensors (or None), one for each element in\n `sources`. Returned structure is the same as the structure of `sources`.\n Note if any gradient is sparse (IndexedSlices), jacobian function\n currently makes it dense and returns a Tensor instead. 
This may change in\n the future.\n\n\n Raises:\n RuntimeError: If called on a used, non-persistent tape.\n RuntimeError: If called on a non-persistent tape with eager execution\n enabled and without enabling experimental_use_pfor.\n ValueError: If vectorization of jacobian computation fails.\n \"\"\"\n if self._tape is None:\n raise RuntimeError(\"A non-persistent GradientTape can only be used to \"\n \"compute one set of gradients (or jacobians)\")\n\n flat_sources = nest.flatten(sources)\n target_static_shape = target.shape\n target_shape = array_ops.shape(target)\n # Note that we push and pop the tape here and below. This is needed since we\n # need gradients through the enclosed operations.\n with self._ensure_recording():\n target = array_ops.reshape(target, [-1])\n\n def loop_fn(i):\n with self._ensure_recording():\n y = array_ops.gather(target, i)\n return self.gradient(y, flat_sources,\n unconnected_gradients=unconnected_gradients)\n\n try:\n target_size = int(target.shape[0])\n except TypeError:\n target_size = array_ops.shape(target)[0]\n\n if experimental_use_pfor:\n try:\n output = pfor_ops.pfor(loop_fn, target_size,\n parallel_iterations=parallel_iterations)\n except ValueError as err:\n six.reraise(\n ValueError,\n ValueError(\n str(err) + \"\\nEncountered an exception while vectorizing the \"\n \"jacobian computation. Vectorization can be disabled by setting\"\n \" experimental_use_pfor to False.\"),\n sys.exc_info()[2])\n else:\n if context.executing_eagerly() and not self._persistent:\n raise RuntimeError(\n \"GradientTape must be created with persistent=True\"\n \" to compute the jacobian with eager execution enabled and with \"\n \" experimental_use_pfor set to False.\")\n output = pfor_ops.for_loop(\n loop_fn, [target.dtype] * len(flat_sources), target_size,\n parallel_iterations=parallel_iterations)\n\n for i, out in enumerate(output):\n if out is not None:\n new_shape = array_ops.concat(\n [target_shape, array_ops.shape(out)[1:]], axis=0)\n out = array_ops.reshape(out, new_shape)\n if context.executing_eagerly():\n out.set_shape(target_static_shape.concatenate(flat_sources[i].shape))\n output[i] = out\n\n return nest.pack_sequence_as(sources, output)\n\n def batch_jacobian(self,\n target,\n source,\n unconnected_gradients=UnconnectedGradients.NONE,\n parallel_iterations=None,\n experimental_use_pfor=True):\n \"\"\"Computes and stacks per-example jacobians.\n\n See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant)\n for the definition of a Jacobian. This function is essentially an efficient\n implementation of the following:\n\n `tf.stack([self.jacobian(y[i], x[i]) for i in range(x.shape[0])])`.\n\n Note that compared to `GradientTape.jacobian` which computes gradient of\n each output value w.r.t each input value, this function is useful when\n `target[i,...]` is independent of `source[j,...]` for `j != i`. This\n assumption allows more efficient computation as compared to\n `GradientTape.jacobian`. 
The output, as well as intermediate activations,\n are lower dimensional and avoid a bunch of redundant zeros which would\n result in the jacobian computation given the independence assumption.\n\n Note: Unless you set `persistent=True` a GradientTape can only be used to\n compute one set of gradients (or jacobians).\n\n Note: By default the batch_jacobian implementation uses parallel for (pfor),\n which creates a tf.function under the hood for each batch_jacobian call.\n For better performance, and to avoid recompilation and vectorization\n rewrites on each call, enclose GradientTape code in @tf.function.\n\n\n Example usage:\n\n ```python\n with tf.GradientTape() as g:\n x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32)\n g.watch(x)\n y = x * x\n batch_jacobian = g.batch_jacobian(y, x)\n # batch_jacobian is [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]\n ```\n\n Args:\n target: A tensor with rank 2 or higher and with shape [b, y1, ..., y_n].\n `target[i,...]` should only depend on `source[i,...]`.\n source: A tensor with rank 2 or higher and with shape [b, x1, ..., x_m].\n unconnected_gradients: a value which can either hold 'none' or 'zero' and\n alters the value which will be returned if the target and sources are\n unconnected. The possible values and effects are detailed in\n 'UnconnectedGradients' and it defaults to 'none'.\n parallel_iterations: A knob to control how many iterations are dispatched\n in parallel. This knob can be used to control the total memory usage.\n experimental_use_pfor: If true, uses pfor for computing the Jacobian. Else\n uses a tf.while_loop.\n\n Returns:\n A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]`\n is the jacobian of `target[i, ...]` w.r.t. `source[i, ...]`, i.e. stacked\n per-example jacobians.\n\n Raises:\n RuntimeError: If called on a used, non-persistent tape.\n RuntimeError: If called on a non-persistent tape with eager execution\n enabled and without enabling experimental_use_pfor.\n ValueError: If vectorization of jacobian computation fails or if first\n dimension of `target` and `source` do not match.\n \"\"\"\n if self._tape is None:\n raise RuntimeError(\"A non-persistent GradientTape can only be used to\"\n \"compute one set of gradients (or jacobians)\")\n target_shape = target.shape\n if target_shape.rank is None:\n dim = tensor_shape.Dimension(None)\n else:\n dim = target_shape.dims[0]\n if not (target_shape.with_rank_at_least(2) and\n source.shape.with_rank_at_least(2) and\n dim.is_compatible_with(source.shape[0])):\n raise ValueError(\n \"Need first dimension of target shape (%s) and \"\n \"source shape (%s) to match.\" % (target.shape, source.shape))\n if target_shape.is_fully_defined():\n batch_size = int(target_shape[0])\n target_row_size = target_shape.num_elements() // batch_size\n else:\n target_shape = array_ops.shape(target)\n batch_size = target_shape[0]\n target_row_size = array_ops.size(target) // batch_size\n source_shape = array_ops.shape(source)\n # Flatten target to 2-D.\n # Note that we push and pop the tape here and below. 
This is needed since we\n # need gradients through the enclosed operations.\n with self._ensure_recording():\n with ops.control_dependencies(\n [check_ops.assert_equal(batch_size, source_shape[0])]):\n target = array_ops.reshape(target, [batch_size, target_row_size])\n\n run_once = False\n\n def loop_fn(i):\n nonlocal run_once\n if run_once and not self._persistent:\n if parallel_iterations is not None:\n raise RuntimeError(\n \"GradientTape must be created with persistent=True\"\n \" to compute the batch_jacobian with parallel_iterations.\")\n else:\n raise RuntimeError(\n \"GradientTape must be created with persistent=True\"\n \" to compute the batch_jacobian.\")\n run_once = True\n\n with self._ensure_recording():\n y = array_ops.gather(target, i, axis=1)\n return self.gradient(y, source,\n unconnected_gradients=unconnected_gradients)\n\n if experimental_use_pfor:\n try:\n output = pfor_ops.pfor(loop_fn, target_row_size,\n parallel_iterations=parallel_iterations)\n except ValueError as err:\n six.reraise(\n ValueError,\n ValueError(\n str(err) + \"\\nEncountered an exception while vectorizing the \"\n \"batch_jacobian computation. Vectorization can be disabled by \"\n \"setting experimental_use_pfor to False.\"),\n sys.exc_info()[2])\n else:\n if context.executing_eagerly() and not self._persistent:\n raise RuntimeError(\n \"GradientTape must be created with persistent=True\"\n \" to compute the batch_jacobian with eager execution enabled and \"\n \" with experimental_use_pfor set to False.\")\n output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size,\n parallel_iterations=parallel_iterations)\n new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0)\n if output is None:\n # Note that this block is returning zeros when it could use `None` to\n # represent unconnected gradients. This is to maintain compatibility with\n # the previous behavior, which ignored `unconnected_gradients`.\n output = array_ops.zeros(new_shape, target.dtype)\n return output\n else:\n output = array_ops.reshape(output,\n [target_row_size, batch_size, -1])\n output = array_ops.transpose(output, [1, 0, 2])\n\n output = array_ops.reshape(output, new_shape)\n return output\n"
] | [
[
"tensorflow.python.ops.gen_math_ops.add_n",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.pywrap_tfe.TFE_Py_TapeSetIsEmpty",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.composite_tensor_gradient.get_flat_tensors_for_gradients",
"tensorflow.python.eager.context.ensure_initialized",
"tensorflow.python.eager.tape.push_tape",
"tensorflow.python.util._pywrap_utils.IsTensor",
"tensorflow.python.eager.imperative_grad.imperative_grad",
"tensorflow.python.pywrap_tfe.TFE_OpNameGetAttrType",
"tensorflow.python.framework.ops._gradient_registry.lookup",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.pywrap_tfe.TFE_Py_RegisterVSpace",
"tensorflow.python.pywrap_tfe.TFE_Py_RegisterGradientFunction",
"tensorflow.python.eager.imperative_grad.VSpace",
"tensorflow.python.framework.composite_tensor_gradient.replace_flat_tensors_for_gradients",
"tensorflow.python.framework.ops.tensor_id",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.platform.tf_logging.log_first_n",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.ops.gen_array_ops.identity",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.util._pywrap_utils.IsVariable",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.platform.tf_logging.vlog",
"tensorflow.python.eager.tape.push_new_tape",
"tensorflow.python.eager.tape.watch",
"tensorflow.python.ops.check_ops.assert_equal",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.ops.get_name_scope",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.math_ops._as_indexed_slices_list",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.eager.tape.pop_tape",
"tensorflow.python.eager.tape.watch_variable",
"tensorflow.python.framework.tensor_util.is_tf_type",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.eager.backprop_util.IsTrainable",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.9",
"2.5",
"2.6",
"2.10"
]
}
] |
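The backprop.py source captured in the record above documents `tf.GradientTape` at length in its embedded doctests (watching a constant, persistent tapes, higher-order gradients). As a minimal runnable sketch of that documented usage, assuming only a standard TensorFlow 2.x install (versions 2.5–2.10, per the record's own `possible_versions` metadata) and reusing the values from the doctests in the source:

# Minimal sketch of the tf.GradientTape usage documented in the record above.
# Values mirror the doctests embedded in backprop.py; assumes TensorFlow 2.x.
import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape(persistent=True) as g:
    g.watch(x)      # constants are not watched automatically
    y = x * x       # y = x^2
    z = y * y       # z = x^4

# dz/dx = 4 * x^3 = 108.0 at x = 3.0
print(g.gradient(z, x).numpy())
# dy/dx = 2 * x = 6.0; a second gradient() call is only legal here
# because the tape was created with persistent=True
print(g.gradient(y, x).numpy())
del g  # release the persistent tape's resources explicitly

A non-persistent tape (the default) releases its resources on the first `gradient()` call, which is why the source raises "A non-persistent GradientTape can only be used to compute one set of gradients" on reuse.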
ammar1510/pytorch | [
"ec8d6777255821bed73b471eadddde068cd60c0b"
] | [
"test/test_fx.py"
] | [
"# Owner(s): [\"oncall: fx\"]\n\nimport builtins\nimport contextlib\nimport copy\nimport functools\nimport inspect\nimport math\nimport numbers\nimport operator\nimport os\nimport pickle\nimport sys\nimport torch\nimport traceback\nimport typing\nimport types\nimport warnings\nimport unittest\nfrom math import sqrt\nfrom torch.multiprocessing import Process\nfrom torch.testing import FileCheck\nfrom torch.testing._internal.common_methods_invocations import op_db\nfrom torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests\nimport torch.utils._pytree as pytree\nimport torch.fx._pytree as fx_pytree\nfrom torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen\nfrom torch.fx.node import Target, Argument\nfrom torch.fx.passes import shape_prop\nfrom torch.fx.immutable_collections import immutable_dict, immutable_list\nfrom torch.fx.experimental.rewriter import RewritingTracer\nfrom torch.fx.operator_schemas import get_signature_for_torch_op\nfrom copy import deepcopy\nfrom collections import namedtuple\n\nfrom torch.fx.proxy import TraceError\nfrom torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY\n\nfrom fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401\nfrom fx.test_dce_pass import TestDCE # noqa: F401\nfrom fx.test_fx_const_fold import TestConstFold # noqa: F401\nfrom fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401\n\nif sys.version_info >= (3, 7):\n from fx.test_gradual_type import AnnotationsTest # noqa: F401\nif sys.version_info >= (3, 7):\n from fx.test_gradual_type import TypeCheckerTest # noqa: F401\nfrom typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union\nfrom torch.testing._internal.common_utils import (\n IS_FBCODE,\n IS_MACOS,\n IS_WINDOWS,\n TEST_WITH_ROCM,\n find_library_location,\n run_tests,\n)\nfrom torch.testing._internal.jit_utils import JitTestCase\n\nfrom fx.named_tup import MyNamedTup\n\ntry:\n from torchvision import models as torchvision_models\n HAS_TORCHVISION = True\nexcept ImportError:\n HAS_TORCHVISION = False\nskipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, \"no torchvision\")\n\nclass SimpleTest(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x + 3.0)\n\ndef a_non_torch_leaf(a, b):\n return a + b\n\n# Used for test_autowrap_function. Autowrapped functions need to be global\ndef fx_int(x: float) -> int:\n return int(x)\n\ndef fx_int_x2(x: float) -> int:\n return int(x) * 2\n\n# used in test_pytree. 
It's all the way out here because pickling a GraphModule\n# that uses Point errors out if Point is local to the function\nPoint = namedtuple('Point', ['x', 'y'])\n\n# Test wrap() passing both a function name as well as a function\n# directly\ndef a_lifted_leaf(a, b):\n return a[0] + a[1] + b\n\nwrap('a_lifted_leaf')\n# Test wrapping twice doesn't break anything\nwrap('a_lifted_leaf')\n\ndef a_lifted_leaf2(a, b):\n return a[0] + a[1] + b\n\nwrap(a_lifted_leaf2)\n\nwrap('len')\n\nwrap('getattr')\n\n@wrap\ndef wrapped_via_decorator(a):\n return a + 1\n\nwrap('wrapped_with_submodule')\n\ndef wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):\n return batchnorm1d(x)\n\n\nreal_wrapped_via_decorator = wrapped_via_decorator\nreal_a_lifed_leaf = a_lifted_leaf\nreal_a_lifed_leaf2 = a_lifted_leaf2\n_sqrt = sqrt\n\nwrap('wrapper_fn')\n\ndef wrapper_fn(x):\n return torch.foo(x)\n\nclass Pair(NamedTuple):\n x : torch.Tensor\n y : torch.Tensor\n\n# for testing pytrees\nclass Foo(object): # noqa: B209\n def __init__(self, a, b):\n self.a = a\n self.b = b\n\nclass TestFX(JitTestCase):\n def setUp(self):\n # Checking for mutable operations whil tracing is feature flagged\n # Enable it in testing but not by default\n self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations\n torch.fx.proxy.TracerBase.check_mutable_operations = True\n\n if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):\n lib_file_path = find_library_location('libtorchbind_test.so')\n torch.ops.load_library(str(lib_file_path))\n\n def tearDown(self):\n torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag\n\n def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):\n \"\"\"Check that an nn.Module's results match the GraphModule version\n for a given set of args/kwargs.\n \"\"\"\n kwargs = kwargs if kwargs else {}\n ref_outs = m(*args, **kwargs)\n gm = symbolic_trace(m)\n gm.graph.lint()\n test_outs = gm(*args, **kwargs)\n self.assertEqual(ref_outs, test_outs)\n\n def test_graph_module(self):\n class MySub(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.w = torch.nn.Parameter(torch.rand(4, 3))\n\n def forward(self, x):\n return self.w + x\n\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.lin = torch.nn.Linear(4, 3)\n self.sub_mod = MySub()\n self.w = torch.nn.Parameter(torch.rand(3))\n\n def forward(self, A, B, c):\n t = torch.sigmoid(A) + self.lin(c)\n return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))\n\n m = MyModule()\n gm = symbolic_trace(m)\n\n ms = torch.jit.script(gm)\n\n class M2(torch.nn.Module):\n def forward(self, A):\n m, idx = torch.max(A, 0)\n return m + 1, idx + 1\n\n m2 = M2()\n gm2 = symbolic_trace(m2)\n\n class T(torch.nn.Module):\n\n def forward(self, A, b=4, *args, c=5, **kwargs):\n x = A + 1 + args[0] + kwargs['3']\n return x\n\n t = T()\n symbolic_trace(t)\n\n # test for issue described at https://github.com/pytorch/pytorch/issues/63883\n class M3(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x)\n\n m3 = M3()\n gm3 = symbolic_trace(m3)\n new_instance = gm3.__new__(type(gm3))\n new_instance.__init__(gm3, gm3.graph)\n\n x = torch.randn(5, 3)\n torch.testing.assert_allclose(new_instance(x), torch.relu(x))\n\n def test_custom_import(self):\n graph = torch.fx.Graph()\n a = graph.placeholder('x')\n b = graph.placeholder('y')\n c = graph.call_function(a_non_torch_leaf, (a, b))\n d = graph.call_function(torch.sin, (c,))\n 
graph.output(d)\n gm = GraphModule(torch.nn.Module(), graph)\n x, y = torch.rand(1), torch.rand(1)\n self.assertEqual(torch.sin(x + y), gm(x, y))\n\n def test_args_kwargs(self):\n class T(torch.nn.Module):\n def forward(self, *args, **kwargs):\n x = args[0] + kwargs['foo']\n return x\n\n t = T()\n self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})\n\n def test_args_kwargs_no_self(self):\n class T(torch.nn.Module):\n def forward(*args, **kwargs): # noqa: B902\n self = args[0]\n return torch.relu(args[1])\n\n t = T()\n with self.assertRaisesRegex(RuntimeError, r'cannot be part of \\*args expansion'):\n self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})\n\n def test_fx_shifts(self):\n class MyModule(torch.nn.Module):\n def forward(self, x):\n return x << 3, x >> 3\n\n input = torch.LongTensor(10).random_(0, 1024)\n\n m = MyModule()\n self.checkGraphModule(m, (input,))\n\n def test_fx_and_or(self):\n class MyModule(torch.nn.Module):\n def forward(self, x):\n return x & x, x | x\n\n input = torch.LongTensor(10).random_(0, 1024)\n\n m = MyModule()\n self.checkGraphModule(m, (input,))\n\n def test_dict(self):\n class MyDictMod(torch.nn.Module):\n def forward(self, d):\n return d['3'].relu(), {'4' : d['3'].neg()}\n\n input_dict = {'3': torch.rand(3, 4)}\n m = MyDictMod()\n\n self.checkGraphModule(m, (input_dict,))\n\n def test_matmul_tracing(self):\n const = torch.randn(3)\n\n def matmul_f(x):\n return x @ const\n\n mod = symbolic_trace(matmul_f)\n inp = torch.randn(3)\n self.assertEqual(mod(inp), matmul_f(inp))\n\n def rmatmul_f(x):\n return const @ x\n\n mod = symbolic_trace(rmatmul_f)\n inp = torch.randn(3)\n self.assertEqual(mod(inp), rmatmul_f(inp))\n\n\n def test_disallow_override(self):\n # Custom delegate to disallow in-place tensor operations\n class NoMutableCallTracer(Tracer):\n def create_node(self, kind : str, target : Union[str, Callable],\n args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,\n type_expr : Optional[Any] = None) -> Node:\n name = target if isinstance(target, str) else torch.typename(target)\n if name[-1] == '_':\n raise RuntimeError('In-place operations are not supported')\n return super().create_node(kind, target, args, kwargs, name)\n\n # Test method\n class MyInplaceMod(torch.nn.Module):\n def forward(self, x):\n x.add_(3.0)\n return x\n\n m = MyInplaceMod()\n\n with self.assertRaisesRegex(RuntimeError, 'In-place operations'):\n NoMutableCallTracer().trace(m)\n\n # Test free function\n class MyInplaceMod2(torch.nn.Module):\n def forward(self, x):\n torch.log_(x)\n return x\n m2 = MyInplaceMod2()\n with self.assertRaisesRegex(RuntimeError, 'In-place operations'):\n NoMutableCallTracer().trace(m2)\n\n # Test symbolic node as an arg\n class MyInplaceMod3(torch.nn.Module):\n def forward(self, x):\n y = torch.ones(3, 4)\n y.add_(x)\n return x\n m3 = MyInplaceMod3()\n with self.assertRaisesRegex(RuntimeError, 'In-place operations'):\n NoMutableCallTracer().trace(m3)\n\n def test_leaf_module(self):\n # Custom delegate to make it so that there are no leaf modules, everything\n # should get traced through\n class NoLeafModulesTracer(Tracer):\n def is_leaf_module(self, m, qualname):\n return False\n\n class MyReluMod(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n return self.relu(x)\n\n mrm = MyReluMod()\n sym = NoLeafModulesTracer().trace(mrm)\n for node in sym.nodes:\n self.assertNotEqual(node.op, 
'call_module')\n sym.lint()\n\n def test_wrap(self):\n self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))\n\n def to_trace(y):\n return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)\n\n m = symbolic_trace(to_trace)\n self.assertIn('a_lifted_leaf', m.code)\n self.assertEqual(27, m(2))\n self.assertIs(a_lifted_leaf, real_a_lifed_leaf)\n\n def test_wrap_fn_directly(self):\n self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))\n\n def to_trace(y):\n return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)\n\n m = symbolic_trace(to_trace)\n self.assertIn('a_lifted_leaf2', m.code)\n self.assertEqual(27, m(2))\n self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)\n\n def test_wrapped_via_decorator(self):\n self.assertEqual(wrapped_via_decorator(0), 1)\n\n def to_trace(y):\n return wrapped_via_decorator(y)\n\n m = symbolic_trace(to_trace)\n self.assertIn('wrapped_via_decorator', m.code)\n self.assertEqual(m(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, \"__fx_already_patched\"))\n\n def test_wrapped_via_decorator_and_transformed(self):\n self.assertEqual(wrapped_via_decorator(0), 1)\n\n def to_trace(y):\n return wrapped_via_decorator(y)\n\n m = symbolic_trace(to_trace)\n self.assertIn('wrapped_via_decorator', m.code)\n self.assertEqual(m(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, \"__fx_already_patched\"))\n\n transformed = torch.fx.Transformer(m).transform()\n self.assertIn('wrapped_via_decorator', transformed.code)\n self.assertEqual(transformed(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, \"__fx_already_patched\"))\n\n def test_wrap_with_submodule(self):\n\n class M(torch.nn.Module):\n def __init__(self):\n super(M, self).__init__()\n self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)\n\n def forward(self, x: torch.Tensor):\n return wrapped_with_submodule(x, self.batchnorm1d)\n\n m = symbolic_trace(M())\n\n self.assertIn(\"wrapped_with_submodule\", m.code)\n\n input = torch.rand(3, 2)\n ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)\n self.assertEqual(ref_batchnorm1d(input), m(input))\n\n def test_wrapped_retrace(self):\n def to_trace(y):\n return wrapped_via_decorator(y)\n\n m = symbolic_trace(to_trace)\n self.assertIn('wrapped_via_decorator', m.code)\n self.assertEqual(m(0), 1)\n\n retraced = symbolic_trace(m)\n self.assertIn('wrapped_via_decorator', retraced.code)\n self.assertEqual(retraced(0), 1)\n\n def test_graph_edit_with_proxy(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n m = M()\n g = symbolic_trace(m).graph\n new_g = torch.fx.Graph()\n val_map : Dict[Node, Node] = {}\n output_val = new_g.graph_copy(g, val_map)\n t = Proxy(output_val)\n # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.\n new_g.output((t + t).node)\n gm = GraphModule(m, new_g)\n gm.graph.lint()\n self.assertEqual(gm(3, 4), 14)\n\n def test_graph_unique_names(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n m = M()\n g = symbolic_trace(m).graph\n new_g = torch.fx.Graph()\n val_map : Dict[Node, Node] = {}\n output_val = new_g.graph_copy(g, val_map)\n t = Proxy(output_val)\n # test that we can use proxy objects to generate more graph code later for things that do not need to 
work with modules.\n new_g.output((t + t).node)\n gm = GraphModule(m, new_g)\n seen_names : Set[str] = set()\n for node in gm.graph.nodes:\n assert node.name not in seen_names\n seen_names.add(node.name)\n\n def test_stack_traces(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n\n tracer = torch.fx.Tracer()\n tracer.record_stack_traces = True\n\n graph = tracer.trace(M())\n # saving the original list because we will insert new nodes as a part of a test\n orig_graph_nodes = list(graph.nodes)\n for node in orig_graph_nodes:\n if node.op == 'output':\n continue\n self.assertTrue(node.stack_trace is not None)\n assert 'test_fx.py' in node.stack_trace\n\n # verify that copying the node does not lose the stack trace\n new_node = graph.node_copy(node)\n self.assertTrue(new_node.stack_trace is not None)\n assert 'test_fx.py' in new_node.stack_trace\n\n def test_graph_unique_names_manual(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n a : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')\n c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')\n d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))\n graph.output(d)\n graph2 = torch.fx.Graph()\n val_map : Dict[Node, Node] = {}\n graph2.graph_copy(graph, val_map)\n seen_names : Set[str] = set()\n for node in graph2.nodes:\n assert node.name not in seen_names\n seen_names.add(node.name)\n\n def test_unpack(self):\n class M(torch.nn.Module):\n def forward(self, a, b):\n c, d = a\n return c + d + b\n\n a = (torch.rand(1), torch.rand(1))\n b = torch.rand(1)\n m = M()\n self.checkGraphModule(m, (a, b))\n\n def test_native_callable(self):\n if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:\n raise unittest.SkipTest(\"non-portable load_library call used in test\")\n # This test exercises the case where we use FX to translate from Python\n # code to some native callable object\n #\n # For the purposes of testing, we use ElementwiseInterpreter defined\n # in test_custom_class.cpp.\n #\n # We test that we can\n # 1) Construct a native callable from FX IR\n # 2) Construct a drop-in replacement module that delegates to the\n # native callable rather than the original code\n # 3) Run both the original code and native callable wrapper with\n # equivalent results\n # 4) TorchScript compile the native callable wrapper and confirm\n # equivalent results with the reference\n # 5) TorchScript serialize and deserialize the native callable\n # and confirm equivalent results with the reference\n\n # We use this simple Module as a reference computation\n class MySimpleMod(torch.nn.Module):\n def forward(self, x):\n return 3.0 * x + x\n\n msm = MySimpleMod()\n\n # This is what a lowering pass might look like: a function that takes\n # a valid nn.Module, symbolically traces it, lowers the Module to some\n # representation, and wraps that representation up into another\n # nn.Module instance that handles dispatch to the compiled/lowered code.\n def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:\n # ===== Stage 1: Symbolic trace the module =====\n mod = symbolic_trace(orig_mod)\n\n # ===== Stage 2: Lower GraphModule representation to the C++\n # interpreter's instruction format ======\n instructions = []\n constant_idx = 0\n constants = {}\n fn_input_names = []\n\n target_to_name = {\n operator.add : \"add\",\n operator.mul : \"mul\"\n }\n\n output_node : 
Optional[Node] = None\n # For each instruction, create a triple\n # (instruction_name : str, inputs : List[str], output : str)\n # to feed into the C++ interpreter\n for n in mod.graph.nodes:\n target, args, out_name = n.target, n.args, n.name\n assert len(n.kwargs) == 0, \"kwargs currently not supported\"\n\n if n.op == 'placeholder':\n # Placeholders specify function argument names. Save these\n # for later when we generate the wrapper GraphModule\n fn_input_names.append(target)\n elif n.op == 'call_function':\n assert target in target_to_name, \"Unsupported call target \" + target\n arg_names = []\n for arg in args:\n if not isinstance(arg, Node):\n # Pull out constants. These constants will later be\n # fed to the interpreter C++ object via add_constant()\n arg_name = f'constant_{constant_idx}'\n constants[arg_name] = torch.tensor(\n [arg] if isinstance(arg, numbers.Number) else arg)\n arg_names.append(arg_name)\n constant_idx += 1\n else:\n arg_names.append(arg.name)\n instructions.append((target_to_name[target], arg_names, out_name))\n elif n.op == 'output':\n if output_node is not None:\n raise RuntimeError('Multiple output nodes!')\n output_node = n\n else:\n raise RuntimeError('Unsupported opcode ' + n.op)\n\n interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()\n # Load constants\n for k, v in constants.items():\n interpreter.add_constant(k, v)\n # Specify names for positional input arguments\n interpreter.set_input_names(fn_input_names)\n # Load instructions\n interpreter.set_instructions(instructions)\n # Specify name for single output\n assert isinstance(output_node.args[0], torch.fx.Node)\n interpreter.set_output_name(output_node.args[0].name)\n\n # ===== Stage 3: Create a wrapper GraphModule around the interpreter =====\n class WrapperModule(torch.nn.Module):\n def __init__(self, interpreter):\n super().__init__()\n self.interpreter = interpreter\n\n wrapper = WrapperModule(interpreter)\n\n # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter\n # 3) Returns the speficied return value\n\n # FIXME: The following code could be greatly simplified by symbolic_trace'ing\n # the wrapper with a Tracer that considers the Wrapper instance a root\n # module, however, I can't get `__call__` exposed on TorchBind classes\n # without it messing up Python `hasattr` for some reason. 
More digging\n # into CPython's implementation of hasattr is probably in order...\n\n graph = torch.fx.Graph()\n # Add placeholders for fn inputs\n placeholder_nodes = []\n for name in fn_input_names:\n placeholder_nodes.append(graph.create_node('placeholder', name))\n\n # Get the interpreter object\n interpreter_node = graph.create_node('get_attr', 'interpreter')\n\n # Add a node to call the interpreter instance\n output_node = graph.create_node(\n op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))\n\n # Register output\n graph.output(output_node)\n\n graph.lint()\n\n # Return final GraphModule!!!\n return GraphModule(wrapper, graph)\n\n\n # Lower GraphModule to C++ interpreter\n lowered = lower_to_elementwise_interpreter(msm)\n\n # Compare correctness with original module\n x = torch.rand(3, 4)\n ref_out = msm(x)\n test_out = lowered(x)\n torch.testing.assert_close(test_out, ref_out)\n\n # Test TorchScript compilation\n scripted_lowered = torch.jit.script(lowered)\n script_out = scripted_lowered(x)\n torch.testing.assert_close(script_out, ref_out)\n\n # Test TorchScript ser/de\n import_copy = self.getExportImportCopy(scripted_lowered)\n imported_out = import_copy(x)\n torch.testing.assert_close(imported_out, ref_out)\n\n def test_reserved_getattr(self):\n \"\"\"Ensure that we do not name any nodes with a reserved builtin like `getattr`\"\"\"\n class M(torch.nn.Module):\n def forward(self, a):\n return a.foo.bar.baz\n\n m = M()\n m_g = symbolic_trace(m)\n m_g.graph.lint()\n for node in m_g.graph.nodes:\n self.assertTrue(node.name != \"getattr\")\n\n def test_node_tagging(self):\n class TaggingTracer(Tracer):\n def create_node(self, kind : str, target : Union[str, Callable],\n args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,\n type_expr : Optional[Any] = None) -> Node:\n n = super().create_node(kind, target, args, kwargs, name)\n n.tag = 'foo'\n return n\n\n class M(torch.nn.Module):\n def forward(self, a, b):\n return a + b\n\n m = M()\n g = TaggingTracer().trace(m)\n g.lint()\n for n in g.nodes:\n self.assertTrue(hasattr(n, 'tag'))\n self.assertEqual(n.tag, 'foo')\n\n def test_tensor_attribute(self):\n class TensorAttribute(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.tensor = torch.rand(3, 4)\n\n def forward(self, x):\n return torch.nn.functional.linear(x, self.tensor)\n\n ta = TensorAttribute()\n traced = symbolic_trace(ta)\n traced(torch.rand(4, 4))\n\n class WrapperForQualname(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.ta = TensorAttribute()\n\n def forward(self, x):\n return torch.nn.functional.linear(x, self.ta.tensor)\n\n wfq = WrapperForQualname()\n traced2 = symbolic_trace(wfq)\n traced2.graph.lint()\n traced2(torch.rand(4, 4))\n\n def test_tensor_attribute_coalseced(self):\n\n def count_attrs(fx_module):\n targets = set()\n for node in traced.graph.nodes:\n if node.op == 'get_attr':\n targets.add(node.target)\n return len(targets)\n\n val = torch.tensor(5)\n\n def f(x):\n return x + val + val\n traced = symbolic_trace(f)\n traced.graph.lint()\n self.assertEqual(count_attrs(traced), 1)\n\n val2 = torch.tensor(5)\n\n def f(x):\n val = torch.tensor(5)\n return x + val + val2\n\n traced = symbolic_trace(f)\n traced.graph.lint()\n self.assertEqual(count_attrs(traced), 2)\n\n\n def test_symbolic_trace_sequential(self):\n class Simple(torch.nn.Module):\n def forward(self, x):\n return torch.neg(x)\n\n seq = torch.nn.Sequential(\n Simple(),\n Simple(),\n Simple()\n )\n traced 
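# Note on test_tensor_attribute_coalseced above: its count_attrs helper
# closes over the enclosing `traced` variable instead of using its
# `fx_module` parameter (harmless there, since the same object is passed
# in, but misleading). A corrected, self-contained sketch:
def count_get_attr_targets(fx_module):
    # Count distinct get_attr targets; coalesced constants share one target.
    targets = set()
    for node in fx_module.graph.nodes:
        if node.op == 'get_attr':
            targets.add(node.target)
    return len(targets)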
= symbolic_trace(seq)\n traced.graph.lint()\n x = torch.rand(3, 4)\n self.assertEqual(traced(x), seq(x))\n\n def test_tensor_constant(self):\n class ConstTensor(torch.nn.Module):\n def forward(self, x):\n return torch.nn.functional.linear(x, torch.zeros(3, 4))\n\n ct = ConstTensor()\n traced = symbolic_trace(ct)\n traced.graph.lint()\n traced(torch.rand(4, 4))\n\n def test_pickle_graphmodule(self):\n class Nested(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.st = torch.nn.Linear(4, 4)\n\n def forward(self, x):\n return self.st(x)\n\n n = Nested()\n traced = symbolic_trace(n)\n traced.graph.lint()\n pickled = pickle.dumps(traced)\n loaded = pickle.loads(pickled)\n loaded.graph.lint()\n x = torch.rand(3, 4)\n self.assertEqual(loaded(x), traced(x))\n\n def test_pickle_custom_import(self):\n graph = torch.fx.Graph()\n a = graph.placeholder('x')\n b = graph.placeholder('y')\n c = graph.call_function(a_non_torch_leaf, (a, b))\n d = graph.call_function(torch.sin, (c,))\n graph.output(d)\n gm = GraphModule(torch.nn.Module(), graph)\n pickled = pickle.dumps(gm)\n loaded = pickle.loads(pickled)\n loaded.graph.lint()\n x, y = torch.rand(1), torch.rand(1)\n self.assertEqual(loaded(x, y), gm(x, y))\n\n def test_all_input_nodes(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n a : torch.fx.Node = graph.placeholder('x')\n b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))\n c : torch.fx.Node = graph.get_attr('y_attr')\n d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))\n e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))\n graph.output(e)\n graph.lint()\n\n self.assertEqual(b.all_input_nodes, [a])\n self.assertEqual(c.all_input_nodes, [])\n self.assertEqual(d.all_input_nodes, [b, c])\n self.assertEqual(e.all_input_nodes, [d])\n\n def test_deepcopy_graphmodule_with_transform(self):\n st = SimpleTest()\n traced = symbolic_trace(st)\n traced.graph.lint()\n\n def transform(traced):\n new_graph = torch.fx.Graph()\n val_map : Dict[Node, Node] = {}\n output_value = new_graph.graph_copy(traced.graph, val_map)\n relu_out = new_graph.create_node(\n op='call_method', target='neg', args=(output_value,), kwargs={})\n new_graph.output(relu_out)\n return GraphModule(traced, new_graph)\n transformed = transform(traced)\n transformed.graph.lint()\n copied = copy.deepcopy(transformed)\n self.assertNotEqual(id(type(transformed)), id(type(copied)))\n x = torch.randn(3, 4)\n self.assertEqual(copied(x), transformed(x))\n\n def test_deepcopy_with_submods_params(self):\n class Bar(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param = torch.nn.Parameter(torch.rand(3, 4))\n\n def forward(self, x):\n return torch.relu(x) + self.param\n\n class Baz(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param = torch.nn.Parameter(torch.rand(3, 4))\n self.bar = Bar()\n\n def forward(self, x):\n return self.bar(x) - self.param\n\n baz = Baz()\n traced = symbolic_trace(baz)\n traced.graph.lint()\n copied = copy.deepcopy(traced)\n copied.graph.lint()\n\n def test_deepcopy_graph_with_tracer_cls(self):\n class TestTracer(Tracer):\n def is_leaf_module(self, module, name):\n return True\n\n g = Graph(tracer_cls=TestTracer)\n x = g.placeholder(\"x\")\n g.output(x)\n\n h = copy.deepcopy(g)\n self.assertIsNotNone(h._tracer_cls)\n self.assertTrue(g._tracer_cls == h._tracer_cls)\n\n def test_unpack_list_better_error(self):\n class SomeArgs(torch.nn.Module):\n def forward(self, a, b):\n return torch.rand(3, 4)\n\n class 
UnpacksList(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.sa = SomeArgs()\n\n def forward(self, x : list):\n return self.sa(*x)\n\n ul = UnpacksList()\n with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):\n symbolic_trace(ul)\n\n def test_unpack_dict_better_error(self):\n class SomeKwargs(torch.nn.Module):\n def forward(self, x=3, y=4):\n return torch.rand(3, 4)\n\n class UnpacksDict(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.sk = SomeKwargs()\n\n def forward(self, x : dict):\n return self.sk(**x)\n\n ud = UnpacksDict()\n with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):\n symbolic_trace(ud)\n\n def test_pretty_print_targets(self):\n # Test that Graph pretty-print prints friendly name for targets\n # in `operator` and `builtins`\n\n class SomeMod(torch.nn.Module):\n def forward(self, x):\n return torch.add(x.foo + x.bar, 3.0)\n\n traced = symbolic_trace(SomeMod())\n graph_str = str(traced.graph)\n self.assertIn('builtins.getattr', graph_str)\n self.assertIn('operator.add', graph_str)\n self.assertIn('torch.add', graph_str)\n\n def test_pretty_print_node(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param: torch.nn.Parameter = torch.nn.Parameter(\n torch.rand(3, 4))\n self.linear = torch.nn.Linear(4, 5)\n\n def forward(self, x: torch.Tensor, y: int = 2):\n return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)\n\n traced = symbolic_trace(M())\n\n all_formatted = \"\\n\".join([n.format_node() for n in traced.graph.nodes])\n\n FileCheck().check(\"x\").check(\"placeholder\") \\\n .check(\"y\").check(\"placeholder\") \\\n .check(\"getitem\").check(\"call_function\") \\\n .check(\"param\").check(\"get_attr\") \\\n .check(\"add\").check(\"call_function\") \\\n .check(\"linear\").check(\"call_module\") \\\n .check(\"clamp\").check(\"call_method\") \\\n .run(all_formatted)\n\n def test_script_tensor_constant(self):\n # TorchScript seems to ignore attributes that start with `__`.\n # We used to call anonymous Tensor values `__tensor_constant*`, but\n # they were getting ignored by script. 
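# Sketch of the graph-inspection idiom the pretty-print tests above rely on:
# str(graph) renders friendly names for `operator`/`builtins` targets, and
# Node.format_node() gives a one-line summary per node. The module below is
# illustrative only.
import torch
import torch.fx

class AddMod(torch.nn.Module):
    def forward(self, x):
        return torch.add(x + 1, 3.0)

traced = torch.fx.symbolic_trace(AddMod())
print(str(traced.graph))            # whole-graph listing
for n in traced.graph.nodes:
    print(n.format_node())          # per-node line, as checked above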
Now they're called\n # `_tensor_constant*`\n class IHaveATensorConstant(torch.nn.Module):\n def forward(self, x):\n return x + torch.rand(3, 4)\n\n traced = torch.fx.symbolic_trace(IHaveATensorConstant())\n torch.jit.script(traced)\n\n def test_autowrap_functions(self):\n class AutowrapFnTest(torch.nn.Module):\n def forward(self, x):\n return fx_int(x.shape[0] / 2)\n\n class AutowrapFnTest2(torch.nn.Module):\n def forward(self, x):\n return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)\n\n # Check function(s) are wrapped\n # `int` would normally throw a TypeError as argument can't be `Proxy`\n tracer = Tracer(autowrap_functions=(fx_int,))\n graph = tracer.trace(AutowrapFnTest())\n traced = GraphModule(tracer.root, graph, 'test')\n tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))\n tracer_2.trace(AutowrapFnTest2())\n\n # Test scriptability\n traced_scripted = torch.jit.script(traced)\n self.assertEqual(traced_scripted(torch.rand(4)), 2)\n\n def test_torch_fx_len(self):\n class FXLenTest(torch.nn.Module):\n def forward(self, x):\n return len(x)\n\n traced = symbolic_trace(FXLenTest())\n self.assertEqual(traced(torch.rand(3, 4)), 3)\n\n # Test scriptability\n scripted = torch.jit.script(FXLenTest())\n self.assertEqual(scripted(torch.rand(3)), 3)\n\n traced_scripted = torch.jit.script(traced)\n self.assertEqual(traced_scripted(torch.rand(3)), 3)\n\n # Test non-proxy len\n class FXLenTest2(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.l = [3, 4, 5]\n\n def forward(self, x):\n return x + len(self.l)\n\n traced2 = symbolic_trace(FXLenTest2())\n inp = torch.rand(3, 4)\n self.assertEqual(traced2(inp), inp + 3.0)\n self.assertIs(len, builtins.len)\n\n def test_torch_fx_getattr(self):\n class FXGetattrTest(torch.nn.Module):\n def forward(self, x):\n return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))\n\n traced = symbolic_trace(FXGetattrTest())\n self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))\n\n def test_sqrt(self):\n class Sqrt1(torch.nn.Module):\n def forward(self, x):\n return sqrt(x.size(0))\n\n class Sqrt2(torch.nn.Module):\n def forward(self, x):\n return math.sqrt(x.size(0))\n\n class Sqrt3(torch.nn.Module):\n def forward(self, x):\n return x + math.sqrt(2) + sqrt(2)\n\n self.checkGraphModule(Sqrt1(), [torch.zeros(8)])\n self.checkGraphModule(Sqrt2(), [torch.zeros(8)])\n self.checkGraphModule(Sqrt3(), [torch.zeros(8)])\n self.assertIs(sqrt, _sqrt)\n self.assertIs(math.sqrt, _sqrt)\n\n def test_torch_custom_ops(self):\n class M(torch.nn.Module):\n def forward(self, a):\n b = torch.ops.aten.sigmoid(a)\n c = torch.ops.aten.cat([a, b])\n return torch.ops.aten.cat((c, c))\n m = M()\n input = torch.randn(3)\n ref_out = m(input)\n gm = symbolic_trace(m)\n gm.graph.lint()\n out = gm(input)\n self.assertEqual(out, ref_out)\n\n def test_pickle_torch_custom_ops(self):\n class M(torch.nn.Module):\n def forward(self, a):\n b = torch.ops.aten.sigmoid(a)\n c = torch.ops.aten.cat([a, b])\n return torch.ops.aten.cat((c, c))\n m = M()\n input = torch.randn(3)\n ref_out = m(input)\n gm = symbolic_trace(m)\n gm.graph.lint()\n pickled = pickle.dumps(gm)\n loaded = pickle.loads(pickled)\n self.assertEqual(loaded(input), gm(input))\n\n def test_pretty_print(self):\n st = SimpleTest()\n traced = symbolic_trace(st)\n traced.graph.lint()\n printed = str(traced)\n assert 'SimpleTest()' in printed\n assert 'torch.relu' in printed\n\n def test_pretty_print_graph(self):\n class KwargPrintTest(torch.nn.Module):\n def forward(self, x):\n return torch.squeeze(x + 
3.0, dim=2)\n st = KwargPrintTest()\n traced = symbolic_trace(st)\n traced.graph.lint()\n stringed = str(traced.graph)\n for s in ['args', 'kwargs', '#users']:\n assert s in stringed\n\n def test_custom_proxy_type(self):\n class TensorPair:\n def __init__(self, left, right):\n self.left, self.right = left, right\n\n def add(self, other):\n l = self.left + other.left\n r = self.right + other.right\n return TensorPair(l, r)\n\n def mul(self, other):\n l = self.left * other.left\n r = self.right * other.right\n return TensorPair(l, r)\n\n def use_tensor_pair(x : TensorPair, y : TensorPair):\n s = x.add(y)\n return s.mul(x)\n\n x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))\n y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))\n\n ref_out = use_tensor_pair(x, y)\n\n traced = symbolic_trace(use_tensor_pair)\n\n traced_out = traced(x, y)\n self.assertEqual(traced_out.left, ref_out.left)\n self.assertEqual(traced_out.right, ref_out.right)\n\n def test_custom_proxy_type_literal(self):\n class TensorPair(metaclass=torch.fx.ProxyableClassMeta):\n def __init__(self, left, right):\n self.left, self.right = left, right\n\n def add(self, other):\n l = self.left + other.left\n r = self.right + other.right\n return TensorPair(l, r)\n\n def mul(self, other):\n l = self.left * other.left\n r = self.right * other.right\n return TensorPair(l, r)\n\n def use_tensor_pair_literal(x : TensorPair):\n s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))\n return s.mul(x)\n\n x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))\n\n ref_out = use_tensor_pair_literal(x)\n\n traced = symbolic_trace(use_tensor_pair_literal)\n\n traced_out = traced(x)\n self.assertEqual(traced_out.left, ref_out.left)\n self.assertEqual(traced_out.right, ref_out.right)\n\n def test_custom_proxy_dynamic_value(self):\n class TensorPair(metaclass=torch.fx.ProxyableClassMeta):\n def __init__(self, left, right):\n self.left, self.right = left, right\n\n def add(self, other):\n l = self.left + other.left\n r = self.right + other.right\n return TensorPair(l, r)\n\n def mul(self, other):\n l = self.left * other.left\n r = self.right * other.right\n return TensorPair(l, r)\n\n def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):\n s = x.add(TensorPair(y, y))\n return s.mul(x)\n\n x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))\n y = torch.randn(5, 3)\n ref_out = use_tensor_pair_ctor(x, y)\n\n traced = symbolic_trace(use_tensor_pair_ctor)\n\n traced_out = traced(x, y)\n self.assertEqual(traced_out.left, ref_out.left)\n self.assertEqual(traced_out.right, ref_out.right)\n\n def test_custom_proxy_input_dependent_control_flow(self):\n class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):\n def __init__(self, inp):\n if inp.sum() == 0:\n self.is_zero = True\n self.tensor = torch.tensor([])\n else:\n self.is_zero = False\n self.tensor = inp\n\n def add(self, other):\n if self.is_zero:\n return ZeroTensor(other.tensor)\n elif other.is_zero:\n return self\n\n def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):\n return ZeroTensor(x + y)\n\n x, y = torch.randn(5, 3), torch.randn(5, 3)\n\n ref_out = use_zero_tensor(x, y)\n\n traced = symbolic_trace(use_zero_tensor)\n\n traced_out = traced(x, y)\n\n self.assertEqual(traced_out.is_zero, ref_out.is_zero)\n self.assertEqual(traced_out.tensor, ref_out.tensor)\n\n def test_graph_fns(self):\n g = Graph()\n a = g.placeholder('a')\n b = g.call_module('linear', (a,))\n c = g.get_attr('bias')\n d = g.call_method('add', (b, c))\n e = g.call_function(torch.sin, (d,))\n g.output(e)\n mod 
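# The manual graph-building pattern from test_graph_fns, reduced to a toy:
# placeholder/call_function/output are thin wrappers over create_node, and
# lint() checks the result before it is wrapped in a GraphModule.
import torch
import torch.fx

g = torch.fx.Graph()
x = g.placeholder('x')
y = g.call_function(torch.relu, (x,))
g.output(y)
g.lint()
gm = torch.fx.GraphModule(torch.nn.Module(), g)
assert torch.equal(gm(torch.tensor([-1.0, 2.0])), torch.tensor([0.0, 2.0]))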
= torch.nn.Module()\n mod.linear = torch.nn.Linear(3, 4)\n mod.bias = torch.rand(4)\n gm = GraphModule(mod, g)\n gm.graph.lint()\n input = torch.rand(3)\n r = gm(input)\n ref = torch.sin(mod.linear(input) + mod.bias)\n self.assertEqual(r, ref)\n\n def test_remove_uses(self):\n g : torch.fx.Graph = Graph()\n x : torch.fx.Node = g.placeholder('x')\n relu : torch.fx.Node = g.call_function(torch.relu, (x,))\n neg : torch.fx.Node = g.call_function(torch.neg, (relu,))\n g.output(neg)\n\n neg.replace_all_uses_with(relu)\n g.erase_node(neg)\n\n self.assertTrue(neg not in relu.users)\n\n def test_nonetype_annotation(self):\n eb = torch.nn.EmbeddingBag(3, 4)\n symbolic_trace(eb)\n\n def test_pickle_nonetype_annotation(self):\n eb = torch.nn.EmbeddingBag(10, 3, mode='sum')\n traced = symbolic_trace(eb)\n pickled = pickle.dumps(traced)\n loaded = pickle.loads(pickled)\n loaded.graph.lint()\n input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])\n offsets = torch.LongTensor([0, 4])\n self.assertEqual(loaded(input, offsets), traced(input, offsets))\n\n def test_return_tuple(self):\n class M(torch.nn.Module):\n def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n return (x, x + x)\n\n\n original = M()\n traced = symbolic_trace(original)\n self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))\n\n def test_construct_root_dict(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n a : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))\n c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')\n d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))\n graph.output(d)\n\n linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)\n add_param : torch.Tensor = torch.rand(3, 4)\n gm : torch.fx.GraphModule = torch.fx.GraphModule(\n {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)\n gm.graph.lint()\n\n assert 'self.foo.bar.baz' in gm.code\n\n x : torch.Tensor = torch.rand(3, 3)\n out : torch.Tensor = gm(x)\n ref_out : torch.Tensor = linear_mod(x) + add_param\n self.assertEqual(out, ref_out)\n\n def test_symbolic_trace_assert(self):\n\n class AssertsTensorShape(torch.nn.Module):\n def forward(self, x):\n torch._assert(x.shape[1] > 4, \"assert_foobar\")\n return x\n\n m = AssertsTensorShape()\n # verify traceability\n traced = symbolic_trace(m)\n # verify assertion on traced model works correctly at runtime\n traced(torch.rand(4, 5))\n with self.assertRaisesRegex(AssertionError, \"assert_foobar\"):\n traced(torch.rand(4, 3))\n # verify the symbolically traced module is scriptable\n ms = torch.jit.script(m)\n with self.assertRaisesRegex(torch.jit.Error, \"assert_foobar\"):\n ms(torch.rand(4, 3))\n\n def test_fx_create_arg(self):\n class CustomArgObject:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __fx_create_arg__(self, tracer: torch.fx.Tracer):\n return tracer.create_node(\n \"call_function\",\n CustomArgObject,\n args=(\n tracer.create_arg(self.x),\n tracer.create_arg(self.y),\n ),\n kwargs={},\n )\n\n class HasCustomArgObjectWhenLeaf(torch.nn.Module):\n def forward(self, o: CustomArgObject):\n # Not normally traceable; good reason to make\n # this module a leaf.\n for x in o.x:\n o.y += x\n return o.y\n\n class Root(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.inner = HasCustomArgObjectWhenLeaf()\n\n def forward(self, x, y):\n o = CustomArgObject(x, y)\n return self.inner(o)\n\n class 
CreateArgTracer(torch.fx.Tracer):\n def is_leaf_module(self, m, module_qualified_name):\n return type(m) is HasCustomArgObjectWhenLeaf\n\n m = Root()\n graph = CreateArgTracer().trace(m)\n gm = torch.fx.GraphModule(m, graph)\n assert \"CustomArgObject(\" in gm.code\n\n def test_trace_fn_constant(self):\n some_constant = torch.rand(3, 4)\n\n def add_const(x):\n return some_constant + x\n\n traced = symbolic_trace(add_const)\n\n input = torch.rand(3, 4)\n self.assertEqual(traced(input), add_const(input))\n\n def test_copy_no_remap(self):\n traced = symbolic_trace(SimpleTest())\n g = traced.graph\n copied = torch.fx.Graph()\n for node in g.nodes:\n copied.node_copy(node)\n with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):\n copied.lint()\n\n def test_wrong_topo(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n a : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))\n c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')\n d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))\n graph.output(d)\n nodes = list(graph.nodes)\n nodes[3].append(nodes[2])\n with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):\n graph.lint()\n\n def test_wrong_target_type(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n with self.assertRaises(ValueError):\n n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',\n args=(), kwargs={})\n\n def test_example_shape_prop(self):\n class TestCase(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.attr = torch.randn(3, 4)\n self.submod = torch.nn.Linear(4, 4)\n\n def forward(self, x):\n return torch.neg(self.submod(x.relu() + self.attr))\n tc = TestCase()\n tc_traced = symbolic_trace(tc)\n ref_out = tc_traced(torch.rand(3, 4))\n shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))\n\n # Make sure we're testing all opcodes\n opcodes = set()\n output_shape : Optional[torch.Shape] = None\n output_stride : Optional[Tuple[int]] = None\n for node in tc_traced.graph.nodes:\n opcodes.add(node.op)\n if node.op == 'output':\n output_shape = node.args[0].meta['tensor_meta'].shape\n output_stride = node.args[0].meta['tensor_meta'].stride\n self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',\n 'call_module', 'output']))\n\n # Test shape propogation and make sure results match actual\n self.assertEqual(output_shape, ref_out.shape)\n self.assertEqual(output_stride, ref_out.stride())\n\n def test_shape_prop_layout(self):\n class ConvTest(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv_mod = torch.nn.Conv2d(5, 5, 3)\n\n def forward(self, x):\n return self.conv_mod(x)\n\n # contiguous layout\n test_mod = ConvTest()\n traced = symbolic_trace(test_mod)\n x = torch.randn(5, 5, 224, 224)\n shape_prop.ShapeProp(traced).propagate(x)\n\n assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format\n for node in traced.graph.nodes))\n\n x_channels_last = x.contiguous(memory_format=torch.channels_last)\n traced.to(memory_format=torch.channels_last)\n shape_prop.ShapeProp(traced).propagate(x_channels_last)\n for node in traced.graph.nodes:\n # NB: the implementation of conv may not preserve the memory format,\n # unfortunately. 
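# How the ShapeProp passes above work, in miniature: propagate() executes the
# module once and records shape/dtype (and layout) per node under
# node.meta['tensor_meta']. Incidentally, the local annotation
# Optional[torch.Shape] in test_example_shape_prop is never evaluated at
# runtime; the recorded shape is a torch.Size. The example module below is
# illustrative only.
import torch
import torch.fx
from torch.fx.passes.shape_prop import ShapeProp

gm = torch.fx.symbolic_trace(torch.nn.Linear(4, 5))
ShapeProp(gm).propagate(torch.randn(3, 4))
for node in gm.graph.nodes:
    meta = node.meta.get('tensor_meta')
    if meta is not None:
        print(node.name, tuple(meta.shape), meta.dtype)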
The best we can do is just check that the placeholder\n # node is channels-last\n if node.op in {'placeholder'}:\n self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)\n\n def test_shape_prop_aggregate(self):\n class ReturnTwo(torch.nn.Module):\n def forward(self, x):\n return (3, torch.sum(x))\n\n class UnderTest(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.rt = ReturnTwo()\n\n def forward(self, x):\n return self.rt(x)\n\n ut = UnderTest()\n\n class RTTracer(torch.fx.Tracer):\n def is_leaf_module(self, m, module_qualified_name):\n return type(m) is ReturnTwo\n\n graph = RTTracer().trace(ut)\n mod = torch.fx.GraphModule(ut, graph)\n\n shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))\n\n for node in mod.graph.nodes:\n if node.op == 'call_module':\n assert 'tensor_meta' in node.meta\n tensor_meta = node.meta['tensor_meta']\n assert tensor_meta[0] == 3\n assert tensor_meta[1].shape == torch.Size([])\n\n def test_shape_prop_layout_3d(self):\n class ConvTest3d(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.conv_mod = torch.nn.Conv3d(5, 5, 3)\n\n def forward(self, x):\n return self.conv_mod(x)\n\n test_mod_3d = ConvTest3d()\n traced_3d = symbolic_trace(test_mod_3d)\n x_3d = torch.randn(5, 5, 224, 224, 15)\n shape_prop.ShapeProp(traced_3d).propagate(x_3d)\n assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format\n for node in traced_3d.graph.nodes))\n\n x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)\n traced_3d.to(memory_format=torch.channels_last_3d)\n shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)\n for node in traced_3d.graph.nodes:\n # NB: the implementation of conv may not preserve the memory format,\n # unfortunately. 
The best we can do is just check that the placeholder\n # node is channels-last\n if node.op in {'placeholder'}:\n self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)\n\n def test_interpreter(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param = torch.nn.Parameter(torch.rand(3, 4))\n self.linear = torch.nn.Linear(4, 5)\n\n def forward(self, x):\n return self.linear(x + self.param).clamp(min=0.0, max=1.0)\n\n m = MyModule()\n gm = torch.fx.symbolic_trace(m)\n\n interpreter = Interpreter(gm)\n input = torch.randn(3, 4)\n self.assertEqual(interpreter.run(input), gm(input))\n self.assertEqual(interpreter.run(input), m(input))\n\n def test_interpreter_run_node_override(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param = torch.nn.Parameter(torch.rand(3, 4))\n self.linear = torch.nn.Linear(4, 5)\n\n def forward(self, x):\n return self.linear(x + self.param).clamp(min=0.0, max=1.0)\n\n m = MyModule()\n gm = torch.fx.symbolic_trace(m)\n\n class RunNodeInterpreter(Interpreter):\n def __init__(self, module):\n super().__init__(module)\n\n def run_node(self, n : Node) -> Any:\n result = super().run_node(n)\n n.cached_value = result\n return result\n\n input = torch.randn(3, 4)\n RunNodeInterpreter(gm).run(input)\n for node in gm.graph.nodes:\n assert hasattr(node, 'cached_value')\n\n def test_interpreter_onthefly_swap(self):\n\n def fn(x):\n return torch.sigmoid(x).neg()\n\n gm = torch.fx.symbolic_trace(fn)\n\n class NegSigmSwapInterpreter(Interpreter):\n def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:\n if target == torch.sigmoid:\n return torch.neg(*args, **kwargs)\n return super().call_function(n)\n\n def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:\n if target == 'neg':\n call_self, *args_tail = args\n return call_self.sigmoid(*args_tail, **kwargs)\n return super().call_method(n)\n\n input = torch.randn(3, 4)\n result = NegSigmSwapInterpreter(gm).run(input)\n self.assertEqual(result, torch.neg(input).sigmoid())\n\n def test_interpreter_partial_eval(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param = torch.nn.Parameter(torch.rand(3, 4))\n self.linear = torch.nn.Linear(4, 5)\n\n def forward(self, x):\n return self.linear(x + self.param).clamp(min=0.0, max=1.0)\n\n gm = torch.fx.symbolic_trace(MyModule())\n interp = Interpreter(gm)\n env = {}\n for node in gm.graph.nodes:\n if node.op == 'call_module' and node.target == 'linear':\n env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0\n break\n assert len(env) == 1\n x = torch.randn(3, 4)\n result = interp.run(x, initial_env=env)\n self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))\n\n def test_interpreter_star_args(self):\n def with_star_args(x, *args):\n return x + args[0]\n\n gm = torch.fx.symbolic_trace(with_star_args)\n interp = Interpreter(gm)\n result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))\n self.assertEqual(result, torch.ones(3, 4) * 2.0)\n\n @skipIfNoTorchVision\n def test_interpreter_noop_resnet18(self):\n rn18 = torchvision_models.resnet18()\n transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()\n inp = torch.randn(5, 3, 224, 224)\n self.assertEqual(transformed(inp), rn18(inp))\n\n @skipIfNoTorchVision\n def test_interpreter_gc_values(self):\n rn18 = torchvision_models.resnet18()\n interp = Interpreter(symbolic_trace(rn18))\n inp = 
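# Note on NegSigmSwapInterpreter above: both fallthrough branches reference an
# undefined name `n` (dead code in that test, since the overrides always
# match, but it would raise NameError if reached). A corrected sketch of the
# same dispatch pattern, which applies equally to the Transformer variant
# further down:
import torch
from torch.fx import Interpreter

class SwapSigmoidForNeg(Interpreter):
    def call_function(self, target, args, kwargs):
        if target == torch.sigmoid:
            return torch.neg(*args, **kwargs)
        return super().call_function(target, args, kwargs)  # forward the real arguments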
torch.rand(5, 3, 224, 224)\n out = interp.run(inp)\n env_key_names = set(n.name for n in interp.env.keys())\n self.assertEqual(env_key_names, set(['output']))\n\n def test_interpreter_default_args(self):\n class Model(torch.nn.Module):\n def forward(self, x, y=3.14159):\n return x + y\n\n model = Model()\n gm = torch.fx.symbolic_trace(model)\n\n interp = Interpreter(gm)\n x = torch.randn(5, 3)\n out = interp.run(x)\n torch.testing.assert_allclose(out, x + 3.14159)\n\n def test_interpreter_not_enough_args(self):\n class Model(torch.nn.Module):\n def forward(self, x, y):\n return x + y\n\n model = Model()\n gm = torch.fx.symbolic_trace(model)\n\n interp = Interpreter(gm)\n x = torch.randn(5, 3)\n with self.assertRaisesRegex(RuntimeError,\n 'Expected positional argument for parameter y, but one was not passed in'):\n out = interp.run(x)\n\n def test_transformer_noop(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param = torch.nn.Parameter(torch.rand(3, 4))\n self.linear = torch.nn.Linear(4, 5)\n\n def forward(self, x):\n return self.linear(x + self.param).clamp(min=0.0, max=1.0)\n\n m = MyModule()\n gm = torch.fx.symbolic_trace(m)\n\n new_gm = Transformer(gm).transform()\n\n input = torch.randn(3, 4)\n self.assertEqual(new_gm(input), gm(input))\n\n def test_transformer_op_swap(self):\n\n def fn(x):\n return torch.sigmoid(x).neg()\n\n gm = torch.fx.symbolic_trace(fn)\n\n class NegSigmSwapXformer(Transformer):\n def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:\n if target == torch.sigmoid:\n return torch.neg(*args, **kwargs)\n return super().call_function(n)\n\n def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:\n if target == 'neg':\n call_self, *args_tail = args\n return call_self.sigmoid(*args_tail, **kwargs)\n return super().call_method(n)\n\n transformed = NegSigmSwapXformer(gm).transform()\n input = torch.randn(3, 4)\n self.assertEqual(transformed(input), torch.neg(input).sigmoid())\n\n def test_transformer_multi_outputs(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.param = torch.nn.Parameter(torch.rand(3, 4))\n self.linear = torch.nn.Linear(4, 5)\n\n def forward(self, x):\n x = x + self.param\n out = self.linear(x)\n return x, out\n\n m = MyModule()\n gm = torch.fx.symbolic_trace(m)\n\n new_gm = Transformer(gm).transform()\n\n input = torch.randn(3, 4)\n self.assertEqual(new_gm(input), gm(input))\n\n def test_fn_type_annotations(self):\n class Foo(torch.nn.Module):\n def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:\n return {'a': p.x + p.y + z + i}\n\n foo_scripted = torch.jit.script(Foo())\n foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)\n\n fxed = symbolic_trace(Foo())\n fxed_scripted = torch.jit.script(fxed)\n fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)\n\n def test_fn_type_annotation_empty(self):\n def forward(a : List[torch.Tensor]):\n return a[0]\n torch.jit.script(symbolic_trace(forward))\n\n def test_wrapped_method(self):\n def wrap_with_relu(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n return torch.relu(fn(*args, **kwargs))\n return wrapper\n\n class Foo(torch.nn.Module):\n @wrap_with_relu\n def forward(self, x, w):\n return torch.matmul(x, w)\n\n f = Foo()\n traced = symbolic_trace(f)\n x, w = torch.rand(3, 4), torch.rand(4, 4)\n self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))\n\n def test_empty_graph_codegen(self):\n 
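# Minimal Transformer round-trip, matching test_transformer_noop: transform()
# re-runs the graph through an Interpreter that emits a fresh Graph, so an
# override-free Transformer acts as an identity transformation. Toy module
# only.
import torch
import torch.fx
from torch.fx import Transformer

gm = torch.fx.symbolic_trace(torch.nn.Sequential(torch.nn.ReLU()))
new_gm = Transformer(gm).transform()
x = torch.randn(3)
assert torch.equal(new_gm(x), gm(x))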
graph = torch.fx.Graph()\n gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n self.assertEqual(gm(), None)\n\n def test_sequential(self):\n m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))\n gm = torch.fx.symbolic_trace(m)\n gm_copy = copy.deepcopy(gm)\n\n def test_ctx_mgr(self):\n @contextlib.contextmanager\n def do_nothing():\n yield\n\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @do_nothing()\n def forward(self, x):\n return torch.relu(x)\n\n m = M()\n self.checkGraphModule(m, (torch.rand(3, 4),))\n\n def test_typename_print(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n x : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),\n type_expr=List[float])\n output : torch.fx.Node = graph.output(b)\n\n self.assertTrue('typing.List[float]' in str(graph))\n\n def test_layout(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)\n\n traced = symbolic_trace(M())\n x = torch.rand(5, 9, 3, 4)\n self.assertEqual(traced(x), torch.zeros_like(x))\n\n def test_ellipsis(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x, y):\n return x + y[:, 1:10, ...]\n\n traced = symbolic_trace(M())\n x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)\n self.assertEqual(traced(x, y), x + y[:, 1:10, ...])\n\n def test_inf_nan(self):\n class FooMod(torch.nn.Module):\n def forward(self, x):\n return x + float('inf'), x + float('-inf'), x + float('nan')\n\n fm = FooMod()\n self.checkGraphModule(fm, (torch.rand(3, 4),))\n\n def test_inf_nan_kwds(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n x : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')\n c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')\n graph.output((b, c))\n\n gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n x = torch.rand(3, 4)\n self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))\n\n def test_deepcopy_recursion_depth(self):\n depth = sys.getrecursionlimit() + 20\n\n g = torch.fx.Graph()\n x = g.placeholder('x')\n for i in range(depth):\n x = g.call_function(torch.relu, (x,))\n g.output(x)\n\n copied_graph = copy.deepcopy(g)\n\n val_map = {}\n for orig_node, new_node in zip(g.nodes, copied_graph.nodes):\n val_map[orig_node] = new_node\n\n for orig_node, new_node in zip(g.nodes, copied_graph.nodes):\n orig_users = set(orig_node.users.keys())\n orig_users_equiv = set(val_map[u] for u in orig_users)\n new_users = set(new_node.users.keys())\n self.assertEqual(orig_users_equiv, new_users)\n\n @skipIfNoTorchVision\n def test_replace_uses(self):\n rn18 = torchvision_models.resnet18()\n\n class LowerReluTracer(torch.fx.Tracer):\n def is_leaf_module(self, m : torch.nn.Module, qualname : str):\n if isinstance(m, torch.nn.ReLU):\n return False\n return super().is_leaf_module(m, qualname)\n\n rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))\n\n to_erase = []\n for node in rn18_traced.graph.nodes:\n if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:\n kwargs = node.kwargs.copy()\n # Neg doesn't have in-place\n kwargs.pop('inplace')\n with rn18_traced.graph.inserting_before(node):\n new_node = rn18_traced.graph.call_function(\n 
the_function=torch.neg, args=node.args, kwargs=node.kwargs)\n node.replace_all_uses_with(replace_with=new_node)\n to_erase.append(node)\n\n for node in to_erase:\n rn18_traced.graph.erase_node(node)\n\n\n def test_replace_input(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n x : torch.fx.Node = graph.create_node('placeholder', 'x')\n y : torch.fx.Node = graph.create_node('placeholder', 'y')\n b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))\n output : torch.fx.Node = graph.output(b)\n\n b.replace_input_with(x, y)\n\n gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n\n input_x = torch.randn(33, 44)\n input_y = torch.randn(11, 22)\n self.assertEqual(gm(input_x, input_y), torch.relu(input_y))\n\n def test_insertion_point(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n x : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))\n output : torch.fx.Node = graph.output(b)\n\n with graph.inserting_before(b):\n neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))\n _, *relu_args = b.args\n b.args = (neg, *relu_args)\n\n gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n\n input = torch.randn(33, 44)\n self.assertEqual(gm(input), torch.relu(torch.neg(input)))\n\n def test_update_args_api(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n x : torch.fx.Node = graph.create_node('placeholder', 'x')\n y : torch.fx.Node = graph.create_node('placeholder', 'y')\n b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))\n output : torch.fx.Node = graph.output(b)\n\n orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)\n self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))\n\n\n b.update_arg(0, y)\n new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))\n\n def test_update_kwargs_api(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n x : torch.fx.Node = graph.create_node('placeholder', 'x')\n y : torch.fx.Node = graph.create_node('placeholder', 'y')\n b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})\n output : torch.fx.Node = graph.output(b)\n\n orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)\n self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))\n\n\n b.update_kwarg('input', y)\n new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))\n\n def test_move_before(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n x : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))\n output : torch.fx.Node = graph.output(b)\n\n neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))\n _, *relu_args = b.args\n b.args = (neg, *relu_args)\n b.prepend(neg)\n\n gm = torch.fx.GraphModule(torch.nn.Module(), graph)\n\n input = torch.randn(33, 44)\n self.assertEqual(gm(input), torch.relu(torch.neg(input)))\n\n def test_prepend_self(self):\n graph : torch.fx.Graph = torch.fx.Graph()\n x : torch.fx.Node = graph.create_node('placeholder', 'x')\n b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))\n output : torch.fx.Node = graph.output(b)\n\n b.prepend(b)\n x.append(b)\n self.assertEqual(len(graph.nodes), 
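# The graph-surgery idiom used throughout the tests above, end to end: insert
# a replacement node, rewire its users, then erase the original. (Aside on
# test_replace_uses: it pops 'inplace' into a copied `kwargs` dict but then
# passes `kwargs=node.kwargs`; the copied dict was presumably intended.)
import torch
import torch.fx

g = torch.fx.Graph()
x = g.placeholder('x')
relu = g.call_function(torch.relu, (x,))
g.output(relu)

with g.inserting_before(relu):
    neg = g.call_function(torch.neg, (x,))
relu.replace_all_uses_with(neg)
g.erase_node(relu)      # safe now: relu has no remaining users
g.lint()

gm = torch.fx.GraphModule(torch.nn.Module(), g)
assert torch.equal(gm(torch.tensor([1.0])), torch.tensor([-1.0]))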
3)\n\n def test_erase_node_error(self):\n st = SimpleTest()\n traced = symbolic_trace(st)\n\n for node in traced.graph.nodes:\n # Test deleting with uses both in another Node and at the output\n if node.target in [operator.add, torch.relu]:\n with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):\n traced.graph.erase_node(node)\n\n def test_copy_it(self):\n d = immutable_dict([(3, 4), (5, 6)])\n l = immutable_list([(3, 4), (5, 6)])\n\n self.assertEqual(d, deepcopy(d))\n self.assertEqual(l, deepcopy(l))\n\n def test_get_torch_func_signature(self):\n for key in dir(torch):\n obj = getattr(torch, key)\n if callable(obj):\n schemas = get_signature_for_torch_op(obj)\n\n def test_find_uses(self):\n graph = torch.fx.Graph()\n x = torch.fx.Proxy(graph.placeholder('x'))\n\n y = torch.relu(x)\n z = x + x\n u = torch.neg(x)\n graph.output((y + z + u).node)\n graph.lint()\n\n users_of_x = x.node.users\n self.assertEqual(len(users_of_x), 3)\n expected_ops = set(['relu', 'add', 'neg'])\n for use in users_of_x:\n assert any(use.name.startswith(prefix) for prefix in expected_ops)\n\n def test_inline_graph(self):\n class InlineInto(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x)\n\n class ToInline(torch.nn.Module):\n def forward(self, x):\n return torch.neg(x)\n\n inline_into = symbolic_trace(InlineInto())\n to_inline = symbolic_trace(ToInline())\n\n combined_graph = torch.fx.Graph()\n output_node = combined_graph.graph_copy(inline_into.graph, {})\n\n input_node = list(to_inline.graph.nodes)[0]\n assert input_node and input_node.op == 'placeholder'\n\n val_map = {input_node : output_node}\n output = combined_graph.graph_copy(to_inline.graph, val_map)\n combined_graph.output(output)\n\n combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)\n\n input = torch.rand(3, 4)\n self.assertEqual(combined_module(input), input.relu().neg())\n\n def test_multi_insert_point(self):\n graph = torch.fx.Graph()\n x = torch.fx.Proxy(graph.placeholder('x'))\n relu = torch.relu(x)\n\n with graph.inserting_before(relu.node):\n y = torch.neg(x)\n z = torch.tanh(y)\n\n graph.output((relu.node, z.node))\n graph.lint()\n\n expected_ops = ['x', 'neg', 'tanh', 'relu']\n for node, expected in zip(graph.nodes, expected_ops):\n assert expected in node.name\n\n def test_reassign_args_kwargs_uses(self):\n graph = torch.fx.Graph()\n x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))\n z = x + y\n zed = z + z + z\n graph.output(zed.node)\n graph.lint()\n\n # zed = z + z + z -> zed = z + z + x\n zed.node.args = (zed.node.args[0], x.node)\n self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])\n\n # z = x + y -> z = y + y\n z.node.args = (y.node, y.node)\n self.assertEqual(list(x.node.users.keys()), [zed.node])\n\n def test_trace_function(self):\n def foo(x, y):\n return torch.relu(x) + y\n\n x, y = torch.randn(3, 4), torch.randn(3, 4)\n self.checkGraphModule(foo, (x, y))\n\n def test_trace_dict_int_keys(self):\n class ModWithDictArg(torch.nn.Module):\n def forward(self, d : Dict[int, torch.Tensor]):\n return d[42]\n\n class CallsModWithDict(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.m = ModWithDictArg()\n\n def forward(self, x):\n return self.m({42: x})\n\n class MyTracer(torch.fx.Tracer):\n def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:\n return isinstance(m, ModWithDictArg)\n\n traced_graph = MyTracer().trace(CallsModWithDict())\n\n def test_trace_dict_proxy_keys(self):\n class 
ModWithDictArg(torch.nn.Module):\n def forward(self, d : Dict[torch.Tensor, torch.Tensor]):\n return d[42]\n\n class CallsModWithDict(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.m = ModWithDictArg()\n\n def forward(self, x):\n return self.m({x: x})\n\n class MyTracer(torch.fx.Tracer):\n def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:\n return isinstance(m, ModWithDictArg)\n\n with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):\n traced_graph = MyTracer().trace(CallsModWithDict())\n\n def test_module_deepcopy_edit_nodes(self):\n class Foo(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x)\n\n traced1 = symbolic_trace(Foo())\n copied = copy.deepcopy(traced1)\n\n for node in copied.graph.nodes:\n if node.target == torch.relu:\n node.target = torch.neg\n\n copied.recompile()\n traced1.recompile()\n\n x = torch.randn(15, 15)\n torch.testing.assert_allclose(traced1(x), torch.relu(x))\n torch.testing.assert_allclose(copied(x), torch.neg(x))\n\n def test_direct_param_use(self):\n class TransposeTest(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.b = torch.nn.Parameter(torch.rand(4, 3))\n\n def forward(self, x):\n return self.b\n\n class Foo(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.a = TransposeTest()\n\n def forward(self, x):\n return self.a.b, self.a.b.t(), self.a.b.view(12)\n\n traced = torch.fx.symbolic_trace(Foo())\n assert(all('constant' not in node.target for node in traced.graph.nodes))\n\n def test_single_default_arg(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, y=1):\n return y\n\n m = M()\n self.checkGraphModule(m, ())\n self.checkGraphModule(m, (3,))\n\n def test_multiple_default_args(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, y=1, z=2):\n return y + z\n\n m = M()\n self.checkGraphModule(m, ())\n self.checkGraphModule(m, (3,))\n self.checkGraphModule(m, (3, 4))\n\n def test_regular_and_default_args(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x, y=1):\n return x + y\n\n m = M()\n self.checkGraphModule(m, (2,))\n self.checkGraphModule(m, (2, 3))\n\n def test_string_literal_return(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self):\n return \"foo\"\n\n m = M()\n self.checkGraphModule(m, ())\n\n def test_namedtuple_return_qualname(self):\n class NamedTupReturn(torch.nn.Module):\n def forward(self, x):\n return MyNamedTup(x, x)\n\n traced = symbolic_trace(NamedTupReturn())\n input = torch.rand(3, 4)\n self.assertEqual(traced(input), MyNamedTup(input, input))\n\n def test_update_args_kwargs_yells_at_you(self):\n symtraced = symbolic_trace(SimpleTest())\n node = next(iter(symtraced.graph.nodes))\n with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):\n node.__update_args_kwargs((), {})\n\n def test_torchbind_class_attribute_in_fx(self):\n if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:\n self.skipTest(\"torch.classes._TorchScriptTesting._StackString is registered, skipping\")\n\n class FooBar1234(torch.nn.Module):\n def __init__(self):\n super(FooBar1234, self).__init__()\n self.f = torch.classes._TorchScriptTesting._StackString([\"3\", \"4\"])\n\n def forward(self):\n return self.f.top()\n\n m = FooBar1234()\n self.checkGraphModule(m, ())\n\n def test_torchbind_class_attribute_in_fx_tensor_arg(self):\n if TEST_WITH_ROCM 
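# The leaf-module pattern behind the dict-argument tests above: marking a
# submodule as a leaf keeps it as one call_module node, so argument types the
# tracer cannot proxy through (dict lookups, custom objects) are never traced
# into. All names below are illustrative.
import torch
import torch.fx

class Inner(torch.nn.Module):
    def forward(self, d):
        return d[42]

class Outer(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.inner = Inner()

    def forward(self, x):
        return self.inner({42: x})

class LeafTracer(torch.fx.Tracer):
    def is_leaf_module(self, m, qualname):
        return isinstance(m, Inner) or super().is_leaf_module(m, qualname)

root = Outer()
gm = torch.fx.GraphModule(root, LeafTracer().trace(root))
assert torch.equal(gm(torch.tensor(1.0)), torch.tensor(1.0))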
or IS_FBCODE or IS_WINDOWS or IS_MACOS:\n self.skipTest(\"torch.classes._TorchScriptTesting._ReLUClass is registered, skipping\")\n\n class FooBar2341(torch.nn.Module):\n def __init__(self):\n super(FooBar2341, self).__init__()\n self.f = torch.classes._TorchScriptTesting._ReLUClass()\n\n def forward(self, x):\n return self.f.run(x)\n\n m = FooBar2341()\n\n traced = symbolic_trace(m)\n input = torch.randn(3, 4)\n self.assertEqual(traced(input), m(input))\n\n self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))\n\n def test_script_method_trace(self):\n class Scripted(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x)\n\n class Holder(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.s = torch.jit.script(Scripted())\n\n def forward(self, x):\n return self.s(x)\n\n h = Holder()\n traced = symbolic_trace(h)\n input = torch.randn(3, 4)\n self.assertEqual(traced(input), h(input))\n\n self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))\n\n def test_namedtuple_return_trace(self):\n class NamedTupReturn(torch.nn.Module):\n def forward(self, x):\n return Pair(x, x)\n\n traced = symbolic_trace(NamedTupReturn())\n input = torch.rand(3, 4)\n self.assertEqual(traced(input), Pair(input, input))\n\n def test_return_type_exists(self):\n class ReturnTypeModule(torch.nn.Module):\n def other(self, x: List[str]) -> List[str]:\n return x\n\n def forward(self, x: List[str]) -> List[str]:\n return self.other(x)\n\n traced = symbolic_trace(ReturnTypeModule())\n self.assertIn(\"-> typing_List[str]\", traced._code)\n scripted = torch.jit.script(traced)\n self.assertIn(\"-> List[str]\", scripted.code)\n\n def getitem_inner(self):\n class GetItemBase(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.register_buffer('pe', torch.randn(8, 8))\n\n class GetItem1(GetItemBase):\n def forward(self, x):\n return self.pe[:, :x.size(0)]\n\n class GetItem2(GetItemBase):\n def forward(self, x):\n return self.pe[x.size(0)]\n\n class GetItem3(GetItemBase):\n def forward(self, x):\n return self.pe[4] # fx creates `self._tensor_constant0` here\n\n self.checkGraphModule(GetItem1(), [torch.zeros(4)])\n self.checkGraphModule(GetItem2(), [torch.zeros(4)])\n self.checkGraphModule(GetItem3(), [torch.zeros(4)])\n\n @unittest.skipUnless(os.environ.get(\"FX_PATCH_GETITEM\") == \"1\",\n \"Will be checked in test_getitem_subproc\")\n def test_getitem(self):\n self.getitem_inner()\n\n def test_getitem_subproc(self):\n # need to run this test in a subproc to work around:\n # https://github.com/pytorch/pytorch/issues/50710\n proc = Process(target=run_getitem_target)\n proc.start()\n proc.join()\n self.assertEqual(proc.exitcode, 0)\n\n\n def test_user_friendly_call_provenance_with_function(self):\n def fn(x):\n return wrapper_fn(x)\n\n traced = torch.fx.symbolic_trace(fn)\n\n with self.assertRaisesRegex(RuntimeError, \"'wrapper_fn' is \"\n \"being compiled since it was called\"\n \" from 'fn.forward'\"):\n scripted = torch.jit.script(traced)\n\n def test_user_friendly_call_provenance_with_module(self):\n class M(torch.nn.Module):\n def forward(self, x):\n return wrapper_fn(x)\n\n traced = torch.fx.symbolic_trace(M())\n\n with self.assertRaisesRegex(RuntimeError, \"'wrapper_fn' is \"\n \"being compiled since it was called\"\n \" from 'M.forward'\"):\n scripted = torch.jit.script(traced)\n\n def test_snake_case(self):\n class M(torch.nn.Module):\n def __init__(self):\n super(M, self).__init__()\n self.activations = torch.nn.ModuleDict([\n [\"snake_case\", 
torch.nn.ReLU()],\n [\"PascalCase\", torch.nn.LeakyReLU()],\n [\"ALL_CAPS\", torch.nn.PReLU()]\n ])\n\n def forward(self, x):\n a = self.activations[\"snake_case\"](x)\n b = self.activations[\"PascalCase\"](x)\n c = self.activations[\"ALL_CAPS\"](x)\n return a, b, c\n\n traced = symbolic_trace(M())\n\n check = [\n (\"activations_snake_case\", \"activations.snake_case\"),\n (\"activations_pascal_case\", \"activations.PascalCase\"),\n (\"activations_all_caps\", \"activations.ALL_CAPS\")\n ]\n\n i = 0\n for node in traced.graph.nodes:\n if node.op == \"placeholder\" or node.op == \"output\":\n continue\n name = check[i][0]\n target = check[i][1]\n self.assertEqual(name, node.name)\n self.assertEqual(target, node.target)\n i += 1\n self.assertEqual(i, 3)\n\n def test_no_mutation(self):\n from torch.fx.immutable_collections import immutable_list\n x = immutable_list([3, 4])\n with self.assertRaisesRegex(NotImplementedError, \"new_args\"):\n x[0] = 4\n\n def test_partial_trace(self):\n class Foo(torch.nn.Module):\n def forward(self, x, y):\n if y:\n return 2 * x\n else:\n return x\n mod = Foo()\n mod_true = symbolic_trace(mod, concrete_args={'y': True})\n mod_false = symbolic_trace(mod, concrete_args={'y': False})\n self.assertEqual(mod_true(3, True), 6)\n print(mod_true.code)\n assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))\n with self.assertRaises(AssertionError):\n mod_true(3, False)\n self.assertEqual(mod_false(3, False), 3)\n with self.assertRaises(AssertionError):\n mod_false(3, True)\n\n def f_higher(a, f):\n return f(a)\n\n nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})\n self.assertEqual(nf(3, lambda x: x * 2), 6)\n\n def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):\n class M(torch.nn.Module):\n def __init__(self):\n super(M, self).__init__()\n self.W = torch.nn.Parameter(torch.randn(5))\n\n def forward(self, x):\n return torch.dot(self.W, x)\n\n traced = torch.fx.symbolic_trace(M())\n\n out = [n for n in traced.graph.nodes if n.op == \"output\"][-1]\n with traced.graph.inserting_before(out):\n relu_out = traced.graph.call_method(method_name='relu',\n args=(out.args[0],))\n out.args = (relu_out,)\n\n traced.recompile()\n\n with self.capture_stderr() as captured:\n with self.assertRaises(TypeError):\n traced(5)\n\n self.assertRegex(captured[0],\n r\"Call using an FX-traced Module, line .* of the \"\n r\"traced Module's generated forward function:\")\n\n def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(3, 4)\n\n def forward(self, x):\n return self.linear(x)\n\n traced = torch.fx.symbolic_trace(M())\n\n # Do not change this to `capture_stderr` or another context\n # manager without ensuring that the output is as expected\n try:\n traced(torch.rand(5, 5))\n except RuntimeError:\n captured = traceback.format_exc()\n\n self.assertNotRegex(captured,\n r\"Call using an FX-traced Module, line .* of the \"\n r\"traced Module's generated forward function:\")\n\n def test_graph_module_replicate_for_dp(self):\n class Foo(torch.nn.Module):\n def forward(self, x):\n return torch.relu(x)\n\n gm = torch.fx.symbolic_trace(Foo())\n\n x = torch.randn(5, 3)\n out = gm(x)\n\n replica = gm._replicate_for_data_parallel()\n out_replica = replica(x)\n\n torch.testing.assert_allclose(out_replica, out)\n\n def test_ast_rewriter_rewrites_assert(self):\n class M(torch.nn.Module):\n def forward(self, x: 
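# concrete_args in one line, as exercised by test_partial_trace above: fixing
# an argument at trace time burns the chosen branch into the graph and emits
# a torch._assert guard for the specialized value. `f` is illustrative.
import torch
import torch.fx

def f(x, flag):
    return 2 * x if flag else x

gm_true = torch.fx.symbolic_trace(f, concrete_args={'flag': True})
assert gm_true(torch.tensor(3.0), True) == torch.tensor(6.0)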
torch.Tensor, y: int, z: int):\n assert y == z\n return torch.add(x, x)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(M())\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n traced.graph.lint()\n\n def test_ast_rewriter_rewrites_assert_with_message(self):\n class M(torch.nn.Module):\n def forward(self, x: torch.Tensor, y: int, z: int):\n assert y == z, \"msg\"\n return torch.add(x, x)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(M())\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n traced.graph.lint()\n\n def test_throw_out_variant(self):\n def foo(x):\n y = torch.rand_like(x)\n torch.sigmoid(x, out=y)\n return y\n\n class MyTracer(torch.fx.Tracer):\n check_mutable_operations = True\n\n tracer = MyTracer()\n with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):\n traced_graph = tracer.trace(foo)\n\n def test_ast_rewriter_reassigns_submodules(self):\n class M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.bn = torch.nn.BatchNorm2d(100)\n\n def forward(self, x: torch.Tensor):\n return torch.add(x, x)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(M())\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n traced.graph.lint()\n\n def test_ast_rewriter_wrap(self):\n self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))\n\n def to_trace(y):\n return (\n a_lifted_leaf((4, y), 3)\n + a_lifted_leaf((3, 4), 5)\n + a_lifted_leaf((y, y), y)\n )\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(to_trace)\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n self.assertIn(\"a_lifted_leaf\", traced.code)\n self.assertEqual(27, traced(2))\n self.assertIs(a_lifted_leaf, real_a_lifed_leaf)\n\n def test_ast_rewriter_wrap_fn_directly(self):\n self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))\n\n def to_trace(y):\n return (\n a_lifted_leaf2((4, y), 3)\n + a_lifted_leaf2((3, 4), 5)\n + a_lifted_leaf2((y, y), y)\n )\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(to_trace)\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n self.assertIn(\"a_lifted_leaf2\", traced.code)\n self.assertEqual(27, traced(2))\n self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)\n\n def test_profiler_ranges_side_effect(self):\n g = torch.fx.Graph()\n handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))\n g.call_function(torch.ops.profiler._record_function_exit, (handle,))\n g.output(None)\n\n found_targets = {}\n for node in g.nodes:\n if node.op == 'call_function':\n found_targets.setdefault(node.target)\n self.assertEqual(\n list(found_targets.keys()),\n [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]\n )\n\n g.eliminate_dead_code()\n found_targets = {}\n for node in g.nodes:\n if node.op == 'call_function':\n found_targets.setdefault(node.target)\n self.assertEqual(\n list(found_targets.keys()),\n [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]\n )\n\n def test_ast_rewriter_wrapped_via_decorator(self):\n class F(torch.nn.Module):\n def forward(self, x):\n return wrapped_via_decorator(x)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(F())\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n self.assertIn(\"wrapped_via_decorator\", traced.code)\n self.assertEqual(traced(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, 
\"__fx_already_patched\"))\n\n def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):\n self.assertEqual(wrapped_via_decorator(0), 1)\n\n def to_trace(y):\n return wrapped_via_decorator(y)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(to_trace)\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n self.assertIn(\"wrapped_via_decorator\", traced.code)\n self.assertEqual(traced(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, \"__fx_already_patched\"))\n\n transformed = torch.fx.Transformer(traced).transform()\n self.assertIn(\"wrapped_via_decorator\", transformed.code)\n self.assertEqual(transformed(0), 1)\n self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)\n self.assertFalse(hasattr(wrapped_via_decorator, \"__fx_already_patched\"))\n\n def test_ast_rewriter_wrap_with_submodule(self):\n class M(torch.nn.Module):\n def __init__(self):\n super(M, self).__init__()\n self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)\n\n def forward(self, x: torch.Tensor):\n return wrapped_with_submodule(x, self.batchnorm1d)\n\n ast_rewriter = RewritingTracer()\n graph = ast_rewriter.trace(M())\n traced = GraphModule(ast_rewriter.root, graph, \"gm\")\n\n self.assertIn(\"wrapped_with_submodule\", traced.code)\n\n input = torch.rand(3, 2)\n ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)\n self.assertEqual(ref_batchnorm1d(input), traced(input))\n\n def test_submodule_manipulation_API(self):\n class C(torch.nn.Module):\n def __init__(self):\n super(C, self).__init__()\n self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)\n self.param = torch.nn.Parameter(torch.rand(2, 3))\n\n def forward(self, x):\n return self.conv(torch.cat([self.param, x]))\n\n class B(torch.nn.Module):\n def __init__(self):\n super(B, self).__init__()\n self.linear = torch.nn.Linear(100, 200)\n self.register_buffer(\"buf\", torch.randn(2, 3))\n self.net_c = C()\n\n def forward(self, x):\n return self.linear(torch.cat([self.buf, self.net_c(x)]))\n\n class A(torch.nn.Module):\n def __init__(self):\n super(A, self).__init__()\n self.net_b = B()\n self.param = torch.nn.Parameter(torch.rand(2, 3))\n\n def forward(self, x):\n return self.net_b(x) + self.param\n\n a = symbolic_trace(A())\n\n a.add_submodule(\"net_b.net_c.dropout\", torch.nn.Dropout(p=0.2))\n\n conv = [n for n in a.graph.nodes if n.target == \"net_b.net_c.conv\"][-1]\n with a.graph.inserting_before(conv):\n with warnings.catch_warnings(record=True) as w:\n dropout = a.graph.call_module(module_name=\"net_b.net_c.dropout\",\n args=conv.args)\n self.assertEqual(len(w), 0)\n\n conv.replace_all_uses_with(dropout)\n a.graph.erase_node(conv)\n a.recompile()\n\n def module_exists(gm: GraphModule, path: str) -> bool:\n return any(path == name for name, _ in gm.named_modules())\n\n def parameter_exists(gm: GraphModule, path: str) -> bool:\n return (any(path == name for name, _ in gm.named_parameters())\n and any(path == name for name in gm.state_dict().keys()))\n\n def buffer_exists(gm: GraphModule, path: str) -> bool:\n return (any(path == name for name, _ in gm.named_buffers())\n and any(path == name for name in gm.state_dict().keys()))\n\n # Test that we added the \"dropout\" submodule\n self.assertTrue(module_exists(a, \"net_b.net_c.dropout\"))\n\n # Test `get_submodule` with an added submodule\n self.assertIsNotNone(a.get_submodule(\"net_b.net_c.dropout\"))\n\n # Test that the \"conv\" submodule is still there\n self.assertTrue(module_exists(a, 
\"net_b.net_c.conv\"))\n\n # Test `get_submodule` with an original module\n self.assertIsNotNone(a.get_submodule(\"net_b.net_c.conv\"))\n\n # Test that the \"conv\" node is NOT still there\n conv = [n for n in a.graph.nodes if n.target == \"net_b.net_c.conv\"]\n self.assertEqual(conv, [])\n\n a.delete_submodule(\"net_b.net_c.conv\")\n\n # Test that the \"conv\" submodule is now gone\n self.assertFalse(module_exists(a, \"net_b.net_c.conv\"))\n\n # Test `get_submodule` with a deleted submodule\n with self.assertRaisesRegex(AttributeError, \"has no attribute \"\n \"`conv`\"):\n self.assertIsNone(a.get_submodule(\"net_b.net_c.conv\"))\n\n # Test `get_attr` warnings\n cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]\n\n with a.graph.inserting_before(cat):\n\n with warnings.catch_warnings(record=True) as w:\n param = a.graph.get_attr(qualified_name=\"net_b.net_c.param\")\n self.assertEqual(len(w), 0)\n\n with self.assertWarnsRegex(UserWarning, \"Attempted to \"\n \"insert a get_attr Node with no \"\n \"underlying reference in the \"\n \"owning GraphModule\"):\n bad_param = a.graph.get_attr(qualified_name=\"net_b.param\")\n a.graph.erase_node(bad_param)\n\n cat.args = (*cat.args, param)\n\n a.recompile()\n\n a.graph.lint()\n\n # Test `get_parameter`\n a.get_parameter(\"net_b.net_c.param\")\n with self.assertRaisesRegex(AttributeError, \"is not an \"\n \"nn.Parameter\"):\n a.get_parameter(\"net_b.buf\")\n with self.assertRaisesRegex(AttributeError, \"has no attribute \"\n \"`param`\"):\n a.get_parameter(\"net_b.param\")\n\n # Test `get_buffer`\n a.get_buffer(\"net_b.buf\")\n with self.assertRaisesRegex(AttributeError, \"is not a \"\n \"buffer\"):\n a.get_buffer(\"net_b.net_c.param\")\n with self.assertRaisesRegex(AttributeError, \"has no attribute \"\n \"`buf`\"):\n a.get_buffer(\"net_b.net_c.buf\")\n\n # Test non-nested attributes\n a.get_submodule(\"\")\n a.get_parameter(\"param\")\n\n # Insert some unused submodules\n a.add_submodule(\"net_b.embedding\", torch.nn.Embedding(10, 3))\n a.add_submodule(\"net_b.net_c.embedding\", torch.nn.Embedding(10, 3))\n a.add_submodule(\"net_b.net_c.rnn\", torch.nn.RNN(10, 20, 2))\n a.add_submodule(\"batch_norm_2d\", torch.nn.BatchNorm2d(100))\n\n # Garbage collection\n a.delete_all_unused_submodules()\n\n # Test that all the unused submodules are gone\n self.assertFalse(module_exists(a, \"net_b.embedding\"))\n self.assertFalse(module_exists(a, \"net_b.net_c.embedding\"))\n self.assertFalse(module_exists(a, \"net_b.net_c.rnn\"))\n self.assertFalse(module_exists(a, \"batch_norm_2d\"))\n\n # Test that we didn't delete any unused Parameters or buffers\n self.assertTrue(parameter_exists(a, \"net_b.net_c.param\"))\n self.assertTrue(buffer_exists(a, \"net_b.buf\"))\n\n a.graph.lint()\n\n def test_delete_unused_submodules_leaf(self):\n class SubModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(10, 10)\n self.relu = torch.nn.ReLU()\n\n def forward(self, x):\n x = self.linear(x)\n x = self.relu(x)\n return x\n\n class Model(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.submod = SubModule()\n\n def forward(self, x):\n x = self.submod(x)\n return x\n\n model = Model()\n\n class MyCustomTracer(torch.fx.Tracer):\n def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:\n return module_qualified_name == \"submod\"\n\n inputs = torch.randn(1, 10)\n traced_graph = MyCustomTracer().trace(model)\n gm2 = torch.fx.GraphModule(model, traced_graph)\n 
gm2.delete_all_unused_submodules()\n torch.testing.assert_allclose(gm2(inputs), model(inputs))\n\n def test_tracing_graphmodules_as_leaf_submodules(self):\n class A(torch.nn.Module):\n def forward(self, t):\n return t + t\n\n class B(torch.nn.Module):\n def __init__(self):\n super(type(self), self).__init__()\n self.calling = False\n self.called = False\n\n def forward(self, t):\n if self.calling:\n return t - t\n else:\n return t + t\n\n def __call__(self, *args):\n self.called = True\n self.calling = True\n return super(type(self), self).__call__(*args)\n self.calling = False\n\n class M(torch.nn.Module):\n def __init__(self, a, b):\n super().__init__()\n self.a = a\n self.b = b\n\n def forward(self, t):\n x = self.a(t)\n y = self.b(t)\n return x + y\n\n class LeafTracer(Tracer):\n def is_leaf_module(self, module, name):\n return True\n\n class LeafTracerNotB(Tracer):\n def is_leaf_module(self, module, name):\n return False if \"b\" in name else True\n\n # Recompile calls added \"for fun\", since they\n # chain __call__ wrappers.\n\n #\n # Test: B as a regular, non-leaf module\n #\n a = symbolic_trace(A())\n a.recompile()\n m = M(a, B())\n graph = LeafTracerNotB().trace(m)\n gm = GraphModule(m, graph)\n gm.recompile()\n\n # Test graphmodule/submodule a is not inlined.\n self.assertTrue(isinstance(gm.get_submodule(\"a\"), GraphModule))\n match = [n for n in gm.graph.nodes if n.op == \"call_module\" and n.target == \"a\"]\n self.assertTrue(len(match) == 1)\n\n # Test submodule b is not treated as leaf.\n self.assertFalse(hasattr(gm, \"b\"))\n\n # Test assert custom __call__ on submodule b was honored.\n match = [\n n\n for n in gm.graph.nodes\n if n.op == \"call_function\" and n.target == operator.sub\n ]\n self.assertTrue(len(match) == 1)\n\n #\n # Test: B as a regular, leaf module\n # symbolic_trace should only patch torch.nn.Module.__call__,\n # which means B.__call__ should still execute\n #\n a = symbolic_trace(A())\n a.recompile()\n b = B()\n m = M(a, b)\n graph = LeafTracer().trace(m)\n gm = GraphModule(m, graph)\n gm.recompile()\n\n # Test graphmodule/submodule a is not inlined.\n self.assertTrue(isinstance(gm.get_submodule(\"a\"), GraphModule))\n match = [n for n in gm.graph.nodes if n.op == \"call_module\" and n.target == \"a\"]\n self.assertTrue(len(match) == 1)\n\n # Test submodule b is leaf:\n self.assertTrue(isinstance(gm.get_submodule(\"b\"), torch.nn.Module))\n match = [n for n in gm.graph.nodes if n.op == \"call_module\" and n.target == \"b\"]\n self.assertTrue(len(match) == 1)\n\n # Test b.__call__ was run\n self.assertTrue(b.called)\n self.assertTrue(gm.get_submodule(\"b\").called)\n\n #\n # Test: B as GraphModule leaf\n # __call__ not honored since symbolic_trace directly invokes forward()\n #\n a = symbolic_trace(A())\n a.recompile()\n b = symbolic_trace(B())\n b.recompile()\n m = M(a, b)\n graph = LeafTracer().trace(m)\n gm = GraphModule(m, graph)\n gm.recompile()\n\n self.assertTrue(isinstance(gm.get_submodule(\"a\"), GraphModule))\n match = [n for n in gm.graph.nodes if n.op == \"call_module\" and n.target == \"a\"]\n self.assertTrue(len(match) == 1)\n\n self.assertTrue(isinstance(gm.get_submodule(\"b\"), torch.nn.Module))\n match = [n for n in gm.graph.nodes if n.op == \"call_module\" and n.target == \"b\"]\n self.assertTrue(len(match) == 1)\n\n def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.register_buffer(\"my_buff\", torch.rand(3, 4))\n 
self.register_parameter(\n \"my_param\", torch.nn.Parameter(torch.rand(3, 4))\n )\n\n def forward(self, x):\n return x + self.my_buff + self.my_param\n\n mod = MyModule()\n mod_traced = symbolic_trace(mod)\n\n # Create new GraphModule based on original, either w/ dict or root module.\n orig_buff = mod_traced.get_buffer(\"my_buff\")\n orig_param = mod_traced.get_parameter(\"my_param\")\n mod_traced_new = GraphModule(\n {\"my_buff\": orig_buff, \"my_param\": orig_param} if use_dict_init else mod,\n mod_traced.graph,\n )\n\n # Check that both my_buff and my_param are found and the same.\n try:\n new_buff = mod_traced_new.get_buffer(\"my_buff\")\n except Exception:\n self.fail(\"Did not find my_buff\")\n self.assertEqual(orig_buff, new_buff)\n\n try:\n new_param = mod_traced_new.get_parameter(\"my_param\")\n except Exception:\n self.fail(\"Did not find my_param\")\n self.assertEqual(orig_param, new_param)\n\n x = torch.rand(3, 4)\n orig_out = mod_traced(x)\n submodules_out = mod_traced_new(x)\n\n self.assertEqual(orig_out, submodules_out)\n\n def test_graph_module_init_buffer_param_copied_dict_init(self):\n self._test_graph_module_init_buffer_param_copied(use_dict_init=True)\n\n def test_graph_module_init_buffer_param_copied_mod_init(self):\n self._test_graph_module_init_buffer_param_copied(use_dict_init=False)\n\n def test_annotations_with_no_forward_references(self):\n class A:\n def __call__(self, x: torch.Tensor):\n return torch.add(x, x)\n\n class M(torch.nn.Module):\n def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:\n return a(x)\n\n self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)\n\n def test_annotations_with_forward_references(self):\n class A:\n def __call__(self, x: torch.Tensor):\n return torch.add(x, x)\n\n class M(torch.nn.Module):\n def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':\n return a(x)\n\n self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)\n\n def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):\n class A:\n def __call__(self, x: torch.Tensor):\n return torch.add(x, x)\n\n class M(torch.nn.Module):\n def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:\n return a(x[0])\n\n self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)\n\n def test_annotations_with_non_torch_reference_and_internal_forward_references(self):\n class A:\n def __call__(self, x: torch.Tensor):\n return torch.add(x, x)\n\n class M(torch.nn.Module):\n def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':\n return a(x)[0]\n\n self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)\n\n @unittest.skipIf(sys.version_info < (3, 7), \"`__future__` feature \"\n \"`annotations` is not defined in Python <3.7\")\n def test_annotation_with_future(self):\n try:\n import fx.test_future # noqa: F401\n finally:\n del sys.modules[\"__future__\"]\n\n def test_annotations_empty_tuple(self):\n class Foo(torch.nn.Module):\n def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):\n return \"foo\"\n\n traced = torch.fx.symbolic_trace(Foo())\n\n x = ()\n y = (\"bar\", ())\n\n traced(x, y)\n\n FileCheck().check(\"_Tuple[()]\") \\\n .check(\"typing_Tuple[str,typing_Tuple[()]]\") \\\n .run(traced.code)\n\n scripted = torch.jit.script(traced)\n\n scripted(x, y)\n\n FileCheck().check(\"Tuple[()]\") \\\n .check(\"Tuple[str, Tuple[()]]\") \\\n .run(scripted.code)\n\n @unittest.skipIf(IS_WINDOWS, \"Python Windows bug? 
https://bugs.python.org/issue45108\")\n def test_assert(self):\n def f(x):\n assert x > 1\n return x + 1\n try:\n torch.fx.proxy.TracerBase.trace_asserts = True\n traced = symbolic_trace(f)\n finally:\n torch.fx.proxy.TracerBase.trace_asserts = False\n\n self.assertEqual(f(2), traced(2))\n with self.assertRaises(AssertionError):\n traced(0)\n\n def test_pytree(self):\n def f_sum(x):\n return sum(x)\n\n def f_sum_dict(x):\n out = 0\n for k, v in x.items():\n out += v\n return out\n\n def f_dict_list_map(x):\n new_dict = {}\n for k, v in x.items():\n new_dict[k] = [i + 1 for i in v]\n return new_dict\n\n def f_dict_add(x):\n return x['a'] + sum(x['z'])\n\n def f_namedtuple_add(x):\n return x.x + x.y\n\n pytree._register_pytree_node(\n Foo,\n lambda x: ([x.a, x.b], None),\n lambda x, _: Foo(x[0], x[1]),\n )\n fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])\n\n def f_custom(x):\n return x.a + x.b\n\n def f_custom_dict(x):\n return f_sum_dict(x.a) + x.b\n\n def f_return_custom(x):\n return Foo(x.b, x.a)\n\n tests = [\n (f_sum, [PH, PH, PH]),\n (f_sum, []),\n (f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),\n (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),\n (f_dict_list_map, {5: (PH, PH, PH)}),\n (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),\n (f_dict_add, {'a': PH, 'z': []}),\n (f_custom, Foo(PH, PH)),\n (f_custom, Foo(PH, 3)),\n (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),\n # (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees\n (f_namedtuple_add, Point(PH, PH)),\n ]\n\n def verify_pytree(f, inp):\n val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)\n num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])\n orig_out = f(val)\n nf = symbolic_trace(f, concrete_args={'x': inp})\n self.assertEqual(nf(val), orig_out)\n\n bare_fx = GraphModule({}, copy.deepcopy(nf.graph))\n bare_fx.graph.set_codegen(CodeGen())\n bare_fx.recompile()\n self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)\n\n assert num_flat_args == 0 or \"tree_flatten_spec\" in nf.code\n assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)\n\n nf = symbolic_trace(nf)\n self.assertEqual(nf(val), orig_out)\n assert \"tree_flatten_spec\" not in nf.code\n assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)\n\n nf = symbolic_trace(nf, concrete_args={'x': inp})\n self.assertEqual(nf(val), orig_out)\n assert num_flat_args == 0 or \"tree_flatten_spec\" in nf.code\n assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)\n\n pickled = pickle.dumps(nf)\n nf = pickle.loads(pickled)\n self.assertEqual(nf(val), orig_out)\n\n for f, inp in tests:\n verify_pytree(f, inp)\n\n def test_pytree_concrete(self):\n def f(b, a):\n if b:\n return a['a']\n else:\n return a['z']\n\n inp = {'a': {'a': PH, 'z': PH}, 'b': True}\n nf = symbolic_trace(f, concrete_args=inp)\n val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)\n self.assertEqual(nf(**val), f(**val))\n\n nf = symbolic_trace(nf)\n self.assertEqual(nf(**val), f(**val))\n\n def test_custom_codegen(self):\n class ListCodeGen(CodeGen):\n def gen_fn_def(self, free_vars, maybe_return_annotation):\n lst_unpack = f\"\"\"\ndef forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:\n {', '.join(free_vars)} = args_list\"\"\"\n return lst_unpack\n\n def additional_globals(self):\n return [('List', typing.List)]\n\n def process_inputs(self, *inputs):\n assert(len(inputs) == 1)\n return inputs[0]\n\n def 
f(a, b):\n return a + b\n\n nf = symbolic_trace(f)\n vals = [torch.randn(3), torch.randn(3)]\n self.assertEqual(nf(*vals), f(*vals))\n\n nf.graph.set_codegen(ListCodeGen())\n nf.recompile()\n\n bare_fx = GraphModule({}, copy.deepcopy(nf.graph))\n bare_fx.graph.set_codegen(CodeGen())\n bare_fx.recompile()\n\n self.assertEqual(nf(vals), f(*vals))\n self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))\n\n ts_f = torch.jit.script(nf)\n self.assertEqual(nf(vals), ts_f(vals))\n\n\n def test_imul_code_print(self):\n graph = torch.fx.Graph()\n a = graph.placeholder(\"a\")\n b = graph.placeholder(\"b\")\n graph.call_function(operator.imul, (a, b), {})\n graph.output(a)\n gm = torch.fx.GraphModule({}, graph)\n gm.recompile()\n self.assertEqual(gm(2, 3), 6)\n self.assertIn(\"a *= b\", gm.code)\n\n\ndef run_getitem_target():\n from torch.fx._symbolic_trace import _wrapped_methods_to_patch\n _wrapped_methods_to_patch.append((torch.Tensor, \"__getitem__\"))\n try:\n TestFX().getitem_inner()\n finally:\n _wrapped_methods_to_patch.pop()\n\n\nclass TestOperatorSignatures(JitTestCase):\n def setUp(self):\n # Checking for mutable operations whil tracing is feature flagged\n # Enable it in testing but not by default\n self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations\n torch.fx.proxy.TracerBase.check_mutable_operations = True\n\n def tearDown(self):\n torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag\n\n @onlyCPU\n @ops(op_db, allowed_dtypes=(torch.float,))\n def test_get_torch_func_signature_exhaustive(self, device, dtype, op):\n if not isinstance(op.op, types.BuiltinFunctionType):\n raise unittest.SkipTest(\"This path doesn't work on Python functions\")\n sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)\n schemas = get_signature_for_torch_op(op.op)\n if not schemas:\n raise RuntimeError('No Schemas Returned')\n for sample_input in sample_inputs_itr:\n # Iterate through overloads until we hit a match. 
If we exit this\n # loop via `else`, we haven't found a match\n for schema in schemas:\n try:\n bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)\n bound_args.apply_defaults()\n op(*bound_args.args, **bound_args.kwargs)\n break\n except TypeError as e:\n pass\n else:\n raise RuntimeError(f'Did not match any schemas for op {op.name}!')\n\n\nclass TestFXAPIBackwardCompatibility(JitTestCase):\n def setUp(self):\n self.maxDiff = None\n\n # Checking for mutable operations whil tracing is feature flagged\n # Enable it in testing but not by default\n self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations\n torch.fx.proxy.TracerBase.check_mutable_operations = True\n\n def tearDown(self):\n torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag\n\n\n def _fn_to_stable_annotation_str(self, obj):\n \"\"\"\n Unfortunately we have to serialize function signatures manually since\n serialization for `inspect.Signature` objects is not stable across\n python versions\n \"\"\"\n fn_name = torch.typename(obj)\n\n signature = inspect.signature(obj)\n\n sig_str = f'{fn_name}{signature}'\n\n arg_strs = []\n for k, v in signature.parameters.items():\n maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\\\n if v.annotation is not inspect.Signature.empty else ''\n\n def default_val_str(val):\n if isinstance(val, (tuple, list)):\n str_pieces = ['(' if isinstance(val, tuple) else '[']\n str_pieces.append(', '.join(default_val_str(v) for v in val))\n if isinstance(val, tuple) and len(str_pieces) == 2:\n str_pieces.append(',')\n str_pieces.append(')' if isinstance(val, tuple) else ']')\n return ''.join(str_pieces)\n\n # Need to fix up some default value strings.\n # First case: modules. Default module `repr` contains the FS path of the module.\n # Don't leak that\n if isinstance(val, types.ModuleType):\n return f'<module {val.__name__}>'\n\n # Second case: callables. Callables (such as lambdas) encode their address in\n # their string repr. 
Don't do that\n if callable(val):\n return f'<function {val.__name__}>'\n\n return str(val)\n\n if v.default is not inspect.Signature.empty:\n default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f\"'{v.default}'\"\n maybe_default = f' = {default_val_str}'\n else:\n maybe_default = ''\n maybe_stars = ''\n if v.kind == inspect.Parameter.VAR_POSITIONAL:\n maybe_stars = '*'\n elif v.kind == inspect.Parameter.VAR_KEYWORD:\n maybe_stars = '**'\n arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')\n\n return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\\\n if signature.return_annotation is not inspect.Signature.empty else ''\n\n return f'{fn_name}({\", \".join(arg_strs)}){return_annot}'\n\n def _annotation_type_to_stable_str(self, t, sig_str):\n if t is inspect.Signature.empty:\n return ''\n\n # Forward ref\n if isinstance(t, str):\n return f\"'{t}'\"\n if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):\n return t.__forward_arg__\n if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):\n return t.__forward_arg__\n\n trivial_mappings = {\n str : 'str',\n int : 'int',\n float: 'float',\n bool: 'bool',\n torch.dtype: 'torch.dtype',\n torch.Tensor: 'torch.Tensor',\n torch.device: 'torch.device',\n torch.memory_format: 'torch.memory_format',\n slice: 'slice',\n torch.nn.Module: 'torch.nn.modules.module.Module',\n torch.fx.Graph : 'torch.fx.graph.Graph',\n torch.fx.Node : 'torch.fx.node.Node',\n torch.fx.Proxy : 'torch.fx.proxy.Proxy',\n torch.fx.node.Target : 'torch.fx.node.Target',\n torch.fx.node.Argument : 'torch.fx.node.Argument',\n torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',\n torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',\n torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',\n Ellipsis : '...',\n typing.Any: 'Any',\n type(None): 'NoneType',\n None: 'None',\n typing.Iterator: 'Iterator',\n }\n\n mapping = trivial_mappings.get(t, None)\n if mapping:\n return mapping\n\n # Handle types with contained types\n contained = getattr(t, '__args__', None) or []\n\n # Callables contain a bare List for arguments\n contained = t if isinstance(t, list) else contained\n\n # Python 3.8 puts type vars into __args__ for unbound types such as Dict\n if all(isinstance(ct, typing.TypeVar) for ct in contained):\n contained = []\n\n contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]\n contained_type_str = f'[{\", \".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''\n\n\n origin = getattr(t, '__origin__', None)\n if origin is None:\n # Unbound types don't have `__origin__` in some Python versions, so fix that up here.\n origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin\n\n if origin in {tuple, typing.Tuple}:\n return f'Tuple{contained_type_str}'\n if origin in {typing.Union}:\n # Annoying hack to detect Optional\n if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):\n not_none_param = contained[0] if contained[0] is not type(None) else contained[1]\n return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'\n return f'Union{contained_type_str}'\n if origin in {dict, typing.Dict}:\n return f'Dict{contained_type_str}'\n if origin in {list, typing.List}:\n return f'List{contained_type_str}'\n if origin in {type, typing.Type}:\n return 
f'Type{contained_type_str}'\n if isinstance(t, typing.Callable):\n if len(contained) > 0 and contained[0] is not Ellipsis:\n return f'Callable[[{\", \".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'\n else:\n return f'Callable{contained_type_str}'\n\n raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'\n f'Please add support for this type and confirm with the '\n f'FX team that your signature change is valid.')\n\n\n def test_function_back_compat(self):\n \"\"\"\n Test backward compatibility for function signatures with\n @compatibility(is_backward_compatible=True). Currently this checks for\n exact signature matches, which may lead to false positives. If this\n becomes too annoying, we can refine this check to actually parse out\n the saved schema strings and check if the change is truly backward-\n incompatible.\n \"\"\"\n signature_strs = []\n\n for obj in _BACK_COMPAT_OBJECTS:\n if not isinstance(obj, type):\n signature_strs.append(self._fn_to_stable_annotation_str(obj))\n\n signature_strs.sort()\n\n try:\n self.assertExpected('\\n'.join(signature_strs), 'fx_backcompat_function_signatures')\n except AssertionError as e:\n msg = f\"{e}\\n****** ERROR ******\\nAn FX function that has been marked \" \\\n f\"as backwards-compatible has experienced a signature change. See the \" \\\n f\"above exception context for more information. If this change was \" \\\n f\"unintended, please revert it. If it was intended, check with the FX \" \\\n f\"team to ensure that the proper deprecation protocols have been followed \" \\\n f\"and subsequently --accept the change.\"\n raise AssertionError(msg)\n\n def test_class_member_back_compat(self):\n \"\"\"\n Test backward compatibility for members of classes with\n @compatibility(is_backward_compatible=True). Currently this checks for\n exact matches on the publicly visible members of the class.\n \"\"\"\n class_method_strs = []\n\n for obj in _BACK_COMPAT_OBJECTS:\n if isinstance(obj, type):\n public_members = [name for name in obj.__dict__ if not name.startswith('_')]\n class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')\n\n class_method_strs.sort()\n\n try:\n self.assertExpected('\\n'.join(class_method_strs), 'fx_backcompat_class_members')\n except AssertionError as e:\n msg = f\"{e}\\n****** ERROR ******\\nAn FX class that has been marked \" \\\n f\"as backwards-compatible has experienced change in its public members. See the \" \\\n f\"above exception context for more information. If this change was \" \\\n f\"unintended, please revert it. 
If it was intended, check with the FX \" \\\n f\"team to ensure that the proper deprecation protocols have been followed \" \\\n f\"and subsequently --accept the change.\"\n raise AssertionError(msg)\n\n def test_public_api_surface(self):\n non_back_compat_objects = {}\n\n def check_symbols_have_bc_designation(m, prefix):\n if not m.__name__.startswith('torch.fx'):\n return\n if m.__name__.startswith('torch.fx.experimental'):\n return\n for k, v in m.__dict__.items():\n if v is m:\n continue\n if k.startswith('_'):\n continue\n if isinstance(v, types.ModuleType):\n check_symbols_have_bc_designation(v, prefix + [k])\n elif isinstance(v, type) or isinstance(v, types.FunctionType):\n if v not in _MARKED_WITH_COMATIBLITY:\n non_back_compat_objects.setdefault(v)\n\n check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])\n check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])\n\n non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]\n # Only want objects in torch.fx\n non_back_compat_strs = [\n s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]\n # Only want objects in public namespaces\n non_back_compat_strs = [\n s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]\n non_back_compat_strs.sort()\n\n if len(non_back_compat_strs) != 0:\n raise AssertionError(f\"Public FX API(s) {non_back_compat_strs} introduced but not given a \"\n f\"backwards-compatibility classification! Please decorate these \"\n f\"API(s) with `@torch.fx._compatibility.compatibility` to specify \"\n f\"BC guarantees.\")\n\nclass TestFunctionalTracing(JitTestCase):\n def setUp(self):\n # Checking for mutable operations whil tracing is feature flagged\n # Enable it in testing but not by default\n self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations\n torch.fx.proxy.TracerBase.check_mutable_operations = True\n\n def tearDown(self):\n torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag\n\n IGNORE_FUNCS = (\"has_torch_function\", \"has_torch_function_unary\",\n \"has_torch_function_variadic\", \"handle_torch_function\",\n \"boolean_dispatch\")\n TO_PATCH = {\"has_torch_function\": None,\n \"has_torch_function_unary\": None,\n \"has_torch_function_variadic\": None}\n\n BUILT_IN_FUNC = (AssertionError, \"\")\n PROXY_ITERABLE = (TypeError, r\"argument of type 'Proxy' is not iterable\")\n PROXY_ITERATED = (TraceError, r\"Proxy object cannot be iterated\")\n LEN_ERROR = (RuntimeError, r\"'len' is not supported in symbolic tracing by default\")\n ARG_TYPE_MISMATCH = (TypeError, r\", not Proxy$\")\n CONTROL_FLOW = (TraceError, r\"symbolically traced variables cannot be used as inputs to control flow\")\n INTERPOLATE_ARGS_CONFLICT = (ValueError, r\"only one of size or scale_factor should be defined\")\n MUTABLE = (RuntimeError, r\"Tried to trace mutable operation\")\n\n UNTRACEABLE_FUNCTIONALS = {\n \"adaptive_avg_pool1d\": BUILT_IN_FUNC,\n \"avg_pool1d\": BUILT_IN_FUNC,\n \"avg_pool2d\": BUILT_IN_FUNC,\n \"avg_pool3d\": BUILT_IN_FUNC,\n \"bilinear\": BUILT_IN_FUNC,\n \"celu_\": BUILT_IN_FUNC,\n \"channel_shuffle\": BUILT_IN_FUNC,\n \"native_channel_shuffle\": BUILT_IN_FUNC,\n \"conv1d\": BUILT_IN_FUNC,\n \"conv2d\": BUILT_IN_FUNC,\n \"conv3d\": BUILT_IN_FUNC,\n \"conv_tbc\": BUILT_IN_FUNC,\n \"conv_transpose1d\": BUILT_IN_FUNC,\n \"conv_transpose2d\": BUILT_IN_FUNC,\n \"conv_transpose3d\": BUILT_IN_FUNC,\n 
\"cosine_similarity\": BUILT_IN_FUNC,\n \"elu_\": BUILT_IN_FUNC,\n \"gelu\": BUILT_IN_FUNC,\n \"hardshrink\": BUILT_IN_FUNC,\n \"hardtanh_\": BUILT_IN_FUNC,\n \"leaky_relu_\": BUILT_IN_FUNC,\n \"linear\": BUILT_IN_FUNC,\n \"logsigmoid\": BUILT_IN_FUNC,\n \"one_hot\": BUILT_IN_FUNC,\n \"pairwise_distance\": BUILT_IN_FUNC,\n \"pdist\": BUILT_IN_FUNC,\n \"pixel_shuffle\": BUILT_IN_FUNC,\n \"pixel_unshuffle\": BUILT_IN_FUNC,\n \"prelu\": BUILT_IN_FUNC,\n \"relu_\": BUILT_IN_FUNC,\n \"rrelu_\": BUILT_IN_FUNC,\n \"selu_\": BUILT_IN_FUNC,\n \"softplus\": BUILT_IN_FUNC,\n \"softshrink\": BUILT_IN_FUNC,\n \"threshold_\": BUILT_IN_FUNC,\n\n \"adaptive_avg_pool2d\": LEN_ERROR,\n \"adaptive_avg_pool3d\": LEN_ERROR,\n \"adaptive_max_pool2d_with_indices\": LEN_ERROR,\n \"adaptive_max_pool3d_with_indices\": LEN_ERROR,\n \"instance_norm\": CONTROL_FLOW,\n \"pad\": LEN_ERROR,\n\n \"adaptive_max_pool1d\": PROXY_ITERABLE,\n \"adaptive_max_pool2d\": PROXY_ITERABLE,\n \"adaptive_max_pool3d\": PROXY_ITERABLE,\n \"fractional_max_pool2d\": PROXY_ITERABLE,\n \"fractional_max_pool3d\": PROXY_ITERABLE,\n \"max_pool1d\": PROXY_ITERABLE,\n \"max_pool2d\": PROXY_ITERABLE,\n \"max_pool3d\": PROXY_ITERABLE,\n\n \"group_norm\": PROXY_ITERATED,\n \"lp_pool2d\": PROXY_ITERATED,\n \"max_unpool1d\": PROXY_ITERATED,\n \"max_unpool2d\": PROXY_ITERATED,\n \"max_unpool3d\": PROXY_ITERATED,\n\n \"adaptive_max_pool1d_with_indices\": ARG_TYPE_MISMATCH,\n \"fractional_max_pool2d_with_indices\": ARG_TYPE_MISMATCH,\n \"fractional_max_pool3d_with_indices\": ARG_TYPE_MISMATCH,\n \"layer_norm\": ARG_TYPE_MISMATCH,\n \"lp_pool1d\": ARG_TYPE_MISMATCH,\n\n \"affine_grid\": CONTROL_FLOW,\n \"alpha_dropout\": CONTROL_FLOW,\n \"batch_norm\": CONTROL_FLOW,\n \"binary_cross_entropy\": CONTROL_FLOW,\n \"binary_cross_entropy_with_logits\": CONTROL_FLOW,\n \"celu\": CONTROL_FLOW,\n \"cosine_embedding_loss\": CONTROL_FLOW,\n \"cross_entropy\": CONTROL_FLOW,\n \"ctc_loss\": CONTROL_FLOW,\n \"dropout\": CONTROL_FLOW,\n \"dropout2d\": CONTROL_FLOW,\n \"dropout3d\": CONTROL_FLOW,\n \"elu\": CONTROL_FLOW,\n \"embedding\": CONTROL_FLOW,\n \"embedding_bag\": CONTROL_FLOW,\n \"feature_alpha_dropout\": CONTROL_FLOW,\n \"fold\": CONTROL_FLOW,\n \"gaussian_nll_loss\": CONTROL_FLOW,\n \"glu\": CONTROL_FLOW,\n \"grid_sample\": CONTROL_FLOW,\n \"gumbel_softmax\": CONTROL_FLOW,\n \"hardsigmoid\": CONTROL_FLOW,\n \"hardswish\": CONTROL_FLOW,\n \"hardtanh\": CONTROL_FLOW,\n \"hinge_embedding_loss\": CONTROL_FLOW,\n \"huber_loss\": CONTROL_FLOW,\n \"interpolate\": CONTROL_FLOW,\n \"kl_div\": CONTROL_FLOW,\n \"l1_loss\": CONTROL_FLOW,\n \"leaky_relu\": CONTROL_FLOW,\n \"local_response_norm\": CONTROL_FLOW,\n \"margin_ranking_loss\": CONTROL_FLOW,\n \"max_pool1d_with_indices\": CONTROL_FLOW,\n \"max_pool2d_with_indices\": CONTROL_FLOW,\n \"max_pool3d_with_indices\": CONTROL_FLOW,\n \"mse_loss\": CONTROL_FLOW,\n \"multi_head_attention_forward\": CONTROL_FLOW,\n \"multi_margin_loss\": CONTROL_FLOW,\n \"multilabel_margin_loss\": CONTROL_FLOW,\n \"multilabel_soft_margin_loss\": CONTROL_FLOW,\n \"nll_loss\": CONTROL_FLOW,\n \"poisson_nll_loss\": CONTROL_FLOW,\n \"relu\": CONTROL_FLOW,\n \"relu6\": CONTROL_FLOW,\n \"rrelu\": CONTROL_FLOW,\n \"selu\": CONTROL_FLOW,\n \"silu\": CONTROL_FLOW,\n \"mish\": CONTROL_FLOW,\n \"smooth_l1_loss\": CONTROL_FLOW,\n \"soft_margin_loss\": CONTROL_FLOW,\n \"threshold\": CONTROL_FLOW,\n \"triplet_margin_loss\": CONTROL_FLOW,\n \"triplet_margin_with_distance_loss\": CONTROL_FLOW,\n \"unfold\": CONTROL_FLOW,\n \"upsample\": CONTROL_FLOW,\n\n 
\"upsample_bilinear\": INTERPOLATE_ARGS_CONFLICT,\n \"upsample_nearest\": INTERPOLATE_ARGS_CONFLICT,\n\n \"normalize\" : MUTABLE,\n }\n\n # List of nn.functionals with Tensor inputs but not with type annotation\n FUNCTIONALS_WITHOUT_ANNOTATION = (\n \"adaptive_max_pool1d\",\n \"adaptive_max_pool2d\",\n \"adaptive_max_pool3d\",\n \"fractional_max_pool2d\",\n \"fractional_max_pool3d\",\n \"max_pool1d\",\n \"max_pool2d\",\n \"max_pool3d\",\n \"gaussian_nll_loss\",\n \"upsample\",\n \"upsample_bilinear\",\n \"upsample_nearest\",\n )\n\n # Inconsistent behavior between Python 3.8 and other Python versions:\n # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`\n # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same\n # internal exception above\n # Use the following map to override the expected exception for Python 3.8\n UNTRACEABLE_FUNCTIONALS_PY38 = {\n \"adaptive_max_pool1d\": PROXY_ITERATED,\n \"adaptive_max_pool2d\": PROXY_ITERATED,\n \"adaptive_max_pool3d\": PROXY_ITERATED,\n \"fractional_max_pool2d\": PROXY_ITERATED,\n \"fractional_max_pool3d\": PROXY_ITERATED,\n \"max_pool1d\": PROXY_ITERATED,\n \"max_pool2d\": PROXY_ITERATED,\n \"max_pool3d\": PROXY_ITERATED,\n\n \"group_norm\": LEN_ERROR\n }\n\n @classmethod\n def _get_functional(cls):\n functional_list = []\n for f in dir(torch.nn.functional):\n if not f.islower():\n continue\n # Ignore internal functions\n if f.startswith('_'):\n continue\n # Ignore supporting functions\n if f in cls.IGNORE_FUNCS:\n continue\n fn = getattr(torch.nn.functional, f)\n # Ignore non-callable object like modules\n if not isinstance(fn, Callable):\n continue\n if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:\n try:\n sig = inspect.signature(fn)\n has_tensor_arg = False\n for arg, param in sig.parameters.items():\n if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):\n has_tensor_arg = True\n if not has_tensor_arg:\n continue\n # No signature or Object is not supported\n except ValueError:\n pass\n functional_list.append((f, fn))\n return functional_list\n\n @classmethod\n def generate_test_func(cls, func_name, fn):\n\n def functional_test(self):\n if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \\\n sys.version_info >= (3, 8) and sys.version_info < (3, 10):\n exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]\n with self.assertRaisesRegex(exc, err):\n symbolic_trace(fn)\n elif func_name in self.UNTRACEABLE_FUNCTIONALS:\n exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]\n with self.assertRaisesRegex(exc, err):\n symbolic_trace(fn)\n else:\n symbolic_trace(fn)\n return functional_test\n\n @classmethod\n def generate_tests(cls):\n functional_list = cls._get_functional()\n for func_name, fn in functional_list:\n test_name = \"test_nn_functional_\" + func_name\n functional_test = cls.generate_test_func(func_name, fn)\n setattr(cls, test_name, functional_test)\n\n @classmethod\n def setUpClass(cls):\n\n def no(*args, **kwargs):\n return False\n\n for name in cls.TO_PATCH.keys():\n cls.TO_PATCH[name] = getattr(torch.nn.functional, name)\n setattr(torch.nn.functional, name, no)\n\n @classmethod\n def tearDownClass(cls):\n for name in cls.TO_PATCH.keys():\n setattr(torch.nn.functional, name, cls.TO_PATCH[name])\n\nTestFunctionalTracing.generate_tests()\n\n\ninstantiate_device_type_tests(TestOperatorSignatures, globals())\n\n@skipIfNoTorchVision\nclass TestVisionTracing(JitTestCase):\n def setUp(self):\n # Checking for mutable operations whil tracing is feature flagged\n # Enable 
it in testing but not by default\n self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations\n torch.fx.proxy.TracerBase.check_mutable_operations = True\n\n def tearDown(self):\n torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag\n\n PROXY_ITERATED = (TraceError, r\"Proxy object cannot be iterated\")\n INCONSISTENT_TYPE = (\n RuntimeError,\n r\"Return value was annotated as having type __torch__.torchvision.models[.\\w]+ but is actually of type Tensor\"\n )\n\n UNTRACEABLE_MODELS = {\n \"fasterrcnn_resnet50_fpn\": PROXY_ITERATED,\n \"fasterrcnn_mobilenet_v3_large_320_fpn\": PROXY_ITERATED,\n \"fasterrcnn_mobilenet_v3_large_fpn\": PROXY_ITERATED,\n \"maskrcnn_resnet50_fpn\": PROXY_ITERATED,\n \"keypointrcnn_resnet50_fpn\": PROXY_ITERATED,\n \"retinanet_resnet50_fpn\": PROXY_ITERATED,\n }\n UNSCRIPTABLE_MODELS = {\n \"googlenet\": INCONSISTENT_TYPE,\n \"inception_v3\": INCONSISTENT_TYPE,\n }\n\n output_transform = {\n \"fcn_resnet50\": lambda x: x[\"out\"],\n \"fcn_resnet101\": lambda x: x[\"out\"],\n \"deeplabv3_resnet50\": lambda x: x[\"out\"],\n \"deeplabv3_resnet101\": lambda x: x[\"out\"],\n \"deeplabv3_mobilenet_v3_large\": lambda x: x[\"out\"],\n \"lraspp_mobilenet_v3_large\": lambda x: x[\"out\"],\n \"fasterrcnn_resnet50_fpn\": lambda x: x[1],\n \"fasterrcnn_mobilenet_v3_large_fpn\": lambda x: x[1],\n \"fasterrcnn_mobilenet_v3_large_320_fpn\": lambda x: x[1],\n \"maskrcnn_resnet50_fpn\": lambda x: x[1],\n \"keypointrcnn_resnet50_fpn\": lambda x: x[1],\n \"retinanet_resnet50_fpn\": lambda x: x[1],\n }\n\n @classmethod\n def generate_test_fn(cls, name, model_fn, x, kwargs):\n def run_test(self):\n model = model_fn(**kwargs)\n model = model.eval()\n if name in self.UNTRACEABLE_MODELS:\n err, exc = self.UNTRACEABLE_MODELS[name]\n with self.assertRaisesRegex(err, exc):\n graph = symbolic_trace(model)\n else:\n out_transform = self.output_transform.get(name, lambda x: x)\n graph : torch.fx.GraphModule = symbolic_trace(model)\n a = out_transform(model(x))\n b = out_transform(graph(x))\n self.assertEqual(a, b)\n\n if name in self.UNSCRIPTABLE_MODELS:\n err, exc = self.UNSCRIPTABLE_MODELS[name]\n with self.assertRaisesRegex(err, exc):\n script = torch.jit.script(graph)\n else:\n script = torch.jit.script(graph)\n c = out_transform(script(x))\n self.assertEqual(a, c)\n\n return run_test\n\n @classmethod\n def generate_classification_tests(cls):\n for k, v in torchvision_models.__dict__.items():\n if callable(v) and k[0].lower() == k[0] and k[0] != \"_\":\n test_name = 'test_torchvision_models_' + k\n x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)\n kwargs = dict(num_classes=50)\n model_test = cls.generate_test_fn(k, v, x, kwargs)\n setattr(cls, test_name, model_test)\n\n @classmethod\n def generate_segmentation_tests(cls):\n for k, v in torchvision_models.segmentation.__dict__.items():\n if callable(v) and k[0].lower() == k[0] and k[0] != \"_\":\n test_name = 'test_torchvision_models_segmentation_' + k\n x = torch.rand(1, 3, 32, 32)\n kwargs = dict(num_classes=10, pretrained_backbone=False)\n model_test = cls.generate_test_fn(k, v, x, kwargs)\n setattr(cls, test_name, model_test)\n\n @classmethod\n def generate_detection_tests(cls):\n for k, v in torchvision_models.detection.__dict__.items():\n if callable(v) and k[0].lower() == k[0] and k[0] != \"_\":\n test_name = 'test_torchvision_models_detection_' + k\n x = [torch.rand(3, 300, 300)]\n kwargs = dict(num_classes=10, pretrained_backbone=False)\n 
model_test = cls.generate_test_fn(k, v, x, kwargs)\n setattr(cls, test_name, model_test)\n\n @classmethod\n def generate_video_tests(cls):\n for k, v in torchvision_models.video.__dict__.items():\n if callable(v) and k[0].lower() == k[0] and k[0] != \"_\":\n test_name = 'test_torchvision_models_video_' + k\n x = torch.rand(1, 3, 4, 112, 112)\n kwargs = dict(num_classes=50)\n model_test = cls.generate_test_fn(k, v, x, kwargs)\n setattr(cls, test_name, model_test)\n\n @classmethod\n def generate_tests(cls):\n cls.generate_classification_tests()\n cls.generate_detection_tests()\n cls.generate_segmentation_tests()\n cls.generate_video_tests()\n\nif HAS_TORCHVISION:\n TestVisionTracing.generate_tests()\n\nif __name__ == '__main__':\n run_tests()\n"
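The cell above stores an entire torch.fx test file as one escaped string; nearly every case in it follows the same trace-then-verify pattern: symbolically trace a module, inspect the generated code and graph nodes, then check the traced module against the eager original. A minimal standalone sketch of that pattern, assuming only an installed torch with the fx package (the module M below is illustrative, not taken from the tests):

import torch
from torch.fx import symbolic_trace

class M(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x) + 1

m = M()
traced = symbolic_trace(m)            # trace forward() into a torch.fx.Graph
print(traced.code)                    # generated Python source for the traced forward
for node in traced.graph.nodes:       # IR nodes: placeholder, call_function, ..., output
    print(node.op, node.target)

x = torch.randn(3, 4)
assert torch.equal(traced(x), m(x))   # the traced GraphModule stays call-compatible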
] | [
[
"torch.fx.immutable_collections.immutable_dict",
"torch.max",
"torch.rand_like",
"torch.fx.wrap",
"torch.typename",
"torch.sin",
"torch.zeros",
"torch.nn.RNN",
"torch.sum",
"torch.neg",
"torch.nn.Embedding",
"torch.fx.Interpreter",
"torch.tanh",
"torch.cat",
"torch.testing.FileCheck",
"torch.fx.Graph",
"torch.log_",
"torch.nn.EmbeddingBag",
"torch.jit.script",
"torch.nn.Dropout",
"torch.Size",
"torch.ones",
"torch.testing.assert_allclose",
"torch.add",
"torch.fx._symbolic_trace._wrapped_methods_to_patch.append",
"torch.randn",
"torch.fx.operator_schemas.get_signature_for_torch_op",
"torch.tensor",
"torch.fx.symbolic_trace",
"torch.relu",
"torch.rand",
"torch.arange",
"torch.nn.functional.linear",
"torch.classes._TorchScriptTesting._ReLUClass",
"torch.testing.assert_close",
"torch.squeeze",
"torch.dot",
"torch.nn.BatchNorm1d",
"torch.fx.GraphModule",
"torch.fx.Tracer",
"torch.fx.Proxy",
"torch.LongTensor",
"torch.sigmoid",
"torch.fx.passes.shape_prop.ShapeProp",
"torch.empty_like",
"torch.nn.PReLU",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.utils._pytree.tree_flatten",
"torch.fx._symbolic_trace._wrapped_methods_to_patch.pop",
"torch.fx.CodeGen",
"torch.nn.Module",
"torch.nn.Linear",
"torch.nn.Conv3d",
"torch.foo",
"torch.fx.immutable_collections.immutable_list",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.testing._internal.common_utils.run_tests",
"torch.fx.Node",
"torch.testing._internal.common_device_type.ops",
"torch.classes._TorchScriptTesting._ElementwiseInterpreter",
"torch.Tensor",
"torch._assert",
"torch.ops.aten.sigmoid",
"torch.fx.experimental.rewriter.RewritingTracer",
"torch.fx.Transformer",
"torch.matmul",
"torch.fx._pytree.register_pytree_flatten_spec",
"torch.classes._TorchScriptTesting._StackString",
"torch.ops.aten.cat",
"torch.testing._internal.common_utils.find_library_location",
"torch.multiprocessing.Process"
]
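The list just closed is this row's apis column: fully qualified torch call targets observed in the code cell above. The dump does not show how the list was produced; purely as an illustration, one way such names could be harvested from source text with the standard library (an assumption about the method, not the dataset's actual pipeline):

import ast

def collect_torch_calls(source: str) -> set:
    """Collect dotted call targets such as 'torch.nn.Linear' from source text."""
    found = set()

    def dotted(node):
        # Rebuild 'a.b.c' from nested ast.Attribute nodes; None for anything fancier.
        parts = []
        while isinstance(node, ast.Attribute):
            parts.append(node.attr)
            node = node.value
        if isinstance(node, ast.Name):
            parts.append(node.id)
            return ".".join(reversed(parts))
        return None

    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            name = dotted(node.func)
            if name and name.startswith("torch."):
                found.add(name)
    return found

src = "import torch\ny = torch.nn.Linear(3, 4)(torch.randn(2, 3))"
print(sorted(collect_torch_calls(src)))  # ['torch.nn.Linear', 'torch.randn']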
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
deepneuralmachine/google-research | [
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231",
"d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231"
] | [
"bam/task_specific/task.py",
"seq2act/models/input.py",
"robust_loss/fit_partition_spline.py",
"tcc/algos/tcn.py",
"mobilebert/optimization.py",
"cnn_quantization/tf_cnn_benchmarks/all_reduce_benchmark.py",
"soft_sort/sinkhorn.py",
"task_set/train_inner_test.py",
"non_semantic_speech_benchmark/eval_embedding/finetune/train_keras_test.py",
"tunas/depthwise_initializers_test.py",
"tft/data_formatters/electricity.py",
"many_constraints/intersectional_fairness.py",
"summae/tokenizer.py",
"aloe/aloe/synthetic/utils.py",
"cache_replacement/policy_learning/cache/main.py",
"aloe/aloe/fuzz/main_varlen.py",
"group_agnostic_fairness/ips_reweighting_model.py",
"disarm/networks.py",
"goemotions/extract_words.py",
"social_rl/adversarial_env/utils.py",
"smith/bert/optimization_test.py",
"tf3d/utils/pointcloud_utils_test.py",
"recursive_optimizer/recursive_optimizer.py",
"gfsa/training/train_var_misuse_lib_test.py",
"action_gap_rl/evaluate.py",
"fairness_teaching/baseline/model.py",
"tf3d/object_detection/model_utils_test.py",
"tf3d/eval.py",
"optimizing_interpretability/regularizers.py",
"poem/core/loss_utils.py",
"munchausen_rl/agents/m_dqn.py",
"simulation_research/tf_risk/util_test.py",
"rl_metrics_aaai2021/aggregate_value_iteration.py",
"hyperbolic/learning/losses.py",
"igt_optimizer/cloud_tpu_resnet/hyperparameters/hyperparameters.py",
"uq_benchmark_2019/cifar/run_train.py",
"enas_lm/src/tpu/utils.py",
"hyperbolic/models/base.py",
"unprocessing/dnd_denoise.py",
"tcc/algos/sal.py",
"ravens/ravens/agents/gt_state_2_step.py",
"clustering_normalized_cuts/cnc_net.py",
"factorize_a_city/libs/image_alignment.py",
"state_of_sparsity/sparse_transformer/models/sparse_transformer.py",
"task_set/optimizers/adam8p.py",
"m_theory/dim4/so8_supergravity_extrema/code/octonion_example.py",
"task_set/tasks/conv_pooling_test.py",
"moew/crime.py",
"jax_dft/jax_dft/scf_test.py",
"task_set/tasks/fixed/fixed_image_conv_test.py",
"task_set/tasks/fixed/fixed_maf_test.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base class for tasks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport csv\nimport os\nimport tensorflow.compat.v1 as tf\n\n\nclass Example(object):\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, task_name):\n self.task_name = task_name\n\n\nclass Task(object):\n \"\"\"Override this class to add a new task.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, config, name, long_sequences=False):\n self.config = config\n self.name = name\n self.long_sequences = long_sequences\n\n def get_examples(self, split):\n return self.load_data(split + \".tsv\", split)\n\n def get_test_splits(self):\n return [\"test\"]\n\n def load_data(self, fname, split):\n examples = self._create_examples(\n read_tsv(os.path.join(self.config.raw_data_dir(self.name), fname),\n max_lines=50 if self.config.debug else None),\n split)\n return examples\n\n @abc.abstractmethod\n def _create_examples(self, lines, split):\n pass\n\n @abc.abstractmethod\n def get_scorer(self):\n pass\n\n @abc.abstractmethod\n def get_feature_specs(self):\n pass\n\n @abc.abstractmethod\n def featurize(self, example, is_training):\n pass\n\n @abc.abstractmethod\n def get_prediction_module(self, bert_model, features, is_training,\n percent_done):\n pass\n\n def __repr__(self):\n return \"Task(\" + self.name + \")\"\n\n\ndef read_tsv(input_file, quotechar=None, max_lines=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for i, line in enumerate(reader):\n if max_lines and i >= max_lines:\n break\n lines.append(line)\n return lines\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The input function of seq2act models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom enum import Enum\nfrom tensor2tensor.layers import common_layers\nimport tensorflow.compat.v1 as tf # tf\n\nNUM_TOKENS_PER_OBJ = 30\nNUM_TOKENS_PER_SYN = 30\n\n\nclass DataSource(Enum):\n \"\"\"The class that represents word2act data source.\"\"\"\n RICO_SCA = 'rico_sca'\n ANDROID_HOWTO = 'android_howto'\n PIXEL_HELP = 'pixel_help'\n\n @staticmethod\n def from_str(label):\n if label == 'rico_sca':\n return DataSource.RICO_SCA\n elif label == 'android_howto':\n return DataSource.ANDROID_HOWTO\n elif label == 'pixel_help':\n return DataSource.PIXEL_HELP\n else:\n raise ValueError('Unrecognized source %s' % label)\n\n\nMAX_UI_OBJECT_NUM = {\n DataSource.PIXEL_HELP: 93,\n}\n\nMAX_TOKEN_NUM = {\n DataSource.ANDROID_HOWTO: 30,\n DataSource.RICO_SCA: 30,\n DataSource.PIXEL_HELP: 153,\n}\n\n# ['connect_str', token_id(connector_str)]\n# token id based on all_source_lower_case_vocab_59429\nPADDED_CONCATENATORS = [\n [5, 0, 0],\n [115, 0, 0],\n [8, 32, 0],\n [115, 8, 32],\n [12, 0, 0],\n]\n\nCONCATENATORS_STR = [\n ', ',\n ' , ',\n ' and then ',\n ' , and then ',\n '. 
'\n]\n\n\ndef _construct_padding_info(data_source, load_dom_dist, load_extra):\n \"\"\"Constructs the padding info tuple.\"\"\"\n token_num = MAX_TOKEN_NUM[data_source]\n # Model uses this anchor padding value to mask out the padded features.\n anchor_padding_value_int = tf.cast(-1, tf.int32)\n padding_value_int = tf.cast(0, tf.int32)\n padding_value_str = tf.cast('', tf.string)\n # Tuple of (feature name, padded_shape, padded_value)\n padding_info = [\n ('task', [None], padding_value_int),\n ('rule', [], padding_value_int),\n ('verbs', [None], padding_value_int),\n ('input_refs', [None, 2], padding_value_int),\n ('obj_refs', [None, 2], padding_value_int),\n ('verb_refs', [None, 2], padding_value_int),\n ('objects', [None], padding_value_int),\n ('obj_text', [None, None, token_num], padding_value_int),\n ('obj_type', [None, None], anchor_padding_value_int),\n ('obj_clickable', [None, None], padding_value_int),\n ('obj_screen_pos', [None, None, 4], tf.cast(0, tf.int32)),\n ('obj_dom_pos', [None, None, 3], padding_value_int),\n ('agreement_count', [], padding_value_int),\n ('data_source', [], padding_value_int),\n ]\n if load_dom_dist:\n padding_info.append(('obj_dom_dist', [None, None, None], padding_value_int))\n if load_extra:\n padding_info.append(('task_id', [], padding_value_str))\n padding_info.append(('raw_task', [], padding_value_str))\n padding_info.append(('obj_raw_text', [None, None], padding_value_str))\n\n padded_shapes = {}\n padded_values = {}\n for (key, padding_shape, padding_value) in padding_info:\n padded_shapes[key] = padding_shape\n padded_values[key] = padding_value\n return padded_shapes, padded_values\n\n\ndef input_fn(data_files,\n batch_size,\n repeat=-1,\n data_source=DataSource.RICO_SCA,\n required_agreement=2,\n max_range=1000,\n max_dom_pos=2000,\n max_pixel_pos=100,\n load_dom_dist=False,\n load_extra=False,\n buffer_size=8 * 1024,\n shuffle_size=8 * 1024,\n required_rule_id_list=None,\n shuffle_repeat=True,\n mean_synthetic_length=1.0,\n stddev_synthetic_length=0.0,\n load_screen=True,\n shuffle_files=True):\n \"\"\"Retrieves batches of data for training.\n\n Adds padding to ensure all dimensions in one batch are always the same.\n\n Args:\n data_files: A list of file names to initialize the TFRecordDataset\n batch_size: Number for the size of the batch.\n repeat: the number of times to repeat the input data.\n data_source: A DataSource instance.\n required_agreement: the minimum agreement required.\n max_range: the max range.\n max_dom_pos: the max dom pos.\n max_pixel_pos: the max screen pixels.\n load_dom_dist: whether to load the dom distance feature.\n load_extra: whether to load the raw text data.\n buffer_size: the buffer size for prefetching.\n shuffle_size: the shuffle size.\n required_rule_id_list: the list of required rule ids.\n shuffle_repeat: whether to shuffle and repeat.\n mean_synthetic_length: the mean length for synthetic sequence.\n stddev_synthetic_length: the stddev length for synthetic sequence.\n load_screen: whether to load screen features.\n shuffle_files: whether to shuffle the file names.\n Returns:\n a tf.data.Dataset object.\n Raises:\n ValueError: The data_format is neither 'recordio' nor 'tfrecord'.\n \"\"\"\n if not isinstance(data_source, DataSource):\n assert False, 'data_source %s unsupported' % str(data_source)\n padded_shapes, padded_values = _construct_padding_info(\n data_source, load_dom_dist, load_extra)\n if not isinstance(data_files, (list,)):\n data_files = [data_files]\n all_files = tf.concat(\n values=[tf.matching_files(f) for 
f in data_files], axis=0)\n if repeat == -1 and shuffle_files:\n all_files = tf.random.shuffle(all_files)\n if data_files[0].endswith('.recordio'):\n dataset = tf.data.RecordIODataset(all_files)\n elif data_files[0].endswith('.tfrecord'):\n dataset = tf.data.TFRecordDataset(\n all_files, num_parallel_reads=10 if repeat == -1 else None)\n else:\n assert False, 'Data_format %s is not supported.' % data_files[0]\n\n def _map_fn(x):\n return parse_tf_example(x, data_source, max_range, max_dom_pos,\n max_pixel_pos, load_dom_dist=load_dom_dist,\n load_extra=load_extra,\n append_eos=(data_source != DataSource.RICO_SCA or\n mean_synthetic_length == 1.0),\n load_screen=load_screen)\n dataset = dataset.map(_map_fn)\n def _is_enough_agreement(example):\n return tf.greater_equal(example['agreement_count'], required_agreement)\n dataset = dataset.filter(_is_enough_agreement)\n\n def _length_filter(example):\n return tf.less(tf.shape(example['obj_refs'])[0], 20)\n dataset = dataset.filter(_length_filter)\n\n def _filter_data_by_rule(example, rule_id_list):\n return tf.reduce_any(\n [tf.equal(example['rule'], rule_id) for rule_id in rule_id_list])\n if data_source == DataSource.RICO_SCA and required_rule_id_list is not None:\n dataset = dataset.filter(\n lambda x: _filter_data_by_rule(x, required_rule_id_list))\n\n # TODO(liyang): tf.data.experimental.bucket_by_sequence_length\n if shuffle_repeat:\n dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(\n shuffle_size, count=repeat))\n dataset = dataset.padded_batch(\n batch_size, padded_shapes=padded_shapes, padding_values=padded_values)\n if data_source == DataSource.RICO_SCA and mean_synthetic_length > 1.0:\n def _stitch_fn(x):\n return _batch_stitch(x, mean_length=mean_synthetic_length,\n stddev=stddev_synthetic_length)\n dataset = dataset.map(_stitch_fn)\n dataset = dataset.prefetch(buffer_size=buffer_size)\n return dataset\n\n\ndef hybrid_input_fn(data_files_list,\n data_source_list,\n batch_size_list,\n max_range=1000,\n max_dom_pos=2000,\n max_pixel_pos=100,\n load_dom_dist=False,\n load_extra=False,\n buffer_size=8 * 1024,\n mean_synthetic_length=1.0,\n stddev_synthetic_length=0.0,\n hybrid_batch_size=128,\n boost_input=False,\n load_screen=True,\n shuffle_size=1024):\n \"\"\"Combines multiple data sources.\"\"\"\n mixed_dataset = None\n for data_files, data_source, batch_size in zip(\n data_files_list, data_source_list, batch_size_list):\n dataset = input_fn(data_files, batch_size, repeat=-1,\n data_source=data_source,\n required_agreement=-1,\n max_range=max_range, max_dom_pos=max_dom_pos,\n max_pixel_pos=max_pixel_pos,\n load_dom_dist=load_dom_dist,\n load_extra=load_extra,\n buffer_size=0,\n mean_synthetic_length=mean_synthetic_length,\n stddev_synthetic_length=stddev_synthetic_length,\n shuffle_repeat=False,\n load_screen=load_screen)\n if mixed_dataset is None:\n mixed_dataset = dataset\n else:\n mixed_dataset = dataset.concatenate(mixed_dataset)\n\n mixed_dataset = mixed_dataset.unbatch()\n # Boost input examples\n if boost_input:\n def _input_booster(example):\n with tf.control_dependencies([tf.rank(example['input_refs']), 2]):\n has_input = tf.reduce_any(\n tf.greater(example['input_refs'][:, 1],\n example['input_refs'][:, 0]))\n return tf.logical_or(has_input, tf.less(tf.random_uniform([]), 0.1))\n mixed_dataset = mixed_dataset.filter(_input_booster)\n # Remix single examples\n mixed_dataset = mixed_dataset.shuffle(hybrid_batch_size * shuffle_size)\n # Batch again\n padded_shapes, padded_values = _construct_padding_info(\n 
data_source_list[0], load_dom_dist, load_extra)\n mixed_dataset = mixed_dataset.padded_batch(\n hybrid_batch_size, padded_shapes=padded_shapes,\n padding_values=padded_values)\n mixed_dataset = mixed_dataset.repeat()\n mixed_dataset = mixed_dataset.prefetch(buffer_size=buffer_size)\n return mixed_dataset\n\n\ndef parse_tf_example(example_proto,\n data_source,\n max_range=100,\n max_dom_pos=2000,\n max_pixel_pos=100,\n load_dom_dist=False,\n load_extra=False,\n append_eos=True,\n load_screen=True):\n \"\"\"Parses an example TFRecord proto into dictionary of tensors.\n\n Args:\n example_proto: TFRecord format proto that contains screen information.\n data_source: A DataSource instance.\n max_range: the max range.\n max_dom_pos: the maximum dom position.\n max_pixel_pos: the max pixel position.\n load_dom_dist: whether to load the feature.\n load_extra: whether to load the extra data for debugging.\n append_eos: whether to append eos.\n load_screen: whether to load screen features.\n Returns:\n feature: The parsed tensor dictionary with the input feature data\n label: The parsed label tensor with the input label for the feature\n \"\"\"\n feature_spec = {\n 'instruction_word_id_seq':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'input_str_position_seq':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'obj_desc_position_seq':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'verb_str_position_seq':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'agreement_count':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),\n 'instruction_rule_id':\n tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True)\n }\n if load_screen:\n feature_spec['verb_id_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n feature_spec['ui_target_id_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n feature_spec['ui_obj_word_id_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n feature_spec['ui_obj_type_id_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n feature_spec['ui_obj_clickable_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n feature_spec['ui_obj_cord_x_seq'] = tf.FixedLenSequenceFeature(\n [], tf.float32, allow_missing=True)\n feature_spec['ui_obj_cord_y_seq'] = tf.FixedLenSequenceFeature(\n [], tf.float32, allow_missing=True)\n feature_spec['ui_obj_dom_location_seq'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n\n if load_dom_dist:\n feature_spec['ui_obj_dom_distance'] = tf.FixedLenSequenceFeature(\n [], tf.int64, allow_missing=True)\n if load_extra:\n feature_spec['instruction_str'] = tf.FixedLenSequenceFeature(\n [], tf.string, allow_missing=True)\n feature_spec['task_id'] = tf.FixedLenSequenceFeature(\n [], tf.string, allow_missing=True)\n feature_spec['ui_obj_str_seq'] = tf.FixedLenSequenceFeature(\n [], tf.string, allow_missing=True)\n\n feature_dict = tf.parse_single_example(example_proto, feature_spec)\n\n for key in feature_dict:\n if feature_dict[key].dtype == tf.int64:\n feature_dict[key] = tf.cast(feature_dict[key], tf.int32)\n if data_source == DataSource.ANDROID_HOWTO:\n tf.logging.info('Parsing android_howto dataset')\n feature = _process_android_howto(feature_dict, max_range=max_range,\n load_dom_dist=load_dom_dist,\n load_extra=load_extra)\n elif data_source == DataSource.RICO_SCA:\n tf.logging.info('Parsing synthetic dataset')\n feature = _process_rico_sca(\n feature_dict, 
max_range=max_range, max_dom_pos=max_dom_pos,\n load_dom_dist=load_dom_dist,\n load_extra=load_extra,\n load_screen=load_screen)\n elif data_source == DataSource.PIXEL_HELP:\n tf.logging.info('Parsing test dataset')\n feature = _process_pixel_help(feature_dict, data_source,\n load_dom_dist=load_dom_dist,\n load_extra=load_extra)\n else:\n raise ValueError('Unsupported datasource %s' % str(data_source))\n # Remove padding from \"task\"\n feature['task'] = tf.boolean_mask(feature['task'],\n tf.not_equal(feature['task'], 0))\n feature['obj_screen_pos'] = tf.to_int32(\n feature['obj_screen_pos'] * (max_pixel_pos - 1))\n # Appending EOS and padding to match the appended length\n if append_eos:\n feature['input_refs'] = tf.pad(feature['input_refs'], [[0, 1], [0, 0]])\n feature['obj_refs'] = tf.pad(feature['obj_refs'], [[0, 1], [0, 0]])\n step_num = tf.size(feature['task'])\n feature['verb_refs'] = tf.concat(\n [feature['verb_refs'], [[step_num, step_num + 1]]], axis=0)\n feature['task'] = tf.pad(feature['task'], [[0, 1]], constant_values=1)\n feature['obj_text'] = tf.pad(feature['obj_text'], [[0, 1], [0, 0], [0, 0]])\n feature['obj_clickable'] = tf.pad(feature['obj_clickable'],\n [[0, 1], [0, 0]])\n feature['obj_type'] = tf.pad(\n feature['obj_type'], [[0, 1], [0, 0]], constant_values=-1)\n feature['obj_screen_pos'] = tf.pad(feature['obj_screen_pos'],\n [[0, 1], [0, 0], [0, 0]])\n feature['obj_dom_pos'] = tf.pad(feature['obj_dom_pos'],\n [[0, 1], [0, 0], [0, 0]])\n if load_dom_dist:\n feature['obj_dom_dist'] = tf.pad(feature['obj_dom_dist'],\n [[0, 1], [0, 0], [0, 0]])\n feature['objects'] = tf.pad(feature['objects'], [[0, 1]])\n feature['verbs'] = tf.pad(feature['verbs'], [[0, 1]])\n return feature\n\n\ndef _bound_refs(feature, max_range):\n \"\"\"Makes sure the refs are in the allowed range.\"\"\"\n for key in feature:\n if not key.endswith('_refs'):\n continue\n feature[key] = tf.where(\n tf.greater(feature[key][:, 1] - feature[key][:, 0], max_range),\n tf.stack([feature[key][:, 0], feature[key][:, 0] + max_range], axis=1),\n feature[key])\n\n\ndef _process_android_howto(feature_dict, max_range, load_dom_dist=False,\n load_extra=False):\n \"\"\"Processes webanswer feature dictionary.\n\n Args:\n feature_dict: feature dictionary\n max_range: the max range.\n load_dom_dist: whether to load the dom distance feature.\n load_extra: whether to load the extra data for debugging.\n Returns:\n A processed feature dictionary.\n \"\"\"\n\n feature = {\n 'task': tf.reshape(feature_dict['instruction_word_id_seq'], [-1]),\n 'input_refs': tf.reshape(feature_dict['input_str_position_seq'], [-1, 2]),\n 'obj_refs': tf.reshape(feature_dict['obj_desc_position_seq'], [-1, 2]),\n 'verb_refs': tf.reshape(feature_dict['verb_str_position_seq'], [-1, 2]),\n 'agreement_count': tf.reshape(feature_dict['agreement_count'], [])\n }\n if load_extra:\n feature['task_id'] = tf.constant('empty_task_id', dtype=tf.string)\n feature['raw_task'] = tf.reshape(feature_dict['instruction_str'], [])\n _bound_refs(feature, max_range)\n _load_fake_screen(feature, load_extra, load_dom_dist)\n return feature\n\n\ndef _load_fake_screen(feature, load_extra, load_dom_dist):\n \"\"\"Loads a fake screen.\"\"\"\n # Fills in fake ui object features into feature dictionary.\n step_num = tf.shape(feature['verb_refs'])[0]\n obj_num = 1\n if load_extra:\n feature['obj_raw_text'] = tf.fill([step_num, obj_num], '')\n feature['data_source'] = tf.constant(1, dtype=tf.int32)\n feature['obj_text'] = tf.zeros([step_num, obj_num, NUM_TOKENS_PER_OBJ],\n 
tf.int32)\n feature['obj_type'] = tf.cast(tf.fill([step_num, obj_num], -1), tf.int32)\n feature['obj_clickable'] = tf.zeros([step_num, obj_num], tf.int32)\n feature['obj_screen_pos'] = tf.zeros([step_num, obj_num, 4], tf.float32)\n feature['obj_dom_pos'] = tf.zeros([step_num, obj_num, 3], tf.int32)\n if load_dom_dist:\n feature['obj_dom_dist'] = tf.zeros([step_num, obj_num, obj_num], tf.int32)\n feature['objects'] = tf.zeros([step_num], tf.int32)\n feature['verbs'] = tf.zeros([step_num], tf.int32)\n feature['rule'] = tf.constant(5, dtype=tf.int32)\n\n\ndef _batch_stitch(features, mean_length=4.0, stddev=2.0):\n \"\"\"Stitches a batch of single-step data to a batch of multi-step data.\"\"\"\n batch_size = common_layers.shape_list(features['task'])[0]\n num_sequences = tf.maximum(\n tf.to_int32(tf.to_float(batch_size) / mean_length), 1)\n lengths = tf.random.truncated_normal(shape=[num_sequences],\n mean=mean_length, stddev=stddev)\n max_length = tf.reduce_max(lengths) * (\n tf.to_float(batch_size) / tf.reduce_sum(lengths))\n max_length = tf.to_int32(tf.ceil(max_length))\n total_items = max_length * num_sequences\n num_paddings = total_items - batch_size\n indices = tf.random.shuffle(tf.range(total_items))\n for key in features:\n shape_list = common_layers.shape_list(features[key])\n assert len(shape_list) >= 1\n with tf.control_dependencies([\n tf.assert_greater_equal(num_paddings, 0,\n name='num_paddings_positive')]):\n paddings = [[0, num_paddings]] + [[0, 0]] * (len(shape_list) - 1)\n features[key] = tf.pad(features[key], paddings,\n constant_values=-1 if key == 'obj_type' else 0)\n features[key] = tf.gather(features[key], indices)\n shape = [num_sequences, max_length]\n if len(shape_list) >= 2:\n shape += shape_list[1:]\n features[key] = tf.reshape(features[key], shape)\n # Remove all-padding seqs\n step_mask = tf.reduce_any(tf.greater(features['task'], 1), axis=-1)\n mask = tf.reduce_any(step_mask, axis=-1)\n step_mask = tf.boolean_mask(step_mask, mask)\n for key in features:\n features[key] = tf.boolean_mask(features[key], mask=mask)\n num_sequences = tf.shape(features['task'])[0]\n # Sort steps within each seq\n _, step_indices = tf.math.top_k(tf.to_int32(step_mask), k=max_length)\n step_indices = step_indices + tf.expand_dims(\n tf.range(num_sequences) * max_length, 1)\n step_indices = tf.reshape(step_indices, [-1])\n for key in features:\n shape_list = common_layers.shape_list(features[key])\n features[key] = tf.gather(tf.reshape(features[key], [-1] + shape_list[2:]),\n step_indices)\n features[key] = tf.reshape(features[key], shape_list)\n features = _stitch(features)\n return features\n\n\ndef _stitch(features):\n \"\"\"Stitch features on the first dimension.\"\"\"\n full_mask = tf.greater(features['task'], 1)\n step_mask = tf.reduce_any(full_mask, axis=-1)\n step_mask_exclude_last = tf.pad(step_mask,\n [[0, 0], [0, 1]],\n constant_values=False)[:, 1:]\n num_sequences = common_layers.shape_list(features['task'])[0]\n num_steps = common_layers.shape_list(features['task'])[1]\n connectors = tf.constant(PADDED_CONCATENATORS)\n # Select connectors\n connector_indices = tf.random.uniform(\n [num_sequences * num_steps], minval=0,\n maxval=len(PADDED_CONCATENATORS), dtype=tf.int32)\n selected_connectors = tf.reshape(\n tf.gather(connectors, connector_indices),\n [num_sequences, num_steps, len(PADDED_CONCATENATORS[0])])\n selected_connectors = tf.multiply(\n selected_connectors,\n tf.expand_dims(tf.to_int32(step_mask_exclude_last), 2),\n name='connector_mask')\n features['task'] = 
tf.concat([features['task'], selected_connectors], axis=-1)\n ref_offsets = tf.expand_dims(\n tf.cumsum(tf.reduce_sum(tf.to_int32(tf.greater(features['task'], 1)), -1),\n exclusive=True, axis=-1), 2)\n features['task'] = tf.reshape(features['task'], [num_sequences, -1])\n full_mask = tf.greater(features['task'], 1)\n full_mask_int = tf.to_int32(full_mask)\n indices = tf.where(tf.sequence_mask(lengths=tf.reduce_sum(full_mask_int, -1)))\n values = tf.boolean_mask(tf.reshape(features['task'], [-1]),\n tf.reshape(full_mask, [-1]))\n sparse_task = tf.sparse.SparseTensor(\n indices=indices, values=values,\n dense_shape=tf.to_int64(tf.shape(features['task'])))\n # Stitch task and raw_task\n stitched_features = {}\n stitched_features['task'] = tf.sparse_tensor_to_dense(sparse_task)\n max_len = tf.reduce_max(\n tf.reduce_sum(tf.to_int32(tf.greater(stitched_features['task'], 1)), -1))\n stitched_features['task'] = stitched_features['task'][:, :max_len]\n if 'raw_task' in features:\n connector_strs = tf.reshape(\n tf.gather(tf.constant(CONCATENATORS_STR), connector_indices),\n [num_sequences, num_steps])\n masked_connector_strs = tf.where(\n step_mask_exclude_last,\n connector_strs, tf.fill(tf.shape(connector_strs), ''))\n stitched_features['raw_task'] = tf.strings.reduce_join(\n tf.strings.reduce_join(tf.concat([\n tf.expand_dims(features['raw_task'], 2),\n tf.expand_dims(masked_connector_strs, 2)], axis=2), axis=-1), -1)\n # Stitch screen sequences\n action_lengths = tf.reduce_sum(tf.to_int32(\n tf.greater(features['verb_refs'][:, :, 0, 1],\n features['verb_refs'][:, :, 0, 0])), -1)\n max_action_length = tf.reduce_max(action_lengths)\n def _pad(tensor, padding_value=0):\n shape_list = common_layers.shape_list(tensor)\n assert len(shape_list) >= 2\n padding_list = [[0, 0], [0, 1]] + [[0, 0]] * (len(shape_list) - 2)\n return tf.pad(tensor[:, :max_action_length],\n padding_list, constant_values=padding_value)\n for key in features.keys():\n if key.endswith('_refs'):\n features[key] = tf.squeeze(features[key], 2)\n ref_mask = tf.expand_dims(tf.to_int32(\n tf.not_equal(features[key][:, :, 0],\n features[key][:, :, 1])), 2)\n stitched_features[key] = tf.multiply(\n (features[key] + ref_offsets), ref_mask, name='ref_mask')\n stitched_features[key] = _pad(stitched_features[key])\n elif key in ['verbs', 'objects', 'consumed', 'obj_dom_pos',\n 'obj_text', 'obj_type', 'obj_clickable', 'obj_screen_pos',\n 'verb_refs', 'obj_refs', 'input_refs', 'obj_dom_dist']:\n features[key] = tf.squeeze(features[key], 2)\n stitched_features[key] = features[key]\n stitched_features[key] = _pad(\n stitched_features[key],\n padding_value=-1 if key == 'obj_type' else 0)\n elif key not in ['task', 'raw_task']:\n stitched_features[key] = features[key][:, 0]\n # Append eos to 'task'\n stitched_features['task'] = tf.pad(stitched_features['task'],\n [[0, 0], [0, 1]])\n task_mask = tf.to_int32(tf.greater(stitched_features['task'], 1))\n task_eos_mask = tf.pad(task_mask, [[0, 0], [1, 0]], constant_values=1)[:, :-1]\n stitched_features['task'] = stitched_features['task'] + (\n task_eos_mask - task_mask)\n # Append eos\n verb_mask = tf.to_int32(tf.greater(stitched_features['verbs'], 1))\n verb_eos_mask = tf.pad(verb_mask, [[0, 0], [1, 0]], constant_values=1)[:, :-1]\n verb_eos = verb_eos_mask - verb_mask\n stitched_features['verbs'] = stitched_features['verbs'] + verb_eos\n # Append last step refs to 'verb_refs'\n task_lengths = tf.where(tf.equal(stitched_features['task'], 1))[:, 1]\n eos_pos = tf.to_int32(tf.stack([task_lengths, 
task_lengths + 1], axis=1))\n action_mask = tf.to_int32(\n tf.sequence_mask(action_lengths, max_action_length + 1))\n action_and_eos_mask = tf.pad(action_mask, [[0, 0], [1, 0]],\n constant_values=1)[:, :-1]\n verb_ref_eos = action_and_eos_mask - action_mask\n eos_refs = tf.multiply(\n tf.tile(tf.expand_dims(eos_pos, 1), [1, max_action_length + 1, 1]),\n tf.expand_dims(verb_ref_eos, 2), name='verb_ref_eos')\n stitched_features['verb_refs'] += eos_refs\n return stitched_features\n\n\ndef _process_rico_sca(feature_dict, max_range, max_dom_pos,\n load_dom_dist=False, load_extra=False, load_screen=True):\n \"\"\"Processes one_shot feature dictionary.\n\n Args:\n feature_dict: feature dictionary\n max_range: the max range.\n max_dom_pos: the max dom pos.\n load_dom_dist: whether to load the dom distance feature.\n load_extra: whether to load the extra data for debugging.\n load_screen: whether to load the screen features.\n Returns:\n A processed feature dictionary.\n \"\"\"\n phrase_count = tf.size(feature_dict['obj_desc_position_seq']) // 2\n feature = {\n 'task':\n tf.reshape(feature_dict['instruction_word_id_seq'],\n [phrase_count, NUM_TOKENS_PER_SYN]),\n 'input_refs':\n tf.reshape(feature_dict['input_str_position_seq'],\n [phrase_count, 1, 2]),\n 'obj_refs':\n tf.reshape(feature_dict['obj_desc_position_seq'],\n [phrase_count, 1, 2]),\n 'verb_refs':\n tf.reshape(feature_dict['verb_str_position_seq'],\n [phrase_count, 1, 2]),\n 'rule':\n tf.reshape(feature_dict['instruction_rule_id'], [phrase_count]),\n }\n selected_synthetic_action_idx = tf.random_uniform(\n shape=(), minval=0, maxval=phrase_count, dtype=tf.int32)\n for key in feature:\n feature[key] = feature[key][selected_synthetic_action_idx]\n if load_extra:\n feature['raw_task'] = tf.reshape(\n feature_dict['instruction_str'],\n [phrase_count])[selected_synthetic_action_idx]\n feature['task_id'] = tf.constant('empty_task_id', dtype=tf.string)\n if load_screen:\n feature['verbs'] = tf.reshape(\n feature_dict['verb_id_seq'],\n [phrase_count, 1])[selected_synthetic_action_idx]\n feature['objects'] = tf.reshape(\n feature_dict['ui_target_id_seq'],\n [phrase_count, 1])[selected_synthetic_action_idx]\n feature['obj_text'] = tf.reshape(feature_dict['ui_obj_word_id_seq'],\n [1, -1, NUM_TOKENS_PER_OBJ])\n feature['obj_type'] = tf.reshape(\n feature_dict['ui_obj_type_id_seq'], [1, -1])\n feature['obj_clickable'] = tf.reshape(feature_dict['ui_obj_clickable_seq'],\n [1, -1])\n def _make_obj_screen_pos():\n return tf.concat([\n tf.reshape(feature_dict['ui_obj_cord_x_seq'], [1, -1, 2]),\n tf.reshape(feature_dict['ui_obj_cord_y_seq'], [1, -1, 2])\n ], 2)\n\n feature['obj_screen_pos'] = tf.cond(\n tf.equal(\n tf.size(feature_dict['ui_obj_cord_x_seq']),\n 0), lambda: tf.fill([1, tf.shape(feature['obj_type'])[1], 4], 0.),\n _make_obj_screen_pos)\n feature['obj_dom_pos'] = tf.reshape(feature_dict['ui_obj_dom_location_seq'],\n [1, -1, 3])\n feature['obj_dom_pos'] = tf.minimum(feature['obj_dom_pos'], max_dom_pos - 1)\n if load_dom_dist:\n num_ui_obj = tf.to_int32(\n tf.sqrt(tf.to_float(tf.size(feature_dict['ui_obj_dom_distance']))))\n feature['obj_dom_dist'] = tf.reshape(feature_dict['ui_obj_dom_distance'],\n [1, num_ui_obj, num_ui_obj])\n if load_extra:\n feature['obj_raw_text'] = tf.reshape(feature_dict['ui_obj_str_seq'],\n [1, -1])\n else:\n _load_fake_screen(feature, load_extra, load_dom_dist)\n _bound_refs(feature, max_range)\n feature['data_source'] = tf.constant(0, dtype=tf.int32)\n feature['agreement_count'] = tf.constant(100, dtype=tf.int32)\n\n 
return feature\n\n\ndef _process_pixel_help(feature_dict, data_source, load_dom_dist=False,\n load_extra=False):\n \"\"\"Processes testing data feature dictionary.\n\n Args:\n feature_dict: feature dictionary\n data_source: TEST_PIXEL_HELP\n load_dom_dist: whether to load the dom distance feature.\n load_extra: whether to load the extra data for debugging.\n Returns:\n A processed feature dictionary.\n \"\"\"\n step_num = tf.size(feature_dict['verb_id_seq'])\n feature = {\n 'task':\n tf.reshape(feature_dict['instruction_word_id_seq'], [-1]),\n 'obj_text':\n tf.reshape(feature_dict['ui_obj_word_id_seq'], [\n step_num, MAX_UI_OBJECT_NUM[data_source],\n MAX_TOKEN_NUM[data_source]\n ]),\n 'obj_type':\n tf.reshape(feature_dict['ui_obj_type_id_seq'],\n [step_num, MAX_UI_OBJECT_NUM[data_source]]),\n 'obj_clickable':\n tf.reshape(feature_dict['ui_obj_clickable_seq'],\n [step_num, MAX_UI_OBJECT_NUM[data_source]]),\n # pylint: disable=g-long-ternary\n 'obj_screen_pos': (\n tf.reshape(tf.concat([\n tf.reshape(feature_dict['ui_obj_cord_x_seq'], [step_num, -1, 2]),\n tf.reshape(feature_dict['ui_obj_cord_y_seq'], [step_num, -1, 2])\n ], axis=2), [step_num, MAX_UI_OBJECT_NUM[data_source], 4])),\n 'obj_dom_pos':\n tf.reshape(feature_dict['ui_obj_dom_location_seq'],\n [step_num, MAX_UI_OBJECT_NUM[data_source], 3]),\n 'verbs':\n tf.reshape(feature_dict['verb_id_seq'], [step_num]),\n 'objects':\n tf.reshape(feature_dict['ui_target_id_seq'], [step_num]),\n 'input_refs':\n tf.reshape(feature_dict['input_str_position_seq'], [step_num, 2]),\n 'obj_refs':\n tf.reshape(feature_dict['obj_desc_position_seq'], [step_num, 2]),\n 'verb_refs': # No data for Pixel on the field\n tf.zeros([step_num, 2], tf.int32),\n 'agreement_count':\n tf.constant(100, dtype=tf.int32),\n }\n if load_dom_dist:\n feature['obj_dom_dist'] = tf.reshape(\n feature_dict['ui_obj_dom_distance'],\n [step_num, MAX_UI_OBJECT_NUM[data_source],\n MAX_UI_OBJECT_NUM[data_source]])\n feature['rule'] = tf.constant(5, dtype=tf.int32)\n if load_extra:\n feature['task_id'] = tf.reshape(feature_dict['task_id'], [])\n feature['raw_task'] = tf.reshape(feature_dict['instruction_str'], [])\n feature['obj_raw_text'] = tf.reshape(\n feature_dict['ui_obj_str_seq'],\n [step_num, MAX_UI_OBJECT_NUM[data_source]])\n feature['data_source'] = tf.constant(2, dtype=tf.int32)\n return feature\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Approximate the distribution's partition function with a spline.\n\nThis script generates values for the distribution's partition function and then\nfits a cubic hermite spline to those values, which is then stored to disk.\nTo run this script, assuming you're in this directory, run:\n python -m robust_loss.fit_partition_spline_test\nThis script will likely never have to be run again, and is provided here for\ncompleteness and reproducibility, or in case someone decides to modify\ndistribution.partition_spline_curve() in the future in case they find a better\ncurve. If the user wants a more accurate spline approximation, this can be\nobtained by modifying the `x_max`, `x_scale`, and `redundancy` parameters in the\ncode below, but this should only be done with care.\n\"\"\"\n\nfrom absl import app\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nfrom robust_loss import cubic_spline\nfrom robust_loss import distribution\nfrom robust_loss import general\n\ntf.enable_v2_behavior()\n\n\ndef numerical_base_partition_function(alpha):\n \"\"\"Numerically approximate the partition function Z(alpha).\"\"\"\n # Generate values `num_samples` values in [-x_max, x_max], with more samples\n # near the origin as `power` is set to larger values.\n num_samples = 2**24 + 1 # We want an odd value so that 0 gets sampled.\n x_max = 10**10\n power = 6\n t = t = tf.linspace(\n tf.constant(-1, tf.float64), tf.constant(1, tf.float64), num_samples)\n t = tf.sign(t) * tf.abs(t)**power\n x = t * x_max\n\n # Compute losses for the values, then exponentiate the negative losses and\n # integrate with the trapezoid rule to get the partition function.\n losses = general.lossfun(x, alpha, np.float64(1))\n y = tf.math.exp(-losses)\n partition = tf.reduce_sum((y[1:] + y[:-1]) * (x[1:] - x[:-1])) / 2.\n return partition\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n # Parameters governing how the x coordinate of the spline will be laid out.\n # We will construct a spline with knots at\n # [0 : 1 / x_scale : x_max],\n # by fitting it to values sampled at\n # [0 : 1 / (x_scale * redundancy) : x_max]\n x_max = 12\n x_scale = 1024\n redundancy = 4 # Must be >= 2 for the spline to be useful.\n\n spline_spacing = 1. 
/ (x_scale * redundancy)\n x_knots = np.arange(\n 0, x_max + spline_spacing, spline_spacing, dtype=np.float64)\n table = []\n # We iterate over knots, and for each knot recover the alpha value\n # corresponding to that knot with inv_partition_spline_curve(), and then\n # with that alpha we accurately approximate its partition function using\n # numerical_base_partition_function().\n for x_knot in x_knots:\n alpha = distribution.inv_partition_spline_curve(x_knot).numpy()\n partition = numerical_base_partition_function(alpha).numpy()\n table.append((x_knot, alpha, partition))\n print(table[-1])\n\n table = np.array(table)\n x = table[:, 0]\n alpha = table[:, 1]\n y_gt = np.log(table[:, 2])\n\n # We grab the values from the true log-partition table that correspond to\n # knots, by looking for where x * x_scale is an integer.\n mask = np.abs(np.round(x * x_scale) - (x * x_scale)) <= 1e-8\n values = y_gt[mask]\n\n # Initialize `tangents` using a central differencing scheme.\n values_pad = np.concatenate([[values[0] - values[1] + values[0]], values,\n [values[-1] - values[-2] + values[-1]]], 0)\n tangents = (values_pad[2:] - values_pad[:-2]) / 2.\n\n # Construct the spline's value and tangent TF variables, constraining the last\n # knot to have a fixed value Z(infinity) and a tangent of zero.\n n = len(values)\n tangents = tf.Variable(tangents, dtype=tf.float64)\n values = tf.Variable(values, dtype=tf.float64)\n\n # Fit the spline.\n num_iters = 10001\n\n optimizer = tf.keras.optimizers.SGD(learning_rate=1e-9, momentum=0.99)\n\n trace = []\n for ii in range(num_iters):\n with tf.GradientTape() as tape:\n tape.watch([values, tangents])\n # Fix the endpoint to be a known constant with a zero tangent.\n i_values = tf.where(\n np.arange(n) == (n - 1),\n tf.ones_like(values) * 0.70526025442689566, values)\n i_tangents = tf.where(\n np.arange(n) == (n - 1), tf.zeros_like(tangents), tangents)\n i_y = cubic_spline.interpolate1d(x * x_scale, i_values, i_tangents)\n # We minimize the maximum residual, which makes for a very ugly\n # optimization problem but works well in practice.\n i_loss = tf.reduce_max(tf.abs(i_y - y_gt))\n grads = tape.gradient(i_loss, [values, tangents])\n optimizer.apply_gradients(zip(grads, [values, tangents]))\n trace.append(i_loss.numpy())\n if (ii % 200) == 0:\n print('{:5d}: {:e}'.format(ii, trace[-1]))\n\n mask = alpha <= 4\n max_error_a4 = np.max(np.abs(i_y[mask] - y_gt[mask]))\n max_error = np.max(np.abs(i_y - y_gt))\n print('Max Error (a <= 4): {:e}'.format(max_error_a4))\n print('Max Error: {:e}'.format(max_error))\n\n # Just a sanity-check on the error.\n assert max_error_a4 <= 5e-7\n assert max_error <= 5e-7\n\n # Save the spline to disk.\n np.savez(\n './data/partition_spline.npz',\n x_scale=x_scale,\n values=i_values.numpy(),\n tangents=i_tangents.numpy())\n\n\nif __name__ == '__main__':\n app.run(main)\n",
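A small self-contained check of the trapezoid-rule idea behind numerical_base_partition_function: for alpha = 2 the robust loss reduces to 0.5 * x**2, whose partition function is known in closed form (sqrt(2 * pi)).

import numpy as np
# Sketch only: uses the closed-form alpha = 2 loss instead of general.lossfun.
x = np.linspace(-50., 50., 2**16 + 1)  # odd count so that 0 gets sampled
y = np.exp(-0.5 * x**2)
partition = np.sum((y[1:] + y[:-1]) * (x[1:] - x[:-1])) / 2.
assert abs(partition - np.sqrt(2. * np.pi)) < 1e-6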
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"TCN loss for unsupervised training.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tcc.algos.algorithm import Algorithm\nfrom tcc.config import CONFIG\nfrom tcc.utils import get_cnn_feats\nfrom tcc.utils import set_learning_phase\n\n\ndef _npairs_loss(labels, embeddings_anchor, embeddings_positive, reg_lambda):\n \"\"\"Returns n-pairs metric loss.\"\"\"\n reg_anchor = tf.reduce_mean(tf.reduce_sum(tf.square(embeddings_anchor), 1))\n reg_positive = tf.reduce_mean(tf.reduce_sum(\n tf.square(embeddings_positive), 1))\n l2loss = 0.25 * reg_lambda * (reg_anchor + reg_positive)\n\n # Get per pair similarities.\n similarity_matrix = tf.matmul(\n embeddings_anchor, embeddings_positive, transpose_a=False,\n transpose_b=True)\n\n # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.\n lshape = tf.shape(labels)\n assert lshape.shape == 1\n labels = tf.reshape(labels, [lshape[0], 1])\n\n labels_remapped = tf.cast(\n tf.equal(labels, tf.transpose(labels)), tf.float32)\n labels_remapped /= tf.reduce_sum(labels_remapped, 1, keepdims=True)\n\n # Add the softmax loss.\n xent_loss = tf.nn.softmax_cross_entropy_with_logits(\n logits=similarity_matrix, labels=labels_remapped)\n xent_loss = tf.reduce_mean(xent_loss)\n\n return l2loss + xent_loss\n\n\ndef single_sequence_loss(embs, num_steps):\n \"\"\"Returns n-pairs loss for a single sequence.\"\"\"\n\n labels = tf.range(num_steps)\n labels = tf.stop_gradient(labels)\n embeddings_anchor = embs[0::2]\n embeddings_positive = embs[1::2]\n loss = _npairs_loss(labels, embeddings_anchor, embeddings_positive,\n reg_lambda=CONFIG.TCN.REG_LAMBDA)\n return loss\n\n\nclass TCN(Algorithm):\n \"\"\"Time-contrastive Network.\"\"\"\n\n @set_learning_phase\n def call(self, data, steps, seq_lens, training):\n \"\"\"One pass through the model.\"\"\"\n cnn = self.model['cnn']\n emb = self.model['emb']\n\n if training:\n num_steps = CONFIG.TRAIN.NUM_FRAMES * CONFIG.DATA.NUM_STEPS\n else:\n num_steps = CONFIG.EVAL.NUM_FRAMES * CONFIG.DATA.NUM_STEPS\n\n # Number of steps is doubled due to sampling of positives and anchors.\n cnn_feats = get_cnn_feats(cnn, data, training, 2 * num_steps)\n\n if training:\n num_steps = CONFIG.TRAIN.NUM_FRAMES\n else:\n num_steps = CONFIG.EVAL.NUM_FRAMES\n\n embs = emb(cnn_feats, 2 * num_steps)\n embs = tf.stack(tf.split(embs, 2 * num_steps, axis=0), axis=1)\n\n return embs\n\n def compute_loss(self, embs, steps, seq_lens, global_step, training,\n frame_labels, seq_labels):\n if training:\n num_steps = CONFIG.TRAIN.NUM_FRAMES\n batch_size = CONFIG.TRAIN.BATCH_SIZE\n else:\n num_steps = CONFIG.EVAL.NUM_FRAMES\n batch_size = CONFIG.EVAL.BATCH_SIZE\n losses = []\n for i in xrange(batch_size):\n losses.append(single_sequence_loss(embs[i], num_steps))\n loss = tf.reduce_mean(tf.stack(losses))\n return 
loss\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions and classes related to optimization (weight updates).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport tensorflow.compat.v1 as tf\n\nfrom mobilebert import lamb_optimizer\n\n\ndef create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu,\n optimizer=\"adamw\", weight_decay_rate=0.01,\n end_lr_rate=0.0001, use_layer_wise_warmup=False,\n total_warmup_phases=0):\n \"\"\"Creates an optimizer training op.\"\"\"\n global_step = tf.train.get_or_create_global_step()\n\n learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)\n\n # Implements linear decay of the learning rate.\n learning_rate = tf.train.polynomial_decay(\n learning_rate,\n global_step,\n num_train_steps,\n end_learning_rate=learning_rate * end_lr_rate,\n power=1.0,\n cycle=False)\n\n # Implements linear warmup. I.e., if global_step < num_warmup_steps, the\n # learning rate will be `global_step/num_warmup_steps * init_lr`.\n if num_warmup_steps:\n global_steps_int = tf.cast(global_step, tf.int32)\n warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)\n\n global_steps_float = tf.cast(global_steps_int, tf.float32)\n warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)\n\n warmup_percent_done = global_steps_float / warmup_steps_float\n warmup_learning_rate = init_lr * warmup_percent_done\n\n is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)\n learning_rate = (\n (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)\n\n # It is recommended that you use this optimizer for fine tuning, since this\n # is how the model was trained (note that the Adam m/v variables are NOT\n # loaded from init_checkpoint.)\n if optimizer == \"adamw\":\n optimizer = AdamWeightDecayOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=weight_decay_rate,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\n \"teacher\", \"LayerNorm\", \"layer_norm\", \"bias\", \"FakeLayerNorm\"],\n use_layer_wise_warmup=use_layer_wise_warmup,\n total_warmup_phases=total_warmup_phases,\n num_train_steps=num_train_steps)\n elif optimizer == \"lamb\":\n optimizer = lamb_optimizer.LAMBOptimizer(\n learning_rate=learning_rate,\n weight_decay_rate=weight_decay_rate,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[\n \"teacher\", \"LayerNorm\", \"layer_norm\", \"bias\", \"FakeLayerNorm\"],\n use_layer_wise_warmup=use_layer_wise_warmup,\n total_warmup_phases=total_warmup_phases,\n num_train_steps=num_train_steps)\n else:\n raise ValueError(\"Not supported optimizer: \", optimizer)\n\n if use_tpu:\n optimizer = tf.tpu.CrossShardOptimizer(optimizer)\n\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n\n tvars = [var for var in tf.trainable_variables()\n if not var.name.startswith(\"teacher\")]\n grads = 
tf.gradients(loss, tvars, colocate_gradients_with_ops=True)\n\n # This is how the model was pre-trained.\n (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)\n\n ntvars = [var for var in tf.trainable_variables()\n if var.name.startswith(\"teacher\")]\n ngrads = [None for var in ntvars]\n\n train_op = optimizer.apply_gradients(\n zip(grads + ngrads, tvars + ntvars), global_step=global_step)\n\n # Normally the global step update is done inside of `apply_gradients`.\n # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use\n # a different optimizer, you should probably take this line out.\n new_global_step = global_step + 1\n train_op = tf.group(train_op, [global_step.assign(new_global_step)])\n return train_op\n\n\nclass AdamWeightDecayOptimizer(tf.train.Optimizer):\n \"\"\"A basic Adam optimizer that includes \"correct\" L2 weight decay.\"\"\"\n\n def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\",\n use_layer_wise_warmup=False,\n total_warmup_phases=0,\n num_train_steps=0):\n \"\"\"Constructs an AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n del use_layer_wise_warmup\n del total_warmup_phases\n del num_train_steps\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want to decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n",
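A plain-Python sketch of the learning-rate schedule that create_optimizer builds above (linear warmup followed by a power-1 polynomial decay to init_lr * end_lr_rate); the function name is illustrative:

def lr_at_step(step, init_lr, num_train_steps, num_warmup_steps,
               end_lr_rate=0.0001):
  if num_warmup_steps and step < num_warmup_steps:
    return init_lr * step / float(num_warmup_steps)  # linear warmup
  end_lr = init_lr * end_lr_rate
  frac = min(step, num_train_steps) / float(num_train_steps)
  return (init_lr - end_lr) * (1.0 - frac) + end_lr  # linear decay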
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Benchmarks the all-reduce algorithms of tf_cnn_benchmarks.\n\ntf_cnn_benchmarks uses all-reduce to aggregate gradients. This benchmark is\nuseful for benchmarking the performance of just this gradient aggregation,\ninstead of the entire model. All the flags that tf_cnn_benchmarks accepts are\nalso accepted by this script, although many are silently ignored.\n\nThe number and shapes of the tensors all-reduced are those of the variables of\nthe model specified by the --model flag.\nTODO(reedwm): Allow custom sizes to be specified.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport os\nimport time\n\nfrom absl import flags as absl_flags\nimport tensorflow.compat.v1 as tf\nfrom cnn_quantization.tf_cnn_benchmarks import benchmark_cnn\nfrom cnn_quantization.tf_cnn_benchmarks import cnn_util\nfrom cnn_quantization.tf_cnn_benchmarks import flags\nfrom cnn_quantization.tf_cnn_benchmarks.cnn_util import log_fn\nfrom tensorflow.python.ops import control_flow_ops\n\n\nabsl_flags.DEFINE_integer('iters_per_step', 5,\n 'Number of iterations to run all-reduce for, per '\n 'step. Every step, a session will be run on a Graph '\n 'that contains this many copies of the all-reduce. '\n 'The copies are run sequentially. Setting this above '\n '1 is useful to lower the overhead of starting the '\n 'session run, running the VariableV2 ops at the '\n 'start of the step, etc.')\n\n\nflags.define_flags()\nfor name in flags.param_specs.keys():\n absl_flags.declare_key_flag(name)\n\n\ndef get_var_shapes(model):\n \"\"\"Returns the list of variable shapes for a tf_cnn_benchmarks Model.\"\"\"\n with tf.Graph().as_default():\n # The variable shapes do not depend on the batch size.\n images = tf.placeholder(tf.float32, model.get_input_shapes('train')[0])\n model.build_network([images])\n return [[int(d) for d in v.shape.dims] for v in tf.trainable_variables()]\n\n\ndef all_reduce(all_device_tensors, variable_mgr):\n \"\"\"Performs a single batch all-reduce.\n\n Args:\n all_device_tensors: List of lists of tensors. 
all_device_tensors[t][i] is\n a tensor, where t is the tower the tensor is on and i is the index of\n the tensor.\n variable_mgr: The VariableMgr to perform the all-reduce.\n Returns:\n List of list of tensors in the same form as `all_device_tensors`, except the\n tensors are aggregated across towers.\n \"\"\"\n tower_grads = [[(g, None) for g in device_tensors] for\n device_tensors in all_device_tensors]\n _, aggregated_tower_grads = variable_mgr.preprocess_device_grads(tower_grads)\n return [\n [g for g, _ in agg_device_tensors]\n for agg_device_tensors in aggregated_tower_grads]\n\n\ndef build_all_reduce_iterations(all_device_tensors, tower_devices, variable_mgr,\n num_iters):\n \"\"\"Builds the all-reduce ops for multiple iterations to aggregate tensors.\n\n The tensors in `all_device_tensors` are aggregated `num_iters` times. Each\n iteration aggregates the results from the previous iteration. The iterations\n are run sequentially, so the aggregations for an iteration do not start\n running until the previous iteration has completed. Each iteration after the\n first is aggregating already-aggregated values, but it does not matter because\n we are only aggregating for benchmarking purposes.\n\n Args:\n all_device_tensors: List of lists of tensors. all_device_tensors[t][i] is\n a tensor, where t is the tower the tensor is on and i is the index of\n the tensor.\n tower_devices: A list of device strings. tower_devices[t] is the device\n of the tensors in all_device_tensors[t].\n variable_mgr: The VariableMgr to perform the all-reduce.\n num_iters: Number of iterations to aggregate tensors for.\n Returns:\n An op that when run, causes the all-reduce ops to run.\n \"\"\"\n for i in range(num_iters):\n with tf.name_scope('iteration_%d' % i):\n # Step 1: Do the aggregation.\n with tf.name_scope('tensor_aggregation'):\n all_device_tensors = all_reduce(all_device_tensors, variable_mgr)\n\n # Step 2. Create identity ops, to bring the aggregated results back to\n # each device.\n new_all_device_tensors = []\n for device, device_tensors in zip(tower_devices, all_device_tensors):\n with tf.device(device):\n new_all_device_tensors.append([\n tf.identity(t, name='identity_after_allreduce')\n for t in device_tensors\n ])\n all_device_tensors = new_all_device_tensors\n\n # Step 3. Add control dependencies to delay the next iteration until this\n # iteration is complete. 
To avoid extra overhead, we do not have any\n # cross-device control dependencies, which means it's possible for two\n # iterations to slightly overlap.\n new_all_device_tensors = []\n for device_tensors in all_device_tensors:\n new_all_device_tensors.append([\n control_flow_ops.with_dependencies(\n device_tensors, t, name='identity_after_dependencies')\n for t in device_tensors\n ])\n all_device_tensors = new_all_device_tensors\n\n # To prevent the dependency optimizer from removing every op we created,\n # we store the results in variables.\n ops_to_run = []\n for device, device_tensors in zip(tower_devices, all_device_tensors):\n with tf.device(device):\n for t in device_tensors:\n # The placeholder initial value is never run.\n var = tf.Variable(tf.placeholder(tf.float32, t.shape), collections=[])\n ops_to_run.append(var.assign(t))\n return tf.group(*ops_to_run)\n\n\ndef build_graph(tower_devices, tensor_shapes, variable_mgr, num_iters):\n \"\"\"Builds the graph for the benchmark.\n\n Args:\n tower_devices: A list of device strings of the devices to run the all-reduce\n benchmark on.\n tensor_shapes: A list of shapes of the tensors that will be aggregated for\n the all-reduce.\n variable_mgr: The VariableMgr to perform the all-reduce.\n num_iters: Number of iterations to aggregate tensors for.\n Returns:\n An op that runs the benchmark.\n \"\"\"\n all_device_tensors = []\n for i, tower_device in enumerate(tower_devices):\n with tf.device(tower_device):\n device_tensors = []\n for j, shape in enumerate(tensor_shapes):\n tensor = tf.Variable(tf.random_normal(shape, dtype=tf.float32),\n name='tensor_%d_on_device_%d' % (j, i))\n device_tensors.append(tensor)\n all_device_tensors.append(device_tensors)\n\n log_fn('Building all-reduce ops')\n benchmark_op = build_all_reduce_iterations(all_device_tensors, tower_devices,\n variable_mgr, num_iters)\n log_fn('Done building all-reduce ops')\n return benchmark_op\n\n\ndef run_graph(benchmark_op, bench_cnn, init_ops, dummy_loss_op):\n \"\"\"Runs the graph for the benchmark.\n\n Args:\n benchmark_op: An op that runs the benchmark.\n bench_cnn: The BenchmarkCNN where params and other attributes are obtained.\n init_ops: A list of ops that are run before `benchmark_op` for\n initialization.\n dummy_loss_op: Any op. We must pass a loss op to\n `benchmark_cnn.benchmark_one_step`, but the result of the op is never\n actually used.\n \"\"\"\n config = benchmark_cnn.create_config_proto(bench_cnn.params)\n with tf.Session(config=config) as sess:\n for op in init_ops:\n sess.run(op)\n step_train_times = []\n fetches = {'average_loss': dummy_loss_op, 'benchmark_op': benchmark_op}\n log_fn('Running warmup')\n for i in range(-bench_cnn.num_warmup_batches, bench_cnn.num_batches):\n if i == 0:\n log_fn('Running all-reduce ops')\n start = time.time()\n if i > 0 and i % bench_cnn.params.display_every == 0:\n log_fn('Iteration: %d. 
Average time per step so far: %s' %\n (i, (time.time() - start) / i))\n # Call benchmark_one_step instead of directly calling sess.run(...), to\n # potentially get a trace file, partitioned graphs, etc.\n benchmark_cnn.benchmark_one_step(\n sess=sess,\n fetches=fetches,\n step=i,\n # The batch size is only used for the images/sec calculation, which is\n # not actually calculated because we pass show_images_per_sec=False.\n batch_size=None,\n step_train_times=step_train_times,\n trace_filename=bench_cnn.trace_filename,\n partitioned_graph_file_prefix=(\n bench_cnn.params.partitioned_graph_file_prefix),\n profiler=None,\n image_producer=None,\n params=bench_cnn.params,\n show_images_per_sec=False)\n log_fn('Average time per step: %s' %\n ((time.time() - start) / bench_cnn.num_batches))\n\n\ndef run_benchmark(bench_cnn, num_iters):\n \"\"\"Runs the all-reduce benchmark.\n\n Args:\n bench_cnn: The BenchmarkCNN where params, the variable manager, and other\n attributes are obtained.\n num_iters: Number of iterations to do all-reduce for.\n\n Raises:\n ValueError: Invalid params of bench_cnn.\n \"\"\"\n if bench_cnn.params.variable_update != 'replicated':\n raise ValueError('--variable_update=replicated must be specified to use '\n 'the all-reduce benchmark')\n if bench_cnn.params.variable_consistency == 'relaxed':\n raise ValueError('--variable_consistency=relaxed is not supported')\n\n benchmark_op = build_graph(bench_cnn.raw_devices,\n get_var_shapes(bench_cnn.model),\n bench_cnn.variable_mgr, num_iters)\n init_ops = [\n tf.global_variables_initializer(),\n bench_cnn.variable_mgr.get_post_init_ops()\n ]\n loss_op = tf.no_op()\n\n if bench_cnn.graph_file:\n path, filename = os.path.split(bench_cnn.graph_file)\n as_text = filename.endswith('txt')\n log_fn('Writing GraphDef as %s to %s' % (\n 'text' if as_text else 'binary', bench_cnn.graph_file))\n tf.train.write_graph(tf.get_default_graph().as_graph_def(add_shapes=True),\n path, filename, as_text)\n\n run_graph(benchmark_op, bench_cnn, init_ops, loss_op)\n\n\n# TODO(reedwm): Reduce redundancy with tf_cnn_benchmarks\ndef main(positional_arguments):\n # Command-line arguments like '--distortions False' are equivalent to\n # '--distortions=True False', where False is a positional argument. To prevent\n # this from silently running with distortions, we do not allow positional\n # arguments.\n assert len(positional_arguments) >= 1\n if len(positional_arguments) > 1:\n raise ValueError('Received unknown positional arguments: %s'\n % positional_arguments[1:])\n\n params = benchmark_cnn.make_params_from_flags()\n params = benchmark_cnn.setup(params)\n bench = benchmark_cnn.BenchmarkCNN(params)\n\n tfversion = cnn_util.tensorflow_version_tuple()\n log_fn('TensorFlow: %i.%i' % (tfversion[0], tfversion[1]))\n\n run_benchmark(bench, absl_flags.FLAGS.iters_per_step)\n\nif __name__ == '__main__':\n tf.app.run()\n",
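A numpy sketch of the semantics that all_reduce() above relies on: after aggregation every tower holds the same across-tower reduction (shown here as a sum) of each tensor.

import numpy as np
all_device_tensors = [
    [np.ones(3), np.arange(3.)],       # tensors on tower 0
    [np.ones(3) * 2., np.arange(3.)],  # tensors on tower 1
]
num_tensors = len(all_device_tensors[0])
reduced = [sum(tower[i] for tower in all_device_tensors)
           for i in range(num_tensors)]
aggregated = [list(reduced) for _ in all_device_tensors]  # one copy per tower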
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"A Sinkhorn implementation for 1D Optimal Transport.\n\nSinkhorn algorithm was introduced in 1967 by R. Sinkhorn in the article\n\"Diagonal equivalence to matrices with prescribed row and column sums.\" in\nThe American Mathematical Monthly. It is an iterative algorithm that turns an\ninput matrix (here the kernel matrix corresponding to transportation costs) into\na matrix with prescribed (a, b) (row, colums) sum marginals by multiplying it on\nthe left an right by two diagonal matrices.\n\"\"\"\n\nfrom typing import Tuple\nimport gin\nimport tensorflow.compat.v2 as tf\n\n\ndef center(cost, f, g):\n if f.shape.rank == 2:\n return cost - f[:, :, tf.newaxis] - g[:, tf.newaxis, :]\n elif f.shape.rank == 3:\n return cost[:, :, :, tf.newaxis] - (\n f[:, :, tf.newaxis, :] + g[:, tf.newaxis, :, :])\n\n\ndef softmin(cost, f, g, eps, axis):\n return -eps * tf.reduce_logsumexp(-center(cost, f, g) / eps, axis=axis)\n\n\ndef error(cost, f, g, eps, b):\n b_target = tf.math.reduce_sum(transport(cost, f, g, eps), axis=1)\n return tf.reduce_max((tf.abs(b_target - b) / b)[:])\n\n\ndef transport(cost, f, g, eps):\n return tf.math.exp(-center(cost, f, g) / eps)\n\n\ndef cost_fn(x, y,\n power):\n \"\"\"A transport cost in the form |x-y|^p and its derivative.\"\"\"\n # Check if data is 1D.\n if x.shape.rank == 2 and y.shape.rank == 2:\n # If that is the case, it is convenient to use pairwise difference matrix.\n xy_difference = x[:, :, tf.newaxis] - y[:, tf.newaxis, :]\n if power == 1.0:\n cost = tf.math.abs(xy_difference)\n derivative = tf.math.sign(xy_difference)\n elif power == 2.0:\n cost = xy_difference**2.0\n derivative = 2.0 * xy_difference\n else:\n abs_diff = tf.math.abs(xy_difference)\n cost = abs_diff**power\n derivative = power * tf.math.sign(xy_difference) * abs_diff**(power - 1.0)\n return cost, derivative\n # Otherwise data is high dimensional, in form [batch,n,d]. L2 distance used.\n elif x.shape.rank == 3 and y.shape.rank == 3:\n x2 = tf.reduce_sum(x**2, axis=2)\n y2 = tf.reduce_sum(y**2, axis=2)\n cost = (x2[:, :, tf.newaxis] + y2[:, tf.newaxis, :] -\n tf.matmul(x, y, transpose_b=True))**(power / 2)\n derivative = None\n return cost, derivative\n\n\[email protected]\ndef sinkhorn_iterations(x,\n y,\n a,\n b,\n power = 2.0,\n epsilon = 1e-3,\n epsilon_0 = 1e-1,\n epsilon_decay = 0.95,\n threshold = 1e-2,\n inner_num_iter = 5,\n max_iterations = 2000):\n \"\"\"Runs the Sinkhorn's algorithm from (x, a) to (y, b).\n\n Args:\n x: Tensor<float>[batch, n, d]: the input point clouds.\n y: Tensor<float>[batch, m, d]: the target point clouds.\n a: Tensor<float>[batch, n, q]: weights of each input point across batch. Note\n that q possible variants can be considered (for parallelism).\n Sums along axis 1 must match that of b to converge.\n b: Tensor<float>[batch, m, q]: weights of each input point across batch. 
As\n with a, q possible variants of weights can be considered.\n power: (float) the power of the distance for the cost function.\n epsilon: (float) the level of entropic regularization wanted.\n epsilon_0: (float) the initial level of entropic regularization.\n epsilon_decay: (float) a multiplicative factor applied at each iteration\n until reaching the epsilon value.\n threshold: (float) the relative threshold on the Sinkhorn error to stop the\n Sinkhorn iterations.\n inner_num_iter: (int32) the Sinkhorn error is not recomputed at each\n iteration but every inner_num_iter instead to avoid computational overhead.\n max_iterations: (int32) the maximum number of Sinkhorn iterations.\n\n Returns:\n A 5-tuple containing: the values of the conjugate variables f and g, the\n final value of the entropic parameter epsilon, the cost matrix and the number\n of iterations.\n \"\"\"\n max_outer_iterations = max_iterations // inner_num_iter\n loga = tf.math.log(a)\n logb = tf.math.log(b)\n cost, d_cost = cost_fn(x, y, power)\n\n def body_fn(f, g, eps, num_iter):\n for _ in range(inner_num_iter):\n g = eps * logb + softmin(cost, f, g, eps, axis=1) + g\n f = eps * loga + softmin(cost, f, g, eps, axis=2) + f\n eps = tf.math.maximum(eps * epsilon_decay, epsilon)\n return [f, g, eps, num_iter + inner_num_iter]\n\n def cond_fn(f, g, eps, num_iter):\n return tf.math.reduce_all([\n tf.math.less(num_iter, max_iterations),\n tf.math.reduce_any([\n tf.math.greater(eps, epsilon),\n tf.math.greater(error(cost, f, g, eps, b), threshold)\n ])\n ])\n\n f, g, eps, iterations = tf.while_loop(\n cond_fn,\n body_fn, [\n tf.zeros_like(loga),\n tf.zeros_like(logb),\n tf.cast(epsilon_0, dtype=x.dtype),\n tf.constant(0, dtype=tf.int32)\n ],\n parallel_iterations=1,\n maximum_iterations=max_outer_iterations + 1)\n\n return f, g, eps, cost, d_cost, iterations\n\n\ndef transport_implicit_gradients(derivative_cost,\n transport_matrix, eps, b, d_p):\n \"\"\"Application of the transpose of the Jacobians dP/dx and dP/db.\n\n This is applied to a perturbation of the size of the transport matrix.\n Required to back-propagate through Sinkhorn's output.\n\n Args:\n derivative_cost: the derivative of the cost function.\n transport_matrix: the obtained transport matrix tensor.\n eps: the value of the entropic regualarization parameter.\n b: the target weights.\n d_p: the perturbation of the transport matrix.\n\n Returns:\n A list of two tensor that correspond to the application of the transpose\n of dP/dx and dP/db on dP.\n \"\"\"\n batch_size = tf.shape(b)[0]\n m = tf.shape(b)[1]\n invmargin1 = tf.math.reciprocal(tf.reduce_sum(transport_matrix, axis=2))\n m1 = invmargin1[:, 1:, tf.newaxis] * transport_matrix[:, 1:, :]\n m1 = tf.concat([tf.zeros([tf.shape(m1)[0], 1, tf.shape(m1)[2]]), m1], axis=1)\n\n invmargin2 = tf.math.reciprocal(tf.reduce_sum(transport_matrix, axis=1))\n m2 = invmargin2[:, :, tf.newaxis] * tf.transpose(transport_matrix, [0, 2, 1])\n eye_m = tf.eye(m, batch_shape=[batch_size])\n schur = eye_m - tf.linalg.matmul(m2, m1)\n\n def jac_b_p_transpose(d_p):\n \"\"\"Transposed of the jacobian of the transport w.r.t the target weights.\"\"\"\n d_p_p = d_p * transport_matrix\n u_f = tf.reduce_sum(d_p_p, axis=2) / eps\n u_g = tf.reduce_sum(d_p_p, axis=1) / eps\n\n m1_tranpose_u_f = tf.linalg.matvec(m1, u_f, transpose_a=True)\n to_invert = tf.concat(\n [m1_tranpose_u_f[:, :, tf.newaxis], u_g[:, :, tf.newaxis]], axis=2)\n inverses = tf.linalg.solve(tf.transpose(schur, [0, 2, 1]), to_invert)\n inv_m1_tranpose_u_f, inv_u_g = 
inverses[:, :, 0], inverses[:, :, 1]\n jac_2 = -inv_m1_tranpose_u_f + inv_u_g\n return eps * jac_2 / b\n\n def jac_x_p_transpose(d_p):\n \"\"\"Transposed of the jacobian of the transport w.r.t the inputs.\"\"\"\n d_p_p = d_p * transport_matrix\n c_x = -tf.reduce_sum(derivative_cost * d_p_p, axis=2) / eps\n u_f = tf.math.reduce_sum(d_p_p, axis=2) / eps\n u_g = tf.math.reduce_sum(d_p_p, axis=1) / eps\n m1_tranpose_u_f = tf.linalg.matvec(m1, u_f, transpose_a=True)\n to_invert = tf.concat(\n [m1_tranpose_u_f[:, :, tf.newaxis], u_g[:, :, tf.newaxis]], axis=2)\n inverses = tf.linalg.solve(tf.transpose(schur, [0, 2, 1]), to_invert)\n inv_m1_tranpose_u_f, inv_u_g = inverses[:, :, 0], inverses[:, :, 1]\n jac_1 = u_f + tf.linalg.matvec(\n m2, inv_m1_tranpose_u_f - inv_u_g, transpose_a=True)\n jac_2 = -inv_m1_tranpose_u_f + inv_u_g\n jac_1 = jac_1 * tf.reduce_sum(m1 * derivative_cost, axis=2)\n jac_2 = tf.linalg.matvec(\n tf.transpose(m2, [0, 2, 1]) * derivative_cost, jac_2)\n return c_x + jac_1 + jac_2\n\n return [jac_x_p_transpose(d_p), jac_b_p_transpose(d_p)]\n\n\ndef autodiff_sinkhorn(x, y, a, b,\n **kwargs):\n \"\"\"A Sinkhorn function that returns the transportation matrix.\n\n This function back-propagates through the computational graph defined by the\n Sinkhorn iterations.\n\n Args:\n x: [N, n, d] the input batch of points clouds\n y: [N, m, d] the target batch points clouds.\n a: [N, n, q] q probability weight vectors for the input point cloud. The sum\n of all elements of b along axis 1 must match that of a.\n b: [N, m, q] q probability weight vectors for the target point cloud. The sum\n of all elements of b along axis 1 must match that of a.\n **kwargs: additional parameters passed to the sinkhorn algorithm. See\n sinkhorn_iterations for more details.\n\n Returns:\n A tf.Tensor representing the optimal transport matrix and the regularized OT\n cost.\n \"\"\"\n f, g, eps, cost, _, _ = sinkhorn_iterations(x, y, a, b, **kwargs)\n return transport(cost, f, g, eps)\n\n\ndef implicit_sinkhorn(x, y, a, b,\n **kwargs):\n \"\"\"A Sinkhorn function using the implicit function theorem.\n\n That is to say differentiating optimality confiditions to recover Jacobians.\n\n Args:\n x: the input batch of 1D points clouds\n y: the target batch 1D points clouds.\n a: the intput weight of each point in the input point cloud. The sum of all\n elements of b must match that of a to converge.\n b: the target weight of each point in the target point cloud. The sum of all\n elements of b must match that of a to converge.\n **kwargs: additional parameters passed to the sinkhorn algorithm. 
See\n sinkhorn_iterations for more details.\n\n Returns:\n A tf.Tensor representing the optimal transport matrix.\n \"\"\"\n\n @tf.custom_gradient\n def _aux(x, b):\n \"\"\"Auxiliary closure to compute custom gradient over x and b.\"\"\"\n x = tf.stop_gradient(x)\n b = tf.stop_gradient(b)\n f, g, eps, cost, d_cost, _ = sinkhorn_iterations(x, y, a, b, **kwargs)\n # This centering is crucial to ensure Jacobian is invertible.\n # This centering is also assumed in the computation of the\n # transpose-Jacobians themselves.\n to_remove = f[:, 0]\n f = f - to_remove[:, tf.newaxis]\n g = g + to_remove[:, tf.newaxis]\n forward = transport(cost, f, g, eps)\n\n def grad(d_p):\n return transport_implicit_gradients(d_cost, forward, eps, b, d_p)\n\n return forward, grad\n\n return _aux(x, b)\n\n\[email protected]\ndef sinkhorn(x,\n y,\n a,\n b,\n implicit = True,\n **kwargs):\n \"\"\"A Sinkhorn function that returns the transportation matrix.\n\n This function back-propagates through the computational graph defined by the\n Sinkhorn iterations.\n\n Args:\n x: the input batch of points clouds\n y: the target batch points clouds.\n a: the intput weight of each point in the input point cloud. The sum of all\n elements of b must match that of a to converge.\n b: the target weight of each point in the target point cloud. The sum of all\n elements of b must match that of a to converge.\n implicit: whether to run the autodiff version of the backprop or the implicit\n computation of the gradient. The implicit version is more efficient in\n terms of both speed and memory, but might be less stable numerically. It\n requires high-accuracy in the computation of the optimal transport itself.\n **kwargs: additional parameters passed to the sinkhorn algorithm. See\n sinkhorn_iterations for more details.\n\n Returns:\n A tf.Tensor representing the optimal transport matrix.\n \"\"\"\n if implicit:\n if x.shape.rank == 2:\n return implicit_sinkhorn(x, y, a, b, **kwargs)\n else:\n raise ValueError('`Implicit` not yet implemented for multivariate data')\n return autodiff_sinkhorn(x, y, a, b, **kwargs)\n\n\ndef sinkhorn_divergence(x,\n y,\n a,\n b,\n only_x_varies = False,\n **kwargs):\n \"\"\"A simple implementation of the Sinkhorn divergence.\n\n This function back-propagates through the computational graph defined by the\n Sinkhorn iterations.\n\n Args:\n x: [N,n,d] the input batch of multivariate (dimension d) points clouds\n y: [N,m,d] the input batch of multivariate (dimension d) points clouds\n a: [N,n] probability weights per batch\n b: [N,n] probability weights per batch\n only_x_varies: <bool> if only x varies, that flag should be set to True,\n in order to avoid computing the divergence between y and itself.\n **kwargs: additional parameters passed to the sinkhorn algorithm. See\n sinkhorn_iterations for more details.\n\n Returns:\n A tf.Tensor representing the optimal transport matrix.\n \"\"\"\n f_xy, g_xy = sinkhorn_iterations(x, y, a, b, **kwargs)[:2]\n f_xx, g_xx = sinkhorn_iterations(x, x, a, a, **kwargs)[:2]\n if only_x_varies:\n return tf.reduce_sum((f_xy - 0.5 * f_xx - 0.5 * g_xx) * a +\n g_xy * b, axis=1)\n else:\n f_yy, g_yy = sinkhorn_iterations(y, y, b, b, **kwargs)[:2]\n return (tf.reduce_sum((f_xy - 0.5 * f_xx - 0.5 * g_xx) * a, axis=1) +\n tf.reduce_sum((g_xy - 0.5 * f_yy - 0.5 * g_yy) * b, axis=1))\n",
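A minimal NumPy sketch (not from the file above) of the matrix-scaling view described in the module docstring: rescale a positive kernel K = exp(-cost/eps) by diagonal vectors u, v until the rescaled matrix has the prescribed row/column marginals (a, b). This is the plain scaling variant, without the log-domain updates and epsilon decay used by sinkhorn_iterations above; sinkhorn_scaling and its parameters are illustrative names.

import numpy as np

def sinkhorn_scaling(cost, a, b, eps=0.5, num_iters=500):
    K = np.exp(-cost / eps)                 # kernel from transportation costs
    u = np.ones_like(a)
    for _ in range(num_iters):
        v = b / (K.T @ u)                   # rescale columns toward marginal b
        u = a / (K @ v)                     # rescale rows toward marginal a
    return u[:, None] * K * v[None, :]      # transport plan diag(u) K diag(v)

x = np.linspace(0.0, 1.0, 5)[:, None]
y = np.linspace(0.2, 1.2, 5)[:, None]
cost = np.abs(x - y.T) ** 2.0               # |x - y|^p with power = 2
a = np.full(5, 0.2)
b = np.full(5, 0.2)
P = sinkhorn_scaling(cost, a, b)
# Both marginal errors are ~0 after convergence.
print(np.abs(P.sum(axis=1) - a).max(), np.abs(P.sum(axis=0) - b).max())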
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python3\n\"\"\"Tests for task_set.train_inner.\"\"\"\nimport json\nimport os\nimport tempfile\nimport numpy as np\n\nfrom task_set import datasets\nfrom task_set import train_inner\nfrom task_set.tasks import base\nimport tensorflow.compat.v1 as tf\n\n\nclass DummyTask(base.BaseTask):\n \"\"\"Dummy task used for tests.\"\"\"\n\n def call_split(self, params, split, with_metrics=False):\n r = tf.random_normal(shape=[], dtype=tf.float32)\n offset = {\n datasets.Split.TRAIN: 1.0,\n datasets.Split.VALID_INNER: 2.0,\n datasets.Split.VALID_OUTER: 3.0,\n datasets.Split.TEST: 4.0,\n }\n loss = offset[split] + r\n\n if with_metrics:\n return loss, {\"metric\": -1 * loss}\n else:\n return loss\n\n def get_batch(self, split):\n return None\n\n def current_params(self):\n return {}\n\n def gradients(self, loss):\n return {}\n\n def initial_params(self):\n return {}\n\n def get_variables(self):\n return []\n\n\nclass TrainInnerTest(tf.test.TestCase):\n\n def test_compute_averaged_loss(self):\n task = DummyTask()\n params = task.initial_params()\n losses, _ = train_inner.compute_averaged_loss(\n task, params, num_batches=100, with_metrics=False)\n\n with self.test_session() as sess:\n all_np_losses = []\n for _ in range(10):\n all_np_losses.append(sess.run(losses))\n\n tr, vai, vao, te = zip(*all_np_losses)\n # We are averaging over 100 with 10 replications evaluatons.\n # This means the std. error of the mean should be 1/sqrt(1000) or 0.03.\n # We use a threshold of 0.15, corresponding to a 5-sigma test.\n self.assertNear(np.mean(tr), 1.0, 0.15)\n self.assertNear(np.mean(vai), 2.0, 0.15)\n self.assertNear(np.mean(vao), 3.0, 0.15)\n self.assertNear(np.mean(te), 4.0, 0.15)\n\n # ensure that each sample is also different.\n self.assertLess(1e-5, np.var(tr), 0.5)\n self.assertLess(1e-5, np.var(vai), 0.5)\n self.assertLess(1e-5, np.var(vao), 0.5)\n self.assertLess(1e-5, np.var(te), 0.5)\n\n losses, metrics = train_inner.compute_averaged_loss(\n task, params, num_batches=100, with_metrics=True)\n tr_metrics, vai_metrics, vao_metrics, te_metrics = metrics\n with self.test_session() as sess:\n # this std. error is 1/sqrt(100), or 0.1. 
5 std. devs out is 0.5\n self.assertNear(sess.run(tr_metrics[\"metric\"]), -1.0, 0.5)\n self.assertNear(sess.run(vai_metrics[\"metric\"]), -2.0, 0.5)\n self.assertNear(sess.run(vao_metrics[\"metric\"]), -3.0, 0.5)\n self.assertNear(sess.run(te_metrics[\"metric\"]), -4.0, 0.5)\n\n def test_train(self):\n tmp_dir = tempfile.mkdtemp()\n\n # TODO(lmetz) when toy tasks are done, switch this away from an mlp.\n train_inner.train(\n tmp_dir,\n task_name=\"mlp_family_seed12\",\n optimizer_name=\"adam8p_wide_grid_seed21\",\n training_steps=10,\n eval_every_n=5)\n\n with tf.gfile.Open(os.path.join(tmp_dir, \"result\")) as f:\n result_data = json.loads(f.read())\n\n self.assertEqual(len(result_data), 3)\n # 4 losses logged out per timestep\n self.assertEqual(len(result_data[\"5\"]), 4)\n\n with tf.gfile.Open(os.path.join(tmp_dir, \"time_per_step\")) as f:\n time_per_step_data = json.loads(f.read())\n\n self.assertIn(\"mean_last_half\", time_per_step_data)\n self.assertIn(\"mean_time\", time_per_step_data)\n self.assertIn(\"median_time\", time_per_step_data)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
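The thresholds in test_compute_averaged_loss come from standard-error-of-the-mean arithmetic: averaging unit-variance noise over 100 batches times 10 evaluations gives a standard error of about 1/sqrt(1000) ~= 0.032, so 0.15 is roughly a 5-sigma acceptance band. A quick numeric check of that arithmetic (an illustrative setup, not part of the test suite above):

import numpy as np

rng = np.random.RandomState(0)
# 200 replications of "mean of 1000 unit-variance samples, offset by 1.0".
means = np.array([rng.randn(1000).mean() + 1.0 for _ in range(200)])
print(np.std(means))                       # ~0.03, i.e. 1/sqrt(1000)
print(np.all(np.abs(means - 1.0) < 0.15))  # comfortably inside ~5 sigma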
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for non_semantic_speech_benchmark.eval_embedding.keras.train_keras.\"\"\"\n\nfrom absl import flags\nfrom absl.testing import absltest\nfrom absl.testing import flagsaver\nfrom absl.testing import parameterized\nimport mock\nimport tensorflow as tf\n\nfrom non_semantic_speech_benchmark.eval_embedding.finetune import train_keras\n\n\ndef _get_data(*args, **kwargs):\n del args\n assert 'samples_key' in kwargs\n assert 'min_length' in kwargs\n assert 'batch_size' in kwargs\n assert 'label_list' in kwargs\n bs = kwargs['batch_size']\n samples = tf.zeros((bs, 32000), tf.float32)\n labels = tf.zeros([bs], tf.int32)\n labels_onehot = tf.one_hot(labels, len(kwargs['label_list']))\n return tf.data.Dataset.from_tensors((samples, labels_onehot)).repeat()\n\n\nclass TrainKerasTest(parameterized.TestCase):\n\n @parameterized.parameters(\n {'num_clusters': 0, 'alpha_init': 0},\n {'num_clusters': 4, 'alpha_init': 0},\n {'num_clusters': 0, 'alpha_init': 1.0},\n )\n def test_get_model(self, num_clusters, alpha_init):\n num_classes = 4\n batched_samples = tf.zeros([3, 20000])\n y_onehot = tf.one_hot([0, 1, 2], num_classes)\n\n model = train_keras.models.get_keras_model(\n num_classes, input_length=20000, use_batchnorm=True,\n num_clusters=num_clusters, alpha_init=alpha_init)\n\n loss_obj = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\n opt = tf.keras.optimizers.Adam()\n train_loss = tf.keras.metrics.Mean()\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()\n summary_writer = tf.summary.create_file_writer(\n absltest.get_default_test_tmpdir())\n train_step = train_keras.get_train_step(\n model, loss_obj, opt, train_loss, train_accuracy, summary_writer)\n gstep = opt.iterations\n train_step(batched_samples, y_onehot, gstep)\n self.assertEqual(1, gstep)\n train_step(batched_samples, y_onehot, gstep)\n self.assertEqual(2, gstep)\n\n @mock.patch.object(train_keras.get_data, 'get_data', new=_get_data)\n @flagsaver.flagsaver\n def test_full_flow(self):\n flags.FLAGS.file_pattern = 'dummy'\n flags.FLAGS.shuffle_buffer_size = 4\n flags.FLAGS.samples_key = 'audio'\n flags.FLAGS.nc = 2\n flags.FLAGS.label_key = 'emotion'\n flags.FLAGS.label_list = ['no', 'yes']\n flags.FLAGS.logdir = absltest.get_default_test_tmpdir()\n\n train_keras.train_and_report(debug=True)\n\n\nif __name__ == '__main__':\n tf.compat.v2.enable_v2_behavior()\n assert tf.executing_eagerly()\n absltest.main()\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Tests for depthwise_initializers.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom tunas import depthwise_initializers\n\n\nclass ModelOpsTest(tf.test.TestCase):\n\n def test_variance_scaling_untruncated_normal_fan_in(self):\n initializer = depthwise_initializers.DepthwiseVarianceScaling(\n scale=1.0,\n mode='fan_in',\n distribution='untruncated_normal')\n tensor = initializer([3, 5, 1024, 1])\n\n value = self.evaluate(tensor)\n self.assertEqual(value.shape, (3, 5, 1024, 1))\n self.assertNear(np.mean(value), 0.0, 0.01)\n self.assertNear(np.std(value), 1.0 / math.sqrt(3 * 5), 0.01)\n\n def test_variance_scaling_truncated_normal_fan_in(self):\n initializer = depthwise_initializers.DepthwiseVarianceScaling(\n scale=1.0,\n mode='fan_in',\n distribution='truncated_normal')\n tensor = initializer([3, 5, 1024, 1])\n\n value = self.evaluate(tensor)\n self.assertEqual(value.shape, (3, 5, 1024, 1))\n self.assertNear(np.mean(value), 0.0, 0.01)\n self.assertNear(np.std(value), 1.0 / math.sqrt(3 * 5), 0.01)\n\n def test_variance_scaling_uniform_fan_in(self):\n initializer = depthwise_initializers.DepthwiseVarianceScaling(\n scale=1.0,\n mode='fan_in',\n distribution='uniform')\n tensor = initializer([3, 5, 1024, 1])\n\n value = self.evaluate(tensor)\n self.assertEqual(value.shape, (3, 5, 1024, 1))\n self.assertNear(np.mean(value), 0.0, 0.01)\n self.assertNear(np.std(value), 1.0 / math.sqrt(3 * 5), 0.01)\n\n def test_variance_scaling_scale_is_2(self):\n initializer = depthwise_initializers.DepthwiseVarianceScaling(\n scale=2.0,\n mode='fan_in',\n distribution='untruncated_normal')\n tensor = initializer([3, 5, 1024, 1])\n\n value = self.evaluate(tensor)\n self.assertEqual(value.shape, (3, 5, 1024, 1))\n self.assertNear(np.mean(value), 0.0, 0.01)\n self.assertNear(np.std(value), math.sqrt(2.0 / (3 * 5)), 0.01)\n\n def test_fan_in_depth_multiplier_is_2(self):\n initializer = depthwise_initializers.DepthwiseVarianceScaling(\n scale=1.0,\n mode='fan_in',\n distribution='untruncated_normal')\n tensor = initializer([3, 5, 1024, 2])\n\n value = self.evaluate(tensor)\n self.assertEqual(value.shape, (3, 5, 1024, 2))\n self.assertNear(np.mean(value), 0.0, 0.01)\n self.assertNear(np.std(value), 1.0 / math.sqrt(3 * 5), 0.01)\n\n def test_fan_out_depth_multiplier_is_2(self):\n initializer = depthwise_initializers.DepthwiseVarianceScaling(\n scale=1.0,\n mode='fan_out',\n distribution='untruncated_normal')\n tensor = initializer([3, 5, 1024, 2])\n\n value = self.evaluate(tensor)\n self.assertEqual(value.shape, (3, 5, 1024, 2))\n self.assertNear(np.mean(value), 0.0, 0.01)\n self.assertNear(np.std(value), 1.0 / math.sqrt(2 * 3 * 5), 0.01)\n\n def test_fan_avg_depth_multiplier_is_2(self):\n initializer = 
depthwise_initializers.DepthwiseVarianceScaling(\n scale=1.0,\n mode='fan_avg',\n distribution='untruncated_normal')\n tensor = initializer([3, 5, 1024, 2])\n\n value = self.evaluate(tensor)\n self.assertEqual(value.shape, (3, 5, 1024, 2))\n self.assertNear(np.mean(value), 0.0, 0.01)\n self.assertNear(np.std(value), 1.0 / math.sqrt(1.5 * 3 * 5), 0.01)\n\n def test_depthwise_variance_scaling_end_to_end(self):\n # This is an end-to-end test for the VarianceScaling() class.\n # We apply he initializer to a tensor, and verify that the\n # distribution of outputs matches what we expect.\n input_tensor = tf.random.normal(\n shape=(32, 20, 20, 1024),\n mean=0.0,\n stddev=1)\n\n kernel_initializer = depthwise_initializers.DepthwiseVarianceScaling(\n scale=1.0,\n mode='fan_in',\n distribution='truncated_normal')\n kernel = tf.get_variable(\n name='kernel',\n initializer=kernel_initializer,\n shape=[5, 5, 1024, 1])\n output_tensor = tf.nn.depthwise_conv2d(\n input_tensor,\n kernel,\n strides=(1, 1, 1, 1),\n padding='VALID')\n\n self.evaluate(tf.global_variables_initializer())\n result = self.evaluate(output_tensor)\n self.assertNear(np.mean(result), 0.0, 0.05)\n self.assertNear(np.std(result), 1.0, 0.05)\n\n def test_depthwise_he_normal_initializer_end_to_end(self):\n # This is an end-to-end test for the depthwise_he_normal() function.\n # We apply a depthwise_he_normal() to a tensor, and verify that the\n # distribution of outputs matches what we expect.\n input_tensor = tf.random.normal(\n shape=(32, 20, 20, 1024),\n mean=0.0,\n stddev=1)\n\n kernel_initializer = depthwise_initializers.depthwise_he_normal()\n kernel = tf.get_variable(\n name='kernel',\n initializer=kernel_initializer,\n shape=[5, 5, 1024, 1])\n output_tensor = tf.nn.depthwise_conv2d(\n tf.nn.relu(input_tensor),\n kernel,\n strides=(1, 1, 1, 1),\n padding='VALID')\n\n self.evaluate(tf.global_variables_initializer())\n result = self.evaluate(output_tensor)\n self.assertNear(np.mean(result), 0.0, 0.05)\n self.assertNear(np.std(result), 1.0, 0.05)\n\n def test_variance_scaling_initializer_dtypes(self):\n initializer0 = depthwise_initializers.DepthwiseVarianceScaling()\n tensor0 = initializer0([3, 3, 128, 1])\n self.assertEqual(tensor0.dtype, tf.float32)\n\n initializer1 = depthwise_initializers.DepthwiseVarianceScaling()\n tensor1 = initializer1([3, 3, 128, 1], dtype=tf.float64)\n self.assertEqual(tensor1.dtype, tf.float64)\n\n initializer2 = depthwise_initializers.DepthwiseVarianceScaling(\n dtype=tf.float64)\n tensor2 = initializer2([3, 3, 128, 1])\n self.assertEqual(tensor2.dtype, tf.float64)\n\n def test_variance_scaling_seed(self):\n initializer = depthwise_initializers.DepthwiseVarianceScaling(seed=42)\n tensor1 = initializer([3, 3, 128, 1])\n tensor2 = initializer([3, 3, 128, 1])\n self.assertAllClose(self.evaluate(tensor1), self.evaluate(tensor2))\n\n\nif __name__ == '__main__':\n tf.disable_v2_behavior()\n tf.test.main()\n",
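The tests above check that depthwise variance scaling uses fan_in = kernel_h * kernel_w without the channel count: each depthwise output is a weighted sum of k*k values from a single input channel, so channels never mix. A NumPy sketch (illustrative; the convolution is flattened into a dot product) of why std = 1/sqrt(k*k) then keeps unit-variance activations near unit variance:

import numpy as np

rng = np.random.RandomState(0)
k = 5
# Each row holds the k*k receptive-field values feeding one depthwise output.
x = rng.randn(100000, k * k)
w = rng.randn(k * k) / np.sqrt(k * k)  # DepthwiseVarianceScaling-style std
y = x @ w                              # one output value per row
print(np.std(y))  # ~1.0 (equals ||w||_2 in the large-sample limit)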
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Custom formatting functions for Electricity dataset.\n\nDefines dataset specific column definitions and data transformations. Uses\nentity specific z-score normalization.\n\"\"\"\n\nimport data_formatters.base\nimport libs.utils as utils\nimport pandas as pd\nimport sklearn.preprocessing\n\nGenericDataFormatter = data_formatters.base.GenericDataFormatter\nDataTypes = data_formatters.base.DataTypes\nInputTypes = data_formatters.base.InputTypes\n\n\nclass ElectricityFormatter(GenericDataFormatter):\n \"\"\"Defines and formats data for the electricity dataset.\n\n Note that per-entity z-score normalization is used here, and is implemented\n across functions.\n\n Attributes:\n column_definition: Defines input and data type of column used in the\n experiment.\n identifiers: Entity identifiers used in experiments.\n \"\"\"\n\n _column_definition = [\n ('id', DataTypes.REAL_VALUED, InputTypes.ID),\n ('hours_from_start', DataTypes.REAL_VALUED, InputTypes.TIME),\n ('power_usage', DataTypes.REAL_VALUED, InputTypes.TARGET),\n ('hour', DataTypes.REAL_VALUED, InputTypes.KNOWN_INPUT),\n ('day_of_week', DataTypes.REAL_VALUED, InputTypes.KNOWN_INPUT),\n ('hours_from_start', DataTypes.REAL_VALUED, InputTypes.KNOWN_INPUT),\n ('categorical_id', DataTypes.CATEGORICAL, InputTypes.STATIC_INPUT),\n ]\n\n def __init__(self):\n \"\"\"Initialises formatter.\"\"\"\n\n self.identifiers = None\n self._real_scalers = None\n self._cat_scalers = None\n self._target_scaler = None\n self._num_classes_per_cat_input = None\n self._time_steps = self.get_fixed_params()['total_time_steps']\n\n def split_data(self, df, valid_boundary=1315, test_boundary=1339):\n \"\"\"Splits data frame into training-validation-test data frames.\n\n This also calibrates scaling object, and transforms data for each split.\n\n Args:\n df: Source data frame to split.\n valid_boundary: Starting year for validation data\n test_boundary: Starting year for test data\n\n Returns:\n Tuple of transformed (train, valid, test) data.\n \"\"\"\n\n print('Formatting train-valid-test splits.')\n\n index = df['days_from_start']\n train = df.loc[index < valid_boundary]\n valid = df.loc[(index >= valid_boundary - 7) & (index < test_boundary)]\n test = df.loc[index >= test_boundary - 7]\n\n self.set_scalers(train)\n\n return (self.transform_inputs(data) for data in [train, valid, test])\n\n def set_scalers(self, df):\n \"\"\"Calibrates scalers using the data supplied.\n\n Args:\n df: Data to use to calibrate scalers.\n \"\"\"\n print('Setting scalers with training data...')\n\n column_definitions = self.get_column_definition()\n id_column = utils.get_single_col_by_input_type(InputTypes.ID,\n column_definitions)\n target_column = utils.get_single_col_by_input_type(InputTypes.TARGET,\n column_definitions)\n\n # Format real scalers\n real_inputs = utils.extract_cols_from_data_type(\n DataTypes.REAL_VALUED, column_definitions,\n 
{InputTypes.ID, InputTypes.TIME})\n\n # Initialise scaler caches\n self._real_scalers = {}\n self._target_scaler = {}\n identifiers = []\n for identifier, sliced in df.groupby(id_column):\n\n if len(sliced) >= self._time_steps:\n\n data = sliced[real_inputs].values\n targets = sliced[[target_column]].values\n self._real_scalers[identifier] \\\n = sklearn.preprocessing.StandardScaler().fit(data)\n\n self._target_scaler[identifier] \\\n = sklearn.preprocessing.StandardScaler().fit(targets)\n identifiers.append(identifier)\n\n # Format categorical scalers\n categorical_inputs = utils.extract_cols_from_data_type(\n DataTypes.CATEGORICAL, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n categorical_scalers = {}\n num_classes = []\n for col in categorical_inputs:\n # Set all to str so that we don't have mixed integer/string columns\n srs = df[col].apply(str)\n categorical_scalers[col] = sklearn.preprocessing.LabelEncoder().fit(\n srs.values)\n num_classes.append(srs.nunique())\n\n # Set categorical scaler outputs\n self._cat_scalers = categorical_scalers\n self._num_classes_per_cat_input = num_classes\n\n # Extract identifiers in case required\n self.identifiers = identifiers\n\n def transform_inputs(self, df):\n \"\"\"Performs feature transformations.\n\n This includes both feature engineering, preprocessing and normalisation.\n\n Args:\n df: Data frame to transform.\n\n Returns:\n Transformed data frame.\n\n \"\"\"\n\n if self._real_scalers is None and self._cat_scalers is None:\n raise ValueError('Scalers have not been set!')\n\n # Extract relevant columns\n column_definitions = self.get_column_definition()\n id_col = utils.get_single_col_by_input_type(InputTypes.ID,\n column_definitions)\n real_inputs = utils.extract_cols_from_data_type(\n DataTypes.REAL_VALUED, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n categorical_inputs = utils.extract_cols_from_data_type(\n DataTypes.CATEGORICAL, column_definitions,\n {InputTypes.ID, InputTypes.TIME})\n\n # Transform real inputs per entity\n df_list = []\n for identifier, sliced in df.groupby(id_col):\n\n # Filter out any trajectories that are too short\n if len(sliced) >= self._time_steps:\n sliced_copy = sliced.copy()\n sliced_copy[real_inputs] = self._real_scalers[identifier].transform(\n sliced_copy[real_inputs].values)\n df_list.append(sliced_copy)\n\n output = pd.concat(df_list, axis=0)\n\n # Format categorical inputs\n for col in categorical_inputs:\n string_df = df[col].apply(str)\n output[col] = self._cat_scalers[col].transform(string_df)\n\n return output\n\n def format_predictions(self, predictions):\n \"\"\"Reverts any normalisation to give predictions in original scale.\n\n Args:\n predictions: Dataframe of model predictions.\n\n Returns:\n Data frame of unnormalised predictions.\n \"\"\"\n\n if self._target_scaler is None:\n raise ValueError('Scalers have not been set!')\n\n column_names = predictions.columns\n\n df_list = []\n for identifier, sliced in predictions.groupby('identifier'):\n sliced_copy = sliced.copy()\n target_scaler = self._target_scaler[identifier]\n\n for col in column_names:\n if col not in {'forecast_time', 'identifier'}:\n sliced_copy[col] = target_scaler.inverse_transform(sliced_copy[col])\n df_list.append(sliced_copy)\n\n output = pd.concat(df_list, axis=0)\n\n return output\n\n # Default params\n def get_fixed_params(self):\n \"\"\"Returns fixed model parameters for experiments.\"\"\"\n\n fixed_params = {\n 'total_time_steps': 8 * 24,\n 'num_encoder_steps': 7 * 24,\n 'num_epochs': 100,\n 
'early_stopping_patience': 5,\n 'multiprocessing_workers': 5\n }\n\n return fixed_params\n\n def get_default_model_params(self):\n \"\"\"Returns default optimised model parameters.\"\"\"\n\n model_params = {\n 'dropout_rate': 0.1,\n 'hidden_layer_size': 160,\n 'learning_rate': 0.001,\n 'minibatch_size': 64,\n 'max_gradient_norm': 0.01,\n 'num_heads': 4,\n 'stack_size': 1\n }\n\n return model_params\n\n def get_num_samples_for_calibration(self):\n \"\"\"Gets the default number of training and validation samples.\n\n Used to sub-sample the data for network calibration; a value of -1 uses\n all available samples.\n\n Returns:\n Tuple of (training samples, validation samples)\n \"\"\"\n return 450000, 50000\n",
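ElectricityFormatter fits one StandardScaler per entity on the training split and then applies that entity's scaler everywhere. A minimal pandas sketch of the same per-entity z-score idea (groupby statistics instead of sklearn scaler objects; the data is made up):

import pandas as pd

df = pd.DataFrame({
    'id': ['a'] * 4 + ['b'] * 4,
    'power_usage': [1.0, 2.0, 3.0, 4.0, 100.0, 110.0, 120.0, 130.0],
})
# "Fit" per-entity scalers on the training split (here: the whole frame).
stats = df.groupby('id')['power_usage'].agg(['mean', 'std'])
# "Transform": each entity is normalized by its own mean/std.
z = (df['power_usage'] - df['id'].map(stats['mean'])) / df['id'].map(stats['std'])
print(z.round(2).tolist())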
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Intersectional fairness with many constraint.\"\"\"\n\nimport random\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport pandas as pd\nfrom sklearn import model_selection\nimport tensorflow.compat.v1 as tf\nimport tensorflow_constrained_optimization as tfco\n\n\nflags.DEFINE_boolean(\"constrained\", True, \"Perform constrained optimization?\")\nflags.DEFINE_float(\"dual_scale\", 0.01, \"Dual scale for gamma-updates.\")\nflags.DEFINE_float(\"epsilon\", 0.01, \"Slack.\")\nflags.DEFINE_integer(\"loops\", 100000, \"No. of loops.\")\nflags.DEFINE_integer(\"num_layers\", 2,\n \"No. of hidden layers for multiplier model.\")\nflags.DEFINE_integer(\"num_nodes\", 100,\n \"No. of hidden nodes for multiplier model.\")\n\nFLAGS = flags.FLAGS\n\n\ndef load_data():\n \"\"\"Loads and returns data.\"\"\"\n # List of column names in the dataset.\n column_names = [\"state\", \"county\", \"community\", \"communityname\", \"fold\",\n \"population\", \"householdsize\", \"racepctblack\", \"racePctWhite\",\n \"racePctAsian\", \"racePctHisp\", \"agePct12t21\", \"agePct12t29\",\n \"agePct16t24\", \"agePct65up\", \"numbUrban\", \"pctUrban\",\n \"medIncome\", \"pctWWage\", \"pctWFarmSelf\", \"pctWInvInc\",\n \"pctWSocSec\", \"pctWPubAsst\", \"pctWRetire\", \"medFamInc\",\n \"perCapInc\", \"whitePerCap\", \"blackPerCap\", \"indianPerCap\",\n \"AsianPerCap\", \"OtherPerCap\", \"HispPerCap\", \"NumUnderPov\",\n \"PctPopUnderPov\", \"PctLess9thGrade\", \"PctNotHSGrad\",\n \"PctBSorMore\", \"PctUnemployed\", \"PctEmploy\", \"PctEmplManu\",\n \"PctEmplProfServ\", \"PctOccupManu\", \"PctOccupMgmtProf\",\n \"MalePctDivorce\", \"MalePctNevMarr\", \"FemalePctDiv\",\n \"TotalPctDiv\", \"PersPerFam\", \"PctFam2Par\", \"PctKids2Par\",\n \"PctYoungKids2Par\", \"PctTeen2Par\", \"PctWorkMomYoungKids\",\n \"PctWorkMom\", \"NumIlleg\", \"PctIlleg\", \"NumImmig\",\n \"PctImmigRecent\", \"PctImmigRec5\", \"PctImmigRec8\",\n \"PctImmigRec10\", \"PctRecentImmig\", \"PctRecImmig5\",\n \"PctRecImmig8\", \"PctRecImmig10\", \"PctSpeakEnglOnly\",\n \"PctNotSpeakEnglWell\", \"PctLargHouseFam\", \"PctLargHouseOccup\",\n \"PersPerOccupHous\", \"PersPerOwnOccHous\", \"PersPerRentOccHous\",\n \"PctPersOwnOccup\", \"PctPersDenseHous\", \"PctHousLess3BR\",\n \"MedNumBR\", \"HousVacant\", \"PctHousOccup\", \"PctHousOwnOcc\",\n \"PctVacantBoarded\", \"PctVacMore6Mos\", \"MedYrHousBuilt\",\n \"PctHousNoPhone\", \"PctWOFullPlumb\", \"OwnOccLowQuart\",\n \"OwnOccMedVal\", \"OwnOccHiQuart\", \"RentLowQ\", \"RentMedian\",\n \"RentHighQ\", \"MedRent\", \"MedRentPctHousInc\",\n \"MedOwnCostPctInc\", \"MedOwnCostPctIncNoMtg\", \"NumInShelters\",\n \"NumStreet\", \"PctForeignBorn\", \"PctBornSameState\",\n \"PctSameHouse85\", \"PctSameCity85\", \"PctSameState85\",\n \"LemasSwornFT\", \"LemasSwFTPerPop\", \"LemasSwFTFieldOps\",\n \"LemasSwFTFieldPerPop\", \"LemasTotalReq\", \"LemasTotReqPerPop\",\n 
\"PolicReqPerOffic\", \"PolicPerPop\", \"RacialMatchCommPol\",\n \"PctPolicWhite\", \"PctPolicBlack\", \"PctPolicHisp\",\n \"PctPolicAsian\", \"PctPolicMinor\", \"OfficAssgnDrugUnits\",\n \"NumKindsDrugsSeiz\", \"PolicAveOTWorked\", \"LandArea\",\n \"PopDens\", \"PctUsePubTrans\", \"PolicCars\", \"PolicOperBudg\",\n \"LemasPctPolicOnPatr\", \"LemasGangUnitDeploy\",\n \"LemasPctOfficDrugUn\", \"PolicBudgPerPop\",\n \"ViolentCrimesPerPop\"]\n\n dataset_url = \"http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data\"\n\n # Read dataset from the UCI web repository and assign column names.\n data_df = pd.read_csv(dataset_url, sep=\",\", names=column_names,\n na_values=\"?\")\n\n # Make sure there are no missing values in the \"ViolentCrimesPerPop\" column.\n assert not data_df[\"ViolentCrimesPerPop\"].isna().any()\n\n # Binarize the \"ViolentCrimesPerPop\" column and obtain labels.\n crime_rate_70_percentile = data_df[\"ViolentCrimesPerPop\"].quantile(q=0.7)\n labels_df = (data_df[\"ViolentCrimesPerPop\"] >= crime_rate_70_percentile)\n\n # Now that we have assigned binary labels,\n # we drop the \"ViolentCrimesPerPop\" column from the data frame.\n data_df.drop(columns=\"ViolentCrimesPerPop\", inplace=True)\n\n # Group features.\n groups_df = pd.concat(\n [data_df[\"racepctblack\"], data_df[\"racePctAsian\"],\n data_df[\"racePctHisp\"]], axis=1)\n\n # Drop categorical features.\n data_df.drop(\n columns=[\"state\", \"county\", \"community\", \"communityname\", \"fold\"],\n inplace=True)\n\n # Handle missing features.\n feature_names = data_df.columns\n for feature_name in feature_names:\n missing_rows = data_df[feature_name].isna()\n if missing_rows.any():\n data_df[feature_name].fillna(0.0, inplace=True) # Fill NaN with 0.\n missing_rows.rename(feature_name + \"_is_missing\", inplace=True)\n # Append boolean \"is_missing\" feature.\n data_df = data_df.join(missing_rows)\n\n labels = labels_df.values.astype(np.float32)\n groups = groups_df.values.astype(np.float32)\n features = data_df.values.astype(np.float32)\n\n # Set random seed so that the results are reproducible.\n np.random.seed(121212)\n\n # Train, vali and test indices.\n train_indices, test_indices = model_selection.train_test_split(\n range(features.shape[0]), test_size=0.25)\n train_indices, vali_indices = model_selection.train_test_split(\n train_indices, test_size=1./3.)\n\n # Train features, labels and protected groups.\n x_train = features[train_indices, :]\n y_train = labels[train_indices]\n z_train = groups[train_indices]\n\n # Vali features, labels and protected groups.\n x_vali = features[vali_indices, :]\n y_vali = labels[vali_indices]\n z_vali = groups[vali_indices]\n\n # Test features, labels and protected groups.\n x_test = features[test_indices, :]\n y_test = labels[test_indices]\n z_test = groups[test_indices]\n\n return (x_train, y_train, z_train, x_vali, y_vali, z_vali, x_test, y_test,\n z_test)\n\n\ndef error_rate(labels, predictions, groups=None):\n # Returns the error rate for given labels and predictions.\n if groups is not None:\n if np.sum(groups) == 0.0:\n return 0.0\n predictions = predictions[groups]\n labels = labels[groups]\n signed_labels = labels - 0.5\n return np.mean(signed_labels * predictions <= 0.0)\n\n\ndef group_membership_thresholds(\n group_feature_train, group_feature_vali, group_feature_test, thresholds):\n \"\"\"Returns the group membership vectors on train, test and vali sets.\"\"\"\n group_memberships_list_train_ = []\n group_memberships_list_vali_ = []\n 
group_memberships_list_test_ = []\n group_thresholds_list = []\n\n for t1 in thresholds[0]:\n for t2 in thresholds[1]:\n for t3 in thresholds[2]:\n group_membership_train = (group_feature_train[:, 0] > t1) & (\n group_feature_train[:, 1] > t2) & (group_feature_train[:, 2] > t3)\n group_membership_vali = (group_feature_vali[:, 0] > t1) & (\n group_feature_vali[:, 1] > t2) & (group_feature_vali[:, 2] > t3)\n group_membership_test = (group_feature_test[:, 0] > t1) & (\n group_feature_test[:, 1] > t2) & (group_feature_test[:, 2] > t3)\n if (np.mean(group_membership_train) <= 0.01) or (\n np.mean(group_membership_vali) <= 0.01) or (\n np.mean(group_membership_test) <= 0.01):\n # Only consider groups that are at least 1% in size.\n continue\n group_memberships_list_train_.append(group_membership_train)\n group_memberships_list_vali_.append(group_membership_vali)\n group_memberships_list_test_.append(group_membership_test)\n group_thresholds_list.append([t1, t2, t3])\n\n group_memberships_list_train_ = np.array(group_memberships_list_train_)\n group_memberships_list_vali_ = np.array(group_memberships_list_vali_)\n group_memberships_list_test_ = np.array(group_memberships_list_test_)\n group_thresholds_list = np.array(group_thresholds_list)\n\n return (group_memberships_list_train_, group_memberships_list_vali_,\n group_memberships_list_test_, group_thresholds_list)\n\n\ndef violation(\n labels, predictions, epsilon, group_memberships_list):\n # Returns violations across different group feature thresholds.\n viol_list = []\n overall_error = error_rate(labels, predictions)\n for kk in range(group_memberships_list.shape[0]):\n group_err = error_rate(\n labels, predictions, group_memberships_list[kk, :].reshape(-1,))\n viol_list += [group_err - overall_error - epsilon]\n return np.max(viol_list), viol_list\n\n\ndef evaluate(\n features, labels, model, epsilon, group_membership_list):\n # Evaluates and prints stats.\n predictions = model(features).numpy().reshape(-1,)\n print(\"Error %.3f\" % error_rate(labels, predictions))\n _, viol_list = violation(labels, predictions, epsilon, group_membership_list)\n print(\"99p Violation %.3f\" % np.quantile(viol_list, 0.99))\n print()\n\n\ndef create_model(dimension):\n # Creates linear Keras model with no hidden layers.\n layers = []\n layers.append(tf.keras.Input(shape=(dimension,)))\n layers.append(tf.keras.layers.Dense(1))\n model = tf.keras.Sequential(layers)\n return model\n\n\ndef create_multiplier_model(\n feature_dependent_multiplier=True, dim=1, hidden_layers=None):\n \"\"\"Creates Lagrange multipler model with specified hidden layers.\"\"\"\n if feature_dependent_multiplier:\n layers = []\n layers.append(tf.keras.Input(shape=dim))\n for num_nodes in hidden_layers:\n layers.append(tf.keras.layers.Dense(num_nodes, activation=\"relu\"))\n layers.append(tf.keras.layers.Dense(1, bias_initializer=\"ones\"))\n\n # Keras model.\n multiplier_model = tf.keras.Sequential(layers)\n multiplier_weights = multiplier_model.trainable_weights\n else:\n common_multiplier = tf.Variable(1.0, name=\"common_multiplier\")\n # Ignore feature input, and return common multiplier.\n multiplier_model = lambda x: common_multiplier\n multiplier_weights = [common_multiplier]\n return multiplier_model, multiplier_weights\n\n\ndef train_unconstrained(\n dataset, group_info, epsilon=0.01, loops=10000, skip_steps=400):\n \"\"\"Train unconstrained classifier.\n\n Args:\n dataset: train, vali and test sets\n group_info: group memberships on train, vali and test sets and thresholds\n 
epsilon: constraint slack\n loops: number of gradient steps\n skip_steps: steps to skip before snapshotting metrics\n \"\"\"\n tf.set_random_seed(121212)\n np.random.seed(212121)\n random.seed(333333)\n\n x_train, y_train, _, x_vali, y_vali, _, x_test, y_test, _ = dataset\n\n (group_memberships_list_train, group_memberships_list_vali,\n group_memberships_list_test, _) = group_info\n\n model = create_model(x_train.shape[-1])\n features_tensor = tf.constant(x_train)\n labels_tensor = tf.constant(y_train)\n\n predictions = lambda: model(features_tensor)\n predictions_vali = lambda: model(x_vali)\n predictions_test = lambda: model(x_test)\n\n context = tfco.rate_context(predictions, labels=lambda: labels_tensor)\n overall_error = tfco.error_rate(context, penalty_loss=tfco.HingeLoss())\n problem = tfco.RateMinimizationProblem(overall_error)\n\n loss_fn, update_ops_fn, _ = tfco.create_lagrangian_loss(problem)\n optimizer = tf.keras.optimizers.Adagrad(0.1)\n\n objectives_list = []\n objectives_list_test = []\n objectives_list_vali = []\n violations_list = []\n violations_list_test = []\n violations_list_vali = []\n model_weights = []\n\n for ii in range(loops):\n update_ops_fn()\n optimizer.minimize(loss_fn, var_list=model.trainable_weights)\n\n # Snapshot iterate once in 1000 loops.\n if ii % skip_steps == 0:\n pred = np.reshape(predictions(), (-1,))\n err = error_rate(y_train, pred)\n max_viol, viol_list = violation(\n y_train, pred, epsilon, group_memberships_list_train)\n\n pred_test = np.reshape(predictions_test(), (-1,))\n err_test = error_rate(y_test, pred_test)\n _, viol_list_test = violation(\n y_test, pred_test, epsilon, group_memberships_list_test)\n\n pred_vali = np.reshape(predictions_vali(), (-1,))\n err_vali = error_rate(y_vali, pred_vali)\n max_viol_vali, viol_list_vali = violation(\n y_vali, pred_vali, epsilon, group_memberships_list_vali)\n\n objectives_list.append(err)\n objectives_list_test.append(err_test)\n objectives_list_vali.append(err_vali)\n violations_list.append(viol_list)\n violations_list_test.append(viol_list_test)\n violations_list_vali.append(viol_list_vali)\n model_weights.append(model.get_weights())\n\n if ii % 1000 == 0:\n print(\"Epoch %d | Error = %.3f | Viol = %.3f | Viol_vali = %.3f\" %\n (ii, err, max_viol, max_viol_vali), flush=True)\n\n # Best candidate index.\n best_ind = np.argmin(objectives_list)\n model.set_weights(model_weights[best_ind])\n\n print(\"Train:\")\n evaluate(x_train, y_train, model, epsilon, group_memberships_list_train)\n print(\"\\nVali:\")\n evaluate(x_vali, y_vali, model, epsilon, group_memberships_list_vali)\n print(\"\\nTest:\")\n evaluate(x_test, y_test, model, epsilon, group_memberships_list_test)\n\n\ndef train_constrained(\n dataset, group_info, epsilon=0.01, learning_rate=0.1, dual_scale=5.0,\n loops=10000, feature_dependent_multiplier=True, hidden_layers=None,\n skip_steps=400):\n \"\"\"Train constrained classifier wth Lagrangian model.\n\n Args:\n dataset: train, vali and test sets\n group_info: group memberships on train, vali and test sets and thresholds\n epsilon: constraint slack\n learning_rate: learning rate for theta\n dual_scale: learning rate for gamma = dual_scale * learning_rate\n loops: number of gradient steps\n feature_dependent_multiplier: should the multiplier model be feature\n dependent. 
If False, a common multipler is used for all constraints\n hidden_layers: list of hidden layer nodes to be used for multiplier model\n skip_steps: steps to skip before snapshotting metrics\n \"\"\"\n tf.set_random_seed(121212)\n np.random.seed(212121)\n random.seed(333333)\n\n x_train, y_train, z_train, x_vali, y_vali, _, x_test, y_test, _ = dataset\n\n (group_memberships_list_train,\n group_memberships_list_vali,\n group_memberships_list_test,\n group_memberships_thresholds_train) = group_info\n\n # Models and group thresholds tensor.\n model = create_model(x_train.shape[-1])\n multiplier_model, multiplier_weights = create_multiplier_model(\n feature_dependent_multiplier=feature_dependent_multiplier,\n dim=3,\n hidden_layers=hidden_layers)\n group_thresholds = tf.Variable(np.ones(3) * 0.1, dtype=tf.float32)\n\n # Features, labels, predictions, multipliers.\n features_tensor = tf.constant(x_train)\n labels_tensor = tf.constant(y_train)\n features_tensor_vali = tf.constant(x_vali)\n\n predictions = lambda: model(features_tensor)\n predictions_vali = lambda: model(features_tensor_vali)\n predictions_test = lambda: model(x_test)\n def multiplier_values():\n return tf.abs(multiplier_model(tf.reshape(group_thresholds, shape=(1, -1))))\n\n # Lagrangian loss function.\n def lagrangian_loss():\n # Separate out objective, constraints and proxy constraints.\n objective = problem.objective()\n constraints = problem.constraints()\n proxy_constraints = problem.proxy_constraints()\n\n # Set-up custom Lagrangian loss.\n primal = objective\n multipliers = multiplier_values()\n primal += tf.stop_gradient(multipliers) * proxy_constraints\n dual = dual_scale * multipliers * tf.stop_gradient(constraints)\n return primal - dual\n\n # Objective.\n context = tfco.rate_context(\n predictions,\n labels=lambda: labels_tensor)\n overall_error = tfco.error_rate(context)\n\n # Slice and subset group predictions and labels.\n def group_membership():\n return (z_train[:, 0] > group_thresholds[0]) & (\n z_train[:, 1] > group_thresholds[1]) & (\n z_train[:, 2] > group_thresholds[2])\n\n def group_predictions():\n pred = predictions()\n groups = tf.reshape(group_membership(), (-1, 1))\n return pred[groups]\n\n def group_labels():\n groups = tf.reshape(group_membership(), (-1,))\n return labels_tensor[groups]\n\n # Constraint.\n group_context = tfco.rate_context(\n group_predictions,\n labels=group_labels)\n group_error = tfco.error_rate(group_context)\n constraints = [group_error <= overall_error + epsilon]\n\n # Set up constrained optimization problem and optimizer.\n problem = tfco.RateMinimizationProblem(overall_error, constraints)\n optimizer = tf.keras.optimizers.Adagrad(learning_rate)\n var_list = model.trainable_weights + multiplier_weights\n\n objectives_list = []\n objectives_list_test = []\n objectives_list_vali = []\n violations_list = []\n violations_list_test = []\n violations_list_vali = []\n model_weights = []\n\n # Training\n for ii in range(loops):\n # Sample a group membership at random.\n random_index = np.random.randint(\n group_memberships_thresholds_train.shape[0])\n group_thresholds.assign(group_memberships_thresholds_train[random_index, :])\n\n # Gradient op.\n problem.update_ops()\n optimizer.minimize(lagrangian_loss, var_list=var_list)\n\n # Snapshot iterate once in 1000 loops.\n if ii % skip_steps == 0:\n pred = np.reshape(predictions(), (-1,))\n err = error_rate(y_train, pred)\n max_viol, viol_list = violation(\n y_train, pred, epsilon, group_memberships_list_train)\n\n pred_test = 
np.reshape(predictions_test(), (-1,))\n err_test = error_rate(y_test, pred_test)\n _, viol_list_test = violation(\n y_test, pred_test, epsilon, group_memberships_list_test)\n\n pred_vali = np.reshape(predictions_vali(), (-1,))\n err_vali = error_rate(y_vali, pred_vali)\n max_viol_vali, viol_list_vali = violation(\n y_vali, pred_vali, epsilon, group_memberships_list_vali)\n\n objectives_list.append(err)\n objectives_list_test.append(err_test)\n objectives_list_vali.append(err_vali)\n violations_list.append(viol_list)\n violations_list_test.append(viol_list_test)\n violations_list_vali.append(viol_list_vali)\n model_weights.append(model.get_weights())\n\n if ii % 1000 == 0:\n print(\"Epoch %d | Error = %.3f | Viol = %.3f | Viol_vali = %.3f\" %\n (ii, err, max_viol, max_viol_vali), flush=True)\n\n # Best candidate index.\n best_ind = tfco.find_best_candidate_index(\n np.array(objectives_list), np.array(violations_list),\n rank_objectives=False)\n model.set_weights(model_weights[best_ind])\n\n print(\"Train:\")\n evaluate(x_train, y_train, model, epsilon, group_memberships_list_train)\n print(\"\\nVali:\")\n evaluate(x_vali, y_vali, model, epsilon, group_memberships_list_vali)\n print(\"\\nTest:\")\n evaluate(x_test, y_test, model, epsilon, group_memberships_list_test)\n\n\ndef main(argv):\n del argv\n\n tf.compat.v1.enable_eager_execution()\n\n # Load data.\n dataset = load_data()\n _, _, z_train, _, _, z_vali, _, _, z_test = dataset\n\n # Group Thresholds for 3 Groups\n group_threshold_range = []\n for jj in range(3):\n group_threshold_range.append([np.quantile(\n z_train[:, jj], kk) for kk in np.arange(0.05, 1.0, 0.1)])\n\n # Group memberships based on group thresholds.\n group_info = group_membership_thresholds(\n z_train, z_vali, z_test, group_threshold_range)\n\n if FLAGS.constrained:\n if FLAGS.num_layers < 0:\n train_constrained(\n dataset,\n group_info,\n feature_dependent_multiplier=False,\n epsilon=FLAGS.epsilon,\n dual_scale=FLAGS.dual_scale,\n loops=FLAGS.loops)\n else:\n train_constrained(\n dataset,\n group_info,\n feature_dependent_multiplier=True,\n hidden_layers=[FLAGS.num_nodes] * FLAGS.num_layers,\n epsilon=FLAGS.epsilon,\n dual_scale=FLAGS.dual_scale,\n loops=FLAGS.loops)\n else:\n train_unconstrained(\n dataset, group_info, epsilon=FLAGS.epsilon, loops=FLAGS.loops)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
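train_constrained above builds a custom Lagrangian loss: the primal variables descend on objective + stop_gradient(multiplier) * proxy_constraint while the multiplier ascends on dual_scale * stop_gradient(constraint). A toy NumPy sketch of that descent-ascent pattern on "minimize theta^2 subject to theta >= 1" (illustrative only; the KKT solution is theta = 1, lambda = 2):

theta, lam = 0.0, 0.0
lr, dual_scale = 0.05, 1.0
for _ in range(2000):
    constraint = 1.0 - theta                  # want constraint <= 0
    # Primal step: gradient of theta^2 + lam * (1 - theta) w.r.t. theta,
    # treating lam as a constant (the stop_gradient above).
    theta -= lr * (2.0 * theta - lam)
    # Dual step: projected ascent on lam, treating constraint as a constant.
    lam = max(0.0, lam + dual_scale * lr * constraint)
print(round(theta, 3), round(lam, 3))         # -> ~1.0, ~2.0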
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simple invertible tokenizer.\n\nThis code forked from github.com/tensorflow/tensor2tensor.\n\nConverts from a unicode string to a list of tokens\n(represented as Unicode strings).\n\nThis tokenizer has the following desirable properties:\n - It is invertible.\n - Alphanumeric characters are broken away from non-alphanumeric characters.\n - A single space between words does not produce an extra token.\n - The full Unicode punctuation and separator set is recognized.\n\nThe tokenization algorithm is as follows:\n\n1. Split the text into a list of tokens, splitting at every boundary of an\n alphanumeric character and a non-alphanumeric character. This produces\n a list which alternates between \"alphanumeric tokens\"\n (strings of alphanumeric characters) and \"non-alphanumeric tokens\"\n (strings of non-alphanumeric characters).\n\n2. Remove every token consisting of a single space, unless it is\n the very first or very last token in the list. These tokens are now\n implied by the fact that there are two adjacent alphanumeric tokens.\n\ne.g. u\"Dude - that's so cool.\"\n -> [u\"Dude\", u\" - \", u\"that\", u\"'\", u\"s\", u\"so\", u\"cool\", u\".\"]\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport sys\nimport unicodedata\nimport six\nfrom six.moves import range # pylint: disable=redefined-builtin\nimport tensorflow.compat.v1 as tf\n\n# Conversion between Unicode and UTF-8, if required (on Python2)\n_native_to_unicode = (lambda s: s.decode(\"utf-8\")) if six.PY2 else (lambda s: s)\n\n\n# This set contains all letter and number characters.\n_ALPHANUMERIC_CHAR_SET = set(\n six.unichr(i) for i in range(sys.maxunicode)\n if (unicodedata.category(six.unichr(i)).startswith(\"L\") or\n unicodedata.category(six.unichr(i)).startswith(\"N\")))\n\n\ndef encode(text):\n \"\"\"Encode a unicode string as a list of tokens.\n\n Args:\n text: a unicode string\n Returns:\n a list of tokens as Unicode strings\n \"\"\"\n if not text:\n return []\n ret = []\n token_start = 0\n # Classify each character in the input string\n is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]\n for pos in range(1, len(text)):\n if is_alnum[pos] != is_alnum[pos - 1]:\n token = text[token_start:pos]\n if token != u\" \" or token_start == 0:\n ret.append(token)\n token_start = pos\n final_token = text[token_start:]\n ret.append(final_token)\n return ret\n\n\ndef decode(tokens):\n \"\"\"Decode a list of tokens to a unicode string.\n\n Args:\n tokens: a list of Unicode strings\n Returns:\n a unicode string\n \"\"\"\n token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]\n ret = []\n for i, token in enumerate(tokens):\n if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:\n ret.append(u\" \")\n ret.append(token)\n return \"\".join(ret)\n\n\ndef _read_filepattern(filepattern, max_lines=None, 
split_on_newlines=True):\n \"\"\"Reads files matching a wildcard pattern, yielding the contents.\n\n Args:\n filepattern: A wildcard pattern matching one or more files.\n max_lines: If set, stop reading after reading this many lines.\n split_on_newlines: A boolean. If true, then split files by lines and strip\n leading and trailing whitespace from each line. Otherwise, treat each\n file as a single string.\n\n Yields:\n The contents of the files as lines, if split_on_newlines is True, or\n the entire contents of each file if False.\n \"\"\"\n filenames = sorted(tf.gfile.Glob(filepattern))\n lines_read = 0\n for filename in filenames:\n with tf.gfile.Open(filename) as f:\n if split_on_newlines:\n for line in f:\n yield line.strip()\n lines_read += 1\n if max_lines and lines_read >= max_lines:\n return\n\n else:\n if max_lines:\n doc = []\n for line in f:\n doc.append(line)\n lines_read += 1\n if max_lines and lines_read >= max_lines:\n yield \"\".join(doc)\n return\n yield \"\".join(doc)\n\n else:\n yield f.read()\n\n\ndef corpus_token_counts(\n text_filepattern, corpus_max_lines, split_on_newlines=True):\n \"\"\"Read the corpus and compute a dictionary of token counts.\n\n Args:\n text_filepattern: A pattern matching one or more files.\n corpus_max_lines: An integer; maximum total lines to read.\n split_on_newlines: A boolean. If true, then split files by lines and strip\n leading and trailing whitespace from each line. Otherwise, treat each\n file as a single string.\n\n Returns:\n a dictionary mapping token to count.\n \"\"\"\n counts = collections.Counter()\n for doc in _read_filepattern(\n text_filepattern,\n max_lines=corpus_max_lines,\n split_on_newlines=split_on_newlines):\n counts.update(encode(_native_to_unicode(doc)))\n\n return counts\n\n\ndef vocab_token_counts(text_filepattern, max_lines):\n \"\"\"Read a vocab file and return a dictionary of token counts.\n\n Reads a two-column CSV file of tokens and their frequency in a dataset. The\n tokens are presumed to be generated by encode() or the equivalent.\n\n Args:\n text_filepattern: A pattern matching one or more files.\n max_lines: An integer; maximum total lines to read.\n\n Returns:\n a dictionary mapping token to count.\n \"\"\"\n ret = {}\n for i, line in enumerate(\n _read_filepattern(text_filepattern, max_lines=max_lines)):\n if \",\" not in line:\n tf.logging.warning(\"Malformed vocab line #%d '%s'\", i, line)\n continue\n\n token, count = line.rsplit(\",\", 1)\n ret[_native_to_unicode(token)] = int(count)\n\n return ret\n",
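A self-contained sketch of the invertibility property documented above, using str.isalnum() as a simplifying stand-in for the full Unicode L*/N* character set that the module builds with unicodedata:

def mini_encode(text):
    if not text:
        return []
    ret, start = [], 0
    for pos in range(1, len(text)):
        if text[pos].isalnum() != text[pos - 1].isalnum():
            token = text[start:pos]
            if token != u' ' or start == 0:  # drop single inter-word spaces
                ret.append(token)
            start = pos
    ret.append(text[start:])
    return ret

def mini_decode(tokens):
    out = []
    for i, tok in enumerate(tokens):
        # Re-insert the space implied by two adjacent alphanumeric tokens.
        if i > 0 and tokens[i - 1][0].isalnum() and tok[0].isalnum():
            out.append(u' ')
        out.append(tok)
    return u''.join(out)

s = u"Dude - that's so cool."
print(mini_encode(s))  # ['Dude', ' - ', 'that', "'", 's', 'so', 'cool', '.']
assert mini_decode(mini_encode(s)) == s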
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: skip-file\nimport os\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport random\nfrom sympy.combinatorics.graycode import GrayCode\nimport matplotlib.pyplot as plt\nfrom aloe.common.configs import cmd_args\nfrom aloe.common.synthetic.dataset import OnlineToyDataset\nfrom aloe.common.pytorch_util import logsumexp, hamming_mmd, MMD\n\n\ndef compress(x):\n bx = np.binary_repr(int(abs(x)), width=cmd_args.discrete_dim // 2 - 1)\n bx = '0' + bx if x >= 0 else '1' + bx\n return bx\n\n\ndef recover(bx):\n x = int(bx[1:], 2)\n return x if bx[0] == '0' else -x\n\n\ndef float2bin(samples, bm):\n bin_list = []\n for i in range(samples.shape[0]):\n x, y = samples[i] * cmd_args.int_scale\n bx, by = compress(x), compress(y)\n bx, by = bm[bx], bm[by]\n bin_list.append(np.array(list(bx + by), dtype=int))\n return np.array(bin_list)\n\n\ndef bin2float(samples, inv_bm):\n floats = []\n for i in range(samples.shape[0]):\n s = ''\n for j in range(samples.shape[1]):\n s += str(samples[i, j])\n x, y = s[:cmd_args.discrete_dim//2], s[cmd_args.discrete_dim//2:]\n x, y = inv_bm[x], inv_bm[y]\n x, y = recover(x), recover(y)\n x /= cmd_args.int_scale\n y /= cmd_args.int_scale\n floats.append((x, y))\n return np.array(floats)\n\n\ndef setup_data(args):\n bm, inv_bm = get_binmap(args.discrete_dim, args.binmode)\n db = OnlineToyDataset(args.data)\n if args.int_scale is None:\n args.int_scale = db.int_scale\n else:\n db.int_scale = args.int_scale\n if args.plot_size is None:\n args.plot_size = db.f_scale\n else:\n db.f_scale = args.plot_size\n return db, bm, inv_bm\n\n\ndef plot_heat(score_func, bm, out_file=None):\n w = 100\n size = cmd_args.plot_size\n x = np.linspace(-size, size, w)\n y = np.linspace(-size, size, w)\n xx, yy = np.meshgrid(x, y)\n xx = np.reshape(xx, [-1, 1])\n yy = np.reshape(yy, [-1, 1])\n heat_samples = float2bin(np.concatenate((xx, yy), axis=-1), bm)\n heat_samples = torch.from_numpy(heat_samples).to(cmd_args.device)\n heat_score = F.softmax(score_func(heat_samples).view(1, -1), dim=-1)\n a = heat_score.view(w, w).data.cpu().numpy()\n a = np.flip(a, axis=0)\n plt.imshow(a)\n plt.axis('equal')\n plt.axis('off')\n if out_file is None:\n out_file = os.path.join(cmd_args.save_dir, 'heat.pdf')\n plt.savefig(out_file, bbox_inches='tight')\n plt.close()\n\n\ndef get_binmap(discrete_dim, binmode):\n b = discrete_dim // 2 - 1\n all_bins = []\n for i in range(1 << b):\n bx = np.binary_repr(i, width=discrete_dim // 2 - 1)\n all_bins.append('0' + bx)\n all_bins.append('1' + bx)\n vals = all_bins[:]\n if binmode == 'rand':\n print('remapping binary repr with random permute')\n random.shuffle(vals)\n elif binmode == 'gray':\n print('remapping binary repr with gray code')\n a = GrayCode(b)\n vals = []\n for x in a.generate_gray():\n vals.append('0' + x)\n vals.append('1' + x)\n else:\n assert binmode == 'normal'\n bm = {}\n inv_bm = {}\n for i, key in 
enumerate(all_bins):\n bm[key] = vals[i]\n inv_bm[vals[i]] = key\n return bm, inv_bm\n\n\ndef learn_score(samples, score_fn, opt_score, sampler=None, neg_samples=None):\n opt_score.zero_grad()\n if sampler is not None:\n neg_samples = sampler(samples.shape[0])\n else:\n assert neg_samples is not None\n f_loss = -torch.mean(score_fn(samples)) + torch.mean(score_fn(neg_samples))\n f_loss.backward()\n if cmd_args.grad_clip > 0:\n torch.nn.utils.clip_grad_norm_(score_fn.parameters(), max_norm=cmd_args.grad_clip)\n opt_score.step()\n if cmd_args.weight_clip > 0:\n for p in score_fn.parameters():\n p.data.clamp_(-cmd_args.weight_clip, cmd_args.weight_clip)\n return f_loss.item()\n\n\ndef estimate_ll(score_func, samples, n_partition=None, rand_samples=None):\n with torch.no_grad():\n if rand_samples is None:\n rand_samples = torch.randint(2, (n_partition, samples.shape[1])).to(samples.device)\n n_partition = rand_samples.shape[0]\n f_z_list = []\n for i in range(0, n_partition, samples.shape[0]):\n f_z = score_func(rand_samples[i:i+samples.shape[0]]).view(-1, 1)\n f_z_list.append(f_z)\n f_z = torch.cat(f_z_list, dim=0)\n f_z = f_z - samples.shape[1] * np.log(0.5) - np.log(n_partition)\n\n log_part = logsumexp(f_z)\n f_sample = score_func(samples)\n ll = f_sample - log_part\n\n return torch.mean(ll).item()\n\n\ndef estimate_hamming(score_func, true_samples, rand_samples, gibbs_sampler):\n with torch.no_grad():\n gibbs_samples = gibbs_sampler(score_func, 20, init_samples=rand_samples)\n return hamming_mmd(true_samples, gibbs_samples)\n",
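For reference, the integer coding used by compress()/recover() above packs each coordinate into one sign bit plus discrete_dim // 2 - 1 magnitude bits. A standalone sketch of the round trip, with the width passed explicitly instead of read from cmd_args:

import numpy as np

def compress(x, width):
    # Sign bit ('0' for non-negative) followed by fixed-width magnitude bits.
    bx = np.binary_repr(int(abs(x)), width=width)
    return ('0' if x >= 0 else '1') + bx

def recover(bx):
    # Invert: parse the magnitude bits, then apply the sign bit.
    x = int(bx[1:], 2)
    return x if bx[0] == '0' else -x

width = 7  # e.g. discrete_dim = 16 gives 16 // 2 - 1 = 7 magnitude bits
for v in (-5, 0, 42):
    assert recover(compress(v, width)) == v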
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n# pylint: disable=line-too-long\nr\"\"\"Runs cache simulation.\n\nExample usage:\n\n python3 -m cache_replacement.policy_learning.cache.main \\\n --experiment_base_dir=/tmp \\\n --experiment_name=sample_belady_llc \\\n --cache_configs=cache_replacement/policy_learning/cache/configs/default.json \\\n --cache_configs=cache_replacement/policy_learning/cache/configs/eviction_policy/belady.json \\\n --memtrace_file=cache_replacement/policy_learning/cache/traces/sample_trace.csv\n\n Simulates a cache configured by the cache configs with Belady's as the\n replacement policy on the sample trace.\n\"\"\"\n# pylint: enable=line-too-long\n\nimport os\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport tensorflow.compat.v1 as tf\nimport tqdm\nfrom cache_replacement.policy_learning.cache import cache as cache_mod\nfrom cache_replacement.policy_learning.cache import evict_trace as evict\nfrom cache_replacement.policy_learning.cache import memtrace\nfrom cache_replacement.policy_learning.common import config as cfg\nfrom cache_replacement.policy_learning.common import utils\n\nFLAGS = flags.FLAGS\nflags.DEFINE_multi_string(\n \"cache_configs\",\n [\n \"cache_replacement/policy_learning/cache/configs/default.json\", # pylint: disable=line-too-long\n \"cache_replacement/policy_learning/cache/configs/eviction_policy/lru.json\" # pylint: disable=line-too-long\n ],\n \"List of config paths merged front to back for the cache.\")\nflags.DEFINE_multi_string(\n \"config_bindings\", [],\n (\"override config with key=value pairs \"\n \"(e.g., eviction_policy.policy_type=greedy)\"))\nflags.DEFINE_string(\n \"experiment_base_dir\", \"/tmp/experiments\",\n \"Base directory to store all experiments in. 
Should not frequently change.\")\nflags.DEFINE_string(\n \"experiment_name\", \"unnamed\",\n \"All data related to this experiment is written to\"\n \" experiment_base_dir/experiment_name.\")\nflags.DEFINE_string(\n \"memtrace_file\",\n \"cache_replacement/policy_learning/cache/traces/omnetpp_train.csv\",\n \"Memory trace file path to use.\")\nflags.DEFINE_integer(\n \"tb_freq\", 10000, \"Number of cache reads between tensorboard logs.\")\nflags.DEFINE_integer(\n \"warmup_period\", int(2e3), \"Number of cache reads before recording stats.\")\nflags.DEFINE_bool(\n \"force_overwrite\", False,\n (\"If true, overwrites directory at \"\n \" experiment_base_dir/experiment_name if it exists.\"))\n\n\ndef log_scalar(tb_writer, key, value, step):\n summary = tf.Summary(value=[tf.Summary.Value(tag=key, simple_value=value)])\n tb_writer.add_summary(summary, step)\n\n\ndef main(_):\n # Set up experiment directory\n exp_dir = os.path.join(FLAGS.experiment_base_dir, FLAGS.experiment_name)\n utils.create_experiment_directory(exp_dir, FLAGS.force_overwrite)\n tensorboard_dir = os.path.join(exp_dir, \"tensorboard\")\n tf.disable_eager_execution()\n tb_writer = tf.summary.FileWriter(tensorboard_dir)\n miss_trace_path = os.path.join(exp_dir, \"misses.csv\")\n evict_trace_path = os.path.join(exp_dir, \"evictions.txt\")\n\n cache_config = cfg.Config.from_files_and_bindings(\n FLAGS.cache_configs, FLAGS.config_bindings)\n with open(os.path.join(exp_dir, \"cache_config.json\"), \"w\") as f:\n cache_config.to_file(f)\n\n flags_config = cfg.Config({\n \"memtrace_file\": FLAGS.memtrace_file,\n \"tb_freq\": FLAGS.tb_freq,\n \"warmup_period\": FLAGS.warmup_period,\n })\n with open(os.path.join(exp_dir, \"flags.json\"), \"w\") as f:\n flags_config.to_file(f)\n\n logging.info(\"Config: %s\", str(cache_config))\n logging.info(\"Flags: %s\", str(flags_config))\n\n cache_line_size = cache_config.get(\"cache_line_size\")\n with memtrace.MemoryTrace(\n FLAGS.memtrace_file, cache_line_size=cache_line_size) as trace:\n with memtrace.MemoryTraceWriter(miss_trace_path) as write_trace:\n with evict.EvictionTrace(evict_trace_path, False) as evict_trace:\n def write_to_eviction_trace(cache_access, eviction_decision):\n evict_trace.write(\n evict.EvictionEntry(cache_access, eviction_decision))\n\n cache = cache_mod.Cache.from_config(cache_config, trace=trace)\n\n # Warm up cache\n for _ in tqdm.tqdm(range(FLAGS.warmup_period), desc=\"Warming up cache\"):\n pc, address = trace.next()\n hit = cache.read(pc, address, [write_to_eviction_trace])\n\n if not hit:\n write_trace.write(pc, address)\n\n if trace.done():\n raise ValueError()\n\n # Discard warm-up cache statistics\n cache.hit_rate_statistic.reset()\n\n num_reads = 0\n with tqdm.tqdm(desc=\"Simulating cache on MemoryTrace\") as pbar:\n while not trace.done():\n pc, address = trace.next()\n hit = cache.read(pc, address, [write_to_eviction_trace])\n\n if not hit:\n write_trace.write(pc, address)\n\n num_reads += 1\n if num_reads % FLAGS.tb_freq == 0:\n log_scalar(tb_writer, \"cache_hit_rate\",\n cache.hit_rate_statistic.success_rate(), num_reads)\n\n pbar.update(1)\n\n log_scalar(tb_writer, \"cache_hit_rate\",\n cache.hit_rate_statistic.success_rate(), num_reads)\n\n # Force flush, otherwise last writes will be lost.\n tb_writer.flush()\n\nif __name__ == \"__main__\":\n app.run(main)\n",
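The simulation above follows a warm-up-then-measure pattern: reads during warmup_period update cache state, but their statistics are discarded before measurement begins. A toy sketch of that pattern, with a hypothetical minimal LRU standing in for this repo's Cache/MemoryTrace classes:

from collections import OrderedDict

class ToyLRU:
    def __init__(self, capacity):
        self.capacity, self.lines = capacity, OrderedDict()
        self.hits = self.reads = 0
    def read(self, address):
        self.reads += 1
        hit = address in self.lines
        if hit:
            self.hits += 1
            self.lines.move_to_end(address)  # mark as most recently used
        else:
            self.lines[address] = True
            if len(self.lines) > self.capacity:
                self.lines.popitem(last=False)  # evict least recently used
        return hit
    def reset_stats(self):
        self.hits = self.reads = 0

cache = ToyLRU(capacity=2)
trace = [0, 1, 0, 2, 0, 1]
for address in trace[:2]:   # warm up the cache state
    cache.read(address)
cache.reset_stats()         # discard warm-up statistics, as in main() above
for address in trace[2:]:
    cache.read(address)
print("hit rate: %.2f" % (cache.hits / cache.reads))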
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: skip-file\nimport numpy as np\nimport os\nimport sys\nimport random\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport time\nfrom tqdm import tqdm\n\n\nfrom aloe.common.configs import cmd_args, set_device\nfrom aloe.common.f_family import MLPScore\nfrom aloe.common.sampler import GibbsSampler\nfrom aloe.synthetic.utils import learn_score\nfrom aloe.synthetic.reverse_train import prepare_diff_pos\nfrom aloe.fuzz.utils import CondRnnSampler, MLPVarLenMultinomialSampler, HexStreamDataset, CondRnnScore, CondMLPScore, GeoMultinomialSampler\nfrom torch_scatter import scatter_add\n\n\ndef learn_score(pos_list, true_samples, score_fn, opt_score, neg_samples):\n opt_score.zero_grad()\n true_scores = score_fn(pos_list, true_samples)\n f_loss = -torch.mean(true_scores) + torch.mean(score_fn(pos_list, neg_samples))\n f_loss.backward()\n if cmd_args.grad_clip > 0:\n torch.nn.utils.clip_grad_norm_(score_fn.parameters(), max_norm=cmd_args.grad_clip)\n opt_score.step()\n if cmd_args.weight_clip > 0:\n for p in score_fn.parameters():\n p.data.clamp_(-cmd_args.weight_clip, cmd_args.weight_clip)\n return f_loss.item(), torch.mean(true_scores).item()\n\n\ndef get_score_func(args):\n if args.score_func == 'rnn':\n score_func = CondRnnScore(args, n_choices=256, act_last=args.f_out, f_scale=args.f_scale)\n elif args.score_func == 'mlp':\n score_func = CondMLPScore(n_choices=256, discrete_dim=args.window_size, embed_dim=args.embed_dim, act_last=args.f_out, f_scale=args.f_scale)\n else:\n raise NotImplementedError\n score_func = score_func.to(args.device)\n return score_func\n\n\ndef get_editor(args):\n # build base sampler\n if args.base_type == 'mlp':\n print('using mlp autoreg base sampler')\n elif args.base_type == 'rnn':\n print('using rnn autoreg base sampler')\n base_sampler = CondRnnSampler(n_choices=256, discrete_dim=args.window_size, embed_dim=args.embed_dim)\n else:\n raise NotImplementedError\n # build editor\n if args.io_enc == 'rnn':\n pass\n elif args.io_enc == 'mlp':\n sampler = MLPVarLenMultinomialSampler(base_sampler, args.window_size,\n n_choices=256,\n embed_dim=args.embed_dim).to(args.device)\n else:\n raise NotImplementedError\n return sampler\n\n\nif __name__ == '__main__':\n np.random.seed(cmd_args.seed)\n random.seed(cmd_args.seed)\n torch.manual_seed(cmd_args.seed)\n set_device(cmd_args.gpu)\n\n # build score func\n score_func = get_score_func(cmd_args)\n sampler = get_editor(cmd_args)\n\n db = HexStreamDataset(cmd_args)\n train_load = DataLoader(db, batch_size=cmd_args.batch_size, shuffle=True,\n collate_fn=db.collate_fn, num_workers=cmd_args.num_proc, drop_last=True)\n train_gen = iter(train_load)\n gibbs_sampler = GibbsSampler(n_choices=256, discrete_dim=cmd_args.window_size, device=cmd_args.device)\n proposal_dist = 
GeoMultinomialSampler(n_choices=256, discrete_dim=cmd_args.window_size, stop_prob=cmd_args.mu0, device=cmd_args.device)\n opt_score = optim.Adam(score_func.parameters(), lr=cmd_args.learning_rate * cmd_args.f_lr_scale)\n opt_sampler = optim.Adam(sampler.parameters(), lr=cmd_args.learning_rate)\n\n rand_samples = torch.randint(256, (cmd_args.batch_size, cmd_args.window_size)).to(cmd_args.device)\n\n for epoch in range(cmd_args.num_epochs):\n pbar = tqdm(range(cmd_args.iter_per_epoch))\n for it in pbar:\n try:\n samples = next(train_gen)\n except StopIteration:\n train_gen = iter(train_load)\n samples = next(train_gen)\n pos_list, hex_stream = samples\n pos_list = pos_list.to(cmd_args.device)\n hex_stream = hex_stream.to(cmd_args.device)\n # get samples\n with torch.no_grad():\n init_samples, n_steps, _, _ = sampler(cmd_args.num_q_steps, pos_list)\n cur_score_fn = lambda samples: score_func(pos_list, samples)\n neg_samples = gibbs_sampler(cur_score_fn, cmd_args.gibbs_rounds, init_samples=init_samples)\n\n f_loss, true_scores = learn_score(pos_list, hex_stream, score_func, opt_score, neg_samples)\n\n with torch.no_grad():\n rand_scores = torch.mean(score_func(pos_list, rand_samples)).item()\n neg_scores = torch.mean(score_func(pos_list, neg_samples)).item()\n\n neg_samples = neg_samples.repeat(cmd_args.num_importance_samples, 1)\n pos_list = pos_list.repeat(cmd_args.num_importance_samples, 1)\n for q_it in range(cmd_args.q_iter):\n opt_sampler.zero_grad()\n if cmd_args.num_q_steps:\n with torch.no_grad():\n cur_samples, proposal_logprob = proposal_dist(cmd_args.num_q_steps, init_samples=neg_samples)\n diff_pos = (cur_samples - neg_samples).nonzero()\n rep_rows = diff_pos[:, 0].view(-1)\n rep_init = torch.index_select(cur_samples, 0, rep_rows)\n rep_final = torch.index_select(neg_samples, 0, rep_rows)\n row_ids, col_ids, col_target, traj_lens, tval_rows, tval_cols = prepare_diff_pos(diff_pos, need_target_vals=True)\n rep_init[row_ids, col_ids] = rep_final[row_ids, col_ids]\n rep_val = neg_samples[tval_rows, tval_cols]\n rep_target = torch.LongTensor(col_target).to(cmd_args.device).view(-1, 1)\n else:\n cur_samples = neg_samples\n proposal_logprob = 0\n init_prob = sampler.base_logprob(pos_list, cur_samples)\n if cmd_args.num_q_steps and rep_rows.shape[0]:\n ctx_pos_list = torch.index_select(pos_list, 0, rep_rows)\n context = sampler.get_context_from_raw(ctx_pos_list, rep_init)\n traj_prob, _, _ = sampler.forward_onestep(context, target_pos=rep_target, target_bits=rep_val)\n traj_prob = scatter_add(traj_prob, rep_rows, dim=0, dim_size=cur_samples.shape[0])\n\n zeros = torch.zeros(rep_init.shape[0], 1).to(cmd_args.device)\n nonstop = sampler.pred_stop(context, zeros)[0]\n nonstop = scatter_add(nonstop, rep_rows, dim=0, dim_size=neg_samples.shape[0])\n else:\n traj_prob = nonstop = 0\n\n ones = torch.ones(neg_samples.shape[0], 1).to(cmd_args.device)\n context = sampler.get_context_from_raw(pos_list, neg_samples)\n last_stop = sampler.pred_stop(context, ones)[0]\n log_prob = init_prob + traj_prob + nonstop + last_stop\n with torch.no_grad():\n log_ratio = (log_prob - proposal_logprob).view(cmd_args.num_importance_samples, -1)\n weight = F.softmax(log_ratio, dim=0).view(log_prob.shape)\n\n log_prob = log_prob * weight\n loss = -torch.mean(log_prob) * cmd_args.num_importance_samples\n loss.backward()\n if cmd_args.grad_clip > 0:\n torch.nn.utils.clip_grad_norm_(sampler.parameters(), max_norm=cmd_args.grad_clip)\n opt_sampler.step()\n g_loss = loss.item()\n\n avg_steps = 
torch.mean(n_steps.float()).item()\n pbar.set_description('epoch: %d, f: %.2f, g: %.2f, n: %.2f, true: %.2f, neg: %.2f, rand: %.2f' % (epoch, f_loss, g_loss, avg_steps, true_scores, neg_scores, rand_scores))\n\n if epoch % cmd_args.epoch_save == 0:\n save_dict = {\n 'epoch': epoch,\n 'score_func': score_func.state_dict(),\n 'sampler': sampler.state_dict(),\n 'opt_score': opt_score.state_dict(),\n 'opt_sampler': opt_sampler.state_dict(),\n }\n\n torch.save(save_dict, os.path.join(cmd_args.save_dir, 'model-%d.ckpt' % epoch))\n",
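The sampler update in the loop above self-normalizes importance weights over the num_importance_samples replicas: log-ratios between the sampler's log-probability and the proposal's are softmaxed along the sample axis and used to weight the negative log-probability loss. A shape-level sketch of just that step, with random stand-in tensors:

import torch
import torch.nn.functional as F

num_importance_samples, batch_size = 4, 8
log_prob = torch.randn(num_importance_samples * batch_size, requires_grad=True)
proposal_logprob = torch.randn(num_importance_samples * batch_size)

with torch.no_grad():
    # [num_importance_samples, batch]: normalize over the sample axis.
    log_ratio = (log_prob - proposal_logprob).view(num_importance_samples, -1)
    weight = F.softmax(log_ratio, dim=0).view(log_prob.shape)

# Weights sum to 1 per batch element, so the factor of num_importance_samples
# keeps the loss on the scale of a plain mean, as in the loop above.
loss = -torch.mean(log_prob * weight) * num_importance_samples
loss.backward()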
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n# pylint: disable=dangerous-default-value\n\"\"\"A custom estimator to establish a naive reweighting baseline.\n\nImplements a DNN classifier with re-weighted risk minimization objective,\nwhere the weights are inverse propensity scores of the example.\n\nExpects model_fn parameter \"label\" to be `dict` of `Tensor` Objects,\nwith key/value pair for the keys:\n\"IPS_example_weights_with_label\" and \"IPS_example_weights_without_label\",\nand their corresponding values being inverse propensity weight of the example.\n\nThis code merely loads the weights from the \"label\" dictionary, and set them as\nexample weights. Actual, IPS weights are precomputed somewhere else, and added\nto the \"label\" dictionary in input_fn().\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow.contrib import framework as contrib_framework\nfrom tensorflow.contrib import layers as contrib_layers\nfrom tensorflow.contrib import metrics as contrib_metrics\n\nIPS_WITH_LABEL_TARGET_COLUMN_NAME = 'IPS_example_weights_with_label'\nIPS_WITHOUT_LABEL_TARGET_COLUMN_NAME = 'IPS_example_weights_without_label'\n\n\nclass _IPSReweightingModel():\n \"\"\"TensorFlow _IPSReweightingModel base class.\n\n _IPSReweightingModel class can be used to instantiate a feedforward DNN\n classifier with Inverse Propensity re-weighted risk minimization objective.\n \"\"\"\n\n def __init__(\n self,\n feature_columns,\n label_column_name,\n config,\n model_dir,\n reweighting_type='IPS_without_label',\n hidden_units=[64, 32],\n batch_size=256,\n learning_rate=0.01,\n optimizer='Adagrad',\n activation=tf.nn.relu\n ):\n \"\"\"Initializes a IPS reweighting estimator.\n\n Args:\n feature_columns: list of feature_columns.\n label_column_name: (string) name of the target variable.\n config: `RunConfig` object to configure the runtime settings.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into an estimator\n to continue training a previously saved model.\n reweighting_type: (string) name of the type of re-weighting to be\n performed. Expects values in ['IPS_with_label','IPS_without_label'].\n IPS stands for inverse propensity score, wherein each example is\n assigned a weight inversely proportionate their propensity of appearing\n in training distribution. Concretely, ips-weight = 1/p(x),\n where p(x) is the probability of x in training distribution.\n In \"IPS_without_label\", each example is assigned a weight as the inverse\n propensity score of their subgroup. For example, 1/p(\"Black Female\").\n In \"IPS_with_label\", each example is assigned a weight as the inverse\n propensity score of their subgroup and class membership. 
For example,\n 1/p(\"Black Female\", \"class 0\").\n hidden_units: List with number of hidden units per layer for the\n shared bottom. All layers are fully connected.\n Ex. `[64, 32]` means first layer has 64 nodes and second one has 32.\n batch_size: (int) batch size.\n learning_rate: learning rate.\n optimizer: An instance of `tf.Optimizer` used to train the model.\n activation: Activation function applied to each layer.\n\n Raises:\n ValueError: if label_column_name not specified.\n ValueError: if hidden_units is not a list.\n ValueError: if reweighting_type not in\n ['IPS_with_label','IPS_without_label'].\n\n \"\"\"\n if not label_column_name:\n raise ValueError('Need to specify a label_column_name.')\n\n if not isinstance(hidden_units, list):\n raise ValueError('hidden_units should be a list')\n\n if reweighting_type not in ('IPS_with_label', 'IPS_without_label'):\n raise ValueError('Invalid reweighting_type: {}.'.format(reweighting_type))\n\n self._reweighting_type = reweighting_type\n self._feature_columns = feature_columns\n self._learning_rate = learning_rate\n self._optimizer = optimizer\n self._model_dir = model_dir\n self._hidden_units = hidden_units\n self._config = config\n self._activation = activation\n self._batch_size = batch_size\n self._label_column_name = label_column_name\n\n def _loss(self, labels, logits, example_weights):\n \"\"\"Computes weighted sigmoid cross entropy loss.\n\n Args:\n labels: Labels.\n logits: Logits.\n example_weights: example_weights.\n\n Returns:\n loss: (scalar) loss\n \"\"\"\n sigmoid_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=labels, logits=logits)\n weighted_loss = example_weights * sigmoid_loss\n return tf.reduce_mean(weighted_loss)\n\n def _get_model_fn(self):\n \"\"\"Method that gets a model_fn for creating an `Estimator` Object.\"\"\"\n\n def model_fn(features, labels, mode):\n \"\"\"BaselineModel model_fn.\n\n Args:\n features: `Tensor` or `dict` of `Tensor`.\n labels: A `dict` of `Tensor` Objects. Expects to have a key/value pair\n for the key self.label_column_name, \"IPS_example_weights_with_label\",\n and \"IPS_example_weights_without_label\".\n IPS stands for inverse propensity score, wherein each example is\n assigned a weight inversely proportional to its propensity of\n appearing in the training distribution. Concretely, ips-weight = 1/p(x),\n where p(x) is the probability of x in the training distribution.\n In \"IPS_without_label\", each example is given a weight equal to the\n inverse propensity score of its subgroup. For example, 1/p(\"Black Female\").\n In \"IPS_with_label\", each example is assigned a weight equal to the\n inverse propensity score of its subgroup and class membership. For example,\n 1/p(\"Black Female\", \"class 0\").\n mode: Defines whether this is training, evaluation or prediction. See\n `ModeKeys`. Currently PREDICT mode is not implemented.\n\n Returns:\n An instance of `tf.estimator.EstimatorSpec`, which encapsulates the\n `mode`, `predictions`, `loss` and the `train_op`. 
Note that here\n `predictions` is either a `Tensor` or a `dict` of `Tensor` objects,\n representing the prediction of the binary classification model.\n `loss` is a scalar containing the loss of the step and `train_op` is the\n op for training.\n \"\"\"\n\n # Instantiates a tensor with true class labels\n class_labels = labels[self._label_column_name]\n\n ips_example_weights_with_label = labels[IPS_WITH_LABEL_TARGET_COLUMN_NAME]\n ips_example_weights_without_label = labels[\n IPS_WITHOUT_LABEL_TARGET_COLUMN_NAME]\n\n tf.logging.info('model_fn for mode: {}'.format(mode))\n\n with tf.name_scope('model'):\n input_layer = tf.feature_column.input_layer(features,\n self._feature_columns)\n layer = input_layer\n for unit in self._hidden_units:\n layer = tf.layers.Dense(unit, activation=self._activation)(layer)\n logits = tf.layers.Dense(1)(layer)\n sigmoid_output = tf.nn.sigmoid(logits, name='sigmoid')\n class_predictions = tf.cast(tf.greater(sigmoid_output, 0.5), tf.float32) # pylint: disable=line-too-long\n tf.summary.histogram('class_predictions', class_predictions)\n\n if self._reweighting_type == 'IPS_with_label':\n example_weights = ips_example_weights_with_label\n elif self._reweighting_type == 'IPS_without_label':\n example_weights = ips_example_weights_without_label\n\n # Initializes Loss Functions\n loss = self._loss(class_labels, logits, example_weights)\n\n # Sets up dictionaries used for computing performance metrics\n predictions = {\n (self._label_column_name, 'class_ids'):\n tf.reshape(class_predictions, [-1]),\n (self._label_column_name, 'logistic'):\n tf.reshape(sigmoid_output, [-1])\n }\n\n class_id_kwargs = {\n 'labels': class_labels,\n 'predictions': class_predictions\n }\n logistics_kwargs = {'labels': class_labels, 'predictions': sigmoid_output}\n\n # EVAL Mode\n if mode == tf.estimator.ModeKeys.EVAL:\n with tf.name_scope('eval_metrics'):\n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(**class_id_kwargs),\n 'precision': tf.metrics.precision(**class_id_kwargs),\n 'recall': tf.metrics.recall(**class_id_kwargs),\n 'fp': tf.metrics.false_positives(**class_id_kwargs),\n 'fn': tf.metrics.false_negatives(**class_id_kwargs),\n 'tp': tf.metrics.true_positives(**class_id_kwargs),\n 'tn': tf.metrics.true_negatives(**class_id_kwargs),\n 'fpr': contrib_metrics.streaming_false_positive_rate(**class_id_kwargs), # pylint: disable=line-too-long\n 'fnr': contrib_metrics.streaming_false_negative_rate(**class_id_kwargs), # pylint: disable=line-too-long\n 'auc': tf.metrics.auc(curve='ROC', **logistics_kwargs),\n 'aucpr': tf.metrics.auc(curve='PR', **logistics_kwargs)\n }\n\n # EstimatorSpec object for evaluation\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n eval_metric_ops=eval_metric_ops)\n\n # TRAIN Mode\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op_primary = contrib_layers.optimize_loss(\n loss=loss,\n learning_rate=self._learning_rate,\n global_step=contrib_framework.get_global_step(),\n optimizer=self._optimizer)\n\n estimator_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op_primary)\n\n return estimator_spec\n\n return model_fn\n\n\nclass _IPSReweightingEstimator(tf.estimator.Estimator):\n \"\"\"An estimator based on the core estimator.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initializes the estimator.\"\"\"\n self.model = _IPSReweightingModel(*args, **kwargs)\n super(_IPSReweightingEstimator, self).__init__(\n 
model_fn=self.model._get_model_fn(), # pylint: disable=protected-access\n model_dir=self.model._model_dir, # pylint: disable=protected-access\n config=self.model._config # pylint: disable=protected-access\n )\n\n\ndef get_estimator(*args, **kwargs):\n return _IPSReweightingEstimator(*args, **kwargs)\n",
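The estimator above only consumes IPS weights that the docstring says are precomputed elsewhere and attached in input_fn(). A plausible sketch of that precomputation for the 'IPS_without_label' variant, with hypothetical column names, weighting each example by the inverse empirical frequency of its subgroup:

import collections

examples = [
    {'subgroup': 'A'}, {'subgroup': 'A'}, {'subgroup': 'A'}, {'subgroup': 'B'},
]
counts = collections.Counter(ex['subgroup'] for ex in examples)
n = len(examples)
for ex in examples:
    # ips-weight = 1 / p(subgroup), with p estimated empirically.
    ex['IPS_example_weights_without_label'] = n / counts[ex['subgroup']]

print(examples[0], examples[3])  # weight 4/3 for subgroup 'A', 4 for 'B'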
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trainable models for ARM++ experiments.\"\"\"\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tensorflow_probability.python.stats.leave_one_out import log_soomean_exp\n\nkeras = tf.keras\ntfd = tfp.distributions\n\nEPS = 1e-6\n\n\ndef safe_log_prob(p):\n return tf.math.log(tf.clip_by_value(p, EPS, 1.0))\n\n\ndef logit_func(prob_tensor):\n \"\"\"Calculate logits.\"\"\"\n return safe_log_prob(prob_tensor) - safe_log_prob(1. - prob_tensor)\n\n\ndef get_local_disarm_learning_signal(\n b1,\n b2,\n elbo_b1,\n elbo_b2,\n encoder_logits):\n \"\"\"Get local learning signal for VIMCO-ARM++ v3.\"\"\"\n # b1, b2 are of the shape [num_samples, batch_size, hidden_dims]\n # elbo_b1, elbo_b2 are of the shape [num_samples, batch_size]\n\n num_samples = tf.shape(elbo_b1)[0]\n num_samples_f = tf.cast(num_samples, tf.float32)\n\n # [num_samples, batch_size, hidden_dims] shape\n disarm_factor = ((1. - b1)*b2 + b1*(1. - b2)) * (-1.)**b2\n disarm_factor *= tf.math.sigmoid(tf.math.abs(encoder_logits))\n\n # [1, batch_size] shape\n l_b1 = (tf.reduce_logsumexp(elbo_b1, axis=0, keepdims=True)\n - tf.math.log(num_samples_f))\n\n # the following manipulation is due to tf.linalg.set_diag is applying\n # on [a, b, c, d, ..., m, m] last two dimension.\n # [batch_size, num_samples]\n transposed_elbo_b1 = tf.transpose(elbo_b1, [1, 0])\n transposed_elbo_b2 = tf.transpose(elbo_b2, [1, 0])\n # [batch_size, num_samples, num_samples]\n tiled_elbo_b1 = tf.tile(\n tf.expand_dims(transposed_elbo_b1, -1),\n [1, 1, num_samples])\n # [batch_size, num_samples, num_samples]\n elbo_b_tilde = tf.linalg.set_diag(tiled_elbo_b1,\n transposed_elbo_b2)\n # [batch_size, num_samples] shape\n # TODO(zhedong): the `axis=1` is frigile, fix this with better understading\n l_b2_t = (tf.reduce_logsumexp(elbo_b_tilde, axis=1, keepdims=False)\n - tf.math.log(num_samples_f))\n # [num_samples, batch_size]\n l_b2 = tf.transpose(l_b2_t, [1, 0])\n\n # [num_samples, batch_size, 1] shape\n local_learning_signal = tf.expand_dims(l_b1 - l_b2, -1)\n\n # [batch_size, hidden_dim]\n infnet_grad_factor = tf.reduce_sum(\n 0.5 * local_learning_signal * disarm_factor,\n axis=0,\n keepdims=False)\n multisample_objective = tf.squeeze(l_b1, axis=0)\n return infnet_grad_factor, multisample_objective\n\n\ndef get_vimco_local_learning_signal(elbo_tensor):\n \"\"\"Get vimco local learning signal from batched ELBO.\n\n Args:\n elbo_tensor: a `float` Tensor of the shape [num_samples, batch_size].\n\n Returns:\n local_learning_signal: a `float` Tensor of the same shape as `input_tensor`,\n contains the multiplicative factor as described in Algorithm 1 of VIMCO,\n L_hat - L_hat^[-i].\n \"\"\"\n assert_op = tf.debugging.assert_rank_at_least(\n elbo_tensor, rank=2,\n message='ELBO needs at least 2D, [sample, batch].')\n with tf.control_dependencies([assert_op]):\n # Calculate the log swap-one-out mean and log mean\n # log_soomean_f is of the 
same shape as f: [num_samples, batch]\n # log_mean_f is of the reduced shape: [1, batch]\n log_soomean_f, log_mean_f = log_soomean_exp(elbo_tensor,\n axis=0,\n keepdims=True)\n local_learning_signal = log_mean_f - log_soomean_f\n return local_learning_signal\n\n\ndef get_vimco_disarm_learning_signal(\n b1, b2, elbo_b1, elbo_b2, encoder_logits,\n verbose_return=False,\n half_p_trick=False):\n \"\"\"Get local learning signal for VIMCO-ARM++.\"\"\"\n elbo_b1b2 = tf.concat([elbo_b1, elbo_b2], axis=0)\n l_b1b2 = (\n tf.reduce_logsumexp(elbo_b1b2, axis=0, keepdims=True)\n - tf.math.log(tf.cast(tf.shape(elbo_b1b2)[0], tf.float32)))\n local_learning_signal = l_b1b2 - 0.5 * (elbo_b1 + elbo_b2)\n local_learning_signal = tf.expand_dims(local_learning_signal, axis=-1)\n local_learning_signal = tf.stop_gradient(local_learning_signal)\n\n if half_p_trick:\n logprob_term1 = (tf.stop_gradient((1. - b1) * b2 + b1 * (1. - b2))\n * (tf.math.log_sigmoid(encoder_logits) - tf.math.log(2.)))\n logprob_term2 = - (tf.stop_gradient((1. - b1) * (1. - b2))\n * tf.math.softplus(encoder_logits))\n else:\n abs_phi = tf.math.abs(encoder_logits)\n\n logprob_term1 = (tf.stop_gradient((1. - b1) * b2 + b1 * (1. - b2))\n * (-1.) * tf.math.softplus(abs_phi))\n logprob_term2 = (tf.stop_gradient((1. - b1) * (1. - b2) + b1 * b2)\n * (tfp.math.log1mexp(-abs_phi)\n - tf.math.softplus(-abs_phi)))\n logprob_b1b2 = logprob_term1 + logprob_term2\n\n if half_p_trick:\n sigma_abs_phi = tf.math.sigmoid(tf.math.abs(\n tf.math.log_sigmoid(encoder_logits-tf.math.log(2.))))\n else:\n sigma_abs_phi = tf.math.sigmoid(tf.math.abs(encoder_logits))\n\n vimco_signal = local_learning_signal * logprob_b1b2\n\n # arm pp factor, shape [sample, batch, event_dims]\n disarm_factor = ((1. - b1) * b2 + b1 * (1. - b2)) * (-1.)**b2\n disarm_factor *= sigma_abs_phi\n disarm_signal = (\n 0.5 * tf.expand_dims(elbo_b1 - elbo_b2, axis=-1) * disarm_factor)\n\n if verbose_return:\n return dict(l_b1b2=l_b1b2,\n local_learning_signal=local_learning_signal,\n logprob_b1b2=logprob_b1b2,\n disarm_signal=disarm_signal,\n vimco_signal=vimco_signal,\n logprob_list=(logprob_term1, logprob_term2))\n else:\n return local_learning_signal, logprob_b1b2, disarm_signal, l_b1b2\n\n\nclass BinaryNetwork(tf.keras.Model):\n \"\"\"Network generating binary samples.\"\"\"\n\n def __init__(self,\n hidden_sizes,\n activations,\n mean_xs=None,\n demean_input=False,\n final_layer_bias_initializer='zeros',\n name='binarynet'):\n\n super(BinaryNetwork, self).__init__(name=name)\n assert len(activations) == len(hidden_sizes)\n\n num_layers = len(hidden_sizes)\n self.hidden_sizes = hidden_sizes\n self.activations = activations\n self.networks = keras.Sequential()\n\n if demean_input:\n if mean_xs is not None:\n self.networks.add(\n tf.keras.layers.Lambda(lambda x: x - mean_xs))\n else:\n self.networks.add(\n tf.keras.layers.Lambda(lambda x: 2.*tf.cast(x, tf.float32) - 1.))\n for i in range(num_layers-1):\n self.networks.add(\n keras.layers.Dense(\n units=hidden_sizes[i],\n activation=activations[i]))\n\n self.networks.add(\n keras.layers.Dense(\n units=hidden_sizes[-1],\n activation=activations[-1],\n bias_initializer=final_layer_bias_initializer))\n\n def __call__(self,\n input_tensor,\n samples=None,\n num_samples=(),\n half_p_trick=False):\n logits = self.get_logits(input_tensor, half_p_trick)\n dist = tfd.Bernoulli(logits=logits)\n if samples is None:\n samples = dist.sample(num_samples)\n samples = tf.cast(samples, tf.float32)\n likelihood = dist.log_prob(samples)\n return samples, 
likelihood, logits\n\n def get_logits(self, input_tensor, half_p_trick=False):\n logits = self.networks(input_tensor)\n if half_p_trick:\n logits = tf.math.log_sigmoid(logits-tf.math.log(2.))\n return logits\n\n def sample_uniform_variables(self, sample_shape, nfold=1):\n if nfold > 1:\n sample_shape = tf.concat(\n [sample_shape[0:1] * nfold, sample_shape[1:]],\n axis=0)\n return tf.random.uniform(shape=sample_shape, maxval=1.0)\n\n\nclass SingleLayerDiscreteVAE(tf.keras.Model):\n \"\"\"Discrete VAE as described in ARM (Yin and Zhou (2019)).\"\"\"\n\n def __init__(self,\n encoder,\n decoder,\n prior_logits,\n grad_type='arm',\n half_p_trick=False,\n epsilon=0.,\n control_nn=None,\n name='dvae'):\n super(SingleLayerDiscreteVAE, self).__init__(name)\n self.encoder = encoder\n self.decoder = decoder\n\n self.half_p_trick = half_p_trick\n if self.half_p_trick:\n # This enforces that the p of the Bernoulli distribution is in [0, 0.5].\n self.prior_logits = tf.math.log_sigmoid(prior_logits-tf.math.log(2.))\n else:\n self.prior_logits = prior_logits\n\n self.prior_dist = tfd.Bernoulli(logits=self.prior_logits)\n\n self.grad_type = grad_type.lower()\n\n # used for variance estimation of the gradients.\n self.ema = tf.train.ExponentialMovingAverage(0.999)\n\n # epsilon for the tolerance used in VIMCO-ARM++\n self.epsilon = epsilon\n\n if self.grad_type == 'relax':\n self.log_temperature_variable = tf.Variable(\n initial_value=tf.math.log(0.1), # Reasonable init\n dtype=tf.float32)\n\n # the scaling_factor is a trainable scalar initialized to 1.\n self.scaling_variable = tf.Variable(\n initial_value=1.,\n dtype=tf.float32)\n\n # neural network for control variates lambda * r(z)\n self.control_nn = control_nn\n\n def call(self, input_tensor, hidden_samples=None, num_samples=()):\n \"\"\"Returns ELBO.\n\n Args:\n input_tensor: a `float` Tensor for input observations.\n The tensor is of the shape [batch_size, observation_dims].\n hidden_samples: a discrete Tensor for hidden states.\n The tensor is of the shape [batch_size, hidden_dims].\n Defaults to None, in which case the hidden samples will be generated\n based on num_samples.\n num_samples: 0-D or 1-D `int` Tensor. 
Shape of the generated samples.\n\n Returns:\n elbo: the ELBO with shape [batch_size].\n \"\"\"\n hidden_sample, encoder_llk, encoder_logits = self.encoder(\n input_tensor,\n samples=hidden_samples,\n num_samples=num_samples,\n half_p_trick=self.half_p_trick)\n\n encoder_llk = tf.reduce_sum(encoder_llk, axis=-1)\n log_pb = tf.reduce_sum(\n self.prior_dist.log_prob(hidden_sample),\n axis=-1)\n\n decoder_llk = tf.reduce_sum(\n self.decoder(hidden_sample, input_tensor)[1],\n axis=-1)\n\n elbo = decoder_llk + log_pb - encoder_llk\n encoder_logits = self.threshold_around_zero(encoder_logits)\n\n return elbo, hidden_sample, encoder_logits, encoder_llk\n\n def get_elbo(self, input_tensor, hidden_tensor):\n \"\"\"Returns ELBO.\n\n Args:\n input_tensor: a `float` Tensor for input observations.\n The tensor is of the shape [batch_size, observation_dims].\n hidden_tensor: a discrete Tensor for hidden states.\n The tensor is of the shape [batch_size, hidden_dims].\n\n Returns:\n elbo: the ELBO with shape [batch_size].\n \"\"\"\n elbo = self.call(input_tensor, hidden_samples=hidden_tensor)[0]\n return elbo\n\n def get_layer_grad_estimation(\n self, input_tensor, grad_type=None):\n if grad_type is None:\n grad_type = self.grad_type\n\n encoder_logits = self.encoder.get_logits(input_tensor)\n sigma_phi = tf.math.sigmoid(encoder_logits)\n\n if grad_type == 'ar':\n u_noise = self.encoder.sample_uniform_variables(\n sample_shape=tf.shape(encoder_logits),\n nfold=1)\n b = tf.cast(u_noise < sigma_phi, tf.float32)\n f = self.get_elbo(input_tensor, b)[:, tf.newaxis]\n layer_grad = f * (1. - 2.*u_noise)\n\n elif grad_type == 'ar-2sample':\n u_noise = self.encoder.sample_uniform_variables(\n sample_shape=tf.shape(encoder_logits),\n nfold=2)\n u1, u2 = tf.split(u_noise, num_or_size_splits=2, axis=0)\n b1 = tf.cast(u1 < sigma_phi, tf.float32)\n b2 = tf.cast(u2 < sigma_phi, tf.float32)\n f1 = self.get_elbo(input_tensor, b1)[:, tf.newaxis]\n f2 = self.get_elbo(input_tensor, b2)[:, tf.newaxis]\n layer_grad = 0.5 * (f1 * (1. - 2.*u1) + f2 * (1. - 2.*u2))\n\n elif grad_type == 'arm':\n u_noise = self.encoder.sample_uniform_variables(\n sample_shape=tf.shape(encoder_logits),\n nfold=1)\n b1 = tf.cast(u_noise > 1. - sigma_phi, tf.float32)\n b2 = tf.cast(u_noise < sigma_phi, tf.float32)\n f1 = self.get_elbo(input_tensor, b1)[:, tf.newaxis]\n f2 = self.get_elbo(input_tensor, b2)[:, tf.newaxis]\n layer_grad = (f1 - f2) * (u_noise - 0.5)\n\n elif grad_type == 'armp':\n # ARM with conditioning, but no dropping of b_i = tilde b_i terms.\n u_noise = self.encoder.sample_uniform_variables(\n sample_shape=tf.shape(encoder_logits),\n nfold=1)\n b1 = tf.cast(u_noise > 1. - sigma_phi, tf.float32)\n b2 = tf.cast(u_noise < sigma_phi, tf.float32)\n f1 = self.get_elbo(input_tensor, b1)[:, tf.newaxis]\n f2 = self.get_elbo(input_tensor, b2)[:, tf.newaxis]\n # Only consider the case where b1_i \\neq b2_i\n armp_factor = ((1. - b1) * (b2) + b1 * (1. - b2)) # * (-1.)**b2\n layer_grad = (f1 - f2) * (u_noise - 0.5) * armp_factor\n\n elif grad_type == 'disarm':\n u_noise = self.encoder.sample_uniform_variables(\n sample_shape=tf.shape(encoder_logits),\n nfold=1)\n sigma_abs_phi = tf.math.sigmoid(tf.math.abs(encoder_logits))\n b1 = tf.cast(u_noise > 1. - sigma_phi, tf.float32)\n b2 = tf.cast(u_noise < sigma_phi, tf.float32)\n f1 = self.get_elbo(input_tensor, b1)[:, tf.newaxis]\n f2 = self.get_elbo(input_tensor, b2)[:, tf.newaxis]\n # the factor is I(b1+b2=1) * (-1)**b2 * sigma(|phi|)\n disarm_factor = ((1. - b1) * (b2) + b1 * (1. 
- b2)) * (-1.)**b2\n disarm_factor *= sigma_abs_phi\n layer_grad = 0.5 * (f1 - f2) * disarm_factor\n\n elif grad_type == 'reinforce':\n u_noise = self.encoder.sample_uniform_variables(\n sample_shape=tf.shape(encoder_logits),\n nfold=1)\n b = tf.cast(u_noise < sigma_phi, tf.float32)\n f = self.get_elbo(input_tensor, b)[:, tf.newaxis]\n layer_grad = f * (b - sigma_phi)\n\n elif grad_type == 'reinforce-2sample':\n u_noise = self.encoder.sample_uniform_variables(\n sample_shape=tf.shape(encoder_logits),\n nfold=2)\n u1, u2 = tf.split(u_noise, num_or_size_splits=2, axis=0)\n b1 = tf.cast(u1 < sigma_phi, tf.float32)\n b2 = tf.cast(u2 < sigma_phi, tf.float32)\n f1 = self.get_elbo(input_tensor, b1)[:, tf.newaxis]\n f2 = self.get_elbo(input_tensor, b2)[:, tf.newaxis]\n layer_grad = 0.5 * (f1 * (b1 - sigma_phi) + f2 * (b2 - sigma_phi))\n\n elif grad_type == 'reinforce_loo_baseline':\n u_noise = self.encoder.sample_uniform_variables(\n sample_shape=tf.shape(encoder_logits),\n nfold=2)\n u1, u2 = tf.split(u_noise, num_or_size_splits=2, axis=0)\n b1 = tf.cast(u1 < sigma_phi, tf.float32)\n b2 = tf.cast(u2 < sigma_phi, tf.float32)\n f1 = self.get_elbo(input_tensor, b1)[:, tf.newaxis]\n f2 = self.get_elbo(input_tensor, b2)[:, tf.newaxis]\n layer_grad = 0.5 * ((f1 - f2) * (b1 - sigma_phi)\n + (f2 - f1) * (b2 - sigma_phi))\n\n else:\n raise NotImplementedError('Gradient type %s is not supported.'%grad_type)\n\n return layer_grad\n\n def sample_binaries_with_loss(\n self,\n input_tensor,\n antithetic_sample=True):\n encoder_logits = self.encoder.get_logits(input_tensor)\n sigma_phi = tf.math.sigmoid(encoder_logits)\n scaling_factor = 0.5 if self.half_p_trick else 1.\n bernoulli_prob = scaling_factor * sigma_phi\n # returned u_noise would be of the shape [batch x num_samples, event_dim].\n u_noise = self.encoder.sample_uniform_variables(\n sample_shape=tf.shape(encoder_logits))\n\n theresholded_encoder_logits = self.threshold_around_zero(encoder_logits)\n\n if antithetic_sample:\n b1 = tf.cast(u_noise > 1. 
- bernoulli_prob, tf.float32)\n b2 = tf.cast(u_noise < bernoulli_prob, tf.float32)\n elbo_b1 = self.get_elbo(input_tensor, b1)\n elbo_b2 = self.get_elbo(input_tensor, b2)\n\n return b1, b2, elbo_b1, elbo_b2, theresholded_encoder_logits\n\n else:\n b = tf.cast(u_noise < bernoulli_prob, tf.float32)\n elbo = self.get_elbo(input_tensor, b)\n return b, elbo, theresholded_encoder_logits\n\n def get_relax_parameters(\n self,\n input_tensor,\n temperature=None,\n scaling_factor=None):\n if temperature is None:\n temperature = tf.math.exp(self.log_temperature_variable)\n if scaling_factor is None:\n scaling_factor = self.scaling_variable\n # [batch, hidden_units]\n encoder_logits = self.encoder.get_logits(input_tensor)\n\n # returned uniform_noise would be of the shape\n # [batch x 2, event_dim].\n uniform_noise = self.encoder.sample_uniform_variables(\n sample_shape=tf.shape(encoder_logits),\n nfold=2)\n # u_noise and v_noise are both of [batch, event_dim].\n u_noise, v_noise = tf.split(uniform_noise, num_or_size_splits=2, axis=0)\n\n theta = tf.math.sigmoid(encoder_logits)\n z = encoder_logits + logit_func(u_noise)\n b_sample = tf.cast(z > 0, tf.float32)\n\n v_prime = (b_sample * (v_noise * theta + 1 - theta)\n + (1 - b_sample) * v_noise * (1 - theta))\n # z_tilde ~ p(z | b)\n z_tilde = encoder_logits + logit_func(v_prime)\n\n elbo = self.get_elbo(input_tensor, b_sample)\n control_variate = self.get_relax_control_variate(\n input_tensor, z,\n temperature=temperature, scaling_factor=scaling_factor)\n conditional_control = self.get_relax_control_variate(\n input_tensor, z_tilde,\n temperature=temperature, scaling_factor=scaling_factor)\n\n log_q = tfd.Bernoulli(logits=encoder_logits).log_prob(b_sample)\n return elbo, control_variate, conditional_control, log_q\n\n def get_relax_control_variate(self, input_tensor, z_sample,\n temperature, scaling_factor):\n control_value = (\n scaling_factor *\n self.get_elbo(input_tensor, tf.math.sigmoid(z_sample/temperature)))\n if self.control_nn is not None:\n control_nn_input = tf.concat((input_tensor, z_sample), axis=-1)\n control_value += (scaling_factor\n * tf.squeeze(self.control_nn(control_nn_input),\n axis=-1))\n return control_value\n\n def _get_grad_variance(self, grad_variable, grad_sq_variable, grad_tensor):\n grad_variable.assign(grad_tensor)\n grad_sq_variable.assign(tf.square(grad_tensor))\n self.ema.apply([grad_variable, grad_sq_variable])\n\n # mean per component variance\n grad_var = (\n self.ema.average(grad_sq_variable)\n - tf.square(self.ema.average(grad_variable)))\n return grad_var\n\n def compute_grad_variance(\n self,\n grad_variables,\n grad_sq_variables,\n grad_tensors):\n # In order to use `tf.train.ExponentialMovingAverage`, one has to\n # use `tf.Variable`.\n grad_var = [\n tf.reshape(self._get_grad_variance(*g), [-1])\n for g in zip(grad_variables, grad_sq_variables, grad_tensors)]\n return tf.reduce_mean(tf.concat(grad_var, axis=0))\n\n def threshold_around_zero(self, input_tensor):\n if self.epsilon > 0.:\n return (tf.where(tf.math.greater(input_tensor, 0.),\n tf.math.maximum(input_tensor, self.epsilon),\n tf.math.minimum(input_tensor, -self.epsilon)))\n return input_tensor\n\n @property\n def encoder_vars(self):\n return self.encoder.trainable_variables\n\n @property\n def decoder_vars(self):\n return self.decoder.trainable_variables\n\n @property\n def prior_vars(self):\n return self.prior_dist.trainable_variables\n\n def get_vimco_losses(self, input_batch, num_samples):\n batch_size = tf.shape(input_batch)[0]\n tiled_inputs = 
tf.tile(input_batch, [num_samples, 1])\n elbo, _, _, encoder_llk = self.call(tiled_inputs)\n\n elbo = tf.reshape(elbo, [num_samples, batch_size])\n encoder_llk = tf.reshape(encoder_llk, [num_samples, batch_size])\n\n # [sample_size, batch_size]\n local_learning_signal = get_vimco_local_learning_signal(elbo)\n local_learning_signal = tf.stop_gradient(local_learning_signal)\n\n # [batch_size]\n multisample_objective = (\n tf.reduce_logsumexp(elbo, axis=0, keepdims=False) -\n tf.math.log(tf.cast(tf.shape(elbo)[0], tf.float32)))\n\n infnet_loss = -1. * (multisample_objective +\n tf.reduce_sum(local_learning_signal * encoder_llk,\n axis=0))\n genmo_loss = -1. * multisample_objective\n return genmo_loss, infnet_loss\n\n def get_local_disarm_losses(self, input_batch, num_samples,\n symmetrized=False):\n batch_size = tf.shape(input_batch)[0]\n tiled_inputs = tf.tile(input_batch, [num_samples, 1])\n\n # [batch_size, hidden_size]\n batch_encoder_logits = self.encoder.get_logits(input_batch)\n\n # b1, b2 are of the shape [num_samples * batch_size, hidden_dim]\n b1, b2, elbo_b1, elbo_b2, _ = (\n self.sample_binaries_with_loss(\n tiled_inputs,\n antithetic_sample=True))\n b1 = tf.reshape(b1, [num_samples, batch_size, -1])\n b2 = tf.reshape(b2, [num_samples, batch_size, -1])\n\n elbo_b1 = tf.reshape(elbo_b1, [num_samples, batch_size])\n elbo_b2 = tf.reshape(elbo_b2, [num_samples, batch_size])\n\n # infnet_grad_multiplier: [batch_size, hidden_dim]\n # batch_multisample_objective: [batch_size]\n if symmetrized:\n infnet_grad_multiplier_1, multisample_objective_1 = (\n get_local_disarm_learning_signal(\n b1, b2, elbo_b1, elbo_b2, batch_encoder_logits))\n infnet_grad_multiplier_2, multisample_objective_2 = (\n get_local_disarm_learning_signal(\n b2, b1, elbo_b2, elbo_b1, batch_encoder_logits))\n infnet_grad_multiplier = 0.5 * tf.stop_gradient(\n infnet_grad_multiplier_1 + infnet_grad_multiplier_2)\n multisample_objective = 0.5 * (\n multisample_objective_1 + multisample_objective_2)\n else:\n infnet_grad_multiplier, multisample_objective = (\n get_local_disarm_learning_signal(\n b1, b2, elbo_b1, elbo_b2, batch_encoder_logits))\n infnet_grad_multiplier = tf.stop_gradient(infnet_grad_multiplier)\n\n genmo_loss = -1. * multisample_objective\n\n infnet_loss = -1. 
* (infnet_grad_multiplier * batch_encoder_logits)\n return genmo_loss, infnet_loss\n\n def get_multisample_baseline_loss(self, input_batch, num_samples):\n \"\"\"Computes gradients for num_samples IWAE bound.\n\n This estimator uses 2 * num_samples, half to compute the bound\n and the other half to compute a baseline.\n\n Args:\n input_batch: Input tensor [batch_size, dim].\n num_samples: Number of samples for the IWAE bound.\n\n Returns:\n genmo_loss: Loss function for the model params.\n infnet_loss: Loss function for the inference network params.\n \"\"\"\n batch_size = tf.shape(input_batch)[0]\n tiled_inputs = tf.tile(input_batch, [2 * num_samples, 1])\n elbo, _, _, encoder_llk = self.call(tiled_inputs)\n\n elbo = tf.reshape(elbo, [2 * num_samples, batch_size])\n elbo_signal, elbo_baseline = tf.split(elbo, num_or_size_splits=2, axis=0)\n\n encoder_llk = tf.reshape(encoder_llk, [2 * num_samples, batch_size])\n encoder_llk = tf.split(encoder_llk, num_or_size_splits=2, axis=0)[0]\n\n # [batch_size]\n encoder_llk = tf.reduce_sum(encoder_llk, axis=0)\n control_variate = (\n tf.reduce_logsumexp(elbo_baseline, axis=0, keepdims=False) -\n tf.math.log(tf.cast(tf.shape(elbo_baseline)[0], tf.float32)))\n multisample_objective = (\n tf.reduce_logsumexp(elbo_signal, axis=0, keepdims=False) -\n tf.math.log(tf.cast(tf.shape(elbo_signal)[0], tf.float32)))\n\n # [num_samples, batch_size]\n learning_signal = multisample_objective - control_variate\n learning_signal = tf.stop_gradient(learning_signal)\n\n infnet_loss = -1. * (multisample_objective +\n learning_signal * encoder_llk)\n genmo_loss = -1. * multisample_objective\n return genmo_loss, infnet_loss\n\n def get_relax_loss(self, input_batch, temperature=None, scaling_factor=None):\n # elbo, control_variate, conditional_control should be of [batch_size]\n # log_q is of [batch_size, event_dim]\n elbo, control_variate, conditional_control, log_q = (\n self.get_relax_parameters(\n input_batch,\n temperature=temperature,\n scaling_factor=scaling_factor))\n\n # Define losses\n genmo_loss = -1. * elbo\n\n reparam_loss = -1. * (control_variate - conditional_control)\n\n # [batch_size]\n learning_signal = -1. * (elbo - conditional_control)\n self.mean_learning_signal = tf.reduce_mean(learning_signal)\n\n # [batch_size, hidden_size]\n learning_signal = tf.tile(\n tf.expand_dims(learning_signal, axis=-1),\n [1, tf.shape(log_q)[-1]])\n\n return genmo_loss, reparam_loss, learning_signal, log_q\n",
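The 'arm' and 'disarm' branches of get_layer_grad_estimation above share one uniform draw per coordinate and differ only in the multiplicative factor applied to the antithetic ELBO difference. A NumPy sketch (illustrative only) that checks both single-sample estimators against the exact gradient of E[f(b)] for a one-dimensional b ~ Bernoulli(sigmoid(phi)):

import numpy as np

rng = np.random.default_rng(0)
sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
f = lambda b: (b - 0.499) ** 2          # toy objective
phi = 0.3
sigma_phi = sigmoid(phi)

# d/dphi E[f(b)] = sigmoid(phi) * (1 - sigmoid(phi)) * (f(1) - f(0))
exact = sigma_phi * (1 - sigma_phi) * (f(1.0) - f(0.0))

n = 200000
u = rng.uniform(size=n)
b1 = (u > 1.0 - sigma_phi).astype(float)   # antithetic pair, as in the code
b2 = (u < sigma_phi).astype(float)
arm = (f(b1) - f(b2)) * (u - 0.5)
# DisARM factor: I(b1 != b2) * (-1)**b2 * sigmoid(|phi|)
disarm = 0.5 * (f(b1) - f(b2)) * ((-1.0) ** b2) * (b1 != b2) * sigmoid(abs(phi))

print(exact, arm.mean(), disarm.mean())    # all three should roughly agree
print(arm.var(), disarm.var())             # DisARM's variance is typically lower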
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Script for getting the top words associated with each emotion.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import Counter # pylint: disable=g-importing-member\nfrom collections import defaultdict # pylint: disable=g-importing-member\nimport math\nimport operator\nimport os\nimport re\nimport string\n\nfrom absl import app\nfrom absl import flags\nimport pandas as pd\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"data\", \"data/full_dataset\",\n \"Directory containing full dataset.\")\n\nflags.DEFINE_string(\"output\", \"tables/emotion_words.csv\",\n \"Output csv file for the emotion words.\")\n\nflags.DEFINE_string(\"emotion_file\", \"data/emotions.txt\",\n \"File containing list of emotions.\")\n\npunct_chars = list((set(string.punctuation) | {\n \"’\", \"‘\", \"–\", \"—\", \"~\", \"|\", \"“\", \"”\", \"…\", \"'\", \"`\", \"_\",\n \"“\"\n}) - set([\"#\"]))\npunct_chars.sort()\npunctuation = \"\".join(punct_chars)\nreplace = re.compile(\"[%s]\" % re.escape(punctuation))\n\n\ndef CheckAgreement(ex, min_agreement, all_emotions, max_agreement=100):\n \"\"\"Return the labels that at least min_agreement raters agree on.\"\"\"\n sum_ratings = ex[all_emotions].sum(axis=0)\n agreement = ((sum_ratings >= min_agreement) & (sum_ratings <= max_agreement))\n return \",\".join(sum_ratings.index[agreement].tolist())\n\n\ndef CleanText(text):\n \"\"\"Clean text.\"\"\"\n if isinstance(text, float):\n return []\n # lower case\n text = text.lower()\n # eliminate urls\n text = re.sub(r\"http\\S*|\\S*\\.com\\S*|\\S*www\\S*\", \" \", text)\n # eliminate @mentions\n text = re.sub(r\"\\s@\\S+\", \" \", text)\n # substitute all other punctuation with whitespace\n text = replace.sub(\" \", text)\n # replace all whitespace with a single space\n text = re.sub(r\"\\s+\", \" \", text)\n # strip off spaces on either end\n text = text.strip()\n words = text.split()\n return [w for w in words if len(w) > 2]\n\n\ndef LogOdds(counts1, counts2, prior, zscore=True):\n \"\"\"Calculates log odds ratio.\n\n Source: Dan Jurafsky.\n\n Args:\n counts1: dict of word counts for group 1\n counts2: dict of word counts for group 2\n prior: dict of prior word counts\n zscore: whether to z-score 
the log odds ratio\n\n Returns:\n delta: dict of delta values for each word.\n \"\"\"\n\n sigmasquared = defaultdict(float)\n sigma = defaultdict(float)\n delta = defaultdict(float)\n\n n1 = sum(counts1.values())\n n2 = sum(counts2.values())\n\n nprior = sum(prior.values())\n for word in prior.keys():\n if prior[word] == 0:\n delta[word] = 0\n continue\n l1 = float(counts1[word] + prior[word]) / ((n1 + nprior) -\n (counts1[word] + prior[word]))\n l2 = float(counts2[word] + prior[word]) / ((n2 + nprior) -\n (counts2[word] + prior[word]))\n sigmasquared[word] = 1 / (float(counts1[word]) + float(prior[word])) + 1 / (\n float(counts2[word]) + float(prior[word]))\n sigma[word] = math.sqrt(sigmasquared[word])\n delta[word] = (math.log(l1) - math.log(l2))\n if zscore:\n delta[word] /= sigma[word]\n return delta\n\n\ndef GetCounts(df):\n words = []\n for t in df[\"text\"]:\n words.extend(t)\n return Counter(words)\n\n\ndef main(_):\n print(\"Loading data...\")\n dfs = []\n for filename in os.listdir(FLAGS.data):\n if filename.endswith(\".csv\"):\n dfs.append(\n pd.read_csv(os.path.join(FLAGS.data, filename), encoding=\"utf-8\"))\n data = pd.concat(dfs)\n print(\"%d Examples\" % (len(set(data[\"id\"]))))\n print(\"%d Annotations\" % len(data))\n\n with open(FLAGS.emotion_file, \"r\") as f:\n all_emotions = f.read().splitlines()\n print(\"%d emotion Categories\" % len(all_emotions))\n\n print(\"Processing data...\")\n data[\"text\"] = data[\"text\"].apply(CleanText)\n agree_dict = data.groupby(\"id\").apply(CheckAgreement, 2,\n all_emotions).to_dict()\n data[\"agreement\"] = data[\"id\"].map(agree_dict)\n\n data = data[~data[\"agreement\"].isnull()]\n dicts = []\n for e in all_emotions:\n print(e)\n contains = data[\"agreement\"].str.contains(e)\n emotion_words = GetCounts(data[contains])\n other_words = GetCounts(data[~contains])\n prior = Counter()\n prior.update(dict(emotion_words))\n prior.update(dict(other_words))\n emotion_words_total = sum(emotion_words.values())\n delta = LogOdds(emotion_words, other_words, prior, True)\n c = 0\n for k, v in sorted(delta.items(), key=operator.itemgetter(1), reverse=True):\n if v < 3:\n continue\n dicts.append({\n \"emotion\": e,\n \"word\": k,\n \"odds\": \"%.2f\" % v,\n \"freq\": \"%.3f\" % (emotion_words[k] / emotion_words_total)\n })\n c += 1\n if c < 11:\n print(\"%s (%.2f)\" % (k, v))\n print(\"--------\")\n\n emotion_words_df = pd.DataFrame(dicts)\n emotion_words_df.to_csv(FLAGS.output, index=False, encoding=\"utf-8\")\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
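A tiny usage sketch for LogOdds above, with toy counts and assuming the function as defined in this file: the prior pools both groups' counts, and positive z-scored deltas indicate words leaning toward group 1.

from collections import Counter

group1 = Counter({'happy': 30, 'the': 100})
group2 = Counter({'sad': 25, 'the': 110})
prior = Counter()
prior.update(group1)
prior.update(group2)

delta = LogOdds(group1, group2, prior, zscore=True)
print(sorted(delta.items(), key=lambda kv: kv[1], reverse=True))
# 'happy' gets a large positive score, 'sad' a large negative one,
# while the common word 'the' stays near zero.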
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions, primarily used for finding the best hyperparameters.\n\"\"\"\nimport datetime\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nHPARAM_COLUMNS = ['xm_adv_conv_filters', 'xm_adv_entropy_regularization',\n 'xm_adversary_population_size',\n 'xm_antagonist_population_size', 'xm_non_negative_regret',\n 'xm_percent_random_episodes', 'xm_protagonist_episode_length',\n 'xm_protagonist_population_size', 'xm_agents_use_regret',\n 'xm_combined_population', 'xm_flexible_protagonist',\n 'xm_block_budget_weight']\n\n\ndef save_best_work_units_csv(experiments, csv_path=None, metrics=None,\n last_x_percent=.2, step_limit_1agent=None):\n \"\"\"Collects XM work unit IDs corresponding to best hparams and saves to csv.\n\n Args:\n experiments: A list of Experiment objects.\n csv_path: Location where the resulting csv should be saved.\n metrics: Metrics used to select best hparams (e.g. reward).\n last_x_percent: Select hparams that led to the best performance over the\n last X% of the run.\n step_limit_1agent: Restrict dataframes to this many steps before selecting\n best hparams for last X%. 
This is the step limit if only 1 agent is being\n trained, so will need to be adjusted for units with multiple agents.\n Returns:\n A pandas dataframe with the best work units.\n \"\"\"\n if metrics is None:\n metrics = ['SolvedPathLength', 'adversary_env_AdversaryReward']\n best_seeds_df = pd.DataFrame()\n\n for exp in experiments:\n if (not exp.hparam_sweep or exp.metrics_df is None\n or 'work_unit' not in exp.metrics_df.columns.values):\n continue\n\n print_header(exp, exp.metrics_df)\n\n if ('combined' in exp.name.lower() and\n 'xm_combined_population' not in exp.metrics_df.columns.values):\n exp.metrics_df['xm_combined_population'] = True\n\n metrics_df = exp.metrics_df\n for metric in metrics:\n if metric not in exp.metrics_df.columns.values:\n continue\n\n print('\\nLooking for highest', metric)\n best_setting = find_best_setting_for_metric(\n metrics_df, metric, run='eval', step_limit_1agent=step_limit_1agent,\n last_x_percent=last_x_percent, num_agents=exp.num_agents)\n setting_df = restrict_to_setting(metrics_df, best_setting)\n units = setting_df['work_unit'].unique().tolist()\n\n # Check for combined population and calculate number of agents in pop\n combined_population = False\n if 'xm_combined_population' in setting_df.columns.values:\n assert len(setting_df['xm_combined_population'].unique()) == 1\n combined_population = setting_df['xm_combined_population'].unique()[0]\n num_agents = calculate_num_agents_based_on_population(\n setting_df, exp.num_agents, combined_population)\n\n # Adjust step limit for number of agents\n if step_limit_1agent:\n step_limit = step_limit_1agent * num_agents\n else:\n step_limit = None\n\n scores = get_score_for_setting(\n metrics_df, exp.metrics, best_setting, step_limit=step_limit,\n last_x_percent=last_x_percent, run='eval')\n\n m_dict = {\n 'exp_name': exp.name,\n 'xm_id': exp.xm_id,\n 'settings': best_setting,\n 'best_seeds': [str(u) for u in units],\n 'metric': metric + '_last20%',\n 'score': scores[metric],\n 'work_units_tested': len(metrics_df['work_unit'].unique()),\n 'max_steps': metrics_df['step'].max()\n }\n best_seeds_df = best_seeds_df.append(m_dict, ignore_index=True)\n\n if metric == 'SolvedPathLength':\n single_best = metrics_df.loc[metrics_df[metric].idxmax()]\n search_params = get_search_params(metrics_df)\n settings = {}\n for s in search_params:\n settings[s] = single_best[s]\n\n m_dict = {\n 'exp_name': exp.name,\n 'xm_id': exp.xm_id,\n 'settings': settings,\n 'best_seeds': single_best['work_unit'],\n 'metric': metric + '_best_ever',\n 'score': single_best[metric],\n 'work_units_tested': len(metrics_df['work_unit'].unique()),\n 'max_steps': metrics_df['step'].max()\n }\n best_seeds_df = best_seeds_df.append(m_dict, ignore_index=True)\n\n if csv_path is not None:\n with tf.io.gfile.GFile(csv_path, 'wb') as f:\n best_seeds_df.to_csv(f)\n print('Saved best seeds csv to:', csv_path)\n\n return best_seeds_df\n\n\ndef combine_existing_transfer_data(transfer_dir, after_date=None,\n filter_n_trials=10.):\n \"\"\"Combine all transfer files after a certain date, dropping duplicates.\"\"\"\n files = tf.io.gfile.listdir(transfer_dir)\n\n # This will sort files, and trim any files pre-dating the after_date\n sorted_files = sort_files_by_date(files, after_date=after_date)\n\n df = pd.DataFrame()\n for transfer_file in reversed(sorted_files):\n transfer_df_path = os.path.join(transfer_dir, transfer_file)\n if tf.io.gfile.stat(transfer_df_path).length == 0:\n print('File', transfer_df_path, 'has length 0, skipping')\n continue\n\n with 
tf.io.gfile.GFile(transfer_df_path, 'rb') as f:\n file_df = pd.read_csv(f)\n print('\\nLoaded file', transfer_file, 'of length', len(file_df))\n\n if file_df.empty:\n continue\n\n # Remove previous rows which used a different number of trials\n if filter_n_trials is not None:\n prev_len = len(file_df)\n file_df = file_df[file_df['n'] == filter_n_trials]\n if len(file_df) != prev_len:\n print('Removed', prev_len - len(file_df),\n 'rows where n !=', filter_n_trials, '... New length is:',\n len(file_df))\n if file_df.empty:\n continue\n\n # Remove extra unnecessary index columns\n bad_cols = [c for c in file_df.columns.values if 'Unnamed' in c]\n file_df = file_df.drop(columns=bad_cols)\n\n if 'metric' not in file_df.columns.values:\n file_df['metric'] = ''\n\n print('\\tExperiments/metrics found in this file:',\n get_unique_combos_in_df(file_df, ['name', 'metric']))\n\n key_names = ['name', 'xm_id', 'seed', 'env', 'checkpoint', 'agent_id', 'n',\n 'domain_rand_comparable_checkpoint', 'metric', 'adv_id',\n 'adv_agent_id']\n\n # Merge in new rows\n deduped_file_df = drop_duplicates_but_alert(\n file_df, key_names, transfer_file)\n deduped_df = drop_duplicates_but_alert(\n df, key_names, 'main df')\n\n prev_len = len(deduped_df)\n df = pd.concat([deduped_df, deduped_file_df],\n sort=True).reset_index(drop=True)\n df.drop_duplicates(subset=key_names, inplace=True, keep='first')\n print('Added', len(df) - prev_len, 'new rows to the main df. It now has',\n len(df), 'rows')\n\n if len(df) == prev_len:\n continue\n\n assert prev_len < len(df), 'Merging should not remove rows'\n\n # Analyze which rows were added by this file\n new_rows = df[prev_len:]\n print('\\t', len(new_rows) / float(len(file_df)) * 100.,\n '% of the rows in file', transfer_file, 'were new.')\n print('\\tNew rows involve these experiments/metrics:',\n get_unique_combos_in_df(new_rows, ['name', 'metric']))\n\n return df\n\n\ndef sort_files_by_date(files, after_date=None):\n \"\"\"Sorts files by date, assuming the date is the last part of the filename.\n\n Will discard files with a date before the after_date.\n Args:\n files: A list of string filenames with the date as the last part of the\n string before the extension.\n after_date: A date such that any file dating after this date should be kept.\n Returns:\n A list of filenames in sorted order and with dates that are too early\n discarded.\n \"\"\"\n after_dt = None\n if after_date is not None:\n after_dt = datetime.datetime.strptime(after_date, '%d.%m.%Y.%H:%M:%S')\n\n trimmed_files = []\n datetimes = []\n for f in files:\n if f == 'transfer_results.csv' or 'transfer' not in f:\n continue\n\n end_idx = f.find('.csv')\n start_idx = end_idx - len('02.06.2020.07:58:30')\n date_str = f[start_idx:end_idx]\n dt = datetime.datetime.strptime(date_str, '%d.%m.%Y.%H:%M:%S')\n\n if after_dt is not None and dt < after_dt:\n # Ignore dates before the after_date\n continue\n\n datetimes.append(dt)\n trimmed_files.append(f)\n\n zipped_pairs = zip(datetimes, trimmed_files)\n sorted_files = [x for _, x in sorted(zipped_pairs)]\n return sorted_files\n\n\ndef drop_duplicates_but_alert(df, key_names, df_name):\n prev_len = len(df)\n deduped_df = df.drop_duplicates(key_names)\n if len(deduped_df) != prev_len:\n print('Dropped', prev_len - len(deduped_df), 'duplicates from', df_name)\n return deduped_df\n\n\ndef get_unique_combos_in_df(df, keys):\n for k in keys:\n df[k] = df[k].fillna('')\n return np.unique(['/'.join(k) for k in df[keys].values])\n\n\ndef 
calculate_num_agents_based_on_population(\n settings, exp_num_agents, combined_population=False, is_dict=False):\n \"\"\"Calculate how much to adjust steps based on number of trained agents.\"\"\"\n pop_sizes = {}\n\n # Get population sizes from a dictionary\n if is_dict:\n for pop_type in ['xm_protagonist_population_size',\n 'xm_antagonist_population_size',\n 'xm_adversary_population_size']:\n if pop_type in settings:\n pop_sizes[pop_type] = settings[pop_type]\n else:\n pop_sizes[pop_type] = 1\n\n # Get population sizes from a dataframe\n else:\n for pop_type in ['xm_protagonist_population_size',\n 'xm_antagonist_population_size',\n 'xm_adversary_population_size']:\n if pop_type in settings.columns.values:\n assert len(settings[pop_type].unique()) == 1\n pop_sizes[pop_type] = settings[pop_type].unique()[0]\n else:\n pop_sizes[pop_type] = 1\n\n if combined_population:\n num_agents = pop_sizes['xm_protagonist_population_size'] + \\\n pop_sizes['xm_adversary_population_size']\n elif exp_num_agents == 3:\n num_agents = pop_sizes['xm_protagonist_population_size'] + \\\n pop_sizes['xm_antagonist_population_size'] + \\\n pop_sizes['xm_adversary_population_size']\n elif exp_num_agents == 2:\n num_agents = pop_sizes['xm_protagonist_population_size'] + \\\n pop_sizes['xm_adversary_population_size']\n else:\n num_agents = 1\n\n return num_agents\n\n\ndef print_header(exp, df, last_x_percent=.2):\n \"\"\"Print information about a hyperparameter sweep experiment.\"\"\"\n print('HPARAM SWEEP =', exp.name)\n print('Looking at last', last_x_percent*100, '% of data')\n print('Considering', df['run'].unique())\n print('Model has been trained for', df['step'].max(), 'steps')\n print(len(df['work_unit'].unique()), 'work units reporting in\\n')\n\n\ndef get_search_params(df, hparam_columns=None):\n \"\"\"Find all different hyperparameter combinations present in an XM exp df.\"\"\"\n if hparam_columns is None:\n hparam_columns = HPARAM_COLUMNS\n search_hparams = [h for h in hparam_columns if h in df.columns.values]\n to_remove = []\n for h in search_hparams:\n if len(df[h].unique()) < 2:\n to_remove.append(h)\n return [h for h in search_hparams if h not in to_remove]\n\n\ndef restrict_to_setting(df, setting, run='eval'):\n \"\"\"Restrict an experiment dataframe to one hyperparameter setting.\"\"\"\n setting_df = df[df['run'] == run]\n for k, v in setting.items():\n if k in df.columns.values:\n setting_df = setting_df[setting_df[k] == v]\n return setting_df\n\n\ndef get_score_for_setting(df, metrics, setting, step_limit=None,\n last_x_percent=.2, run='eval', verbose=True,\n ignore_metrics=None):\n \"\"\"Find the average score across several metrics for an hparam setting.\"\"\"\n if ignore_metrics is None:\n ignore_metrics = ['NumEnvEpisodes', 'GoalX', 'GoalY']\n\n if verbose:\n print('Testing hparameter settings:')\n print(setting)\n setting_df = restrict_to_setting(df, setting, run)\n if verbose:\n print('There are', len(setting_df['work_unit'].unique()),\n 'work units with these settings')\n print('\\twhich are:', setting_df['work_unit'].unique())\n\n setting_df = setting_df.sort_values('step')\n\n if step_limit is not None:\n prev_len = len(setting_df)\n setting_df = setting_df[setting_df['step'] <= step_limit]\n if verbose:\n print('After restricting to step limit of', step_limit,\n 'the dataframe went from', prev_len, 'rows to', len(setting_df))\n\n start_step = int(len(setting_df) * (1-last_x_percent))\n scores = {}\n for metric in metrics:\n if metric not in setting_df.columns.values or metric in 
ignore_metrics:\n continue\n scores[metric] = setting_df[metric][start_step:].mean()\n if verbose: print('Mean', metric, scores[metric])\n return scores\n\n\ndef find_best_settings(df, metrics, verbose=True, step_limit_1agent=None,\n last_x_percent=.2, run='eval', hparam_columns=None,\n num_agents=1):\n \"\"\"Find the hparam settings that led to the highest score on each metric.\"\"\"\n if hparam_columns is None:\n hparam_columns = HPARAM_COLUMNS\n\n search_hparams = [h for h in hparam_columns if h in df.columns.values]\n to_remove = []\n for h in search_hparams:\n if h != 'xm_combined_population' and len(df[h].unique()) < 2:\n to_remove.append(h)\n search_hparams = [h for h in search_hparams if h not in to_remove]\n if verbose: print('Searching for combos of', search_hparams)\n hparam_combos = df[search_hparams].drop_duplicates()\n if verbose:\n print('Investigating', len(hparam_combos), 'hparam settings')\n\n scores_list = []\n settings_list = []\n for k, row in hparam_combos.iterrows():\n row_dict = row.to_dict()\n settings_list.append(row_dict)\n\n # Check for combined population. If True the number of agents varies per\n # hparam setting.\n combined_population = (\n 'xm_combined_population' in row_dict\n and row_dict['xm_combined_population']) or (\n 'xm_combined_population' in df.columns.values and\n df['xm_combined_population'].unique()[0])\n num_agents = calculate_num_agents_based_on_population(\n row_dict, num_agents, combined_population, is_dict=True)\n\n # Recompute step limit based on number of agents\n if step_limit_1agent is not None:\n step_limit = step_limit_1agent * num_agents\n else:\n step_limit = None\n\n scores_list.append(get_score_for_setting(\n df, metrics, row_dict, step_limit=step_limit,\n last_x_percent=last_x_percent, run=run, verbose=False))\n\n scores_dict = {k: [dic[k] for dic in scores_list] for k in scores_list[0]}\n return scores_dict, settings_list\n\n\ndef find_best_setting_for_metric(df, metric, run='eval', step_limit_1agent=None,\n last_x_percent=.2, num_agents=1):\n \"\"\"Find the hparam setting that led to the highest score on metric.\"\"\"\n scores_dict, settings_list = find_best_settings(\n df,\n [metric],\n run=run,\n step_limit_1agent=step_limit_1agent,\n last_x_percent=last_x_percent,\n num_agents=num_agents)\n\n scores = scores_dict[metric]\n max_idx = scores.index(max(scores))\n return settings_list[max_idx]\n\n\ndef restrict_to_best_setting_for_metric(df, metric, run='eval',\n last_x_percent=.2, num_agents=1,\n step_limit_1agent=None):\n \"\"\"Restrict df to hparam settings with highest score on metric.\"\"\"\n best_setting = find_best_setting_for_metric(\n df, metric, run=run, last_x_percent=last_x_percent, num_agents=num_agents,\n step_limit_1agent=step_limit_1agent)\n print('Found best setting', best_setting)\n return restrict_to_setting(df, best_setting)\n\n\ndef copy_recursively(source, destination):\n \"\"\"Copies a directory and its content.\n\n Args:\n source: Source directory.\n destination: Destination directory.\n \"\"\"\n for src_dir, _, src_files in tf.io.gfile.walk(source):\n dst_dir = os.path.join(destination, os.path.relpath(src_dir, source))\n if not tf.io.gfile.exists(dst_dir):\n tf.io.gfile.makedirs(dst_dir)\n for src_file in src_files:\n tf.io.gfile.copy(\n os.path.join(src_dir, src_file),\n os.path.join(dst_dir, src_file),\n overwrite=True)\n",
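"# Editor's addition: a hedged usage sketch (not from the original repository)\n# for the hyperparameter-selection helpers in the file above, chiefly\n# find_best_setting_for_metric() and restrict_to_setting(). The dataframe is\n# invented for illustration; its columns follow that file's conventions\n# ('run', 'work_unit', 'step', plus one hparam from HPARAM_COLUMNS).\nimport pandas as pd\n\ndf = pd.DataFrame({\n 'run': ['eval'] * 8,\n 'work_unit': [1, 1, 2, 2, 3, 3, 4, 4],\n 'step': [0, 100] * 4,\n 'xm_adv_conv_filters': [32] * 4 + [64] * 4,\n 'SolvedPathLength': [1.0, 2.0, 1.0, 3.0, 2.0, 5.0, 2.0, 6.0],\n})\n\n# Scores each hparam setting by its mean metric over the last 20% of the\n# step-sorted rows, then returns the winning setting as a dict.\nbest = find_best_setting_for_metric(df, 'SolvedPathLength')\n# Expected here: {'xm_adv_conv_filters': 64}; restrict the df to those runs.\nbest_df = restrict_to_setting(df, best)\n",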
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\nfrom smith.bert import optimization\n\n\nclass OptimizationTest(tf.test.TestCase):\n\n def test_adam(self):\n with self.test_session() as sess:\n w = tf.get_variable(\n \"w\",\n shape=[3],\n initializer=tf.constant_initializer([0.1, -0.2, -0.1]))\n x = tf.constant([0.4, 0.2, -0.5])\n loss = tf.reduce_mean(tf.square(x - w))\n tvars = tf.trainable_variables()\n grads = tf.gradients(loss, tvars)\n global_step = tf.train.get_or_create_global_step()\n optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)\n train_op = optimizer.apply_gradients(list(zip(grads, tvars)), global_step)\n init_op = tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer())\n sess.run(init_op)\n for _ in range(100):\n sess.run(train_op)\n w_np = sess.run(w)\n self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf3d.utils.pointcloud_utils.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom tf3d.utils import pointcloud_utils\n\n\nclass PointcloudUtilsTest(tf.test.TestCase):\n\n def get_sample_points(self):\n return tf.constant([[10.0, 12.0, 2.0],\n [2.0, 10.0, 9.0],\n [1.0, 11.0, 11.0],\n [0.0, 1.0, 11.0],\n [0.0, 0.0, 10.0],\n [-1.0, 1.0, 11.0],\n [11.0, 11.0, 1.0],\n [11.0, 12.0, -1.0],\n [0.0, 0.0, 11.0],\n [0.01, 0.0, 11.0]], dtype=tf.float32)\n\n def test_flip_normals_towards_viewpoint(self):\n points = tf.constant([[1.0, 1.0, 1.0],\n [2.0, 2.0, 1.0],\n [1.0, 2.0, 1.0],\n [2.0, 1.0, 1.0]], dtype=tf.float32)\n normals = tf.constant([[0.1, 0.2, -1.0],\n [0.1, 0.2, -1.0],\n [0.0, 0.0, -1.0],\n [0.1, -0.2, -1.0]], dtype=tf.float32)\n viewpoint = tf.constant([1.0, 1.0, 100.0])\n flipped_normals = pointcloud_utils.flip_normals_towards_viewpoint(\n points=points, normals=normals, viewpoint=viewpoint)\n self.assertAllClose(flipped_normals.numpy(),\n np.array([[-0.1, -0.2, 1.0],\n [-0.1, -0.2, 1.0],\n [0.0, 0.0, 1.0],\n [-0.1, 0.2, 1.0]]))\n\n # def test_points_to_normals_unbatched_pca(self):\n # points = tf.constant([[1.0, 1.0, 1.0],\n # [2.0, 2.0, 1.0],\n # [1.0, 2.0, 1.0],\n # [2.0, 1.0, 1.0]], dtype=tf.float32)\n # normals = pointcloud_utils.points_to_normals_unbatched(\n # points=points,\n # k=4,\n # distance_upper_bound=5.0,\n # viewpoint=tf.constant([1.0, 1.0, 100.0]),\n # method='pca')\n # self.assertAllClose(normals.numpy(),\n # np.array([[0.0, 0.0, 1.0],\n # [0.0, 0.0, 1.0],\n # [0.0, 0.0, 1.0],\n # [0.0, 0.0, 1.0]]),\n # atol=0.001)\n\n# def test_points_to_normals_unbatched_cross(self):\n# points = tf.constant([[1.0, 1.0, 1.0],\n# [2.0, 2.0, 1.0],\n# [1.0, 2.0, 1.0],\n# [2.0, 1.0, 1.0]], dtype=tf.float32)\n# normals = pointcloud_utils.points_to_normals_unbatched(\n# points=points,\n# k=3,\n# distance_upper_bound=5.0,\n# viewpoint=tf.constant([1.0, 1.0, 100.0]),\n# method='cross')\n# normals_zero = pointcloud_utils.points_to_normals_unbatched(\n# points=points, k=3, distance_upper_bound=0.5, method='cross')\n# self.assertAllClose(normals.numpy(),\n# np.array([[0.0, 0.0, 1.0],\n# [0.0, 0.0, 1.0],\n# [0.0, 0.0, 1.0],\n# [0.0, 0.0, 1.0]]),\n# atol=0.001)\n# self.assertAllClose(normals_zero.numpy(),\n# np.array([[0.0, 0.0, 0.0],\n# [0.0, 0.0, 0.0],\n# [0.0, 0.0, 0.0],\n# [0.0, 0.0, 0.0]]),\n# atol=0.001)\n\n# def test_points_to_normals(self):\n# points = tf.constant([[[1.0, 1.0, 1.0],\n# [2.0, 2.0, 1.0],\n# [1.0, 2.0, 1.0],\n# [2.0, 1.0, 1.0]]], dtype=tf.float32)\n# normals = pointcloud_utils.points_to_normals(\n# points=points,\n# num_valid_points=tf.constant([4], dtype=tf.int32),\n# k=4,\n# distance_upper_bound=5.0,\n# viewpoints=tf.constant([[1.0, 1.0, 100.0]]),\n# method='pca')\n# self.assertAllClose(normals.numpy(),\n# np.array([[[0.0, 0.0, 1.0],\n# [0.0, 0.0, 1.0],\n# [0.0, 0.0, 1.0],\n# [0.0, 0.0, 1.0]]]),\n# atol=0.001)\n\n def 
test_np_knn_graph_from_points_unbatched(self):\n points = np.array([[1.0, 1.0, 1.0],\n [1.0, 0.0, 1.0],\n [1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0],\n [1.0, 1.0, 0.0],\n [0.0, 1.0, 1.0],\n [0.0, 0.0, 0.0]])\n mask = np.array([1, 1, 0, 0, 1, 1, 0, 1], dtype=bool)\n distances, indices = pointcloud_utils.np_knn_graph_from_points_unbatched(\n points=points, k=3, distance_upper_bound=1.1, mask=mask)\n expected_distances = np.array([[0., 1., 1.],\n [0., 1., 1.],\n [0., 0., 0.],\n [0., 0., 0.],\n [0., 1., 1.],\n [0., 1., 0.],\n [0., 0., 0.],\n [0., 1., 0.]])\n expected_indices = np.array([[0, 5, 1],\n [1, 4, 0],\n [2, 2, 2],\n [3, 3, 3],\n [4, 7, 1],\n [5, 0, 5],\n [6, 6, 6],\n [7, 4, 7]])\n self.assertAllClose(distances, expected_distances)\n self.assertAllEqual(indices, expected_indices)\n\n def test_knn_graph_from_points_unbatched(self):\n points = self.get_sample_points()\n distances, indices = pointcloud_utils.knn_graph_from_points_unbatched(\n points, k=2, distance_upper_bound=2.0)\n expected_distances = tf.constant([[0, 1.73205081],\n [0, 0],\n [0, 0],\n [0, 1],\n [0, 1],\n [0, 1],\n [0, 1.73205081],\n [0, 0],\n [0, 0.01],\n [0, 0.01]], dtype=tf.float32)\n expected_indices = tf.constant([[0, 6],\n [1, 1],\n [2, 2],\n [3, 5],\n [4, 8],\n [5, 3],\n [6, 0],\n [7, 7],\n [8, 9],\n [9, 8]], dtype=tf.int32)\n self.assertAllClose(expected_distances.numpy(), distances.numpy())\n self.assertAllEqual(expected_indices.numpy(), indices.numpy())\n\n# def test_knn_graph_from_points_unbatched_less_than_k_dynamic_shape(self):\n# points = self.get_sample_points()\n# distances, indices = pointcloud_utils.knn_graph_from_points_unbatched(\n# points, k=20, distance_upper_bound=1000.0)\n# self.assertAllClose(distances.shape, [10, 20])\n# self.assertAllEqual(indices.shape, [10, 20])\n\n# def test_knn_graph_from_points_unbatched_dynamic_shape(self):\n# points = tf.random_uniform([500000, 3],\n# minval=-10.0,\n# maxval=10.0,\n# dtype=tf.float32)\n# distances, indices = pointcloud_utils.knn_graph_from_points_unbatched(\n# points, k=5, distance_upper_bound=100.0)\n# self.assertAllEqual(distances.shape, [500000, 5])\n# self.assertAllEqual(indices.shape, [500000, 5])\n\n# def test_knn_graph_from_points_dynamic_shape(self):\n# points = tf.random_uniform([8, 500000, 3],\n# minval=-10.0,\n# maxval=10.0,\n# dtype=tf.float32)\n# num_valid_points = tf.ones([8], dtype=tf.int32) * 500000\n# distances, indices = pointcloud_utils.knn_graph_from_points(\n# points,\n# num_valid_points=num_valid_points,\n# k=5,\n# distance_upper_bound=20.0)\n# self.assertAllEqual(distances.shape, [8, 500000, 5])\n# self.assertAllEqual(indices.shape, [8, 500000, 5])\n\n# def test_identity_knn_graph_unbatched(self):\n# points = self.get_sample_points()\n# distances, indices = pointcloud_utils.identity_knn_graph_unbatched(\n# points, 2)\n# expected_distances = tf.zeros([10, 2], dtype=tf.float32)\n# expected_indices = tf.constant([[0, 0],\n# [1, 1],\n# [2, 2],\n# [3, 3],\n# [4, 4],\n# [5, 5],\n# [6, 6],\n# [7, 7],\n# [8, 8],\n# [9, 9]], dtype=tf.int32)\n# self.assertAllClose(expected_distances.numpy(), distances.numpy())\n# self.assertAllEqual(expected_indices.numpy(), indices.numpy())\n\n# def test_identity_knn_graph_dynamic_shape(self):\n# points = tf.random_uniform([8, 500000, 3],\n# minval=-10.0,\n# maxval=10.0,\n# dtype=tf.float32)\n# num_valid_points = tf.ones([8], dtype=tf.int32) * 500000\n# distances, indices = pointcloud_utils.identity_knn_graph(\n# points, num_valid_points=num_valid_points, k=5)\n# 
self.assertAllEqual(distances.shape, [8, 500000, 5])\n# self.assertAllEqual(indices.shape, [8, 500000, 5])\n\n# def test_identity_knn_graph(self):\n# points = self.get_sample_points()\n# points = tf.tile(tf.expand_dims(points, axis=0), [8, 1, 1])\n# num_valid_points = tf.ones([8], dtype=tf.int32) * 10\n# distances, indices = pointcloud_utils.identity_knn_graph(\n# points, num_valid_points=num_valid_points, k=2)\n# expected_distances = tf.zeros([8, 10, 2], dtype=tf.float32)\n# expected_indices = tf.constant([[0, 0],\n# [1, 1],\n# [2, 2],\n# [3, 3],\n# [4, 4],\n# [5, 5],\n# [6, 6],\n# [7, 7],\n# [8, 8],\n# [9, 9]], dtype=tf.int32)\n# expected_indices = tf.tile(\n# tf.expand_dims(expected_indices, axis=0), [8, 1, 1])\n# self.assertAllClose(expected_distances.numpy(), distances.numpy())\n# self.assertAllEqual(expected_indices.numpy(), indices.numpy())\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Parameter-free optimization inside parameter-free optimization.\n\nLearns a preconditioned optimal direction using a two-layer algorithm\nstructure. An inner un-preconditioned online learning algorithm runs on each\ncoordinate of the problem. This inner optimizer is used to find an optimal\npre-conditioned direction for the outer optimization algorithm.\n\nSee the paper\nAshok Cutkosky, and Tamas Sarlos.\n\"Matrix-Free Preconditioning in Online Learning\", ICML 2019.\nhttp://proceedings.mlr.press/v97/cutkosky19b.html\n\nArgs:\n epsilon: regret at 0 of outer optimizer\n epsilon_v: regret at 0 of each coordinate of inner optimizer\n g_max: guess for maximum L1 norm of gradients\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nGATE_OP = 1\n\nOUTER_BETTING_FRACTION = \"outer_betting_fraction\"\nINNER_BETTING_FRACTION = \"inner_betting_fraction\"\n\nOUTER_WEALTH = \"outer_wealth\"\nINNER_WEALTH = \"inner_wealth\"\nINNER_REWARD = \"inner_reward\"\n\nINNER_SUM_GRAD_SQUARED = \"inner_sum_grad_squared\"\nINNER_SUM_GRAD = \"inner_sum_grad\"\n\nMAXIMUM_GRADIENT = \"maximum_gradient\"\nINNER_MAXIMUM_GRADIENT = \"inner_maximum_gradient\"\n\n# used if add_average flag is true\nAVERAGE_OFFSET = \"average_offset\"\nSUM_GRAD_NORM_SQUARED = \"sum_grad_norm_squared\"\nPREVIOUS_OFFSET = \"previous_offset\"\n\nEPSILON = \"epsilon\"\nEPSILON_V = \"epsilon_v\"\nG_MAX = \"g_max\"\nBETTING_DOMAIN = \"betting_domain\"\nETA = \"eta\"\nLR = \"learning_rate\"\n\nONSBET = \"ONSBET\"\nSCINOL = \"SCINOL\"\nINNER_OPTIMIZERS = [ONSBET, SCINOL]\n\nINITIAL_VALUE = \"initial_value\"\n\nSMALL_VALUE = 0.00000001\n\n\nclass RecursiveOptimizer(tf.train.Optimizer):\n \"\"\"RecursiveOptimizer implementation.\"\"\"\n\n def __init__(self,\n lr=1.0,\n epsilon=1.0,\n epsilon_v=1.0,\n g_max=SMALL_VALUE,\n betting_domain=0.5,\n tau=SMALL_VALUE,\n eta=None,\n rescale_inner=True,\n inner_optimizer=\"SCINOL\",\n add_average=False,\n beta=0.9,\n output_summaries=False,\n use_locking=False,\n name=\"RecursiveOptimizer\"):\n \"\"\"Construct new RecursiveOptimizer.\n\n Args:\n lr: ''learning rate'' - a scale factor on the predictions. Should have\n identical performance to changing epsilon (initial wealth).\n epsilon: regret at 0 of outer optimizer (this is the initial wealth).\n epsilon_v: regret at 0 of each coordinate of inner optimizer\n (per-coordinate initial wealth)\n g_max: guess for maximum L1 norm of gradients. In theory, this guess needs\n to be an over-estimate, otherwise all bounds are invalid in the worst\n case. In stochastic problems we shouldn't expect worst-case behavior and\n so violations of the bound are not bad. 
Larger values lead to a more\n conservative algorithm, so we opt for an aggressive default.\n betting_domain: maximum betting fraction.\n tau: initial value for denominator in inner optimizer update.\n eta: If inner optimizer is ONS, manually overrides the ONS learning rate.\n If inner optimizer is SCINOL, sets maximum betting fraction of SCINOL in\n first several iterations.\n rescale_inner: Modifies the behavior of the inner optimizer to adapt to\n gradient scaling. For ONS, rescale the gradients supplied to the inner\n optimizer by their maximum value. For SCINOL, scale the initial wealth\n epsilon_v by the maximum gradient value.\n inner_optimizer: which optimizer to use as inner optimizer. ONSBET\n corresponds to using coin-betting reduction with ONS as base optimizer.\n SCINOL corresponds to scale-invariant online learning algorithm\n (https://arxiv.org/pdf/1902.07528.pdf).\n add_average: Whether to add the weighted average of past iterates to the\n current iterate as described in (section 6 of\n https://arxiv.org/abs/1802.06293). This is \"morally\" similar to the momentum\n term in other SGD variants in that it pushes the iterates further in the\n direction they have been moving.\n beta: only relevant when add_average=True. Uses an exponentially weighted\n average with exponential parameter beta when computing the average\n iterate.\n output_summaries: Whether to output scalar_summaries of some internal\n variables. Note that this will significantly impact the number of\n iterations per second.\n use_locking: whether to use locks for update operations.\n name: name for optimizer.\n \"\"\"\n super(RecursiveOptimizer, self).__init__(use_locking, name)\n self.output_summaries = output_summaries\n self.g_max = max(g_max, SMALL_VALUE)\n self.epsilon = max(epsilon, SMALL_VALUE)\n self.epsilon_v = max(epsilon_v, SMALL_VALUE)\n self.tau = max(tau, SMALL_VALUE)\n self.rescale_inner = rescale_inner\n self.inner_optimizer = inner_optimizer\n self.add_average = add_average\n\n if self.inner_optimizer not in INNER_OPTIMIZERS:\n raise ValueError(\"Invalid inner optimizer!\")\n\n if eta is None or eta == 0:\n if inner_optimizer == ONSBET:\n # Set learning rate for the online newton step update.\n # This is the maximum eta such that:\n # f(x) - f(u) < f'(x) * (x-u) - ((x-u) f'(x))^2/(2*eta)\n # for all x,u in [-betting_domain, betting_domain]\n # where f(x) = -log(1+x)\n eta = 0.5 / (\n betting_domain -\n betting_domain**2 * np.log(1 + 1.0 / betting_domain))\n elif inner_optimizer == SCINOL:\n eta = 0.1\n self.eta = eta\n\n self.lr = lr\n self.beta = beta\n self.betting_domain = betting_domain\n\n self.non_slot_dict = {}\n\n # Propagates use_locking from constructor to\n # https://www.tensorflow.org/api_docs/python/tf/compat/v1/assign\n def _assign(self, ref, value):\n return tf.assign(ref, value, use_locking=self._use_locking)\n\n # Propagates use_locking from constructor to\n # https://www.tensorflow.org/api_docs/python/tf/compat/v1/assign_add\n def _assign_add(self, ref, value):\n return tf.assign_add(ref, value, use_locking=self._use_locking)\n\n def _create_slot_with_value(self, var, value, name, dtype=None):\n if dtype is None:\n dtype = var.dtype.base_dtype\n self._get_or_make_slot(var,\n tf.constant(value, shape=var.shape,\n dtype=dtype), name, name + \"_slot\")\n\n def _create_non_slot_with_value(self, value, name, dtype):\n non_slot = tf.get_variable(\n name=self.get_name() + \"/non_slot_variables/\" + name,\n dtype=dtype,\n trainable=False,\n initializer=value)\n 
self.non_slot_dict[name] = non_slot\n\n def _get_non_slot(self, name):\n return self.non_slot_dict[name]\n\n def _create_zeros_slot(self, var, name):\n self._zeros_slot(var, name, name + \"_slot\")\n\n def _create_slots(self, var_list):\n for var in var_list:\n # TODO(cutkosky): See if any of these can be eliminated, and if this\n # improves performance.\n self._create_zeros_slot(var, OUTER_BETTING_FRACTION)\n\n self._get_or_make_slot(var, var.initialized_value(), INITIAL_VALUE,\n INITIAL_VALUE + \"_slot\")\n self._create_slot_with_value(var, self.tau, INNER_SUM_GRAD_SQUARED)\n self._create_slot_with_value(var, self.g_max, INNER_MAXIMUM_GRADIENT)\n\n if self.inner_optimizer == SCINOL:\n self._create_zeros_slot(\n var,\n INNER_SUM_GRAD,\n )\n self._create_zeros_slot(var, INNER_REWARD)\n\n if self.inner_optimizer == ONSBET:\n self._create_zeros_slot(var, INNER_BETTING_FRACTION)\n self._create_slot_with_value(var, self.epsilon_v, INNER_WEALTH)\n\n if self.add_average:\n self._create_zeros_slot(var, AVERAGE_OFFSET)\n\n dtype = var_list[0].dtype.base_dtype\n self._create_non_slot_with_value(self.epsilon, OUTER_WEALTH, dtype)\n self._create_non_slot_with_value(self.g_max, MAXIMUM_GRADIENT, dtype)\n self._create_non_slot_with_value(0.0, SUM_GRAD_NORM_SQUARED, dtype)\n\n def _prepare(self):\n # These are dicts to hold per-variable intermediate values\n # that are recomputed from scratch every iteration.\n self.grads = {}\n\n # These dicts store increments that will be added up to obtain the\n # correct global value once all variables have been processed.\n self.betting_fraction_dot_product_deltas = {}\n self.wealth_deltas = {}\n self.grad_norms = {}\n\n def _resource_apply_dense(self, grad, var):\n return self._apply_dense(grad, var)\n\n def _apply_dense(self, grad, var):\n # We actually apply grads in _finish. This function is used\n # to record intermediate variables related to the individual gradients\n # which we eventually combine in _finish to obtain global statistics\n # (e.g. the L1 norm of the full gradient).\n\n self.grads[var] = grad\n\n betting_fraction = self.get_slot(var, OUTER_BETTING_FRACTION)\n self.betting_fraction_dot_product_deltas[var] = tf.reduce_sum(\n betting_fraction * grad)\n\n # Wealth increases by -g \\cdot w where w is the parameter value.\n # Since w = Wealth * v with betting fraction v, we can write\n # the wealth increment as -(g \\cdot v) Wealth.\n # TODO(cutkosky): at one point there was a bug in which epsilon\n # was not added here. It seemed performance may have degraded\n # somewhat after fixing this. 
Find out why this would be.\n wealth_delta = -self.betting_fraction_dot_product_deltas[\n var] * self._get_non_slot(OUTER_WEALTH)\n self.wealth_deltas[var] = wealth_delta\n\n self.grad_norms[var] = tf.norm(grad, 1)\n\n return tf.no_op()\n\n def _compute_inner_update(self, var, grad):\n if self.inner_optimizer == ONSBET:\n return self._compute_inner_update_onsbet(var, grad)\n if self.inner_optimizer == SCINOL:\n return self._compute_inner_update_scinol(var, grad)\n\n raise TypeError(\"Unknown inner_optimizer: \" + self.inner_optimizer)\n\n def _compute_inner_update_scinol(self, var, grad):\n update_ops = []\n\n betting_domain = tf.cast(self.betting_domain, var.dtype.base_dtype)\n\n reward = self.get_slot(var, INNER_REWARD)\n betting_fraction = self.get_slot(var, OUTER_BETTING_FRACTION)\n sum_grad_squared = self.get_slot(var, INNER_SUM_GRAD_SQUARED)\n sum_grad = self.get_slot(var, INNER_SUM_GRAD)\n inner_maximum_gradient = self.get_slot(var, INNER_MAXIMUM_GRADIENT)\n\n # clip inner gradient to respect previous inner_maximum_gradient value\n # This introduces at most an additive constant overhead in the regret\n # since the inner betting fraction lies in a bounded domain.\n clipped_grad = tf.clip_by_value(grad, -inner_maximum_gradient,\n inner_maximum_gradient)\n\n with tf.control_dependencies([clipped_grad]):\n inner_maximum_gradient_updated = self._assign(\n inner_maximum_gradient,\n tf.maximum(inner_maximum_gradient, tf.abs(grad)))\n update_ops.append(inner_maximum_gradient_updated)\n\n clipped_old_betting_fraction = tf.clip_by_value(betting_fraction,\n -betting_domain,\n betting_domain)\n\n # Process grad to respect truncation to [-betting_domain, betting_domain]\n truncated_grad = tf.where(\n tf.greater_equal(\n clipped_grad * (betting_fraction - clipped_old_betting_fraction),\n 0.0), clipped_grad, tf.zeros(tf.shape(clipped_grad)))\n\n reward_delta = -betting_fraction * truncated_grad\n reward_updated = self._assign_add(reward, reward_delta)\n update_ops.append(reward_updated)\n\n sum_grad_squared_updated = self._assign_add(sum_grad_squared,\n tf.square(truncated_grad))\n update_ops.append(sum_grad_squared_updated)\n\n sum_grad_updated = self._assign_add(sum_grad, truncated_grad)\n update_ops.append(sum_grad_updated)\n\n # The second term in this maximum, self.eta / inner_maximum_gradient_updated,\n # is a hack to force the betting fraction to not be too big at first.\n scaling = tf.minimum(\n tf.rsqrt(sum_grad_squared_updated +\n tf.square(inner_maximum_gradient_updated)),\n self.eta / inner_maximum_gradient_updated)\n theta = -sum_grad_updated * scaling\n\n # rescale inner flag is a hack that rescales the epsilon_v by the\n # maximum inner gradient.\n if self.rescale_inner:\n epsilon_scaling = inner_maximum_gradient_updated\n else:\n epsilon_scaling = 1.0\n\n inner_betting_fraction = tf.sign(theta) * tf.minimum(tf.abs(theta),\n 1.0) * scaling / 2.0\n new_betting_fraction = inner_betting_fraction * (\n reward_updated + epsilon_scaling * self.epsilon_v)\n\n betting_fraction_updated = self._assign(betting_fraction,\n new_betting_fraction)\n update_ops.append(betting_fraction_updated)\n\n clipped_betting_fraction = tf.clip_by_value(betting_fraction_updated,\n -betting_domain, betting_domain)\n\n if self.output_summaries:\n mean_unclipped_betting_fraction_summary = tf.reduce_mean(\n tf.abs(betting_fraction_updated))\n max_unclipped_betting_fraction_summary = tf.reduce_max(\n tf.abs(betting_fraction_updated))\n\n mean_clipped_betting_fraction_summary = tf.reduce_mean(\n 
tf.abs(clipped_betting_fraction))\n max_clipped_betting_fraction_summary = tf.reduce_max(\n tf.abs(clipped_betting_fraction))\n\n max_abs_gradient = tf.reduce_max(tf.abs(grad))\n max_truncated_grad = tf.reduce_max(tf.abs(truncated_grad))\n\n tf.summary.scalar(self._name + \"/mean_unclipped_bet/\" + var.name,\n mean_unclipped_betting_fraction_summary)\n tf.summary.scalar(self._name + \"/max_unclipped_bet/\" + var.name,\n max_unclipped_betting_fraction_summary)\n tf.summary.scalar(self._name + \"/mean_clipped_bet/\" + var.name,\n mean_clipped_betting_fraction_summary)\n tf.summary.scalar(self._name + \"/max_clipped_bet/\" + var.name,\n max_clipped_betting_fraction_summary)\n\n tf.summary.scalar(self._name + \"/max_abs_inner_grad/\" + var.name,\n max_abs_gradient)\n tf.summary.scalar(\n self._name + \"/max_abs_truncated_inner_grad/\" + var.name,\n max_truncated_grad)\n return clipped_betting_fraction, tf.group(*update_ops)\n\n def _compute_inner_update_onsbet(self, var, grad):\n update_ops = []\n\n eta = tf.cast(self.eta, var.dtype.base_dtype)\n betting_domain = tf.cast(self.betting_domain, var.dtype.base_dtype)\n\n wealth = self.get_slot(var, INNER_WEALTH)\n betting_fraction = self.get_slot(var, OUTER_BETTING_FRACTION)\n inner_betting_fraction = self.get_slot(var, INNER_BETTING_FRACTION)\n sum_grad_squared = self.get_slot(var, INNER_SUM_GRAD_SQUARED)\n inner_maximum_gradient = self.get_slot(var, INNER_MAXIMUM_GRADIENT)\n\n inner_maximum_gradient_updated = self._assign(\n inner_maximum_gradient, tf.maximum(inner_maximum_gradient,\n tf.abs(grad)))\n update_ops.append(inner_maximum_gradient_updated)\n\n clipped_old_betting_fraction = tf.clip_by_value(betting_fraction,\n -betting_domain,\n betting_domain)\n\n # Process grad to respect truncation to [-betting_domain, betting_domain]\n truncated_grad = tf.where(\n tf.greater_equal(\n grad * (betting_fraction - clipped_old_betting_fraction), 0), grad,\n tf.zeros(tf.shape(grad)))\n\n wealth_delta = -betting_fraction * truncated_grad\n wealth_updated = self._assign_add(wealth, wealth_delta)\n update_ops.append(wealth_updated)\n\n # This is the gradient with respect to the betting fraction v\n # used by the ONS algorithm - a kind of \"inner inner grad\".\n # Heuristic: We also scale v_grad down by the inner maximum gradient so as\n # to make it ``unitless''. 
This is helpful because the learning rate for\n # ONS is inversely proportional to sum v_grad**2, and so the scale of the learning\n # rate and of v_grad are unlikely to be properly matched without this.\n if self.rescale_inner:\n v_grad = truncated_grad / (\n (1.0 - inner_betting_fraction * truncated_grad) *\n inner_maximum_gradient_updated)\n else:\n v_grad = truncated_grad / (\n (1.0 - inner_betting_fraction * truncated_grad))\n\n sum_grad_squared_updated = self._assign_add(sum_grad_squared,\n tf.square(v_grad))\n update_ops.append(sum_grad_squared_updated)\n\n new_inner_betting_fraction = inner_betting_fraction - eta * v_grad / (\n sum_grad_squared_updated)\n new_inner_betting_fraction = tf.clip_by_value(new_inner_betting_fraction,\n -betting_domain,\n betting_domain)\n inner_betting_fraction_updated = self._assign(inner_betting_fraction,\n new_inner_betting_fraction)\n update_ops.append(inner_betting_fraction_updated)\n\n if self.output_summaries:\n mean_inner_betting_fraction_summary = tf.reduce_mean(\n tf.abs(inner_betting_fraction_updated))\n max_inner_betting_fraction_summary = tf.reduce_max(\n tf.abs(inner_betting_fraction_updated))\n inner_maximum_gradient_summary = tf.reduce_max(\n inner_maximum_gradient_updated)\n tf.summary.scalar(self._name + \"/mean_inner_betting/\" + var.name,\n mean_inner_betting_fraction_summary)\n tf.summary.scalar(self._name + \"/max_inner_betting/\" + var.name,\n max_inner_betting_fraction_summary)\n tf.summary.scalar(self._name + \"/inner_maximum_gradient/\" + var.name,\n inner_maximum_gradient_summary)\n\n betting_fraction_updated = self._assign(\n betting_fraction, inner_betting_fraction_updated * wealth_updated)\n update_ops.append(betting_fraction_updated)\n\n clipped_betting_fraction = tf.clip_by_value(betting_fraction_updated,\n -betting_domain, betting_domain)\n\n return clipped_betting_fraction, tf.group(*update_ops)\n\n def _finish(self, update_ops, name):\n\n outer_wealth = self._get_non_slot(OUTER_WEALTH)\n betting_domain = self.betting_domain\n maximum_gradient = self._get_non_slot(MAXIMUM_GRADIENT)\n\n wealth_increment = sum(self.wealth_deltas.values())\n betting_fraction_dot_product = sum(\n self.betting_fraction_dot_product_deltas.values())\n grad_norm = sum(self.grad_norms.values())\n\n maximum_gradient_updated = self._assign(\n maximum_gradient, tf.maximum(maximum_gradient, grad_norm))\n update_ops.append(maximum_gradient_updated)\n\n gradient_scaling = 1.0 / maximum_gradient_updated\n # We will replace gradient with gradient/maximum_gradient_updated in order\n # to ensure ||gradient||_1 \\le 1.\n # Since betting_fraction_dot_product and wealth_increment were calculated\n # using the original gradient, we also scale them by the same amount.\n betting_fraction_dot_product = betting_fraction_dot_product * gradient_scaling\n wealth_increment = wealth_increment * gradient_scaling\n\n outer_wealth_updated = self._assign_add(outer_wealth, wealth_increment)\n update_ops.append(outer_wealth_updated)\n\n inner_grad_scaling = (1.0 - betting_domain) / (1.0 -\n betting_fraction_dot_product)\n\n if self.output_summaries:\n tf.summary.scalar(self._name + \"/total_wealth\", outer_wealth_updated)\n tf.summary.scalar(self._name + \"/maximum_gradient_norm\",\n maximum_gradient_updated)\n tf.summary.scalar(self._name + \"/gradient_L1_norm\", grad_norm)\n\n if self.add_average:\n grad_norm_squared = tf.square(grad_norm)\n sum_grad_norm_squared = self._get_non_slot(SUM_GRAD_NORM_SQUARED)\n sum_grad_norm_squared_updated = self._assign(\n sum_grad_norm_squared,\n 
self.beta * sum_grad_norm_squared + grad_norm_squared)\n\n for var in self.grads:\n\n grad = self.grads[var]\n\n if self.inner_optimizer == SCINOL:\n inner_grad = grad * inner_grad_scaling\n else:\n # Rescale gradient to have L1 norm at most 1.0\n scaled_grad = grad * gradient_scaling\n inner_grad = scaled_grad * inner_grad_scaling\n\n betting_fraction, inner_update_op = self._compute_inner_update(\n var, inner_grad)\n update_ops.append(inner_update_op)\n\n if self.output_summaries:\n betting_fraction_summary = tf.reduce_mean(tf.abs(betting_fraction))\n tf.summary.scalar(self._name + \"/mean_abs_betting_fraction/\" + var.name,\n betting_fraction_summary)\n max_betting_fraction_summary = tf.reduce_max(tf.abs(betting_fraction))\n tf.summary.scalar(self._name + \"/max_abs_betting_fraction/\" + var.name,\n max_betting_fraction_summary)\n\n next_offset = self.lr * betting_fraction * outer_wealth_updated\n initial_value = self.get_slot(var, INITIAL_VALUE)\n\n if self.add_average:\n average_offset = self.get_slot(var, AVERAGE_OFFSET)\n average_offset_updated = self._assign_add(\n average_offset,\n (grad_norm_squared *\n (next_offset - average_offset)) / (sum_grad_norm_squared_updated))\n update_ops.append(average_offset_updated)\n\n var_updated = self._assign(\n var, next_offset + average_offset_updated + initial_value)\n else:\n var_updated = self._assign(var, next_offset + initial_value)\n update_ops.append(var_updated)\n\n return tf.group(*update_ops, name=name)\n",
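"# Editor's addition: a hedged, minimal sketch (not from the original\n# repository) of driving RecursiveOptimizer from the file above in a\n# TF1-style session, mirroring the AdamWeightDecayOptimizer test earlier in\n# this collection. The toy quadratic target is invented; minimize() is\n# inherited from tf.train.Optimizer and routes through the _create_slots /\n# _apply_dense / _finish overrides implemented above.\nimport tensorflow.compat.v1 as tf\n\nw = tf.get_variable('w', shape=[3], initializer=tf.zeros_initializer())\nloss = tf.reduce_mean(tf.square(tf.constant([0.4, 0.2, -0.5]) - w))\ntrain_op = RecursiveOptimizer(lr=1.0, inner_optimizer='SCINOL').minimize(loss)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for _ in range(100):\n sess.run(train_op)\n print(sess.run(w)) # inspect how far the iterate moved toward the target\n",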
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for gfsa.training.train_var_misuse_lib.\"\"\"\n\nfrom absl.testing import absltest\nimport flax\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom gfsa import sparse_operator\nfrom gfsa.datasets.var_misuse import example_definition\nfrom gfsa.model import side_outputs\nfrom gfsa.training import train_var_misuse_lib\n\n\nclass TrainVarMisuseLibTest(absltest.TestCase):\n\n def test_loss_fn(self):\n mock_example = example_definition.VarMisuseExample(\n input_graph=None,\n bug_node_index=2,\n repair_node_mask=jnp.array([0., 1., 1., 0.5, 0.]),\n candidate_node_mask=None,\n unique_candidate_operator=sparse_operator.SparseCoordOperator(\n input_indices=jnp.array([0, 1, 2, 3, 3, 4])[:, None],\n output_indices=jnp.array([0, 1, 1, 1, 2, 3])[:, None],\n values=jnp.array([1, 1, 1, 0.5, 0.5, 1])),\n repair_id=1)\n\n mock_metadata = object()\n\n @flax.nn.module\n def mock_model_def(example, metadata):\n # Check that we get the right inputs.\n self.assertIs(example, mock_example)\n self.assertIs(metadata, mock_metadata)\n\n # Register a side output\n side_outputs.SideOutput(jnp.array(.1234), name=\"test_penalty\")\n\n # Make sure we can generate an rng key with flax.\n _ = flax.nn.make_rng()\n\n return jnp.log(\n jnp.array([\n [.0, .0, .0, .0, .0],\n [.1, .0, .0, .2, .0],\n [.0, .1, .2, .2, .1], # <- This row is the \"correct\" bug index.\n [.0, .0, .0, .0, .0],\n [.1, .0, .0, .0, .0],\n ]))\n\n with flax.nn.stochastic(jax.random.PRNGKey(0)):\n _, params = mock_model_def.init(\n jax.random.PRNGKey(0), mock_example, mock_metadata)\n\n mock_model = flax.nn.Model(mock_model_def, params)\n\n loss, metrics = train_var_misuse_lib.loss_fn(\n mock_model, (mock_example, jax.random.PRNGKey(0)),\n mock_metadata,\n regularization_weights={\"penalty\": 2})\n\n np.testing.assert_allclose(metrics[\"nll/joint\"], -np.log(0.4), atol=1e-7)\n np.testing.assert_allclose(metrics[\"side/test_penalty\"], .1234, atol=1e-7)\n np.testing.assert_allclose(loss, -np.log(0.4) + 2 * .1234, atol=1e-7)\n\n np.testing.assert_allclose(\n metrics[\"nll/marginal_bug\"], -np.log(0.6), atol=1e-7)\n np.testing.assert_allclose(\n metrics[\"nll/marginal_repair\"], -np.log(0.5), atol=1e-7)\n np.testing.assert_allclose(\n metrics[\"nll/repair_given_bug\"], -np.log(0.4 / 0.6), atol=1e-7)\n np.testing.assert_allclose(\n metrics[\"nll/bug_given_repair\"], -np.log(0.4 / 0.5), atol=1e-7)\n np.testing.assert_allclose(metrics[\"inaccuracy/classification_overall\"], 0)\n np.testing.assert_allclose(\n metrics[\"inaccuracy/classification_given_nobug\"].numerator, 0)\n np.testing.assert_allclose(\n metrics[\"inaccuracy/classification_given_nobug\"].denominator, 0)\n np.testing.assert_allclose(\n metrics[\"inaccuracy/classification_given_bug\"].numerator, 0)\n np.testing.assert_allclose(\n metrics[\"inaccuracy/classification_given_bug\"].denominator, 1)\n\n\nif __name__ == \"__main__\":\n 
absltest.main()\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Main evaluation loop.\"\"\"\n\n# pylint: disable=unused-import\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport os\nimport pprint\nimport re\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport gym\nimport numpy as np\nfrom policies.exponential_family import ExponentialFamilyPolicy\nfrom policies.l_norm import LNormPolicy\nimport replay\nimport tensorflow.compat.v2 as tf\nfrom tensorflow.python.ops import summary_ops_v2 as summary\nimport util\nimport yaml\n\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('experiment_path', None,\n 'Where to load experiment info from.')\nflags.DEFINE_integer('num_episodes', -1, 'How many episodes to sample.')\n\n\ndef sample_episode(env, policy, memory, max_episode_length=1000):\n \"\"\"Collect episodes from policy.\"\"\"\n obs = env.reset()\n memory.log_init(obs)\n\n for step in range(max_episode_length):\n act = policy.argmax(np.expand_dims(obs, 0)).numpy()[0]\n next_obs, reward, term, _ = env.step(act)\n memory.log_experience(obs, act, reward, next_obs)\n if term:\n logging.info('Episode terminated early, step=%d', step)\n break\n obs = next_obs\n\n return memory\n\n\ndef most_recent_file(directory, regex_string):\n \"\"\"Returns the path of the most recently modified file matching the regex.\"\"\"\n file_times = [\n (f, os.path.getmtime(os.path.join(directory, f)))\n for f in os.listdir(directory)\n if re.search(regex_string, f)]\n if not file_times:\n return\n most_recent, _ = max(file_times, key=lambda x: x[1])\n return os.path.join(directory, most_recent)\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n tf.enable_v2_behavior()\n\n config_file = most_recent_file(FLAGS.experiment_path, r'config.yaml')\n assert config_file\n with open(config_file, 'r') as f:\n config = util.AttrDict(**yaml.load(f.read()))\n logging.info('Config:\\n%s', pprint.pformat(config))\n env = gym.make(config.env)\n cls = globals()[config.policy]\n policy = cls(config)\n # Initialize policy\n policy.argmax(np.expand_dims(env.reset(), 0))\n\n # Load checkpoint.\n # Assuming policy is a keras.Model instance.\n logging.info('policy variables: %s',\n [v.name for v in policy.trainable_variables])\n ckpt = tf.train.Checkpoint(policy=policy)\n ckpt_file = most_recent_file(FLAGS.experiment_path, r'model.ckpt-[0-9]+')\n if ckpt_file:\n ckpt_file = re.findall('^(.*/model.ckpt-[0-9]+)', ckpt_file)[0]\n logging.info('Checkpoint file: %s', ckpt_file)\n ckpt.restore(ckpt_file).assert_consumed()\n else:\n raise RuntimeError('No checkpoint found')\n\n summary_writer = tf.summary.create_file_writer(FLAGS.experiment_path,\n flush_millis=10000)\n\n logging.info('Starting Evaluation')\n it = (\n range(FLAGS.num_episodes) if FLAGS.num_episodes >= 0\n else itertools.count())\n for ep in it:\n memory = 
replay.Memory()\n sample_episode(env, policy, memory, max_episode_length=200)\n logging.info(ep)\n with summary_writer.as_default(), summary.always_record_summaries():\n summary.scalar('return', memory.observed_rewards().sum(), step=ep)\n summary.scalar('length', memory.observed_rewards().shape[-1], step=ep)\n\n logging.info('DONE')\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n# pylint: skip-file\n\ndef get_weight(shape, stddev, reg, name):\n wd = 5e-4\n init = tf.random_normal_initializer(stddev=stddev)\n if reg:\n regu = tf.contrib.layers.l2_regularizer(wd)\n filt = tf.get_variable(name, shape, initializer=init, regularizer=regu)\n else:\n filt = tf.get_variable(name, shape, initializer=init)\n return filt\n\ndef get_bias(shape, init_bias, reg, name):\n wd = 5e-4\n init = tf.constant_initializer(init_bias)\n if reg:\n regu = tf.contrib.layers.l2_regularizer(wd)\n bias = tf.get_variable(name, shape, initializer=init, regularizer=regu)\n else:\n bias = tf.get_variable(name, shape, initializer=init)\n return bias\n\ndef batch_norm(x, phase_train):\n \"\"\"\n Batch normalization on convolutional maps.\n Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow\n Args:\n x: Tensor, 4D BHWD input maps\n phase_train: boolean tf.Varialbe, true indicates training phase\n scope: string, variable scope\n Return:\n normed: batch-normalized maps\n \"\"\"\n with tf.variable_scope('bn'):\n n_out = x.get_shape().as_list()[3]\n gamma = get_bias(n_out, 1.0, True, 'gamma')\n beta = get_bias(n_out, 0.0, True, 'beta')\n batch_mean, batch_var = tf.nn.moments(x, [0,1,2], name='moments')\n ema = tf.train.ExponentialMovingAverage(decay=0.999)\n\n def mean_var_with_update():\n ema_apply_op = ema.apply([batch_mean, batch_var])\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n mean, var = tf.cond(phase_train,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)\n return normed\n\ndef max_pool(inputs, name, k_shape=[1, 2, 2, 1],s_shape=[1, 2, 2, 1]):\n with tf.variable_scope(name) as scope:\n outputs = tf.nn.max_pool(inputs, ksize=k_shape, strides=s_shape, padding='SAME', name=name)\n return outputs\n\ndef conv_2d(inputs, ksize, n_output, is_training, name, stride=1, pad='SAME', relu=True, reg=True, bn=True):\n with tf.variable_scope(name) as scope:\n n_input = inputs.get_shape().as_list()[3]\n shape = [ksize, ksize, n_input, n_output]\n # print(\"shape of filter %s: %s\" % (name, str(shape)))\n filt = get_weight(shape, stddev=tf.sqrt(2.0/tf.to_float(ksize*ksize*n_input)), reg=True, name='weight')\n outputs = tf.nn.conv2d(inputs, filt, [1, stride, stride, 1], padding=pad)\n if bn:\n outputs = batch_norm(outputs, is_training)\n if relu:\n outputs = tf.nn.relu(outputs)\n return outputs\n\ndef vgg(inputs, n_class, is_training):\n net = inputs\n\n for i in range(2): #128x128\n net = conv_2d(net, 3, 64, is_training, 'conv1_'+str(i))\n net = max_pool(net, 'pool1')\n\n for i in range(2): #64x64\n net = conv_2d(net, 3, 128, is_training, 'conv2_'+str(i))\n net = max_pool(net, 'pool2')\n\n for i in range(3): #32x32\n net = 
conv_2d(net, 3, 256, is_training, 'conv3_'+str(i))\n net = max_pool(net, 'pool3')\n\n for i in range(3): #16x16\n net = conv_2d(net, 3, 512, is_training, 'conv4_'+str(i))\n net = max_pool(net, 'pool4')\n\n for i in range(3): #8x8\n net = conv_2d(net, 3, 512, is_training, 'conv5_'+str(i))\n net = max_pool(net, 'pool5')\n\n net = conv_2d(net, 4, 256, is_training, 'fc1', pad='VALID')\n\n net = conv_2d(net, 1, 128, is_training, 'fc2', pad='VALID')\n\n net = tf.squeeze(conv_2d(net, 1, n_class, is_training, 'fc3', pad='VALID', relu=False, bn=False))\n\n return net\n",
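"# Editor's addition: a hedged sketch (not from the original repository) of\n# instantiating the vgg() graph above. The per-block comments in vgg()\n# assume 128x128 inputs, so after five 2x2 max-pools the 4x4 feature map is\n# collapsed by the ksize-4 VALID 'fc1' layer and the final squeeze leaves\n# [batch, n_class] logits. TF1-style placeholders are assumed, since the\n# file itself relies on tf.contrib.\nimport tensorflow.compat.v1 as tf\n\nimages = tf.placeholder(tf.float32, [None, 128, 128, 3])\nis_training = tf.placeholder(tf.bool, [])\nlogits = vgg(images, n_class=10, is_training=is_training)\n",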
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for ...tf3d.object_detection.model_utils.\"\"\"\n\nimport math\nimport numpy as np\nimport tensorflow as tf\nfrom tf3d import standard_fields\nfrom tf3d.object_detection import model_utils\n\n\nclass ModelUtilsTest(tf.test.TestCase):\n\n def test_normalize_cos_sin_rotation(self):\n outputs = {\n 'cos': tf.constant([[3.0], [6.0]], dtype=tf.float32),\n 'sin': tf.constant([[4.0], [8.0]], dtype=tf.float32)\n }\n model_utils.normalize_cos_sin_rotation(\n outputs=outputs, cos_key='cos', sin_key='sin')\n self.assertAllClose(outputs['cos'].numpy(), np.array([[0.6], [0.6]]))\n self.assertAllClose(outputs['sin'].numpy(), np.array([[0.8], [0.8]]))\n\n def test_make_boxes_positive(self):\n outputs = {\n 'length': tf.constant([[-1.0], [2.0], [-2.0]], dtype=tf.float32),\n 'height': tf.constant([[-2.0], [3.0], [-4.0]], dtype=tf.float32),\n 'width': tf.constant([[-3.0], [4.0], [-1.0]], dtype=tf.float32),\n }\n model_utils.make_box_sizes_positive(\n outputs=outputs,\n length_key='length',\n height_key='height',\n width_key='width')\n self.assertAllClose(outputs['length'].numpy(),\n np.array([[1.0], [2.0], [2.0]]))\n self.assertAllClose(outputs['height'].numpy(),\n np.array([[2.0], [3.0], [4.0]]))\n self.assertAllClose(outputs['width'].numpy(), np.array([[3.0], [4.0],\n [1.0]]))\n\n def test_rectify_outputs(self):\n lengths = tf.constant([[[1.2], [-0.9], [2.5], [1.3], [1.7], [21.0]]])\n heights = tf.constant([[[-3.2], [0.4], [-2.8], [1.4], [-1.1], [11.0]]])\n widths = tf.constant([[[5.1], [0.7], [2.3], [3.4], [3.1], [-31.0]]])\n rot_x_sin = tf.constant([[[0.0], [1.0], [-0.5], [2.0], [3.0], [4.0]]])\n rot_x_cos = tf.constant([[[1.0], [0.0], [-0.5], [2.0], [3.0], [4.0]]])\n rot_y_sin = tf.constant([[[0.0], [1.0], [-0.5], [2.0], [3.0], [4.0]]])\n rot_y_cos = tf.constant([[[1.0], [0.0], [-0.5], [2.0], [3.0], [4.0]]])\n rot_z_sin = tf.constant([[[0.0], [1.0], [-0.5], [2.0], [3.0], [4.0]]])\n rot_z_cos = tf.constant([[[1.0], [0.0], [-0.5], [2.0], [3.0], [4.0]]])\n expected_lengths = tf.constant([[[1.2], [0.9], [2.5], [1.3], [1.7],\n [21.0]]])\n expected_heights = tf.constant([[[3.2], [0.4], [2.8], [1.4], [1.1],\n [11.0]]])\n expected_widths = tf.constant([[[5.1], [0.7], [2.3], [3.4], [3.1], [31.0]]])\n expected_rot_x_sin = tf.constant([[[0.0], [1.0], [-math.sqrt(2) / 2],\n [math.sqrt(2) / 2], [math.sqrt(2) / 2],\n [math.sqrt(2) / 2]]])\n expected_rot_x_cos = tf.constant([[[1.0], [0.0], [-math.sqrt(2) / 2],\n [math.sqrt(2) / 2], [math.sqrt(2) / 2],\n [math.sqrt(2) / 2]]])\n expected_rot_y_sin = tf.constant([[[0.0], [1.0], [-math.sqrt(2) / 2],\n [math.sqrt(2) / 2], [math.sqrt(2) / 2],\n [math.sqrt(2) / 2]]])\n expected_rot_y_cos = tf.constant([[[1.0], [0.0], [-math.sqrt(2) / 2],\n [math.sqrt(2) / 2], [math.sqrt(2) / 2],\n [math.sqrt(2) / 2]]])\n expected_rot_z_sin = tf.constant([[[0.0], [1.0], [-math.sqrt(2) / 2],\n [math.sqrt(2) / 2], [math.sqrt(2) / 2],\n [math.sqrt(2) / 2]]])\n 
expected_rot_z_cos = tf.constant([[[1.0], [0.0], [-math.sqrt(2) / 2],\n [math.sqrt(2) / 2], [math.sqrt(2) / 2],\n [math.sqrt(2) / 2]]])\n outputs = {\n standard_fields.DetectionResultFields.object_length_voxels:\n lengths,\n standard_fields.DetectionResultFields.object_height_voxels:\n heights,\n standard_fields.DetectionResultFields.object_width_voxels:\n widths,\n standard_fields.DetectionResultFields.object_rotation_x_cos_voxels:\n rot_x_cos,\n standard_fields.DetectionResultFields.object_rotation_x_sin_voxels:\n rot_x_sin,\n standard_fields.DetectionResultFields.object_rotation_y_cos_voxels:\n rot_y_cos,\n standard_fields.DetectionResultFields.object_rotation_y_sin_voxels:\n rot_y_sin,\n standard_fields.DetectionResultFields.object_rotation_z_cos_voxels:\n rot_z_cos,\n standard_fields.DetectionResultFields.object_rotation_z_sin_voxels:\n rot_z_sin,\n }\n model_utils.rectify_outputs(outputs=outputs)\n expected_outputs = {\n standard_fields.DetectionResultFields.object_length_voxels:\n expected_lengths,\n standard_fields.DetectionResultFields.object_height_voxels:\n expected_heights,\n standard_fields.DetectionResultFields.object_width_voxels:\n expected_widths,\n standard_fields.DetectionResultFields.object_rotation_x_cos_voxels:\n expected_rot_x_cos,\n standard_fields.DetectionResultFields.object_rotation_x_sin_voxels:\n expected_rot_x_sin,\n standard_fields.DetectionResultFields.object_rotation_y_cos_voxels:\n expected_rot_y_cos,\n standard_fields.DetectionResultFields.object_rotation_y_sin_voxels:\n expected_rot_y_sin,\n standard_fields.DetectionResultFields.object_rotation_z_cos_voxels:\n expected_rot_z_cos,\n standard_fields.DetectionResultFields.object_rotation_z_sin_voxels:\n expected_rot_z_sin,\n }\n for key in outputs:\n if key in expected_outputs:\n self.assertAllClose(outputs[key].numpy(), expected_outputs[key].numpy())\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Runs qualitative evaluation given a model and saved checkpoint.\"\"\"\nimport time\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport gin\nimport gin.tf\nimport six\nimport tensorflow as tf\n\nfrom tf3d.utils import callback_utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_multi_string('import_module', None, 'List of modules to import.')\n\nflags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.')\n\nflags.DEFINE_string('eval_dir', '/tmp/masternet/',\n 'Directory where to write event logs.')\n\nflags.DEFINE_string('ckpt_dir', '/tmp/masternet/',\n 'Directory where to load checkpoint.')\n\nflags.DEFINE_string('config_file', None, 'The path to the config file.')\n\nflags.DEFINE_string('split', 'val', 'The data split to evaluate on.')\n\nflags.DEFINE_multi_string('params', None,\n 'Newline separated list of Gin parameter bindings.')\n\nflags.DEFINE_bool('run_functions_eagerly', False,\n 'Run function eargerly for easy debugging.')\n\nflags.DEFINE_integer(\n 'num_steps_per_epoch', 1000,\n 'Number of batches to train before saving the model weights again. The next'\n 'epoch will continue loading the data stream from where the current epoch'\n 'left behind. Used for calculating actual ckpt step number during eval.')\n\nflags.DEFINE_integer(\n 'num_steps_per_log', 100,\n 'Number of steps to log the eval progress.')\n\n\[email protected]('evaluation')\ndef evaluation(model_class=None,\n input_fn=None,\n num_quantitative_examples=1000,\n num_qualitative_examples=50):\n \"\"\"A function that build the model and eval quali.\"\"\"\n\n tensorboard_callback = callback_utils.CustomTensorBoard(\n log_dir=FLAGS.eval_dir,\n batch_update_freq=1,\n split=FLAGS.split,\n num_qualitative_examples=num_qualitative_examples,\n num_steps_per_epoch=FLAGS.num_steps_per_epoch)\n model = model_class()\n checkpoint = tf.train.Checkpoint(\n model=model,\n ckpt_saved_epoch=tf.Variable(initial_value=-1, dtype=tf.int64))\n val_inputs = input_fn(is_training=False, batch_size=1)\n num_evauated_epoch = -1\n\n while True:\n ckpt_path = tf.train.latest_checkpoint(FLAGS.ckpt_dir)\n if ckpt_path:\n ckpt_num_of_epoch = int(ckpt_path.split('/')[-1].split('-')[-1])\n if num_evauated_epoch == ckpt_num_of_epoch:\n logging.info('Found old epoch %d ckpt, skip and will check later.',\n num_evauated_epoch)\n time.sleep(30)\n continue\n try:\n logging.info('Restoring new checkpoint[epoch:%d] at %s',\n ckpt_num_of_epoch, ckpt_path)\n checkpoint.restore(ckpt_path)\n except tf.errors.NotFoundError:\n logging.info('Restoring from checkpoint has failed. 
Maybe file missing. '\n 'Try again now.')\n time.sleep(3)\n continue\n else:\n logging.info('No checkpoint found at %s, will check again 10 s later.',\n FLAGS.ckpt_dir)\n time.sleep(10)\n continue\n\n tensorboard_callback.set_epoch_number(ckpt_num_of_epoch)\n logging.info('Start eval for %d steps...',\n num_quantitative_examples)\n try:\n # TODO(huangrui): there is still possibility of crash due to\n # not found ckpt files.\n model._predict_counter.assign(0) # pylint: disable=protected-access\n tensorboard_callback.set_model(model)\n tensorboard_callback.on_predict_begin()\n for i, inputs in enumerate(\n val_inputs.take(num_quantitative_examples), start=1):\n tensorboard_callback.on_predict_batch_begin(batch=i)\n outputs = model(inputs, training=False)\n model._predict_counter.assign_add(1) # pylint: disable=protected-access\n tensorboard_callback.on_predict_batch_end(\n batch=i, logs={'outputs': outputs, 'inputs': inputs})\n if i % FLAGS.num_steps_per_log == 0:\n logging.info('eval progress %d / %d...', i, num_quantitative_examples)\n tensorboard_callback.on_predict_end()\n\n num_evaluated_epoch = ckpt_num_of_epoch\n logging.info('Finished eval for epoch %d, sleeping for %d s...',\n num_evaluated_epoch, 100)\n time.sleep(100)\n except tf.errors.NotFoundError:\n logging.info('Restoring from checkpoint has failed. Maybe file missing. '\n 'Try again now.')\n continue\n\n\ndef main(argv):\n del argv\n # Import modules BEFORE running Gin.\n if FLAGS.import_module:\n for module_name in FLAGS.import_module:\n __import__(module_name)\n\n # First, try to parse from a config file.\n if FLAGS.config_file:\n with tf.io.gfile.GFile(FLAGS.config_file) as f:\n bindings = f.readlines()\n bindings = [six.ensure_str(b) for b in bindings if b.strip()]\n gin.parse_config('\\n'.join(bindings))\n\n if FLAGS.params:\n gin.parse_config(FLAGS.params)\n\n if FLAGS.run_functions_eagerly:\n tf.config.experimental_run_functions_eagerly(True)\n\n if not tf.io.gfile.exists(FLAGS.eval_dir):\n tf.io.gfile.makedirs(FLAGS.eval_dir)\n\n evaluation()\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.INFO)\n app.run(main)\n",
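The polling loop above recovers the epoch number from the checkpoint filename with two string splits; a worked example on a hypothetical path of the `prefix-N` form written by `tf.train.Checkpoint`:

ckpt_path = '/tmp/masternet/ckpt-12'  # hypothetical checkpoint path
ckpt_num_of_epoch = int(ckpt_path.split('/')[-1].split('-')[-1])
assert ckpt_num_of_epoch == 12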
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Library containing Tensorflow implementations of gradient regularizers.\n\nEach regularizer applies a penalty to model input gradients, i.e. the gradients\nof some model output (e.g. logits, loss) with respect to the input.\n\"\"\"\n\nimport re\nimport tensorflow as tf\nfrom optimizing_interpretability import metrics_utils\n\n\ndef _compute_logit_gradients(logits, images, labels):\n \"\"\"Computes input gradients at the logits layer of a model.\n\n Specifically, for each input image in a batch, computes the gradient of the\n logit for the true class of the input with respect to that input. This\n corresponds to the sensitivity (gradient) heatmap for that image.\n\n Note that, for images x and class logits y, we are interested in dy_i / dx_i\n for each (x_i, y_i) pair. tf.gradients(y, x) computes d(sum_i y_i) / dx_i for\n each x_i in x. The former and latter are equivalent as long as y_j is not a\n function of x_i for i != j, i.e. as long as the output for a given example\n depends only on that example and not on the rest of the batch. Standard image\n models may violate this constraint if using batch norm, but we can get around\n this problem by stopping gradient flow through the batch norm moment tensors.\n (Other violations must be handled explicitly by the user.)\n\n Args:\n logits: the unscaled model outputs. A Tensor of shape [batch_size,\n num_classes].\n images: the batch of inputs corresponding to the given outputs. A Tensor of\n shape [batch_size, height, width, channels].\n labels: one-hot encoded. 
A Tensor of shape [batch_size, num_classes].\n\n Returns:\n The input gradients, a Tensor of the same shape as images.\n \"\"\"\n with tf.name_scope('computing_gradients'):\n class_logits = tf.reduce_sum(logits * labels, axis=1)\n # Collect all batch norm moment tensors in the graph, for stop_gradients.\n bn_ops = [\n x for x in tf.compat.v1.get_default_graph().get_operations() # pylint: disable=g-complex-comprehension\n if re.search(\n '(batch_normalization|BatchNorm)[^/]*/moments/(mean|variance):',\n x.name)\n ]\n grads = tf.gradients(class_logits, images, stop_gradients=bn_ops)[0]\n\n return grads\n\n\ndef _normalize(batch):\n \"\"\"Normalize a batch of images or gradient heatmaps to [0, 1].\"\"\"\n with tf.control_dependencies([tf.assert_equal(tf.rank(batch), 4)]):\n batch_min = tf.reduce_min(batch, axis=[1, 2, 3], keepdims=True)\n batch_max = tf.reduce_max(batch, axis=[1, 2, 3], keepdims=True)\n return (batch - batch_min) / (batch_max - batch_min)\n\n\ndef datagrad_regularizer(logits, images, labels):\n \"\"\"L2 norm of input gradients of loss.\"\"\"\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)\n grads = tf.gradients(loss, images)[0]\n return tf.reduce_mean(tf.square(grads))\n\n\n# pylint: disable=unused-argument\ndef spectreg_regularizer(logits, images, labels):\n \"\"\"L2 norm of input gradients of a random projection of logits.\"\"\"\n logits_proj = logits * tf.random.normal(tf.shape(logits))\n grads = tf.gradients(logits_proj, images)[0]\n return tf.reduce_mean(tf.square(grads))\n# pylint: enable=unused-argument\n\n\ndef l2_grad_regularizer(logits, images, labels):\n \"\"\"L2 norm of the gradient heatmap.\"\"\"\n grads = _compute_logit_gradients(logits, images, labels)\n return tf.reduce_mean(tf.square(grads))\n\n\ndef tv_grad_regularizer(logits, images, labels):\n \"\"\"Total variation norm of the gradient heatmap.\"\"\"\n grads = _compute_logit_gradients(logits, images, labels)\n reg_loss = tf.image.total_variation(grads)\n return tf.reduce_mean(reg_loss)\n\n\ndef tv_abs_grad_regularizer(logits, images, labels):\n \"\"\"TV norm of the abs gradient heatmap after reducing channels, scaling.\"\"\"\n grads = _compute_logit_gradients(logits, images, labels)\n grad_map = tf.reduce_sum(tf.abs(grads), -1, keep_dims=True)\n # Standardize the gradient map by its own per-image variance.\n _, grad_map_var = tf.nn.moments(grad_map, (1, 2), keep_dims=True)\n grad_map_standardized = grad_map / tf.sqrt(grad_map_var)\n return tf.reduce_mean(tf.image.total_variation(grad_map_standardized))\n\n\ndef tv_abs_unscaled_grad_regularizer(logits, images, labels):\n \"\"\"TV norm of the abs gradient heatmap after reducing channels.\"\"\"\n grads = _compute_logit_gradients(logits, images, labels)\n grad_map = tf.reduce_sum(tf.abs(grads), -1, keep_dims=True)\n return tf.reduce_mean(tf.image.total_variation(grad_map))\n\n\ndef mse_grad_regularizer(logits, images, labels):\n \"\"\"Mean squared error between the gradient heatmap and image.\"\"\"\n grads = _compute_logit_gradients(logits, images, labels)\n grads, images = _normalize(grads), _normalize(images)\n reg_loss = tf.squared_difference(grads, images)\n return tf.reduce_mean(reg_loss)\n\n\ndef cor_grad_regularizer(logits, images, labels):\n \"\"\"Inverse correlation between the gradient heatmap and image. 
MSE variant.\"\"\"\n grads = _compute_logit_gradients(logits, images, labels)\n image_means, image_vars = tf.nn.moments(images, (1, 2), keep_dims=True)\n grad_means, grad_vars = tf.nn.moments(grads, (1, 2), keep_dims=True)\n images_standardized = (images - image_means) / tf.sqrt(image_vars)\n grads_standardized = (grads - grad_means) / tf.sqrt(grad_vars)\n cor = tf.reduce_mean(images_standardized * grads_standardized, (1, 2))\n return 1 - tf.reduce_mean(tf.square(cor))\n\n\ndef graddiff_grad_regularizer(logits, images, labels):\n \"\"\"Image gradient difference loss between the gradient heatmap and image.\"\"\"\n grads = _compute_logit_gradients(logits, images, labels)\n grads, images = _normalize(grads), _normalize(images)\n reg_loss = tf.sqrt(metrics_utils.GradientDifferenceLoss(grads, images))\n return tf.reduce_mean(reg_loss)\n\n\ndef sobel_edges_grad_regularizer(logits, images, labels):\n \"\"\"Sobel edge map loss between the gradient heatmap and image.\"\"\"\n grads = _compute_logit_gradients(logits, images, labels)\n grads, images = _normalize(grads), _normalize(images)\n reg_loss = metrics_utils.SobelEdgeLoss(img1=grads, img2=images)\n return tf.reduce_mean(reg_loss)\n\n\ndef psnr_grad_regularizer(logits, images, labels):\n \"\"\"Reciprocal peak signal-to-noise ratio for gradient heatmap and image.\"\"\"\n grads = _compute_logit_gradients(logits, images, labels)\n grads, images = _normalize(grads), _normalize(images)\n reg_loss = tf.reciprocal(metrics_utils.PSNR(grads, images, max_val=1.))\n return tf.reduce_mean(reg_loss)\n\n\ndef ssim_unfiltered_grad_regularizer(logits, images, labels):\n \"\"\"SSIM variant using a moving average, rather than Gaussian, filter.\"\"\"\n grads = _compute_logit_gradients(logits, images, labels)\n grads, images = _normalize(grads), _normalize(images)\n reg_loss = -metrics_utils.SSIMWithoutFilter(grads, images, max_val=1.)\n return tf.reduce_mean(reg_loss)\n\n\nREGULARIZERS = {\n 'datagrad': datagrad_regularizer,\n 'spectreg': spectreg_regularizer,\n 'l2': l2_grad_regularizer,\n 'tv': tv_grad_regularizer,\n 'mse': mse_grad_regularizer,\n 'grad_diff': graddiff_grad_regularizer,\n 'sobel_edges': sobel_edges_grad_regularizer,\n 'psnr': psnr_grad_regularizer,\n 'ssim_unfiltered': ssim_unfiltered_grad_regularizer,\n 'cor': cor_grad_regularizer,\n 'tv_abs': tv_abs_grad_regularizer,\n 'tv_abs_unscaled': tv_abs_unscaled_grad_regularizer\n}\n\n\ndef compute_reg_loss(regularizer, logits, images, labels):\n \"\"\"Computes the specified regularization loss.\n\n Args:\n regularizer: string name of the penalty to apply.\n logits: the unscaled model outputs. A Tensor of shape [batch_size,\n num_classes].\n images: the batch of inputs corresponding to the given outputs. A Tensor of\n shape [batch_size, height, width, channels].\n labels: one-hot encoded. A Tensor of shape [batch_size, num_classes].\n\n Returns:\n The regularization penalty, a scalar Tensor.\n\n Raises:\n KeyError: if regularizer is not among the available penalties.\n \"\"\"\n if regularizer not in REGULARIZERS:\n raise KeyError('Regularizer not available.')\n with tf.name_scope(regularizer):\n reg_fn = REGULARIZERS[regularizer]\n return reg_fn(logits, images, labels)\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Loss computation utility functions.\"\"\"\n\nimport functools\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom poem.core import common\nfrom poem.core import data_utils\nfrom poem.core import distance_utils\nfrom poem.core import keypoint_utils\n\n\ndef create_sample_distance_fn(\n pair_type=common.DISTANCE_PAIR_TYPE_ALL_PAIRS,\n distance_kernel=common.DISTANCE_KERNEL_SQUARED_L2,\n pairwise_reduction=common.DISTANCE_REDUCTION_MEAN,\n componentwise_reduction=common.DISTANCE_REDUCTION_MEAN,\n **distance_kernel_kwargs):\n \"\"\"Creates sample distance function.\n\n Args:\n pair_type: An enum string (see `common`) for type of pairs to use.\n distance_kernel: An enum string (see `common`) or a function handle for\n point distance kernel to use.\n pairwise_reduction: An enum string (see `common`) or a function handle for\n pairwise distance reducer to use. If not a supported enum string, uses it\n directly as a function handle.\n componentwise_reduction: An enum string (see `common`) or a function handle\n for component-wise distance reducer to use. If not a supported enum\n string, uses it directly as a function handle.\n **distance_kernel_kwargs: A dictionary for additional arguments to be passed\n to the distance kernel. 
The keys are in the format\n `${distance_kernel_name}_${argument_name}`.\n\n Returns:\n A function handle for computing sample group distances that takes two\n tensors of shape [..., num_components, num_embeddings, embedding_dim] as\n input.\n \"\"\"\n\n def get_distance_matrix_fn():\n \"\"\"Selects point distance matrix function.\"\"\"\n if pair_type == common.DISTANCE_PAIR_TYPE_ALL_PAIRS:\n l2_distance_computer = distance_utils.compute_all_pair_l2_distances\n elif pair_type == common.DISTANCE_PAIR_TYPE_CORRESPONDING_PAIRS:\n l2_distance_computer = distance_utils.compute_corresponding_pair_l2_distances\n\n if distance_kernel == common.DISTANCE_KERNEL_SQUARED_L2:\n return functools.partial(l2_distance_computer, squared=True)\n\n if distance_kernel == common.DISTANCE_KERNEL_L2_SIGMOID_MATCHING_PROB:\n\n def compute_l2_sigmoid_matching_distances(lhs, rhs):\n \"\"\"Computes L2 sigmoid matching probability distances.\"\"\"\n inner_distances = l2_distance_computer(lhs, rhs, squared=False)\n return distance_utils.compute_sigmoid_matching_probabilities(\n inner_distances,\n a_initializer=distance_kernel_kwargs.get(\n (distance_kernel + '_a_initializer'), None),\n b_initializer=distance_kernel_kwargs.get(\n (distance_kernel + '_b_initializer'), None),\n name=distance_kernel_kwargs.get((distance_kernel + '_name'),\n 'MatchingSigmoid'))\n\n return compute_l2_sigmoid_matching_distances\n\n if distance_kernel == common.DISTANCE_KERNEL_EXPECTED_LIKELIHOOD:\n\n def compute_gaussian_likelihoods(lhs, rhs):\n \"\"\"Computes sample likelihoods.\"\"\"\n num_lhs_samples = lhs.shape.as_list()[-2] - 2\n num_rhs_samples = rhs.shape.as_list()[-2] - 2\n lhs_means, lhs_stddevs, lhs_samples = tf.split(\n lhs, [1, 1, num_lhs_samples], axis=-2)\n rhs_means, rhs_stddevs, rhs_samples = tf.split(\n rhs, [1, 1, num_rhs_samples], axis=-2)\n rhs_likelihoods = distance_utils.compute_gaussian_likelihoods(\n lhs_means,\n lhs_stddevs,\n rhs_samples,\n min_stddev=distance_kernel_kwargs.get(\n distance_kernel + '_min_stddev', None),\n max_squared_mahalanobis_distance=distance_kernel_kwargs.get(\n distance_kernel + '_max_squared_mahalanobis_distance', None),\n smoothing=distance_kernel_kwargs.get(distance_kernel + '_smoothing',\n None))\n lhs_likelihoods = distance_utils.compute_gaussian_likelihoods(\n rhs_means,\n rhs_stddevs,\n lhs_samples,\n l2_distance_computer=l2_distance_computer,\n min_stddev=distance_kernel_kwargs.get(\n distance_kernel + '_min_stddev', None),\n max_squared_mahalanobis_distance=distance_kernel_kwargs.get(\n distance_kernel + '_max_squared_mahalanobis_distance', None),\n smoothing=distance_kernel_kwargs.get(distance_kernel + '_smoothing',\n None))\n return (rhs_likelihoods + lhs_likelihoods) / 2.0\n\n return compute_gaussian_likelihoods\n\n raise ValueError('Unsupported distance kernel: `%s`.' 
%\n str(distance_kernel))\n\n def get_pairwise_distance_reduction_fn():\n \"\"\"Selects pairwise distance reduction function.\"\"\"\n if pairwise_reduction == common.DISTANCE_REDUCTION_MEAN:\n return functools.partial(tf.math.reduce_mean, axis=[-2, -1])\n if pairwise_reduction == common.DISTANCE_REDUCTION_LOWER_HALF_MEAN:\n return functools.partial(\n data_utils.compute_lower_percentile_means, axis=[-2, -1], q=50)\n if pairwise_reduction == common.DISTANCE_REDUCTION_NEG_LOG_MEAN:\n return lambda x: -tf.math.log(tf.math.reduce_mean(x, axis=[-2, -1]))\n\n if pairwise_reduction == common.DISTANCE_REDUCTION_LOWER_HALF_NEG_LOG_MEAN:\n\n def compute_lower_half_negative_log_mean(x):\n return -tf.math.log(\n data_utils.compute_lower_percentile_means(x, axis=[-2, -1], q=50))\n\n return compute_lower_half_negative_log_mean\n\n if pairwise_reduction == common.DISTANCE_REDUCTION_ONE_MINUS_MEAN:\n return lambda x: 1.0 - tf.math.reduce_mean(x, axis=[-2, -1])\n\n return pairwise_reduction\n\n def get_componentwise_distance_reduction_fn():\n \"\"\"Selects component-wise distance reduction function.\"\"\"\n if componentwise_reduction == common.DISTANCE_REDUCTION_MEAN:\n return functools.partial(tf.math.reduce_mean, axis=[-1])\n\n return componentwise_reduction\n\n def sample_distance_fn(lhs, rhs):\n \"\"\"Computes sample distances.\"\"\"\n distances = get_distance_matrix_fn()(lhs, rhs)\n distances = get_pairwise_distance_reduction_fn()(distances)\n distances = get_componentwise_distance_reduction_fn()(distances)\n return distances\n\n return sample_distance_fn\n\n\ndef compute_negative_indicator_matrix(anchor_points,\n match_points,\n distance_fn,\n min_negative_distance,\n anchor_point_masks=None,\n match_point_masks=None):\n \"\"\"Computes all-pair negative match indicator matrix.\n\n Args:\n anchor_points: A tensor for anchor points. Shape = [num_anchors, ...,\n point_dim].\n match_points: A tensor for match points. Shape = [num_matches, ...,\n point_dim].\n distance_fn: A function handle for computing distance matrix.\n min_negative_distance: A float for the minimum negative distance threshold.\n anchor_point_masks: A tensor for anchor point masks. Shape = [num_anchors,\n ...]. Ignored if None.\n match_point_masks: A tensor for match point masks. Shape = [num_matches,\n ...]. Ignored if None.\n\n Returns:\n A boolean tensor for negative indicator matrix. Shape = [num_anchors,\n num_matches].\n \"\"\"\n distance_matrix = distance_utils.compute_distance_matrix(\n anchor_points,\n match_points,\n distance_fn=distance_fn,\n start_point_masks=anchor_point_masks,\n end_point_masks=match_point_masks)\n return distance_matrix >= min_negative_distance\n\n\ndef compute_hard_negative_distances(anchor_match_distance_matrix,\n negative_indicator_matrix,\n use_semi_hard=False,\n anchor_positive_mining_distances=None,\n anchor_match_mining_distance_matrix=None):\n \"\"\"Computes (semi-)hard negative distances.\n\n Args:\n anchor_match_distance_matrix: A tensor for anchor/match distance matrix.\n Shape = [num_anchors, num_matches].\n negative_indicator_matrix: A tensor for anchor/match negative indicator\n matrix. Shape = [num_anchors, num_matches].\n use_semi_hard: A boolean for whether to compute semi-hard negative distances\n instead of hard negative distances.\n anchor_positive_mining_distances: A tensor for positive distances of each\n anchor for (semi-)hard negative mining. Only used if `use_semi_hard` is\n True. 
Shape = [num_anchors].\n anchor_match_mining_distance_matrix: A tensor for an alternative\n anchor/match distance matrix to use for (semi-)hard negative mining. Use\n None to ignore and use `anchor_match_distance_matrix` instead. If\n specified, must be of the same shape as `anchor_match_distance_matrix`.\n\n Returns:\n hard_negative_distances: A tensor for (semi-)hard negative distances. Shape\n = [num_anchors]. If an anchor has no (semi-)hard negative match, its\n negative distance will be assigned as the maximum value of\n anchor_match_distance_matrix.dtype.\n hard_negative_mining_distances: A tensor for (semi-)hard negative mining\n distances. Shape = [num_anchors]. If an anchor has no (semi-)hard negative\n match, its negative distance will be assigned as the maximum value of\n anchor_match_distance_matrix.dtype.\n\n Raises:\n ValueError: If `use_semi_hard` is True, but\n `anchor_positive_mining_distances` is not specified.\n \"\"\"\n indicators = negative_indicator_matrix\n if anchor_match_mining_distance_matrix is None:\n anchor_match_mining_distance_matrix = anchor_match_distance_matrix\n\n if use_semi_hard:\n if anchor_positive_mining_distances is None:\n raise ValueError('Positive match embeddings must be specified to compute '\n 'semi-hard distances.')\n anchor_positive_mining_distances = tf.expand_dims(\n anchor_positive_mining_distances, axis=-1)\n indicators &= (\n anchor_match_mining_distance_matrix > anchor_positive_mining_distances)\n\n def find_hard_distances(distance_matrix, indicator_matrix):\n distance_matrix = tf.where(\n tf.stop_gradient(indicator_matrix), distance_matrix,\n tf.fill(tf.shape(distance_matrix), distance_matrix.dtype.max))\n hard_distances = tf.math.reduce_min(distance_matrix, axis=-1)\n return hard_distances\n\n hard_negative_mining_distances = find_hard_distances(\n anchor_match_mining_distance_matrix, indicators)\n\n indicators &= tf.math.equal(\n anchor_match_mining_distance_matrix,\n tf.expand_dims(hard_negative_mining_distances, axis=-1))\n\n hard_negative_distances = find_hard_distances(anchor_match_distance_matrix,\n indicators)\n\n return hard_negative_distances, hard_negative_mining_distances\n\n\ndef compute_hard_negative_triplet_loss(\n anchor_positive_distances,\n anchor_match_distance_matrix,\n anchor_match_negative_indicator_matrix,\n margin,\n use_semi_hard,\n anchor_positive_mining_distances=None,\n anchor_match_mining_distance_matrix=None):\n \"\"\"Computes triplet loss with (semi-)hard negative mining.\n\n Args:\n anchor_positive_distances: A tensor for anchor/positive distances. Shape =\n [num_anchors].\n anchor_match_distance_matrix: A tensor for anchor/match distance matrix.\n Shape = [num_anchors, num_matches].\n anchor_match_negative_indicator_matrix: A tensor for anchor/match negative\n indicator matrix. Shape = [num_anchors, num_matches].\n margin: A float for triplet loss margin.\n use_semi_hard: A boolean for whether to compute semi-hard negative distances\n instead of hard negative distances.\n anchor_positive_mining_distances: A tensor for positive distances of each\n anchor for (semi-)hard negative mining. Only used if `use_semi_hard` is\n True. Shape = [num_anchors].\n anchor_match_mining_distance_matrix: A tensor for an alternative\n anchor/match distance matrix to use for (semi-)hard negative mining. Use\n None to ignore and use `anchor_match_distance_matrix` instead. If\n specified, must be of the same shape as `anchor_match_distance_matrix`.\n\n Returns:\n loss: A tensor for loss. 
Shape = [].\n num_active_triplets: A tensor for number of active triplets. Shape = [].\n anchor_negative_distances: A tensor for anchor/negative distances. Shape =\n [num_anchors]. If an anchor has no (semi-)hard negative match, its\n negative distance will be assigned as the maximum value of\n anchor_match_distance_matrix.dtype.\n mining_loss: A tensor for loss based on mining distances. Shape = [].\n num_active_mining_triplets: A tensor for number of active triplets based on\n mining distances. Shape = [].\n anchor_negative_mining_distances: A tensor for anchor/negative mining\n distances. Shape = [num_anchors]. If an anchor has no (semi-)hard negative\n match, its negative distance will be assigned as the maximum value of\n anchor_match_mining_distance_matrix.dtype.\n \"\"\"\n if anchor_positive_mining_distances is None:\n anchor_positive_mining_distances = anchor_positive_distances\n if anchor_match_mining_distance_matrix is None:\n anchor_match_mining_distance_matrix = anchor_match_distance_matrix\n\n anchor_negative_distances, anchor_negative_mining_distances = (\n compute_hard_negative_distances(\n anchor_match_distance_matrix,\n anchor_match_negative_indicator_matrix,\n use_semi_hard=use_semi_hard,\n anchor_positive_mining_distances=anchor_positive_mining_distances,\n anchor_match_mining_distance_matrix=(\n anchor_match_mining_distance_matrix)))\n\n def compute_triplet_loss(positive_distances, negative_distances):\n losses = tf.nn.relu(positive_distances + margin - negative_distances)\n losses = tf.where(\n tf.stop_gradient(losses < losses.dtype.max), losses,\n tf.zeros_like(losses))\n num_nonzero_losses = tf.math.count_nonzero(losses)\n loss = tf.math.reduce_mean(losses)\n return loss, num_nonzero_losses\n\n loss, num_active_triplets = compute_triplet_loss(anchor_positive_distances,\n anchor_negative_distances)\n mining_loss, num_active_mining_triplets = compute_triplet_loss(\n anchor_positive_mining_distances, anchor_negative_mining_distances)\n\n return (loss, num_active_triplets, anchor_negative_distances, mining_loss,\n num_active_mining_triplets, anchor_negative_mining_distances)\n\n\ndef compute_keypoint_triplet_losses(\n anchor_embeddings,\n positive_embeddings,\n match_embeddings,\n anchor_keypoints,\n match_keypoints,\n margin,\n min_negative_keypoint_distance,\n use_semi_hard,\n exclude_inactive_triplet_loss,\n anchor_keypoint_masks=None,\n match_keypoint_masks=None,\n embedding_sample_distance_fn=create_sample_distance_fn(),\n keypoint_distance_fn=keypoint_utils.compute_procrustes_aligned_mpjpes,\n anchor_mining_embeddings=None,\n positive_mining_embeddings=None,\n match_mining_embeddings=None,\n summarize_percentiles=True):\n \"\"\"Computes triplet losses with both hard and semi-hard negatives.\n\n Args:\n anchor_embeddings: A tensor for anchor embeddings. Shape = [num_anchors,\n embedding_dim] or [num_anchors, num_samples, embedding_dim].\n positive_embeddings: A tensor for positive match embeddings. Shape =\n [num_anchors, embedding_dim] or [num_anchors, num_samples, embedding_dim].\n match_embeddings: A tensor for candidate negative match embeddings. 
Shape =\n [num_matches, embedding_dim] or [num_matches, num_samples, embedding_dim].\n anchor_keypoints: A tensor for anchor keypoints for computing pair labels.\n Shape = [num_anchors, ..., num_keypoints, keypoint_dim].\n match_keypoints: A tensor for match keypoints for computing pair labels.\n Shape = [num_matches, ..., num_keypoints, keypoint_dim].\n margin: A float for triplet loss margin.\n min_negative_keypoint_distance: A float for the minimum negative distance\n threshold. If negative, uses all other samples as negative matches. In\n this case, `num_anchors` and `num_matches` are assumed to be equal. Note\n that this option saves negative match computation. To support different\n `num_anchors` and `num_matches`, set this to 0 (forgoing that saving).\n use_semi_hard: A boolean for whether to use semi-hard negative triplet loss\n as the final loss.\n exclude_inactive_triplet_loss: A boolean for whether to exclude inactive\n triplets in the final loss computation.\n anchor_keypoint_masks: A tensor for anchor keypoint masks for computing pair\n labels. Shape = [num_anchors, ..., num_keypoints]. Ignored if None.\n match_keypoint_masks: A tensor for match keypoint masks for computing pair\n labels. Shape = [num_matches, ..., num_keypoints]. Ignored if None.\n embedding_sample_distance_fn: A function handle for computing sample\n embedding distances, which takes two embedding tensors of shape [...,\n num_samples, embedding_dim] and returns a distance tensor of shape [...].\n keypoint_distance_fn: A function handle for computing keypoint distance\n matrix, which takes two matrix tensors and returns an element-wise\n distance matrix tensor.\n anchor_mining_embeddings: A tensor for anchor embeddings for triplet mining.\n Shape = [num_anchors, embedding_dim] or [num_anchors, num_samples,\n embedding_dim]. Use None to ignore and use `anchor_embeddings` instead.\n positive_mining_embeddings: A tensor for positive match embeddings for\n triplet mining. Shape = [num_anchors, embedding_dim] or [num_anchors,\n num_samples, embedding_dim]. Use None to ignore and use\n `positive_embeddings` instead.\n match_mining_embeddings: A tensor for candidate negative match embeddings\n for triplet mining. Shape = [num_matches, embedding_dim] or [num_matches,\n num_samples, embedding_dim]. Use None to ignore and use `match_embeddings`\n instead.\n summarize_percentiles: A boolean for whether to summarize percentiles of\n certain variables, e.g., embedding distances in triplet loss. Consider\n turning this off in case tensorflow_probability percentile computation\n causes failures at random due to an empty tensor.\n\n Returns:\n loss: A tensor for triplet loss. 
Shape = [].\n summaries: A dictionary for loss and batch statistics summaries.\n \"\"\"\n\n def maybe_expand_sample_dim(embeddings):\n if len(embeddings.shape.as_list()) == 2:\n return tf.expand_dims(embeddings, axis=-2)\n return embeddings\n\n anchor_embeddings = maybe_expand_sample_dim(anchor_embeddings)\n positive_embeddings = maybe_expand_sample_dim(positive_embeddings)\n match_embeddings = maybe_expand_sample_dim(match_embeddings)\n\n if min_negative_keypoint_distance >= 0.0:\n anchor_match_negative_indicator_matrix = (\n compute_negative_indicator_matrix(\n anchor_points=anchor_keypoints,\n match_points=match_keypoints,\n distance_fn=keypoint_distance_fn,\n min_negative_distance=min_negative_keypoint_distance,\n anchor_point_masks=anchor_keypoint_masks,\n match_point_masks=match_keypoint_masks))\n else:\n num_anchors = tf.shape(anchor_keypoints)[0]\n anchor_match_negative_indicator_matrix = tf.math.logical_not(\n tf.eye(num_anchors, dtype=tf.bool))\n\n anchor_positive_distances = embedding_sample_distance_fn(\n anchor_embeddings, positive_embeddings)\n\n if anchor_mining_embeddings is None and positive_mining_embeddings is None:\n anchor_positive_mining_distances = anchor_positive_distances\n else:\n anchor_positive_mining_distances = embedding_sample_distance_fn(\n anchor_embeddings if anchor_mining_embeddings is None else\n maybe_expand_sample_dim(anchor_mining_embeddings),\n positive_embeddings if positive_mining_embeddings is None else\n maybe_expand_sample_dim(positive_mining_embeddings))\n\n anchor_match_distance_matrix = distance_utils.compute_distance_matrix(\n anchor_embeddings,\n match_embeddings,\n distance_fn=embedding_sample_distance_fn)\n\n if anchor_mining_embeddings is None and match_mining_embeddings is None:\n anchor_match_mining_distance_matrix = anchor_match_distance_matrix\n else:\n anchor_match_mining_distance_matrix = distance_utils.compute_distance_matrix(\n anchor_embeddings if anchor_mining_embeddings is None else\n maybe_expand_sample_dim(anchor_mining_embeddings),\n match_embeddings if match_mining_embeddings is None else\n maybe_expand_sample_dim(match_mining_embeddings),\n distance_fn=embedding_sample_distance_fn)\n\n num_total_triplets = tf.cast(tf.shape(anchor_embeddings)[0], dtype=tf.float32)\n\n def compute_loss_and_create_summaries(use_semi_hard):\n \"\"\"Computes loss and creates summaries.\"\"\"\n (loss, num_active_triplets, negative_distances, mining_loss,\n num_active_mining_triplets, negative_mining_distances) = (\n compute_hard_negative_triplet_loss(\n anchor_positive_distances,\n anchor_match_distance_matrix,\n anchor_match_negative_indicator_matrix,\n margin=margin,\n use_semi_hard=use_semi_hard,\n anchor_positive_mining_distances=anchor_positive_mining_distances,\n anchor_match_mining_distance_matrix=(\n anchor_match_mining_distance_matrix)))\n # Keep only anchors that found a valid negative; compute each mask\n # before filtering so each vector is masked by its own validity.\n valid_mask = negative_distances < negative_distances.dtype.max\n valid_mining_mask = (\n negative_mining_distances < negative_mining_distances.dtype.max)\n negative_distances = tf.boolean_mask(negative_distances, mask=valid_mask)\n negative_mining_distances = tf.boolean_mask(\n negative_mining_distances, mask=valid_mining_mask)\n\n active_triplet_ratio = (\n tf.cast(num_active_triplets, dtype=tf.float32) / num_total_triplets)\n active_mining_triplet_ratio = (\n tf.cast(num_active_mining_triplets, dtype=tf.float32) /\n num_total_triplets)\n\n active_loss = (\n loss / tf.math.maximum(1e-12, tf.stop_gradient(active_triplet_ratio)))\n active_mining_loss = (\n mining_loss /\n tf.math.maximum(1e-12, tf.stop_gradient(active_mining_triplet_ratio)))\n\n tag = 'SemiHardNegative' if 
use_semi_hard else 'HardNegative'\n summaries = {\n # Summaries related to triplet loss computation.\n 'triplet_loss/Anchor/%s/Distance/Mean' % tag:\n tf.math.reduce_mean(negative_distances),\n 'triplet_loss/%s/Loss/All' % tag:\n loss,\n 'triplet_loss/%s/Loss/Active' % tag:\n active_loss,\n 'triplet_loss/%s/ActiveTripletNum' % tag:\n num_active_triplets,\n 'triplet_loss/%s/ActiveTripletRatio' % tag:\n active_triplet_ratio,\n\n # Summaries related to triplet mining.\n 'triplet_mining/Anchor/%s/Distance/Mean' % tag:\n tf.math.reduce_mean(negative_mining_distances),\n 'triplet_mining/%s/Loss/All' % tag:\n mining_loss,\n 'triplet_mining/%s/Loss/Active' % tag:\n active_mining_loss,\n 'triplet_mining/%s/ActiveTripletNum' % tag:\n num_active_mining_triplets,\n 'triplet_mining/%s/ActiveTripletRatio' % tag:\n active_mining_triplet_ratio,\n }\n if summarize_percentiles:\n summaries.update({\n 'triplet_loss/Anchor/%s/Distance/Median' % tag:\n tfp.stats.percentile(negative_distances, q=50),\n 'triplet_mining/Anchor/%s/Distance/Median' % tag:\n tfp.stats.percentile(negative_mining_distances, q=50),\n })\n\n return loss, active_loss, summaries\n\n hard_negative_loss, hard_negative_active_loss, hard_negative_summaries = (\n compute_loss_and_create_summaries(use_semi_hard=False))\n (semi_hard_negative_loss, semi_hard_negative_active_loss,\n semi_hard_negative_summaries) = (\n compute_loss_and_create_summaries(use_semi_hard=True))\n\n summaries = {\n 'triplet_loss/Margin':\n tf.constant(margin),\n 'triplet_loss/Anchor/Positive/Distance/Mean':\n tf.math.reduce_mean(anchor_positive_distances),\n 'triplet_mining/Anchor/Positive/Distance/Mean':\n tf.math.reduce_mean(anchor_positive_mining_distances),\n }\n if summarize_percentiles:\n summaries.update({\n 'triplet_loss/Anchor/Positive/Distance/Median':\n tfp.stats.percentile(anchor_positive_distances, q=50),\n 'triplet_mining/Anchor/Positive/Distance/Median':\n tfp.stats.percentile(anchor_positive_mining_distances, q=50),\n })\n summaries.update(hard_negative_summaries)\n summaries.update(semi_hard_negative_summaries)\n\n if use_semi_hard:\n if exclude_inactive_triplet_loss:\n loss = semi_hard_negative_active_loss\n else:\n loss = semi_hard_negative_loss\n else:\n if exclude_inactive_triplet_loss:\n loss = hard_negative_active_loss\n else:\n loss = hard_negative_loss\n\n return loss, summaries\n\n\ndef compute_kl_regularization_loss(means,\n stddevs,\n loss_weight,\n prior_mean=0.0,\n prior_stddev=1.0):\n \"\"\"Computes KL divergence regularization loss for multivariate Gaussian.\n\n Args:\n means: A tensor for distribution means. Shape = [..., dim].\n stddevs: A tensor for distribution standard deviations. Shape = [..., dim].\n loss_weight: A float for loss weight.\n prior_mean: A float for prior distribution mean.\n prior_stddev: A float for prior distribution standard deviation.\n\n Returns:\n loss: A tensor for weighted regularization loss. 
Shape = [].\n summaries: A dictionary for loss summaries.\n \"\"\"\n loss = tf.math.reduce_mean(\n distance_utils.compute_gaussian_kl_divergence(\n means, stddevs, rhs_means=prior_mean, rhs_stddevs=prior_stddev))\n weighted_loss = loss_weight * loss\n summaries = {\n 'regularization_loss/KL/PriorMean/Mean':\n tf.math.reduce_mean(tf.constant(prior_mean)),\n 'regularization_loss/KL/PriorVar/Mean':\n tf.math.reduce_mean(tf.constant(prior_stddev)**2),\n 'regularization_loss/KL/Loss/Original':\n loss,\n 'regularization_loss/KL/Loss/Weighted':\n weighted_loss,\n 'regularization_loss/KL/Loss/Weight':\n tf.constant(loss_weight),\n }\n return weighted_loss, summaries\n\n\ndef compute_positive_pairwise_loss(anchor_embeddings,\n positive_embeddings,\n loss_weight,\n distance_fn=functools.partial(\n distance_utils.compute_l2_distances,\n squared=True)):\n \"\"\"Computes anchor/positive pairwise (squared L2) loss.\n\n Args:\n anchor_embeddings: A tensor for anchor embeddings. Shape = [...,\n embedding_dim].\n positive_embeddings: A tensor for positive embeddings. Shape = [...,\n embedding_dim].\n loss_weight: A float for loss weight.\n distance_fn: A function handle for computing embedding distances, which\n takes two embedding tensors of shape [..., embedding_dim] and returns a\n distance tensor of shape [...].\n\n Returns:\n loss: A tensor for weighted positive pairwise loss. Shape = [].\n summaries: A dictionary for loss summaries.\n \"\"\"\n loss = tf.math.reduce_mean(\n distance_fn(anchor_embeddings, positive_embeddings))\n weighted_loss = loss_weight * loss\n summaries = {\n 'pairwise_loss/PositivePair/Loss/Original': loss,\n 'pairwise_loss/PositivePair/Loss/Weighted': weighted_loss,\n 'pairwise_loss/PositivePair/Loss/Weight': tf.constant(loss_weight),\n }\n return weighted_loss, summaries\n",
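Under its defaults (all pairs, squared L2, mean reductions) the sample distance function built by `create_sample_distance_fn` reduces to a few lines of plain TensorFlow; a sketch of that equivalent computation on toy shapes, assuming the all-pair distance is the usual broadcasted pairwise squared L2:

import tensorflow as tf

# Two sample groups of shape [batch, num_components, num_embeddings,
# embedding_dim]; distance = mean over all sample pairs, then components.
lhs = tf.random.normal([2, 1, 3, 4])
rhs = tf.random.normal([2, 1, 3, 4])

diffs = lhs[..., :, tf.newaxis, :] - rhs[..., tf.newaxis, :, :]
pairwise = tf.reduce_sum(tf.square(diffs), axis=-1)   # [2, 1, 3, 3]
distances = tf.reduce_mean(pairwise, axis=[-2, -1])   # pairwise reduction
distances = tf.reduce_mean(distances, axis=-1)        # component reduction
print(distances.shape)  # (2,)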
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"An implementation of Munchausen DQN in Dopamine style.\n\nThe class MunchausenDQNAgent inherits from Dopamine's DQNAgent.\n\"\"\"\n\nimport random\nfrom dopamine.agents.dqn import dqn_agent\nimport gin\nimport tensorflow.compat.v1 as tf\n\nfrom munchausen_rl.common import utils\n\ntf.disable_v2_behavior()\n\n\[email protected]\nclass MunchausenDQNAgent(dqn_agent.DQNAgent):\n \"\"\"An implementation of the Munchausen-DQN agent.\"\"\"\n\n def __init__(self,\n sess,\n num_actions,\n tau,\n alpha=1,\n clip_value_min=-10,\n interact='greedy',\n optimizer_type='adam',\n optimizer_lr=0.00005,\n **kwargs):\n \"\"\"Initializes the agent and constructs the components of its graph.\n\n About tau and alpha coefficients:\n tau and alpha balance the entropy and KL regularizations. tau is used as the\n 'explicit' entropy temperature, and alpha as a scaling of the log-policy.\n Implicitly, it defines an entropy regularization of coefficient\n (1-alpha) * tau and a KL one of coeff alpha * tau.\n\n Args:\n sess: `tf.Session`, for executing ops.\n num_actions: int, number of actions the agent can take at any state.\n tau: float (>0.), tau regularization factor in M-DQN.\n alpha: float in [0, 1], entropy scaling factor.\n clip_value_min: float (<0), minimum value to clip the log-policy.\n interact: string, 'stochastic' or 'greedy'. 
Which policy to use.\n optimizer_type: string, 'adam' or 'rms'.\n optimizer_lr: float, optimizer learning rate.\n **kwargs: see dqn_agent.DQNAgent doc.\n \"\"\"\n self.tau = tau\n self.alpha = alpha\n self.clip_value_min = clip_value_min\n self._interact = interact\n self.optimizer_type = optimizer_type\n self.optimizer_lr = optimizer_lr\n self.optimizer = self._build_optimizer()\n\n super(MunchausenDQNAgent, self).__init__(sess, num_actions, **kwargs)\n\n def _build_optimizer(self):\n \"\"\"Creates the optimizer for the Q-networks.\"\"\"\n if self.optimizer_type == 'adam':\n return tf.train.AdamOptimizer(\n learning_rate=self.optimizer_lr, epsilon=0.0003125)\n if self.optimizer_type == 'rms':\n return tf.train.RMSPropOptimizer(\n learning_rate=self.optimizer_lr,\n decay=0.95,\n momentum=0.0,\n epsilon=0.00001,\n centered=True)\n raise ValueError('Undefined optimizer')\n\n def _build_networks(self):\n \"\"\"Builds the Q-value network computations needed for acting and training.\n\n These are:\n self.online_convnet: For computing the current state's Q-values.\n self.target_convnet: For computing the next state's target Q-values.\n self._net_outputs: The actual Q-values.\n self._q_argmax: The action maximizing the current state's Q-values.\n self._replay_net_outputs: The replayed states' Q-values.\n self._replay_next_net_outputs: The replayed next states' Q-values.\n self._replay_target_net_outputs: The replayed states' target\n Q-values.\n self._replay_next_target_net_outputs: The replayed next states' target\n Q-values.\n \"\"\"\n\n # _network_template instantiates the model and returns the network object.\n # The network object can be used to generate different outputs in the graph.\n # At each call to the network, the parameters will be reused.\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(\n self._replay.states)\n self._replay_next_net_outputs = self.online_convnet(\n self._replay.next_states)\n\n self._replay_target_net_outputs = self.target_convnet(\n self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n self._policy_logits = utils.stable_scaled_log_softmax(\n self._net_outputs.q_values, self.tau, axis=1) / self.tau\n\n self._stochastic_action = tf.random.categorical(\n self._policy_logits,\n num_samples=1,\n dtype=tf.int32)[0][0]\n\n def _build_target_q_op(self):\n \"\"\"Build an op used as a target for the Q-value.\n\n Returns:\n target_q_op: An op calculating the Q-value.\n \"\"\"\n replay_action_one_hot = tf.one_hot(\n self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')\n # tau * ln pi_k+1 (s')\n replay_next_log_policy = utils.stable_scaled_log_softmax(\n self._replay_next_target_net_outputs.q_values, self.tau, axis=1)\n # tau * ln pi_k+1(s)\n replay_log_policy = utils.stable_scaled_log_softmax(\n self._replay_target_net_outputs.q_values, self.tau, axis=1)\n replay_next_policy = utils.stable_softmax( # pi_k+1(s')\n self._replay_next_target_net_outputs.q_values, self.tau, axis=1)\n\n replay_next_qt_softmax = tf.reduce_sum(\n (self._replay_next_target_net_outputs.q_values -\n replay_next_log_policy) * replay_next_policy, 1)\n\n tau_log_pi_a = tf.reduce_sum( # tau * ln pi_k+1(a|s)\n replay_log_policy * replay_action_one_hot, axis=1)\n\n tau_log_pi_a = 
tf.clip_by_value(\n tau_log_pi_a,\n clip_value_min=self.clip_value_min,\n clip_value_max=1)\n\n munchausen_term = self.alpha * tau_log_pi_a\n\n modified_bellman = (\n self._replay.rewards + munchausen_term +\n self.cumulative_gamma * replay_next_qt_softmax *\n (1. - tf.cast(self._replay.terminals, tf.float32)))\n\n if self.summary_writer is not None:\n with tf.variable_scope('policy'):\n entropy = -tf.reduce_sum(\n replay_next_policy * replay_next_log_policy / self.tau, axis=1)\n tf.summary.scalar('entropy', tf.reduce_mean(entropy))\n\n return modified_bellman\n\n def _select_action(self):\n \"\"\"Select an action from the set of available actions.\n\n Chooses an action randomly with probability self._calculate_epsilon(), and\n otherwise acts greedily according to the current Q-value estimates.\n\n Returns:\n int, the selected action.\n \"\"\"\n if self.eval_mode:\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state.\n if self._interact == 'stochastic':\n selected_action = self._stochastic_action\n elif self._interact == 'greedy':\n selected_action = self._q_argmax\n else:\n raise ValueError('Undefined interaction')\n return self._sess.run(selected_action, {self.state_ph: self.state})\n",
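For reference, the target built in `_build_target_q_op` is the Munchausen-DQN backup. A NumPy sketch of the same arithmetic for a single transition, with the scaled log-softmax written out directly (the `utils.stable_*` helpers add max-subtraction stabilization; the `tau`, `alpha`, and `gamma` defaults here are illustrative):

import numpy as np

def munchausen_target(q_s, q_next, action, reward, terminal,
                      tau=0.03, alpha=0.9, gamma=0.99, clip_min=-10.0):
  def tau_log_softmax(q):
    # tau * log softmax(q / tau), stabilized by max subtraction.
    z = q / tau
    z = z - z.max()
    return tau * (z - np.log(np.exp(z).sum()))

  log_pi_s = tau_log_softmax(q_s)        # tau * ln pi(.|s)
  log_pi_next = tau_log_softmax(q_next)  # tau * ln pi(.|s')
  pi_next = np.exp(log_pi_next / tau)    # pi(.|s')
  # Soft value of s': sum_a' pi(a'|s') * (q(s', a') - tau * ln pi(a'|s')).
  soft_v_next = (pi_next * (q_next - log_pi_next)).sum()
  # Munchausen bonus: clipped tau * ln pi(a|s); the graph version clips the
  # top at 1, which is inactive since a log-policy is non-positive.
  bonus = alpha * np.clip(log_pi_s[action], clip_min, 0.0)
  return reward + bonus + gamma * soft_v_next * (1.0 - float(terminal))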
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for util.py (e.g. analytical option pricing formula).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf # tf\n\nfrom simulation_research.tf_risk import util\n\n\nclass UtilTest(tf.test.TestCase):\n\n def assertAllAlmostEqual(self, a, b, delta=1e-7):\n self.assertEqual(a.shape, b.shape)\n a = a.flatten()\n b = b.flatten()\n self.assertLessEqual(np.max(np.abs(a - b)), delta)\n\n def test_std_estimate_works_with_tensors(self):\n num_dims = 16\n mean_est = tf.ones(num_dims)\n mean_sq_est = tf.ones(num_dims)\n stddev_est = util.stddev_est(mean_est, mean_sq_est)\n\n with self.test_session() as session:\n stddev_est_eval = session.run(stddev_est)\n\n self.assertAllEqual(stddev_est_eval, np.zeros(num_dims))\n\n def test_std_estimate_works_with_arrays(self):\n num_dims = 16\n mean_est = np.ones(num_dims)\n mean_sq_est = np.ones(num_dims)\n stddev_est = util.stddev_est(mean_est, mean_sq_est)\n\n self.assertAllEqual(stddev_est, np.zeros(num_dims))\n\n def test_half_clt_conf_interval_is_correct(self):\n confidence_level = 0.95\n num_samples = 400\n\n # Test scaler float value.\n stddev = 2.0\n correct_value = stddev / np.sqrt(num_samples) * 1.959963984540\n conf_interval_half_width = util.half_clt_conf_interval(\n confidence_level, num_samples, stddev)\n\n self.assertAlmostEqual(correct_value, conf_interval_half_width)\n\n # Test array float values.\n stddev = np.array([2.0, 1.0])\n correct_value = stddev / np.sqrt(num_samples) * 1.959963984540\n conf_interval_half_width = util.half_clt_conf_interval(\n confidence_level, num_samples, stddev)\n\n self.assertAllAlmostEqual(correct_value, conf_interval_half_width)\n\n def test_half_clt_conf_interval_with_zero_stdev(self):\n confidence_level = 0.95\n num_samples = 400\n\n # Test zero scaler.\n stddev = 0.0\n correct_value = stddev / np.sqrt(num_samples) * 1.959963984540\n conf_interval_half_width = util.half_clt_conf_interval(\n confidence_level, num_samples, stddev)\n\n self.assertAlmostEqual(correct_value, conf_interval_half_width)\n\n # Test array with zero elements.\n stddev = np.array([2.0, 0.0])\n correct_value = stddev / np.sqrt(num_samples) * 1.959963984540\n conf_interval_half_width = util.half_clt_conf_interval(\n confidence_level, num_samples, stddev)\n\n self.assertAllAlmostEqual(correct_value, conf_interval_half_width)\n\n def test_half_clt_conf_interval_unsupported_type(self):\n confidence_level = 0.95\n num_samples = 400\n # Integer stddev not supported.\n stddev = 2\n\n with self.assertRaises(TypeError):\n util.half_clt_conf_interval(confidence_level, num_samples, stddev)\n\n def test_running_mean_estimate_is_correct(self):\n np.random.seed(0)\n num_dims = 8\n mean_est = np.random.normal(size=[num_dims])\n batch_est = np.random.normal(size=[num_dims])\n\n num_samples = 128\n batch_size = 
16\n\n updated_mean_est = util.running_mean_estimate(\n mean_est, batch_est, num_samples, batch_size)\n\n mean_est_fraction = float(num_samples) / float(num_samples + batch_size)\n batch_est_fraction = float(batch_size) / float(num_samples + batch_size)\n\n self.assertAllAlmostEqual(\n updated_mean_est,\n mean_est_fraction * mean_est + batch_est_fraction * batch_est)\n\n def test_call_put_parity(self):\n current_price = 100.0\n interest_rate = 0.05\n vol = 0.2\n strike = 120.0\n maturity = 1.0\n\n call_price = util.black_scholes_call_price(current_price, interest_rate,\n vol, strike, maturity)\n put_price = util.black_scholes_put_price(current_price, interest_rate, vol,\n strike, maturity)\n total_price = current_price - strike * tf.exp(- interest_rate * maturity)\n\n with self.test_session() as session:\n\n call_price_eval = session.run(call_price)\n put_price_eval = session.run(put_price)\n total_price_eval = session.run(total_price)\n\n self.assertGreater(call_price_eval, 0.0)\n self.assertGreater(put_price_eval, 0.0)\n self.assertAlmostEqual(call_price_eval - put_price_eval, total_price_eval,\n delta=1e-5)\n\n def test_barrier_parity(self):\n current_price = 100.0\n interest_rate = 0.05\n vol = 0.2\n strike = 120.0\n barrier = 150.0\n maturity = 1.0\n\n put_up_in_price = util.black_scholes_up_in_put_price(current_price,\n interest_rate,\n vol, strike, barrier,\n maturity)\n put_up_out_price = util.black_scholes_up_out_put_price(current_price,\n interest_rate,\n vol, strike, barrier,\n maturity)\n put_price = util.black_scholes_put_price(current_price, interest_rate, vol,\n strike, maturity)\n\n with self.test_session() as session:\n\n put_up_in_price_eval = session.run(put_up_in_price)\n put_up_out_price_eval = session.run(put_up_out_price)\n put_price_eval = session.run(put_price)\n\n self.assertGreater(put_up_in_price_eval, 0.0)\n self.assertGreater(put_up_out_price_eval, 0.0)\n self.assertAlmostEqual(put_up_in_price_eval + put_up_out_price_eval,\n put_price_eval, delta=1e-5)\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
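The parity identities exercised above are standard Black-Scholes facts, e.g. put-call parity C - P = S - K * exp(-r * T); a quick NumPy/SciPy check with the closed-form prices, independent of the `util` module under test:

import numpy as np
from scipy.stats import norm

def bs_call(s, r, vol, k, t):
  d1 = (np.log(s / k) + (r + 0.5 * vol**2) * t) / (vol * np.sqrt(t))
  d2 = d1 - vol * np.sqrt(t)
  return s * norm.cdf(d1) - k * np.exp(-r * t) * norm.cdf(d2)

def bs_put(s, r, vol, k, t):
  d1 = (np.log(s / k) + (r + 0.5 * vol**2) * t) / (vol * np.sqrt(t))
  d2 = d1 - vol * np.sqrt(t)
  return k * np.exp(-r * t) * norm.cdf(-d2) - s * norm.cdf(-d1)

s, r, vol, k, t = 100.0, 0.05, 0.2, 120.0, 1.0
parity_gap = bs_call(s, r, vol, k, t) - bs_put(s, r, vol, k, t) - (
    s - k * np.exp(-r * t))
assert abs(parity_gap) < 1e-8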
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This file runs value iteration on an aggregated state space.\n\nIt aggregates states using the supplied metric.\n\nThis module will run a number of trials on a set of possible metrics and compile\nthe results in a plot.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nfrom absl import logging\nimport gin\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom six.moves import range\nimport tensorflow.compat.v1 as tf\n\n\ndef greedy(metric, num_states, num_states_target, max_iterations,\n verbose=False):\n \"\"\"Greedily aggregate states until a desired number of aggregate states.\n\n Args:\n metric: matrix of distances.\n num_states: int, number of total states.\n num_states_target: int, desired number of states.\n max_iterations: int, maximum number of iterations to run algorithm.\n verbose: bool, whether to print verbose messages.\n\n Returns:\n list of aggregated states and list mapping state to its cluster.\n \"\"\"\n curr_metric = np.copy(metric)\n # First we ensure that we won't aggregate states with themselves.\n np.fill_diagonal(curr_metric, np.inf)\n aggregate_states = [[x] for x in range(num_states)]\n state_to_aggregate_states = list(range(num_states))\n num_iterations = 1\n while len(aggregate_states) > num_states_target:\n # Pick a pair of the closest states randomly.\n min_distance = np.min(curr_metric)\n # We add a little epsilon here to avoid floating point precision issues.\n x, y = np.where(curr_metric <= min_distance + 1e-8)\n i = np.random.randint(len(x))\n s, t = x[i], y[i]\n # So we no longer try to aggregate these states.\n curr_metric[s, t] = np.inf\n curr_metric[t, s] = np.inf\n # For simplicity we'll put the new aggregation at the front.\n c1 = state_to_aggregate_states[s]\n c2 = state_to_aggregate_states[t]\n new_aggregate_states = [[]]\n for c in [c1, c2]:\n for s in aggregate_states[c]:\n if s in new_aggregate_states[0]:\n # If c1 == c2, this would cause duplicates which causes never-ending\n # loops.\n continue\n new_aggregate_states[0].append(s)\n state_to_aggregate_states[s] = 0\n # Re-index all the other aggregations.\n for i, c in enumerate(aggregate_states):\n if i == c1 or i == c2:\n continue\n for s in c:\n state_to_aggregate_states[s] = len(new_aggregate_states)\n new_aggregate_states.append(c)\n aggregate_states = new_aggregate_states\n if num_iterations % 1000 == 0 and verbose:\n logging.info('Iteration %d', num_iterations)\n num_iterations += 1\n if num_iterations > max_iterations:\n break\n return aggregate_states, state_to_aggregate_states\n\n\ndef k_medians(metric, num_states, num_states_target, max_iterations,\n verbose=False):\n \"\"\"Aggregate states using the k-medians algorithm.\n\n Args:\n metric: matrix of distances.\n num_states: int, number of total states.\n 
num_states_target: int, desired number of states.\n max_iterations: int, maximum number of iterations to run algorithm.\n verbose: bool, whether to print verbose messages.\n\n Returns:\n list of aggregated states and dict mapping state to its cluster.\n \"\"\"\n # Pick an initial set of centroids.\n centroids = np.random.choice(num_states, size=num_states_target,\n replace=False)\n state_to_centroid = [0 for _ in range(num_states)]\n for k, s in enumerate(centroids):\n state_to_centroid[s] = k\n # We first put each state in a random cluster.\n for s in range(num_states):\n if s in centroids:\n continue\n k = s % num_states_target\n state_to_centroid[s] = k\n clusters_changing = True\n num_iterations = 1\n while clusters_changing:\n clusters_changing = False\n clusters = [[x] for x in centroids]\n for s in range(num_states):\n if s in centroids:\n continue\n nearest_centroid = 0\n smallest_distance = np.inf\n for k, t in enumerate(centroids):\n if metric[s, t] < smallest_distance:\n smallest_distance = metric[s, t]\n nearest_centroid = k\n if nearest_centroid != state_to_centroid[s]:\n clusters_changing = True\n state_to_centroid[s] = nearest_centroid\n clusters[nearest_centroid].append(s)\n # Re-calculate centroids.\n for k, c in enumerate(clusters):\n min_avg_distance = np.inf\n new_centroid = 0\n for s in c:\n avg_distance = 0.\n for t in c:\n avg_distance += metric[s, t]\n avg_distance /= len(c)\n if avg_distance < min_avg_distance:\n min_avg_distance = avg_distance\n new_centroid = s\n centroids[k] = new_centroid\n if num_iterations % 1000 == 0 and verbose:\n logging.info('Iteration %d', num_iterations)\n num_iterations += 1\n if num_iterations > max_iterations:\n break\n return clusters, state_to_centroid\n\n\[email protected]\ndef value_iteration(env, aggregate_states, tolerance=0.001, verbose=False):\n r\"\"\"Run value iteration on the aggregate MDP.\n\n This constructs a new MDP using the aggregate states as follows:\n ```\n R(c, a) = 1/|c| * \\sum_{s \\in c} R(s, a)\n P(c, a)(c') = 1/|c| * \\sum_{s \\in c}\\sum_{s' \\in c'} P(s, a)(s')\n ```\n\n Args:\n env: the original environment.\n aggregate_states: list of aggregate states.\n tolerance: float, maximum difference in value between successive\n iterations. 
Once this threshold is past, computation stops.\n verbose: bool, whether to print verbose messages.\n\n Returns:\n list of floats representing cluster values.\n \"\"\"\n num_clusters = len(aggregate_states)\n transition_probs = np.zeros((num_clusters, env.num_actions, num_clusters))\n rewards = np.zeros((num_clusters, env.num_actions))\n for c1 in range(num_clusters):\n for a in range(env.num_actions):\n for s1 in aggregate_states[c1]:\n rewards[c1, a] += env.rewards[s1, a]\n for c2 in range(num_clusters):\n for s2 in aggregate_states[c2]:\n transition_probs[c1, a, c2] += env.transition_probs[s1, a, s2]\n rewards[c1, a] /= len(aggregate_states[c1])\n transition_probs[c1, a, :] /= len(aggregate_states[c1])\n q_values = np.zeros((num_clusters, env.num_actions))\n error = tolerance * 2.\n num_iterations = 1\n while error > tolerance:\n for c in range(num_clusters):\n for a in range(env.num_actions):\n old_q_values = np.copy(q_values[c, a])\n q_values[c, a] = rewards[c, a] + env.gamma * np.matmul(\n transition_probs[c, a, :], np.max(q_values, axis=1))\n error = np.max(abs(q_values[c, a] - old_q_values))\n if num_iterations % 1000 == 0 and verbose:\n logging.info('Iteration %d: %f', num_iterations, error)\n num_iterations += 1\n return q_values\n\n\[email protected]\ndef experiment(base_dir,\n env,\n metrics,\n max_iterations=100,\n run=0,\n random_mdp=False,\n verbose=False,\n aggregation_method='greedy'):\n \"\"\"Module to run the experiment.\n\n Args:\n base_dir: str, base directory where to save the files.\n env: an environment specifying the true underlying MDP.\n metrics: list of metrics which will be used for the nearest-neighbour\n approximants.\n max_iterations: int, maximum number of iterations for each of the\n aggregation methods.\n run: int, run id.\n random_mdp: bool, whether the environment is a random MDP or not.\n verbose: bool, whether to print verbose messages.\n aggregation_method: string, greedy or k_median method\n\n Returns:\n Dict containing statistics.\n \"\"\"\n if env.values is None:\n logging.info('Values must have already been computed.')\n return\n cmap = cm.get_cmap('plasma', 256)\n data = {\n 'Metric': [],\n 'num_states_target': [],\n 'run': [],\n 'qg': [],\n 'exact_qvalues': [],\n 'error': []\n }\n num_states_targets = np.linspace(1, env.num_states, 10).astype(int)\n for num_states_target in num_states_targets:\n # -(-x//1) is the same as ceil(x).\n # num_states_target = max(int(-(-state_fraction * env.num_states // 1)), 1)\n for metric in metrics:\n if metric.metric is None:\n continue\n if verbose:\n logging.info('***Run %d, %s, %d',\n num_states_target, metric.name, run)\n if aggregation_method == 'k_median':\n aggregate_states, state_to_aggregate_states = (\n k_medians(\n metric.metric,\n env.num_states,\n num_states_target,\n max_iterations,\n verbose=verbose))\n if aggregation_method == 'greedy':\n aggregate_states, state_to_aggregate_states = (\n greedy(\n metric.metric,\n env.num_states,\n num_states_target,\n max_iterations,\n verbose=verbose))\n if not random_mdp:\n # Generate plot of neighborhoods.\n neighbourhood_path = os.path.join(\n base_dir, metric.name,\n 'neighborhood_{}_{}.pdf'.format(num_states_target, run))\n obs_image = env.render_custom_observation(\n env.reset(), state_to_aggregate_states, cmap,\n boundary_values=[-1, num_states_target])\n plt.imshow(obs_image)\n with tf.gfile.GFile(neighbourhood_path, 'w') as f:\n plt.savefig(f, format='pdf', dpi=300, bbox_inches='tight')\n plt.clf()\n # Perform value iteration on aggregate states.\n 
q_aggregate = value_iteration(env, aggregate_states)\n # Now project the values of the aggregate states to the ground states.\n q_projected = [\n q_aggregate[state_to_aggregate_states[s]]\n for s in range(env.num_states)]\n data['Metric'].append(metric.label)\n data['num_states_target'].append(num_states_target)\n data['run'].append(run)\n data['qg'].append(q_projected)\n data['exact_qvalues'].append(env.q_val_it_q_values)\n data['error'].append(\n np.mean(\n np.max((np.abs(q_projected - env.q_val_it_q_values)), axis=1)))\n return data\n\n\ndef plot_data(base_dir, data):\n \"\"\"Plot the data collected from all experiment runs.\"\"\"\n del data['qg']\n del data['exact_qvalues']\n df = pd.DataFrame(data=data)\n plt.subplots(1, 1, figsize=(8, 6))\n sns.lineplot(x='num_states_target', y='error', hue='Metric', data=df,\n ci=99, lw=3)\n plt.xlabel('Number of aggregate states', fontsize=24)\n plt.ylabel('Avg. Error', fontsize=24)\n plt.legend(fontsize=18)\n pdf_file = os.path.join(base_dir, 'aggregate_value_iteration.pdf')\n with tf.io.gfile.GFile(pdf_file, 'w') as f:\n plt.savefig(f, format='pdf', dpi=300, bbox_inches='tight')\n plt.clf()\n plt.close('all')\n",
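The aggregate-MDP construction used by `value_iteration` above can be checked in isolation. Below is a minimal, self-contained NumPy sketch (the random ground MDP and the hand-picked three-cluster aggregation are illustrative assumptions, not part of the module) that builds `R(c, a)` and `P(c, a)(c')` exactly as the docstring's formulas state and runs Q-value iteration on the result:

import numpy as np

rng = np.random.default_rng(0)
num_states, num_actions, gamma = 6, 2, 0.9

# A random ground MDP (rewards and row-stochastic transitions).
rewards = rng.random((num_states, num_actions))
transition_probs = rng.random((num_states, num_actions, num_states))
transition_probs /= transition_probs.sum(axis=-1, keepdims=True)

# Stand-in aggregation; greedy() or k_medians() would normally produce this.
aggregate_states = [[0, 1], [2, 3], [4, 5]]

num_clusters = len(aggregate_states)
agg_r = np.zeros((num_clusters, num_actions))
agg_p = np.zeros((num_clusters, num_actions, num_clusters))
for c1, c1_states in enumerate(aggregate_states):
  for a in range(num_actions):
    for s1 in c1_states:
      agg_r[c1, a] += rewards[s1, a]
      for c2, c2_states in enumerate(aggregate_states):
        for s2 in c2_states:
          agg_p[c1, a, c2] += transition_probs[s1, a, s2]
    agg_r[c1, a] /= len(c1_states)
    agg_p[c1, a, :] /= len(c1_states)

# Q-value iteration on the aggregate MDP.
q_values = np.zeros((num_clusters, num_actions))
for _ in range(500):
  q_values = agg_r + gamma * agg_p @ np.max(q_values, axis=1)
print(q_values)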
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Loss functions for CF with support for optional negative sampling.\"\"\"\n\nimport abc\nimport tensorflow.compat.v2 as tf\n\n\nclass LossFn(abc.ABC):\n \"\"\"Abstract loss function for CF embeddings.\"\"\"\n\n def __init__(self, sizes, neg_sample_size, double_neg, margin):\n \"\"\"Initialize CF loss function.\n\n Args:\n sizes: Tuple of size 2 containing (n_users, n_items).\n neg_sample_size: Integer indicating the number of negative samples to use.\n double_neg: Bool indicating whether or not to use double negative\n sampling.\n margin: Float indicating the margin between ascore for positive and\n negative examples.\n \"\"\"\n self.n_users = sizes[0]\n self.n_items = sizes[1]\n self.neg_sample_size = neg_sample_size\n self.double_neg = double_neg\n self.use_neg_sampling = neg_sample_size > 0\n self.gamma = tf.Variable(\n self.neg_sample_size * tf.keras.backend.ones(1) / self.n_items,\n trainable=False)\n self.margin = tf.Variable(\n margin * tf.keras.backend.ones(1),\n trainable=False)\n\n @abc.abstractmethod\n def loss_from_logits(self, logits, full_labels, labels):\n \"\"\"Computes CF loss.\n\n Args:\n logits: Tensor of size batch_size x n_items containing predictions.\n full_labels: Tensor of size batch_size x n_items containing one-hot\n labels.\n labels: Tensor of size batch_size x 1 containing sparse labels (index of\n correct item).\n\n Returns:\n Average loss within batch.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def get_neg_sample_mask(self, logits, full_labels):\n \"\"\"Generates negative sampling mask.\n\n Args:\n logits: Tensor of size batch_size x n_items containing predictions.\n full_labels: Tensor of size batch_size x n_items containing one-hot\n labels.\n\n Returns:\n neg_sample_mask: Tensor of size batch_size x n_items.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def calculate_loss(self, model, input_batch):\n \"\"\"Computes loss with or without negative sampling.\n\n Args:\n model: tf.keras.Model CF embedding model.\n input_batch: Tensor of size batch_size x 2 containing input pairs.\n\n Returns:\n Average loss within the input_batch.\n \"\"\"\n pass\n\n\nclass ExpLossFn(LossFn):\n \"\"\"Exponent based losses.\"\"\"\n\n def get_neg_sample_mask(self, logits, full_labels):\n \"\"\"Generates negative sampling mask on logits for exp-based losses.\n\n Args:\n logits: Tensor of size 
batch_size x n_items containing predictions.\n full_labels: Tensor of size batch_size x n_items containing one-hot\n labels.\n\n Returns:\n neg_sample_mask: Tensor of size batch_size x n_items with -1e6 and\n zeros (-1e6 indicates that the corresponding example\n is masked).\n \"\"\"\n neg_sample_mask = tf.random.uniform(tf.shape(logits), dtype=logits.dtype)\n neg_sample_mask = tf.cast(neg_sample_mask > self.gamma, logits.dtype)\n neg_sample_mask = -1e6 * tf.maximum(neg_sample_mask - full_labels, 0)\n return neg_sample_mask\n\n def calculate_loss(self, model, input_batch):\n labels = input_batch[:, 1]\n logits = model(input_batch, eval_mode=True)\n full_labels = tf.one_hot(labels, depth=self.n_items, dtype=logits.dtype)\n if self.use_neg_sampling:\n # mask some values for negative sampling\n neg_sample_mask = self.get_neg_sample_mask(logits, full_labels)\n # mask logits to only keep target and negative examples' scores\n logits = logits + neg_sample_mask\n return self.loss_from_logits(logits, full_labels, labels)\n\n\nclass SigmoidCrossEntropy(ExpLossFn):\n \"\"\"Sigmoid cross entropy loss.\"\"\"\n\n def loss_from_logits(self, logits, full_labels, labels):\n return tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(full_labels, logits))\n\n\nclass SoftmaxCrossEntropy(ExpLossFn):\n \"\"\"Softmax cross entropy loss.\"\"\"\n\n def loss_from_logits(self, logits, full_labels, labels):\n return tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels, logits))\n\n\nclass PairwiseHingeFn(LossFn):\n \"\"\"Pairwise ranking hinge loss.\"\"\"\n\n def get_neg_sample_mask(self, logits, full_labels):\n \"\"\"Generates negative sampling mask.\n\n Args:\n logits: Tensor of size batch_size x n_items containing predictions.\n full_labels: Tensor of size batch_size x n_items containing one-hot\n labels.\n\n Returns:\n neg_sample_mask: Tensor of size batch_size x n_items with ones and\n zeros (zero indicates that the corresponding example\n is masked).\n \"\"\"\n neg_sample_mask = tf.random.uniform(tf.shape(logits), dtype=logits.dtype)\n neg_sample_mask = tf.cast(neg_sample_mask < self.gamma, logits.dtype)\n neg_sample_mask = tf.maximum(neg_sample_mask, full_labels)\n return neg_sample_mask\n\n def loss_from_logits(self, logits, full_labels, labels):\n signed_logits = (1.0 - 2.0 * full_labels) * logits\n return tf.reduce_mean(\n tf.nn.relu(\n self.margin + tf.reduce_sum(signed_logits, 1)))\n\n def calculate_loss(self, model, input_batch):\n labels = input_batch[:, 1]\n logits = model(input_batch, eval_mode=True)\n full_labels = tf.one_hot(labels, depth=self.n_items, dtype=logits.dtype)\n if self.use_neg_sampling:\n # mask some values for negative sampling\n neg_sample_mask = self.get_neg_sample_mask(logits, full_labels)\n # mask logits to only keep target and negative examples' scores\n logits = logits * neg_sample_mask\n return self.loss_from_logits(logits, full_labels, labels)\n",
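To see what ExpLossFn.get_neg_sample_mask actually keeps, here is a small runnable sketch (batch size, item count, and labels are made up for illustration): a logit survives as a negative with probability gamma, which is approximately neg_sample_size / n_items, the one-hot target column is never masked, and everything else is pushed to -1e6 before the softmax:

import tensorflow.compat.v2 as tf

tf.random.set_seed(0)
batch_size, n_items, neg_sample_size = 4, 10, 3
gamma = neg_sample_size / n_items  # keep-probability for negatives

labels = tf.constant([1, 5, 2, 7])
logits = tf.random.normal((batch_size, n_items))
full_labels = tf.one_hot(labels, depth=n_items, dtype=logits.dtype)

# Mirrors ExpLossFn.get_neg_sample_mask: 1 marks entries to drop...
neg_sample_mask = tf.random.uniform(tf.shape(logits), dtype=logits.dtype)
neg_sample_mask = tf.cast(neg_sample_mask > gamma, logits.dtype)
# ...except the target column, which is always kept.
neg_sample_mask = -1e6 * tf.maximum(neg_sample_mask - full_labels, 0)
masked_logits = logits + neg_sample_mask

loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels, masked_logits))
print(float(loss))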
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Generating hyperparameters dictionary from multiple sources.\"\"\"\n\nimport os\nimport warnings\nimport six\nimport tensorflow.compat.v1 as tf\nimport yaml\n\n\n# TODO(amangu): Add tests for this class.\nclass _Hyperparameters(object):\n \"\"\"_Hyperparameters class to generate final hparams from various inputs.\"\"\"\n\n def __init__(self, default_hparams_file, specific_hparams_file, input_flags,\n hparams_overrides):\n \"\"\"Initialze and load parameter dictionary with different input sources.\n\n Args:\n default_hparams_file: YAML storing default values of all hyperparameters.\n specific_hparams_file: YAML file storing accelerator specific values of\n hyperparameters to override the default values.\n input_flags: Command line flags values for hyperparameters. [This is for\n backward compatibility, so that users passing hyperparameters as regular\n flags should not run into trouble].\n hparams_overrides: A kv string representing which hyperparameters need to\n be override from the command-line.\n\n Raises:\n ValueError: Raised when 'default_hparams_file' is not readable.\n \"\"\"\n if not tf.io.gfile.exists(default_hparams_file):\n raise ValueError(\n 'Expected a valid path to a YAML file, which represents the default '\n 'hyperparameters file. 
{}'.format(default_hparams_file))\n\n self._params = {}\n self._params_source = {}\n self._default_hparams_file = default_hparams_file\n self._specific_hparams_file = specific_hparams_file\n self._input_flags = input_flags\n self._hparams_overrides = hparams_overrides\n\n def get_parameters(self, log_params):\n \"\"\"Returns the dictionary loaded with final values of all hyperparameters.\n\n Args:\n log_params: Bool to specify if the hyperparameters' final values need to be\n logged or not.\n\n Returns:\n Python dictionary with all the final hyperparameters.\n \"\"\"\n self._params, self._params_source = load_from_file(\n self._params, self._params_source, self._default_hparams_file)\n self._params, self._params_source = load_from_file(\n self._params, self._params_source, self._specific_hparams_file)\n self._params, self._params_source = load_from_input_flags(\n self._params, self._params_source, self._input_flags)\n self._params, self._params_source = load_from_hparams_overrides(\n self._params, self._params_source, self._hparams_overrides)\n\n if log_params:\n self.log_parameters()\n\n return self._params\n\n def log_parameters(self):\n \"\"\"Log the hyperparameter values along with the source of those values.\"\"\"\n params_log = ''\n\n for k in self._params:\n params_log += k + ': \\t' + str(self._params[k])\n params_log += ' \\t[' + self._params_source[k] + ']\\n'\n tf.logging.info('\\nModel hyperparameters [source]:\\n%s', params_log)\n\n\ndef load_from_file(params, params_source, file_path):\n \"\"\"Given a path to a YAML file, read the file and load it to dictionary.\n\n Args:\n params: Python dictionary of hyperparameters.\n params_source: Python dictionary to record source of hyperparameters.\n file_path: Python string containing path to file.\n\n Returns:\n Python dict of hyperparameters.\n \"\"\"\n if file_path is None:\n return params, params_source\n\n if not tf.io.gfile.exists(file_path):\n warnings.warn('Could not read Hyperparameter file : ' + file_path,\n RuntimeWarning)\n return params, params_source\n\n with tf.gfile.Open(file_path, 'r') as f:\n overrides = yaml.safe_load(f)\n for key, value in six.iteritems(overrides):\n params[key] = value\n params_source[key] = os.path.basename(file_path)\n\n return params, params_source\n\n\n# TODO(amangu): Once global hyperparameter flags are removed, we won't need\n# this function. Remove this function after implementing this.\ndef load_from_input_flags(params, params_source, input_flags):\n \"\"\"Update params dictionary with input flags.\n\n Args:\n params: Python dictionary of hyperparameters.\n params_source: Python dictionary to record source of hyperparameters.\n input_flags: All the flags with non-null value of overridden\n hyperparameters.\n\n Returns:\n Python dict of hyperparameters.\n \"\"\"\n if params is None:\n raise ValueError(\n 'Input dictionary is empty. 
It is expected to be loaded with default '\n 'values')\n\n if not isinstance(params, dict):\n raise ValueError(\n 'The base parameter set must be a Python dict, was: {}'.format(\n type(params)))\n\n for key in params:\n flag_value = input_flags.get_flag_value(key, None)\n\n if flag_value is not None:\n params[key] = flag_value\n params_source[key] = 'Command-line flags'\n\n return params, params_source\n\n\n# TODO(amangu): Add tests to verify different dtypes of params.\ndef load_from_hparams_overrides(params, params_source, hparams_overrides):\n \"\"\"Given a dictionary of hyperparameters and a list of overrides, merge them.\n\n Args:\n params: Python dict containing a base hyperparameters set.\n params_source: Python dictionary to record source of hyperparameters.\n hparams_overrides: Python list of strings. This is a set of k=v overrides\n for the hyperparameters in `params`; if `k=v1` in `params` but `k=v2` in\n `hparams_overrides`, the second value wins and the value for `k` is `v2`.\n\n Returns:\n Python dict of hyperparameters.\n \"\"\"\n if params is None:\n raise ValueError(\n 'Input dictionary is empty. It is expected to be loaded with default '\n 'values')\n\n if not isinstance(params, dict):\n raise ValueError(\n 'The base hyperparameters set must be a Python dict, was: {}'.format(\n type(params)))\n\n if hparams_overrides is None:\n return params, params_source\n\n if isinstance(hparams_overrides, six.string_types):\n hparams_overrides = [hparams_overrides]\n\n if not isinstance(hparams_overrides, list):\n raise ValueError(\n 'Expected that hparams_overrides would be `None`, a single string, or a'\n ' list of strings, was: {}'.format(type(hparams_overrides)))\n\n for kv_pair in hparams_overrides:\n if not isinstance(kv_pair, six.string_types):\n raise ValueError(\n 'Expected that hparams_overrides would contain Python list of strings,'\n ' but encountered an item: {}'.format(type(kv_pair)))\n key, value = kv_pair.split('=')\n parser = type(params[key])\n if parser is bool:\n params[key] = value not in ('0', 'False', 'false')\n else:\n params[key] = parser(value)\n params_source[key] = 'Command-line `hparams` flag'\n\n return params, params_source\n\n\ndef get_hyperparameters(default_hparams_file,\n specific_hparams_file,\n input_flags,\n hparams_overrides,\n log_params=True):\n \"\"\"Single function to get hparams for any model using different sources.\n\n Args:\n default_hparams_file: YAML storing default values of all hyperparameters.\n specific_hparams_file: YAML file storing accelerator specific values of\n hyperparameters to override the default values.\n input_flags: Command line flags values for hyperparameters. [This is for\n backward compatibility, so that users passing hyperparameters as regular\n flags should not run into trouble].\n hparams_overrides: A kv string representing which hyperparameters need to be\n overridden from the command-line.\n log_params: Bool to specify if the hyperparameters' final values need to be\n logged or not.\n\n Returns:\n Python dictionary with all the final hyperparameters.\n \"\"\"\n parameter = _Hyperparameters(default_hparams_file, specific_hparams_file,\n input_flags, hparams_overrides)\n\n return parameter.get_parameters(log_params)\n",
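The k=v parsing convention in load_from_hparams_overrides is worth a standalone illustration. The sketch below (free of the TF/gfile dependencies) re-implements just the type-driven parsing rule: the type of the existing default decides how the override string is parsed, with booleans special-cased so that '0', 'False', and 'false' all mean False:

def apply_overrides(params, hparams_overrides):
  """Applies 'key=value' overrides; existing value types drive parsing."""
  for kv_pair in hparams_overrides:
    key, value = kv_pair.split('=')
    parser = type(params[key])
    if parser is bool:
      params[key] = value not in ('0', 'False', 'false')
    else:
      params[key] = parser(value)
  return params

params = {'learning_rate': 0.1, 'train_steps': 1000, 'use_tpu': True}
print(apply_overrides(params, ['learning_rate=0.05', 'use_tpu=false']))
# {'learning_rate': 0.05, 'train_steps': 1000, 'use_tpu': False}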
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Trains a ResNet model on CIFAR-10.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport tensorflow.compat.v2 as tf\nfrom uq_benchmark_2019 import experiment_utils\nfrom uq_benchmark_2019 import image_data_utils\nfrom uq_benchmark_2019.cifar import data_lib\nfrom uq_benchmark_2019.cifar import hparams_lib\nfrom uq_benchmark_2019.cifar import models_lib\n\nFLAGS = flags.FLAGS\n\n\ndef _declare_flags():\n \"\"\"Declare flags; not invoked when this module is imported as a library.\"\"\"\n flags.DEFINE_enum('method', None, models_lib.METHODS,\n 'Name of modeling method.')\n flags.DEFINE_string('output_dir', None, 'Output directory.')\n flags.DEFINE_integer('test_level', 0, 'Testing level.')\n flags.DEFINE_integer('task', 0, 'Task number.')\n\n\ndef run(method, output_dir, fake_data=False, fake_training=False):\n \"\"\"Trains a model and records its predictions on configured datasets.\n\n Args:\n method: Modeling method to experiment with.\n output_dir: Directory to record the trained model and output stats.\n fake_data: If true, use fake data.\n fake_training: If true, train for a trivial number of steps.\n Returns:\n Trained Keras model.\n \"\"\"\n tf.io.gfile.makedirs(output_dir)\n model_opts = hparams_lib.model_opts_from_hparams(hparams_lib.HPS_DICT[method],\n method,\n fake_training=fake_training)\n if fake_training:\n model_opts.batch_size = 32\n model_opts.examples_per_epoch = 256\n model_opts.train_epochs = 1\n\n experiment_utils.record_config(model_opts, output_dir+'/model_options.json')\n\n dataset_train = data_lib.build_dataset(image_data_utils.DATA_CONFIG_TRAIN,\n is_training=True,\n fake_data=fake_data)\n dataset_test = data_lib.build_dataset(image_data_utils.DATA_CONFIG_TEST,\n fake_data=fake_data)\n model = models_lib.build_and_train(model_opts,\n dataset_train, dataset_test,\n output_dir)\n\n logging.info('Saving model to output_dir.')\n model.save_weights(output_dir + '/model.ckpt')\n # TODO(yovadia): Figure out why save_model() wants to serialize ModelOptions.\n return model\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n run(FLAGS.method,\n FLAGS.output_dir.replace('%task%', str(FLAGS.task)),\n fake_data=FLAGS.test_level > 1,\n fake_training=FLAGS.test_level > 0)\n\nif __name__ == '__main__':\n _declare_flags()\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utils for TPUs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import summary as contrib_summary\nfrom tensorflow.contrib import tpu as contrib_tpu\n\ngfile = tf.gfile\n\n\nUSE_MOVING_AVERAGE = 'USE_MOVING_AVERAGE'\n\n\ndef get_lr(curr_step, params):\n \"\"\"Compute learning rate at step depends on `params`.\"\"\"\n lr = tf.constant(params.learning_rate, dtype=tf.float32)\n if 'num_warmup_steps' in params and params.num_warmup_steps > 0:\n num_warmup_steps = tf.cast(params.num_warmup_steps, dtype=tf.float32)\n step = tf.cast(curr_step, dtype=tf.float32)\n warmup_lr = params.learning_rate * step / num_warmup_steps\n lr = tf.cond(tf.less(step, num_warmup_steps), lambda: warmup_lr, lambda: lr)\n return lr\n\n\ndef strip_var_name(var_name):\n \"\"\"Strips variable name of sub-strings blocking variable name matching.\"\"\"\n # Strip trailing number, e.g. convert\n # 'lstm/W_0:0' to 'lstm/W_0'.\n var_name = re.sub(r':\\d+$', '', var_name)\n # Strip partitioning info, e.g. convert\n # 'W_0/part_3/Adagrad' to 'W_0/Adagrad'.\n var_name = re.sub(r'/part_\\d+', '', var_name)\n return var_name\n\n\ndef create_estimator(params, model_dir, model_fn):\n \"\"\"Create a `TPUEstimator`.\"\"\"\n\n tpu_config = contrib_tpu.TPUConfig(\n iterations_per_loop=params.save_every,\n num_cores_per_replica=2,\n per_host_input_for_training=contrib_tpu.InputPipelineConfig.PER_HOST_V2, # pylint: disable=line-too-long\n input_partition_dims=[{\n 'x': [1, 2],\n 'y': [1, 2]\n }, None],\n tpu_job_name=params.tpu_job_name,\n )\n\n session_config = tf.ConfigProto(\n operation_timeout_in_ms=int(6e9),\n allow_soft_placement=True,\n isolate_session_state=True)\n\n run_config = contrib_tpu.RunConfig(\n tpu_config=tpu_config,\n master=params.master,\n session_config=session_config,\n log_step_count_steps=None,\n keep_checkpoint_max=5,\n save_checkpoints_steps=params.save_every)\n\n estimator = contrib_tpu.TPUEstimator(\n model_fn=model_fn,\n model_dir=model_dir,\n train_batch_size=params.train_batch_size,\n eval_batch_size=params.eval_batch_size,\n config=run_config,\n params=params,\n use_tpu=params.use_tpu,\n eval_on_tpu=True)\n\n return estimator\n\n\ndef build_host_call_fn(params, names_and_tensors):\n \"\"\"Wrapper to build `host_call` for `TPUEstimator`.\n\n Args:\n params: a `tf.contrib.train.HParams` object.\n names_and_tensors: list of elemens such as\n `(\"loss\", loss)`. 
These are the tensors' names and values.\n\n Returns:\n A pair of `(host_call_fn, tensors)` for `TPUEstimatorSpec`.\n \"\"\"\n\n names, tensors = zip(*names_and_tensors)\n\n def host_call_fn(global_step, *tensors):\n \"\"\"Training host call.\"\"\"\n global_step = global_step[0]\n with contrib_summary.create_file_writer(params.output_dir).as_default():\n with contrib_summary.record_summaries_every_n_global_steps(\n n=params.log_every, global_step=global_step):\n for i, tensor in enumerate(tensors):\n if 'images' not in names[i]:\n contrib_summary.scalar(names[i], tensor[0], step=global_step)\n return contrib_summary.all_summary_ops()\n\n global_step = tf.reshape(tf.train.get_or_create_global_step(), [1])\n tensors = [tf.expand_dims(tf.cast(t, dtype=tf.float32), axis=0)\n for t in tensors]\n\n return (host_call_fn, [global_step] + tensors)\n",
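The two regexes in strip_var_name are easy to sanity-check without TensorFlow; this standalone snippet reproduces them and asserts the two conversions documented in the comments above:

import re

def strip_var_name(var_name):
  """Pure-Python copy of the helper above, for quick testing."""
  var_name = re.sub(r':\d+$', '', var_name)      # 'lstm/W_0:0' -> 'lstm/W_0'
  var_name = re.sub(r'/part_\d+', '', var_name)  # drop '/part_3' partitions
  return var_name

assert strip_var_name('lstm/W_0:0') == 'lstm/W_0'
assert strip_var_name('W_0/part_3/Adagrad') == 'W_0/Adagrad'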
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Abstract class for Collaborative Filtering models.\"\"\"\n# pytype: skip-file\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom hyperbolic.learning import regularizers\n\n\nclass CFModel(tf.keras.Model, abc.ABC):\n \"\"\"Abstract CF embedding model class.\n\n Module to define basic operations in CF embedding models, including embedding\n initialization, computing embeddings and pairs' scores.\n Attributes:\n sizes: Pair of size 2 containing (n_users, n_items).\n rank: Integer, embeddings dimension.\n bias: String indicating if the bias is learned ('learn'),\n constant ('constant'), or zero (anything else).\n initializer: tf.keras.initializers class indicating which initializer\n to use.\n item_regularizer: tf.keras.regularizers.Regularizer class from regularizers,\n indicating which regularizer to use for item embeddings.\n user_regularizer: tf.keras.regularizers.Regularizer class from regularizers,\n indicating which regularizer to use for user embeddings.\n user: Tensorflow tf.keras.layers.Embedding class, holding user\n embeddings.\n item: Tensorflow tf.keras.layers.Embedding class, holding item\n embeddings.\n bu: Tensorflow tf.keras.layers.Embedding class, holding user biases.\n bi: Tensorflow tf.keras.layers.Embedding class, holding item biases.\n gamma: non trainable tf.Variable representing the margin for\n distance-based losses.\n rhs_dep_lhs: Bool indicating if in the distance comparisons, the\n right hand side of the distance function (item embeddings) depends on\n the left hand side (user embeddings).\n \"\"\"\n\n def __init__(self, sizes, args):\n \"\"\"Initialize CF embedding model.\n\n Args:\n sizes: pair of size 2 containing (n_users, n_items).\n args: Namespace with config arguments (see config.py for detailed overview\n of arguments supported).\n \"\"\"\n super(CFModel, self).__init__()\n self.sizes = sizes\n self.rank = args.rank\n self.bias = args.bias\n self.initializer = getattr(tf.keras.initializers, args.initializer)\n self.item_regularizer = getattr(regularizers, args.regularizer)(\n args.item_reg)\n self.user_regularizer = getattr(regularizers, args.regularizer)(\n args.user_reg)\n self.user = tf.keras.layers.Embedding(\n 
input_dim=sizes[0],\n output_dim=self.rank,\n embeddings_initializer=self.initializer,\n embeddings_regularizer=self.user_regularizer,\n name='user_embeddings')\n self.item = tf.keras.layers.Embedding(\n input_dim=sizes[1],\n output_dim=self.rank,\n embeddings_initializer=self.initializer,\n embeddings_regularizer=self.item_regularizer,\n name='item_embeddings')\n train_biases = self.bias == 'learn'\n self.bu = tf.keras.layers.Embedding(\n input_dim=sizes[0],\n output_dim=1,\n embeddings_initializer='zeros',\n name='user_biases',\n trainable=train_biases)\n self.bi = tf.keras.layers.Embedding(\n input_dim=sizes[1],\n output_dim=1,\n embeddings_initializer='zeros',\n name='item_biases',\n trainable=train_biases)\n self.gamma = tf.Variable(\n initial_value=args.gamma * tf.keras.backend.ones(1), trainable=False)\n self.rhs_dep_lhs = False\n\n @abc.abstractmethod\n def get_queries(self, input_tensor):\n \"\"\"Get query embeddings using user and item for an index tensor.\n\n Args:\n input_tensor: Tensor of size batch_size x 2 containing users and items'\n indices.\n\n Returns:\n Tensor of size batch_size x embedding_dimension representing users'\n embeddings.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def get_rhs(self, input_tensor):\n \"\"\"Get right hand side (item) embeddings for an index tensor.\n\n Args:\n input_tensor: Tensor of size batch_size x 2 containing users and items'\n indices.\n\n Returns:\n Tensor of size batch_size x embedding_dimension representing item\n entities' embeddings.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def get_candidates(self, input_tensor=None):\n \"\"\"Get all candidate item embeddings in a CF dataset.\n\n Args:\n input_tensor: Tensor of size batch_size x 2 containing users and items'\n indices, or None\n\n Returns:\n Tensor of size (batch_size x) n_items x embedding_dimension\n representing embeddings for all items in the CF\n if self.rhs_dep_lhs = False (True).\n \"\"\"\n pass\n\n @abc.abstractmethod\n def similarity_score(self, lhs, rhs, eval_mode):\n \"\"\"Computes a similarity score between user and item embeddings.\n\n Args:\n lhs: Tensor of size B1 x embedding_dimension containing users'\n embeddings.\n rhs: Tensor of size (B1 x) B2 x embedding_dimension containing items'\n embeddings if self.rhs_dep_lhs = False (True).\n eval_mode: boolean to indicate whether to compute all pairs of scores or\n not. If False, B2 must be equal to 1.\n\n Returns:\n Tensor representing similarity scores. If eval_mode is False, this tensor\n has size B1 x 1, otherwise it has size B1 x B2.\n \"\"\"\n pass\n\n def call(self, input_tensor, eval_mode=False):\n \"\"\"Forward pass of CF embedding models.\n\n Args:\n input_tensor: Tensor of size batch_size x 2 containing pairs' indices.\n eval_mode: boolean to indicate whether to compute scores against all\n possible item entities in the CF, or only individual pairs' scores.\n\n Returns:\n Tensor containing pairs scores. 
If eval_mode is False, this tensor\n has size batch_size x 1, otherwise it has size batch_size x n_item\n where n_item is the total number of items in the CF.\n \"\"\"\n lhs = self.get_queries(input_tensor)\n lhs_biases = self.bu(input_tensor[:, 0])\n if eval_mode:\n rhs = self.get_candidates(input_tensor)\n rhs_biases = self.bi.embeddings\n else:\n rhs = self.get_rhs(input_tensor)\n rhs_biases = self.bi(input_tensor[:, 1])\n predictions = self.score(lhs, lhs_biases, rhs, rhs_biases, eval_mode)\n return predictions\n\n def score(self, lhs, lhs_biases, rhs, rhs_biases, eval_mode):\n \"\"\"Compute pairs scores using embeddings and biases.\"\"\"\n score = self.similarity_score(lhs, rhs, eval_mode)\n if self.bias == 'constant':\n return score + self.gamma\n elif self.bias == 'learn':\n if eval_mode:\n return score + tf.reshape(rhs_biases, [1, -1])\n else:\n return score + rhs_biases\n else:\n return score\n\n def get_scores_targets(self, input_tensor):\n \"\"\"Computes pairs' scores as well as scores against all possible entities.\n\n Args:\n input_tensor: Tensor of size batch_size x 2 containing pairs' indices.\n\n Returns:\n scores: Numpy array of size batch_size x n_items containing users'\n scores against all possible items in the CF.\n targets: Numpy array of size batch_size x 1 containing pairs' scores.\n \"\"\"\n cand = self.get_candidates(input_tensor)\n cand_biases = self.bi.embeddings\n lhs = self.get_queries(input_tensor)\n lhs_biases = self.bu(input_tensor[:, 0])\n rhs = self.get_rhs(input_tensor)\n rhs_biases = self.bi(input_tensor[:, 1])\n scores = self.score(lhs, lhs_biases, cand, cand_biases, eval_mode=True)\n targets = self.score(lhs, lhs_biases, rhs, rhs_biases, eval_mode=False)\n return scores.numpy(), targets.numpy()\n\n def eval(self, examples, filters, batch_size=500):\n \"\"\"Compute ranking-based evaluation metrics in full setting.\n\n Args:\n examples: Tensor of size n_examples x 2 containing pairs' indices.\n filters: Dict representing items to skip per user for evaluation in\n the filtered setting.\n batch_size: batch size to use to compute scores.\n\n Returns:\n Numpy array of shape (n_examples, ) containing the rank of each example.\n \"\"\"\n total_examples = tf.data.experimental.cardinality(examples).numpy()\n batch_size = min(batch_size, total_examples)\n ranks = np.ones(total_examples)\n for counter, input_tensor in enumerate(examples.batch(batch_size)):\n if batch_size * counter >= total_examples:\n break\n scores, targets = self.get_scores_targets(input_tensor)\n for i, query in enumerate(input_tensor):\n query = query.numpy()\n filter_out = filters[query[0]]\n scores[i, filter_out] = -1e6\n ranks[counter * batch_size:(counter + 1) * batch_size] += np.sum(\n (scores >= targets), axis=1)\n return ranks\n\n def random_eval(self,\n examples,\n filters,\n batch_size=500,\n num_rand=100,\n seed=1234):\n \"\"\"Compute ranking-based evaluation metrics in both full and random settings.\n\n Args:\n examples: Tensor of size n_examples x 2 containing pairs' indices.\n filters: Dict representing items to skip per user for evaluation in\n the filtered setting.\n batch_size: batch size to use to compute scores.\n num_rand: number of negative samples to draw.\n seed: seed for random sampling.\n\n Returns:\n ranks: Numpy array of shape (n_examples, ) containing the rank of each\n example in full setting (ranking against the full item corpus).\n ranks_random: Numpy array of shape (n_examples, ) containing the rank of\n each example in random setting (ranking against randomly 
selected\n num_rand items).\n \"\"\"\n total_examples = tf.data.experimental.cardinality(examples).numpy()\n batch_size = min(batch_size, total_examples)\n ranks = np.ones(total_examples)\n ranks_random = np.ones(total_examples)\n for counter, input_tensor in enumerate(examples.batch(batch_size)):\n if batch_size * counter >= total_examples:\n break\n scores, targets = self.get_scores_targets(input_tensor)\n scores_random = np.ones(shape=(scores.shape[0], num_rand))\n for i, query in enumerate(input_tensor):\n query = query.numpy()\n filter_out = filters[query[0]]\n scores[i, filter_out] = -1e6\n comp_filter_out = list(\n set(list(range(scores.shape[1]))) - set(filter_out))\n np.random.seed(seed)\n random_indices = np.random.choice(\n comp_filter_out, num_rand, replace=False)\n scores_random[i, :] = scores[i, random_indices]\n ranks[counter * batch_size:(counter + 1) * batch_size] += np.sum(\n (scores >= targets), axis=1)\n ranks_random[counter * batch_size:(counter + 1) * batch_size] += np.sum(\n (scores_random >= targets), axis=1)\n\n return ranks, ranks_random\n",
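The rank statistic used by eval and random_eval is 1 + the number of candidate scores that are >= the target score, computed after filtered items are pushed to -1e6. A toy NumPy illustration (the scores, targets, and filters are invented for this example):

import numpy as np

scores = np.array([[0.2, 0.9, 0.4, 0.1],
                   [0.8, 0.3, 0.7, 0.6]])
targets = np.array([[0.4],
                    [0.8]])
filters = [[1], []]  # per-example item indices to exclude from ranking

ranks = np.ones(len(scores))
for i, filter_out in enumerate(filters):
  scores[i, filter_out] = -1e6  # same sentinel as the class above
ranks += np.sum(scores >= targets, axis=1)
print(ranks)  # [2. 2.] -- note the target's own score is counted by '>='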
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unprocessing evaluation on the Darmstadt Noise Dataset.\n\nUnprocessing Images for Learned Raw Denoising\nhttp://timothybrooks.com/tech/unprocessing\n\nThis file denoises images from the Darmstadt Noise Dataset using the\nunprocessing neural networks. The full Darmstadt code and data should be\ndownloaded from https://noise.visinf.tu-darmstadt.de/downloads and this file\nshould replace the dnd_denoise.py file that is provided.\n\nThis file is modified from the original version by Tobias Plotz, TU Darmstadt\n([email protected]), and is part of the implementation as\ndescribed in the CVPR 2017 paper: Benchmarking Denoising Algorithms with Real\nPhotographs, Tobias Plotz and Stefan Roth. Modified by Tim Brooks of Google in\n2019. The original license is below.\n\nCopyright (c) 2017, Technische Universitat Darmstadt\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\nlist of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\nthis list of conditions and the following disclaimer in the documentation and/or\nother materials provided with the distribution.\n\n3. Any redistribution, use, or modification is done solely for non-commercial\npurposes. Examples of non-commercial uses are teaching, academic research,\npublic demonstrations and personal experimentation.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom absl import app\nfrom absl import flags\nimport h5py\nimport numpy as np\nimport scipy.io as sio\nimport tensorflow.compat.v1 as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'model_ckpt',\n None,\n 'Path to checkpoint of a trained unprocessing model. For example: '\n '/path/to/models/unprocessing_srgb_loss/model.ckpt-3516383')\n\nflags.DEFINE_string(\n 'data_dir',\n None,\n 'Location from which to load input noisy images. 
This should correspond '\n 'with the \\'data\\' directory downloaded as part of the Darmstadt Noise '\n 'Dataset.')\n\nflags.DEFINE_string(\n 'output_dir',\n None,\n 'Location at which to save output denoised images.')\n\n\ndef denoise_raw(denoiser, data_dir, output_dir):\n \"\"\"Denoises all bounding boxes in all raw images from the DND dataset.\n\n The resulting denoised images are saved to disk.\n\n Args:\n denoiser: Function handle called as:\n denoised_img = denoiser(noisy_img, shot_noise, read_noise).\n data_dir: Folder where the DND dataset resides\n output_dir: Folder where denoised output should be written to\n\n Returns:\n None\n \"\"\"\n # Loads image information and bounding boxes.\n info = h5py.File(os.path.join(data_dir, 'info.mat'), 'r')['info']\n bb = info['boundingboxes']\n\n # Denoise each image.\n for i in range(50):\n # Loads the noisy image.\n filename = os.path.join(data_dir, 'images_raw', '%04d.mat' % (i + 1))\n img = h5py.File(filename, 'r')\n noisy = np.float32(np.array(img['Inoisy']).T)\n\n # Loads raw Bayer color pattern.\n bayer_pattern = np.asarray(info[info['camera'][0][i]]['pattern']).tolist()\n\n # Denoises each bounding box in this image.\n boxes = np.array(info[bb[0][i]]).T\n for k in range(20):\n # Crops the image to this bounding box.\n idx = [\n int(boxes[k, 0] - 1),\n int(boxes[k, 2]),\n int(boxes[k, 1] - 1),\n int(boxes[k, 3])\n ]\n noisy_crop = noisy[idx[0]:idx[1], idx[2]:idx[3]].copy()\n\n # Flips the raw image to ensure RGGB Bayer color pattern.\n if (bayer_pattern == [[1, 2], [2, 3]]):\n pass\n elif (bayer_pattern == [[2, 1], [3, 2]]):\n noisy_crop = np.fliplr(noisy_crop)\n elif (bayer_pattern == [[2, 3], [1, 2]]):\n noisy_crop = np.flipud(noisy_crop)\n else:\n print('Warning: assuming unknown Bayer pattern is RGGB.')\n\n # Loads shot and read noise factors.\n nlf_h5 = info[info['nlf'][0][i]]\n shot_noise = nlf_h5['a'][0][0]\n read_noise = nlf_h5['b'][0][0]\n\n # Extracts each Bayer image plane.\n denoised_crop = noisy_crop.copy()\n height, width = noisy_crop.shape\n channels = []\n for yy in range(2):\n for xx in range(2):\n noisy_crop_c = noisy_crop[yy:height:2, xx:width:2].copy()\n channels.append(noisy_crop_c)\n channels = np.stack(channels, axis=-1)\n\n # Denoises this crop of the image.\n output = denoiser(channels, shot_noise, read_noise)\n\n # Copies denoised results to output denoised array.\n for yy in range(2):\n for xx in range(2):\n denoised_crop[yy:height:2, xx:width:2] = output[:, :, 2 * yy + xx]\n\n # Flips denoised image back to original Bayer color pattern.\n if (bayer_pattern == [[1, 2], [2, 3]]):\n pass\n elif (bayer_pattern == [[2, 1], [3, 2]]):\n denoised_crop = np.fliplr(denoised_crop)\n elif (bayer_pattern == [[2, 3], [1, 2]]):\n denoised_crop = np.flipud(denoised_crop)\n\n # Saves denoised image crop.\n denoised_crop = np.clip(np.float32(denoised_crop), 0.0, 1.0)\n save_file = os.path.join(output_dir, '%04d_%02d.mat' % (i + 1, k + 1))\n sio.savemat(save_file, {'denoised_crop': denoised_crop})\n\n\ndef main(_):\n with tf.Graph().as_default() as graph:\n with tf.Session(graph=graph) as sess:\n saver = tf.train.import_meta_graph(FLAGS.model_ckpt + '.meta')\n saver.restore(sess, FLAGS.model_ckpt)\n\n def denoiser(noisy_img, shot_noise, read_noise):\n \"\"\"Unprocessing denoiser.\"\"\"\n denoised_img_tensor = graph.get_tensor_by_name('denoised_img:0')\n noisy_img_tensor = graph.get_tensor_by_name('noisy_img:0')\n shot_noise_tensor = graph.get_tensor_by_name('stddev/shot_noise:0')\n read_noise_tensor = 
graph.get_tensor_by_name('stddev/read_noise:0')\n feed_dict = {\n noisy_img_tensor: noisy_img[np.newaxis, :, :, :],\n shot_noise_tensor: np.asarray([shot_noise]),\n read_noise_tensor: np.asarray([read_noise])\n }\n return sess.run(denoised_img_tensor, feed_dict=feed_dict)[0, :, :, :]\n\n denoise_raw(denoiser, FLAGS.data_dir, FLAGS.output_dir)\n\n\nif __name__ == '__main__':\n app.run(main)\n",
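The Bayer-plane packing in denoise_raw splits the raw mosaic into four half-resolution phase planes, denoises them as channels, and writes the output back to the same phases. A standalone NumPy round-trip check with an identity "denoiser" (the 6x6 mosaic is a made-up stand-in):

import numpy as np

raw = np.arange(36, dtype=np.float32).reshape(6, 6)  # stand-in raw mosaic
height, width = raw.shape

# Pack the four 2x2 Bayer phases into channels, as in denoise_raw().
channels = [raw[yy:height:2, xx:width:2]
            for yy in range(2) for xx in range(2)]
packed = np.stack(channels, axis=-1)  # shape (3, 3, 4)

# Unpack with an identity "denoiser" and check the round trip is lossless.
out = np.empty_like(raw)
for yy in range(2):
  for xx in range(2):
    out[yy:height:2, xx:width:2] = packed[:, :, 2 * yy + xx]
assert np.array_equal(raw, out)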
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Shuffle and Learn loss for unsupervised training.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tcc.algos.algorithm import Algorithm\nfrom tcc.config import CONFIG\nfrom tcc.models import Classifier\nfrom tcc.utils import random_choice_noreplace\n\n\ndef randomly_reverse_indices(indices):\n \"\"\"Randomly reverse the indices.\"\"\"\n return tf.cond(tf.random.uniform(()) < 0.5,\n lambda: indices,\n lambda: indices[:, ::-1])\n\n\ndef get_shuffled_indices_and_labels(batch_size, num_samples, shuffle_fraction,\n num_steps):\n \"\"\"Produce possibly shuffled indices and labels.\"\"\"\n total_num_samples = batch_size * num_samples\n num_shuffled_examples = int(shuffle_fraction * total_num_samples)\n\n shuffle_labels = tf.random.shuffle(tf.cast(\n num_shuffled_examples*[1] +\n (total_num_samples - num_shuffled_examples) * [0], tf.int32))\n indices = tf.sort(random_choice_noreplace(\n total_num_samples, num_steps)[:, :5], axis=1)\n indices = randomly_reverse_indices(indices)\n shuffled_samples = tf.where(\n tf.less_equal(tf.random.uniform((total_num_samples, 1)), 0.5),\n tf.gather(indices, [1, 0, 3], axis=1),\n tf.gather(indices, [1, 4, 3], axis=1))\n ordered_samples = tf.gather(indices, [1, 2, 3], axis=1)\n indices = tf.where(tf.equal(tf.expand_dims(shuffle_labels, axis=-1), 1),\n shuffled_samples, ordered_samples)\n\n return indices, shuffle_labels\n\n\ndef sample_batch(embs, batch_size, num_steps):\n \"\"\"Returns concatenated features and shuffle labels.\"\"\"\n shuffle_fraction = CONFIG.SAL.SHUFFLE_FRACTION\n num_samples = CONFIG.SAL.NUM_SAMPLES\n indices, labels = get_shuffled_indices_and_labels(batch_size,\n num_samples,\n shuffle_fraction,\n num_steps)\n labels = tf.one_hot(labels, 2)\n labels = tf.stop_gradient(labels)\n indices = tf.stop_gradient(indices)\n embs = tf.tile(embs, [num_samples, 1, 1])\n embs = tf.gather(embs, indices, batch_dims=-1)\n concat_embs = tf.squeeze(tf.concat(tf.split(embs, 3, axis=1), axis=-1),\n axis=1)\n return concat_embs, labels\n\n\nclass SaL(Algorithm):\n \"\"\"Shuffle and Learn algorithm (https://arxiv.org/abs/1603.08561) .\"\"\"\n\n def __init__(self, model=None):\n super(SaL, self).__init__(model)\n if CONFIG.SAL.FC_LAYERS[-1][0] != 2:\n raise ValueError('Shuffle and Learn classifier has only 2 classes:'\n 'correct order or incorrect order. 
Ensure last layer in '\n 'config.sal.fc_layers is 2.')\n\n sal_classifier = Classifier(CONFIG.SAL.FC_LAYERS, CONFIG.SAL.DROPOUT_RATE)\n self.model['sal_classifier'] = sal_classifier\n\n def get_algo_variables(self):\n return self.model['sal_classifier'].variables\n\n def compute_loss(self, embs, steps, seq_lens, global_step, training,\n frame_labels, seq_labels):\n if training:\n batch_size = CONFIG.TRAIN.BATCH_SIZE\n num_steps = CONFIG.TRAIN.NUM_FRAMES\n else:\n batch_size = CONFIG.EVAL.BATCH_SIZE\n num_steps = CONFIG.EVAL.NUM_FRAMES\n\n concat_embs, labels = sample_batch(embs, batch_size, num_steps)\n logits = self.model['sal_classifier'](concat_embs)\n\n loss = tf.reduce_mean(\n tf.keras.losses.categorical_crossentropy(\n y_true=labels,\n y_pred=logits,\n from_logits=True,\n label_smoothing=CONFIG.SAL.LABEL_SMOOTHING))\n\n return loss\n",
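The triplet construction in get_shuffled_indices_and_labels can be mimicked in plain NumPy: from five sorted frame indices, positives keep the temporally ordered middle frame and negatives replace it with an out-of-order frame from either end. A toy version (all sizes are invented, and the TF-specific reversal and batching are omitted):

import numpy as np

rng = np.random.default_rng(0)
num_steps, num_samples, shuffle_fraction = 20, 6, 0.5

# Five sorted frame indices per sample (mirrors the [:, :5] sort above).
frames = np.stack([np.sort(rng.choice(num_steps, 5, replace=False))
                   for _ in range(num_samples)])
labels = rng.random(num_samples) < shuffle_fraction  # True = shuffled triple

triplets = []
for row, shuffled in zip(frames, labels):
  if shuffled:
    # Replace the middle frame with one from outside the (i1, i3) window.
    middle = row[0] if rng.random() < 0.5 else row[4]
    triplets.append([row[1], middle, row[3]])
  else:
    triplets.append([row[1], row[2], row[3]])  # temporally ordered
print(np.array(triplets))
print(labels.astype(np.int32))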
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#!/usr/bin/env python\n\"\"\"Ground-truth state 2-step Agent.\"\"\"\n\nimport time\n\nimport numpy as np\n\nfrom ravens import utils\nfrom ravens.agents import GtState6DAgent\nfrom ravens.agents import GtStateAgent\nfrom ravens.models import mdn_utils\nfrom ravens.models import MlpModel\nimport tensorflow as tf\ntf.compat.v1.enable_eager_execution()\n\n\nclass GtState2StepAgent(GtStateAgent):\n \"\"\"Agent which uses ground-truth state information -- useful as a baseline.\"\"\"\n\n def __init__(self, name, task):\n super(GtState2StepAgent, self).__init__(name, task)\n\n # Set up model.\n self.pick_model = None\n self.place_model = None\n\n self.pick_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)\n self.place_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)\n self.metric = tf.keras.metrics.Mean(name='metric')\n self.val_metric = tf.keras.metrics.Mean(name='val_metric')\n\n def init_model(self, dataset):\n \"\"\"Initialize models, including normalization parameters.\"\"\"\n self.set_max_obs_vector_length(dataset)\n\n _, _, info = dataset.random_sample()\n obs_vector = self.info_to_gt_obs(info)\n\n # Setup pick model\n obs_dim = obs_vector.shape[0]\n act_dim = 3\n self.pick_model = MlpModel(\n self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)\n\n sampled_gt_obs = []\n\n num_samples = 1000\n for _ in range(num_samples):\n _, _, info = dataset.random_sample()\n t_worldaug_world, _ = self.get_augmentation_transform()\n sampled_gt_obs.append(self.info_to_gt_obs(info, t_worldaug_world))\n\n sampled_gt_obs = np.array(sampled_gt_obs)\n\n obs_train_parameters = dict()\n obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(\n np.float32)\n obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(\n np.float32)\n self.pick_model.set_normalization_parameters(obs_train_parameters)\n\n # Setup pick-conditioned place model\n obs_dim = obs_vector.shape[0] + act_dim\n act_dim = 3\n self.place_model = MlpModel(\n self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)\n\n sampled_gt_obs = []\n\n num_samples = 1000\n for _ in range(num_samples):\n _, act, info = dataset.random_sample()\n t_worldaug_world, _ = self.get_augmentation_transform()\n obs = self.info_to_gt_obs(info, t_worldaug_world)\n obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))\n sampled_gt_obs.append(obs)\n\n sampled_gt_obs = np.array(sampled_gt_obs)\n\n obs_train_parameters = dict()\n obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(\n np.float32)\n obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(\n np.float32)\n self.place_model.set_normalization_parameters(obs_train_parameters)\n\n def train(self, dataset, num_iter, writer, validation_dataset):\n \"\"\"Train on dataset for a specific number of iterations.\"\"\"\n\n if self.pick_model is None:\n self.init_model(dataset)\n\n if self.use_mdn:\n 
loss_criterion = mdn_utils.mdn_loss\n else:\n loss_criterion = tf.keras.losses.MeanSquaredError()\n\n @tf.function\n def train_step(pick_model, place_model, batch_obs, batch_act,\n loss_criterion):\n with tf.GradientTape() as tape:\n prediction = pick_model(batch_obs)\n loss0 = loss_criterion(batch_act[:, 0:3], prediction)\n grad = tape.gradient(loss0, pick_model.trainable_variables)\n self.pick_optim.apply_gradients(\n zip(grad, pick_model.trainable_variables))\n with tf.GradientTape() as tape:\n # batch_obs = tf.concat((batch_obs, batch_act[:,0:3] +\n # tf.random.normal(shape=batch_act[:,0:3].shape,\n # stddev=0.001)), axis=1)\n batch_obs = tf.concat((batch_obs, batch_act[:, 0:3]), axis=1)\n prediction = place_model(batch_obs)\n loss1 = loss_criterion(batch_act[:, 3:], prediction)\n grad = tape.gradient(loss1, place_model.trainable_variables)\n self.place_optim.apply_gradients(\n zip(grad, place_model.trainable_variables))\n return loss0 + loss1\n\n print_rate = 100\n for i in range(num_iter):\n start = time.time()\n\n batch_obs, batch_act, _, _, _ = self.get_data_batch(dataset)\n\n # Forward through model, compute training loss, update weights.\n self.metric.reset_states()\n loss = train_step(self.pick_model, self.place_model, batch_obs, batch_act,\n loss_criterion)\n self.metric(loss)\n with writer.as_default():\n tf.summary.scalar(\n 'gt_state_loss', self.metric.result(), step=self.total_iter + i)\n\n if i % print_rate == 0:\n loss = np.float32(loss)\n print(f'Train Iter: {self.total_iter + i} Loss: {loss:.4f} Iter time:',\n time.time() - start)\n # utils.meshcat_visualize(self.vis, obs, act, info)\n\n self.total_iter += num_iter\n self.save()\n\n def act(self, obs, info):\n \"\"\"Run inference and return best action.\"\"\"\n act = {'camera_config': self.camera_config, 'primitive': None}\n\n # Get observations and run pick prediction\n gt_obs = self.info_to_gt_obs(info)\n pick_prediction = self.pick_model(gt_obs[None, Ellipsis])\n if self.use_mdn:\n pi, mu, var = pick_prediction\n # prediction = mdn_utils.pick_max_mean(pi, mu, var)\n pick_prediction = mdn_utils.sample_from_pdf(pi, mu, var)\n pick_prediction = pick_prediction[:, 0, :]\n pick_prediction = pick_prediction[0] # unbatch\n\n # Get observations and run place prediction\n obs_with_pick = np.hstack((gt_obs, pick_prediction))\n\n # since the pick at train time is always 0.0,\n # the predictions are unstable if not exactly 0\n obs_with_pick[-1] = 0.0\n\n place_prediction = self.place_model(obs_with_pick[None, Ellipsis])\n if self.use_mdn:\n pi, mu, var = place_prediction\n # prediction = mdn_utils.pick_max_mean(pi, mu, var)\n place_prediction = mdn_utils.sample_from_pdf(pi, mu, var)\n place_prediction = place_prediction[:, 0, :]\n place_prediction = place_prediction[0]\n\n prediction = np.hstack((pick_prediction, place_prediction))\n\n # just go exactly to objects, from observations\n # p0_position = np.hstack((gt_obs[3:5], 0.02))\n # p0_rotation = utils.eulerXYZ_to_quatXYZW(\n # (0, 0, -gt_obs[5]*self.theta_scale))\n # p1_position = np.hstack((gt_obs[0:2], 0.02))\n # p1_rotation = utils.eulerXYZ_to_quatXYZW(\n # (0, 0, -gt_obs[2]*self.theta_scale))\n\n # just go exactly to objects, predicted\n p0_position = np.hstack((prediction[0:2], 0.02))\n p0_rotation = utils.eulerXYZ_to_quatXYZW(\n (0, 0, -prediction[2] * self.theta_scale))\n p1_position = np.hstack((prediction[3:5], 0.02))\n p1_rotation = utils.eulerXYZ_to_quatXYZW(\n (0, 0, -prediction[5] * self.theta_scale))\n\n # Select task-specific motion primitive.\n 
act['primitive'] = 'pick_place'\n if self.task == 'sweeping':\n act['primitive'] = 'sweep'\n elif self.task == 'pushing':\n act['primitive'] = 'push'\n\n params = {\n 'pose0': (p0_position, p0_rotation),\n 'pose1': (p1_position, p1_rotation)\n }\n act['params'] = params\n return act\n\n #-------------------------------------------------------------------------\n # Helper Functions\n #-------------------------------------------------------------------------\n\n def load(self, num_iter):\n \"\"\"Load something.\"\"\"\n\n # Do something here.\n # self.model.load(os.path.join(self.models_dir, model_fname))\n # Update total training iterations of agent.\n self.total_iter = num_iter\n\n def save(self):\n \"\"\"Save models.\"\"\"\n # Do something here.\n # self.model.save(os.path.join(self.models_dir, model_fname))\n pass\n\n\nclass GtState3Step6DAgent(GtState6DAgent):\n \"\"\"Agent which uses ground-truth state information -- useful as a baseline.\"\"\"\n\n def __init__(self, name, task):\n super().__init__(name, task)\n\n # Set up model.\n self.pick_model = None\n self.place_se2_model = None\n self.place_rpz_model = None\n\n self.pick_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)\n self.place_se2_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)\n self.place_rpz_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)\n\n self.metric = tf.keras.metrics.Mean(name='metric')\n self.val_metric = tf.keras.metrics.Mean(name='val_metric')\n\n def init_model(self, dataset):\n \"\"\"Initialize models, including normalization parameters.\"\"\"\n self.set_max_obs_vector_length(dataset)\n\n _, _, info = dataset.random_sample()\n obs_vector = self.info_to_gt_obs(info)\n\n # Setup pick model\n obs_dim = obs_vector.shape[0]\n act_dim = 3\n self.pick_model = MlpModel(\n self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)\n\n sampled_gt_obs = []\n\n num_samples = 1000\n for _ in range(num_samples):\n _, _, info = dataset.random_sample()\n t_worldaug_world, _ = self.get_augmentation_transform()\n sampled_gt_obs.append(self.info_to_gt_obs(info, t_worldaug_world))\n\n sampled_gt_obs = np.array(sampled_gt_obs)\n\n obs_train_parameters = dict()\n obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(\n np.float32)\n obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(\n np.float32)\n self.pick_model.set_normalization_parameters(obs_train_parameters)\n\n # Setup pick-conditioned place se2 model\n obs_dim = obs_vector.shape[0] + act_dim\n act_dim = 3\n self.place_se2_model = MlpModel(\n self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)\n\n sampled_gt_obs = []\n\n num_samples = 1000\n for _ in range(num_samples):\n _, act, info = dataset.random_sample()\n t_worldaug_world, _ = self.get_augmentation_transform()\n obs = self.info_to_gt_obs(info, t_worldaug_world)\n obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))\n sampled_gt_obs.append(obs)\n\n sampled_gt_obs = np.array(sampled_gt_obs)\n\n obs_train_parameters = dict()\n obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(\n np.float32)\n obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(\n np.float32)\n self.place_se2_model.set_normalization_parameters(obs_train_parameters)\n\n # Setup pick-conditioned place rpz model\n obs_dim = obs_vector.shape[0] + act_dim + 3\n act_dim = 3\n self.place_rpz_model = MlpModel(\n self.batch_size, obs_dim, act_dim, 'relu', self.use_mdn, dropout=0.1)\n\n sampled_gt_obs = []\n\n num_samples = 1000\n for _ in 
range(num_samples):\n _, act, info = dataset.random_sample()\n t_worldaug_world, _ = self.get_augmentation_transform()\n obs = self.info_to_gt_obs(info, t_worldaug_world)\n obs = np.hstack((obs, self.act_to_gt_act(act, t_worldaug_world)[:3]))\n sampled_gt_obs.append(obs)\n\n sampled_gt_obs = np.array(sampled_gt_obs)\n\n obs_train_parameters = dict()\n obs_train_parameters['mean'] = sampled_gt_obs.mean(axis=(0)).astype(\n np.float32)\n obs_train_parameters['std'] = sampled_gt_obs.std(axis=(0)).astype(\n np.float32)\n self.place_rpz_model.set_normalization_parameters(obs_train_parameters)\n\n def train(self, dataset, num_iter, writer, validation_dataset):\n \"\"\"Train on dataset for a specific number of iterations.\"\"\"\n\n if self.pick_model is None:\n self.init_model(dataset)\n\n if self.use_mdn:\n loss_criterion = mdn_utils.mdn_loss\n else:\n loss_criterion = tf.keras.losses.MeanSquaredError()\n\n @tf.function\n def train_step(pick_model, place_se2_model, place_rpz_model, batch_obs,\n batch_act, loss_criterion):\n with tf.GradientTape() as tape:\n prediction = pick_model(batch_obs)\n loss0 = loss_criterion(batch_act[:, 0:3], prediction)\n grad = tape.gradient(loss0, pick_model.trainable_variables)\n self.pick_optim.apply_gradients(\n zip(grad, pick_model.trainable_variables))\n with tf.GradientTape() as tape:\n batch_obs = tf.concat((batch_obs, batch_act[:, 0:3]), axis=1)\n prediction = place_se2_model(batch_obs)\n loss1 = loss_criterion(batch_act[:, 3:6], prediction)\n grad = tape.gradient(loss1, place_se2_model.trainable_variables)\n self.place_se2_optim.apply_gradients(\n zip(grad, place_se2_model.trainable_variables))\n with tf.GradientTape() as tape:\n batch_obs = tf.concat((batch_obs, batch_act[:, 3:6]), axis=1)\n prediction = place_rpz_model(batch_obs)\n loss2 = loss_criterion(batch_act[:, 6:], prediction)\n grad = tape.gradient(loss2, place_rpz_model.trainable_variables)\n self.place_rpz_optim.apply_gradients(\n zip(grad, place_rpz_model.trainable_variables))\n return loss0 + loss1 + loss2\n\n print_rate = 100\n for i in range(num_iter):\n start = time.time()\n\n batch_obs, batch_act, _, _, _ = self.get_data_batch(dataset)\n\n # Forward through model, compute training loss, update weights.\n self.metric.reset_states()\n loss = train_step(self.pick_model, self.place_se2_model,\n self.place_rpz_model, batch_obs, batch_act,\n loss_criterion)\n self.metric(loss)\n with writer.as_default():\n tf.summary.scalar(\n 'gt_state_loss', self.metric.result(), step=self.total_iter + i)\n\n if i % print_rate == 0:\n loss = np.float32(loss)\n print(f'Train Iter: {self.total_iter + i} Loss: {loss:.4f} Iter time:',\n time.time() - start)\n # utils.meshcat_visualize(self.vis, obs, act, info)\n\n self.total_iter += num_iter\n self.save()\n\n def act(self, obs, info):\n \"\"\"Run inference and return best action.\"\"\"\n act = {'camera_config': self.camera_config, 'primitive': None}\n\n # Get observations and run pick prediction\n gt_obs = self.info_to_gt_obs(info)\n pick_prediction = self.pick_model(gt_obs[None, Ellipsis])\n if self.use_mdn:\n pi, mu, var = pick_prediction\n # prediction = mdn_utils.pick_max_mean(pi, mu, var)\n pick_prediction = mdn_utils.sample_from_pdf(pi, mu, var)\n pick_prediction = pick_prediction[:, 0, :]\n pick_prediction = pick_prediction[0] # unbatch\n\n # Get observations and run place prediction\n obs_with_pick = np.hstack((gt_obs, pick_prediction)).astype(np.float32)\n\n # since the pick at train time is always 0.0,\n # the predictions are unstable if not exactly 0\n 
obs_with_pick[-1] = 0.0\n\n place_se2_prediction = self.place_se2_model(obs_with_pick[None, Ellipsis])\n if self.use_mdn:\n pi, mu, var = place_se2_prediction\n # prediction = mdn_utils.pick_max_mean(pi, mu, var)\n place_se2_prediction = mdn_utils.sample_from_pdf(pi, mu, var)\n place_se2_prediction = place_se2_prediction[:, 0, :]\n place_se2_prediction = place_se2_prediction[0]\n\n # Get observations and run rpz prediction\n obs_with_pick_place_se2 = np.hstack(\n (obs_with_pick, place_se2_prediction)).astype(np.float32)\n\n place_rpz_prediction = self.place_rpz_model(obs_with_pick_place_se2[None,\n Ellipsis])\n if self.use_mdn:\n pi, mu, var = place_rpz_prediction\n # prediction = mdn_utils.pick_max_mean(pi, mu, var)\n place_rpz_prediction = mdn_utils.sample_from_pdf(pi, mu, var)\n place_rpz_prediction = place_rpz_prediction[:, 0, :]\n place_rpz_prediction = place_rpz_prediction[0]\n\n p0_position = np.hstack((pick_prediction[0:2], 0.02))\n p0_rotation = utils.eulerXYZ_to_quatXYZW((0, 0, 0))\n\n p1_position = np.hstack(\n (place_se2_prediction[0:2], place_rpz_prediction[2]))\n p1_rotation = utils.eulerXYZ_to_quatXYZW(\n (place_rpz_prediction[0] * self.theta_scale,\n place_rpz_prediction[1] * self.theta_scale,\n -place_se2_prediction[2] * self.theta_scale))\n\n # Select task-specific motion primitive.\n act['primitive'] = 'pick_place_6dof'\n\n params = {\n 'pose0': (p0_position, p0_rotation),\n 'pose1': (p1_position, p1_rotation)\n }\n act['params'] = params\n return act\n",
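Both GT-state agents above share the same cascaded training pattern: a pick model is regressed onto the first three action dimensions, and each downstream place model is trained with teacher forcing, i.e. conditioned on the ground-truth upstream action rather than on the pick model's own prediction. The sketch below restates that two-stage `train_step` in isolation; it is a minimal illustration only, with plain Keras `Dense` stacks standing in for `MlpModel`, and the observation/action widths, optimizers, and MSE loss chosen as assumptions for the example.

```python
import tensorflow as tf

# Hypothetical stand-ins for MlpModel: obs -> 3-DoF action regressors.
OBS_DIM = 40  # assumed observation vector length
pick_model = tf.keras.Sequential(
    [tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(3)])
place_model = tf.keras.Sequential(
    [tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(3)])
pick_model.build((None, OBS_DIM))
place_model.build((None, OBS_DIM + 3))  # place model sees obs + pick action
pick_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)
place_optim = tf.keras.optimizers.Adam(learning_rate=2e-4)
mse = tf.keras.losses.MeanSquaredError()

@tf.function
def train_step(batch_obs, batch_act):
  # Stage 1: predict the pick pose from the observation alone.
  with tf.GradientTape() as tape:
    loss0 = mse(batch_act[:, 0:3], pick_model(batch_obs))
  grads = tape.gradient(loss0, pick_model.trainable_variables)
  pick_optim.apply_gradients(zip(grads, pick_model.trainable_variables))
  # Stage 2: teacher forcing -- the place model is conditioned on the
  # ground-truth pick action, not on the pick model's prediction.
  obs_with_pick = tf.concat((batch_obs, batch_act[:, 0:3]), axis=1)
  with tf.GradientTape() as tape:
    loss1 = mse(batch_act[:, 3:], place_model(obs_with_pick))
  grads = tape.gradient(loss1, place_model.trainable_variables)
  place_optim.apply_gradients(zip(grads, place_model.trainable_variables))
  return loss0 + loss1

loss = train_step(tf.random.normal([32, OBS_DIM]), tf.random.normal([32, 6]))
```

At inference time the cascade runs on the model's own pick output instead, which is why `act()` forces the last entry of `obs_with_pick` to exactly 0.0 to match the training-time input distribution.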
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains run function for CNC.\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom sklearn.metrics import normalized_mutual_info_score as nmi\nfrom sklearn.preprocessing import OneHotEncoder\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.compat.v1 import gfile\nfrom tensorflow.compat.v1.keras.layers import Input\n\nfrom clustering_normalized_cuts import networks\nfrom clustering_normalized_cuts.util import print_accuracy\n\n\ndef run_net(data, params):\n \"\"\"run the network with the parameters.\"\"\"\n #\n # UNPACK DATA\n #\n\n x_train, y_train, x_val, y_val, x_test, y_test = data['cnc']['train_and_test']\n x_train_unlabeled, _, x_train_labeled, y_train_labeled = data['cnc'][\n 'train_unlabeled_and_labeled']\n x_val_unlabeled, _, _, _ = data['cnc']['val_unlabeled_and_labeled']\n\n if 'siamese' in params['affinity']:\n pairs_train, dist_train, pairs_val, dist_val = data['siamese'][\n 'train_and_test']\n\n x = np.concatenate((x_train, x_val, x_test), axis=0)\n y = np.concatenate((y_train, y_val, y_test), axis=0)\n\n if x_train_labeled:\n y_train_labeled_onehot = OneHotEncoder().fit_transform(\n y_train_labeled.reshape(-1, 1)).toarray()\n else:\n y_train_labeled_onehot = np.empty((0, len(np.unique(y))))\n\n #\n # SET UP INPUTS\n #\n\n # create true y placeholder (not used in unsupervised training)\n y_true = tf.placeholder(\n tf.float32, shape=(None, params['n_clusters']), name='y_true')\n\n batch_sizes = {\n 'Unlabeled': params['batch_size'],\n 'Labeled': params['batch_size']\n }\n\n input_shape = x.shape[1:]\n\n # inputs to CNC\n inputs = {\n 'Unlabeled': Input(shape=input_shape, name='UnlabeledInput'),\n 'Labeled': Input(shape=input_shape, name='LabeledInput'),\n }\n\n #\n # DEFINE AND TRAIN SIAMESE NET\n # http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf.\n\n # DEFINE AND TRAIN Siamese NET\n if params['affinity'] == 'siamese':\n siamese_net = networks.SiameseNet(inputs, params['siam_arch'],\n params.get('siam_reg'),\n params['main_path'], y_true)\n\n siamese_net.train(pairs_train, dist_train, pairs_val, dist_val,\n params['siam_lr'], params['siam_drop'],\n params['siam_patience'], params['siam_ne'],\n params['siam_batch_size'], params['dset'])\n\n else:\n siamese_net = None\n\n #\n # DEFINE AND TRAIN CNC NET\n #\n cnc_net = networks.CncNet(inputs, params['cnc_arch'], params.get('cnc_reg'),\n y_true, y_train_labeled_onehot,\n params['n_clusters'], params['affinity'],\n params['scale_nbr'], params['n_nbrs'], batch_sizes,\n params['result_path'], params['dset'], siamese_net,\n x_train, params['cnc_lr'], params['cnc_tau'],\n params['bal_reg'])\n\n cnc_net.train(x_train_unlabeled, x_train_labeled, x_val_unlabeled,\n params['cnc_drop'], params['cnc_patience'], params['min_tem'],\n params['cnc_epochs'])\n\n #\n # EVALUATE\n #\n\n x_cncnet = cnc_net.predict(x)\n prediction = np.argmax(x_cncnet, 1)\n 
accuracy_all = print_accuracy(prediction, y, params['n_clusters'])\n  nmi_score_all = nmi(prediction, y)\n  print('NMI: {0}'.format(np.round(nmi_score_all, 3)))\n\n  if params['generalization_metrics']:\n    x_cncnet_train = cnc_net.predict(x_train_unlabeled)\n    x_cncnet_test = cnc_net.predict(x_test)\n\n    prediction_train = np.argmax(x_cncnet_train, 1)\n    accuracy_train = print_accuracy(prediction_train, y_train,\n                                    params['n_clusters'])\n    nmi_score_train = nmi(prediction_train, y_train)\n    print('TRAIN NMI: {0}'.format(np.round(nmi_score_train, 3)))\n\n    prediction_test = np.argmax(x_cncnet_test, 1)\n    accuracy_test = print_accuracy(prediction_test, y_test, params['n_clusters'])\n    nmi_score_test = nmi(prediction_test, y_test)\n    print('TEST NMI: {0}'.format(np.round(nmi_score_test, 3)))\n    with gfile.Open(params['result_path'] + 'results', 'w') as f:\n      f.write(accuracy_all + ' ' + accuracy_train + ' ' + accuracy_test + '\\n')\n      f.write(\n          str(np.round(nmi_score_all, 3)) + ' ' +\n          str(np.round(nmi_score_train, 3)) + ' ' +\n          str(np.round(nmi_score_test, 3)) + '\\n')\n\n  else:\n    with gfile.Open(params['result_path'] + 'results', 'w') as f:\n      f.write(accuracy_all + ' ' + str(np.round(nmi_score_all, 3)) + '\\n')\n",
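For reference, the evaluation above reduces to hardening the network's soft cluster assignments with an argmax and scoring them against the labels with NMI. Because NMI is invariant to relabeling, the predicted cluster ids do not need to coincide with the label ids (accuracy, unlike NMI, needs an explicit cluster-to-label matching, which is presumably what `print_accuracy` performs). A self-contained sketch with made-up assignments; the array values are illustrative only:

```python
import numpy as np
from sklearn.metrics import normalized_mutual_info_score as nmi

# Made-up soft assignments for 6 points over 3 clusters.
x_cncnet = np.array([[0.8, 0.1, 0.1], [0.7, 0.2, 0.1],
                     [0.1, 0.8, 0.1], [0.2, 0.7, 0.1],
                     [0.1, 0.1, 0.8], [0.1, 0.2, 0.7]])
y = np.array([1, 1, 2, 2, 0, 0])  # true labels under a different numbering

prediction = np.argmax(x_cncnet, 1)  # harden to cluster ids: [0 0 1 1 2 2]
print('NMI: {0}'.format(np.round(nmi(prediction, y), 3)))  # -> 1.0
```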
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Library for aligning stacks of images.\n\nGiven a fixed number of images, defines a free parameter of control\npoints for a bspline warp that aligns all images.\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.resampler as contrib_resampler\nfrom factorize_a_city.libs import bspline\n\n\ndef interpolate_2d(knots, positions, degree, cyclical):\n \"\"\"Interpolates the knot values at positions of a bspline surface warp.\n\n Sparse mode b-spline warp so the memory usage is efficient. This is a\n 2D version of tfg.math.interpolation.bspline.interpolate.\n\n Args:\n knots: A tensor with shape [bsz, KH, KW, KCh] representing the values to be\n interpolated over. In warping these are control_points.\n positions: A tensor with shape [bsz, H, W, 2] that defines the desired\n positions to interpolate. Positions must be between [0, KHW - D) for\n non-cyclical and [0, KHW) for cyclical splines, where KHW is the number of\n knots on the height or width dimension and D is the spline degree. The\n last dimension of positions record [y, x] coordinates.\n degree: An int describing the degree of the spline. There must be at least D\n + 1 horizontal and vertical knots.\n cyclical: A length-two tuple bool describing whether the spline is cyclical\n in the height and width dimension respectively.\n\n Returns:\n A tensor of shape '[bsz, H, W, KCh]' with the interpolated value based on\n the control points at various positions.\n\n Raises:\n ValueError: If degree is greater than 4 or num_knots - 1, or less than 0.\n InvalidArgumentError: If positions are not in the right range.\n \"\"\"\n batch_size, knots_height, knots_width, knots_ch = knots.shape.as_list()\n y_weights, y_ind = bspline.knot_weights(\n positions[Ellipsis, 0], knots_height, degree, cyclical[0], sparse_mode=True)\n\n x_weights, x_ind = bspline.knot_weights(\n positions[Ellipsis, 1], knots_width, degree, cyclical[1], sparse_mode=True)\n if cyclical[0]:\n stacked_y_inds = []\n for i in range(-degree // 2, degree // 2 + 1):\n stacked_y_inds.append(y_ind + i)\n stacked_y = tf.stack(stacked_y_inds, axis=-1)\n stacked_y = tf.floormod(stacked_y, knots_height)\n else:\n stacked_y = tf.stack([y_ind + i for i in range(degree + 1)], axis=-1)\n if cyclical[1]:\n stacked_x_inds = []\n for i in range(-degree // 2, degree // 2 + 1):\n stacked_x_inds.append(x_ind + i)\n stacked_x = tf.stack(stacked_x_inds, axis=-1)\n stacked_x = tf.floormod(stacked_x, knots_width)\n else:\n stacked_x = tf.stack([x_ind + i for i in range(degree + 1)], axis=-1)\n\n stacked_y = stacked_y[:, :, :, :, tf.newaxis]\n stacked_x = stacked_x[:, :, :, tf.newaxis, :]\n\n stacked_y += tf.zeros_like(stacked_x)\n stacked_x += tf.zeros_like(stacked_y)\n\n batch_ind = tf.range(\n 0, batch_size, 1, dtype=tf.int32)[:, tf.newaxis, tf.newaxis, tf.newaxis,\n tf.newaxis]\n batch_ind += tf.zeros_like(stacked_y)\n\n # tf.gather process dimensions left to 
right which means (batch, H, W)\n gather_idx = tf.stack([stacked_y, stacked_x], axis=-1)\n original_shape_no_channel = gather_idx.shape.as_list()[:-1]\n gather_nd_indices = tf.reshape(gather_idx, [batch_size, -1, 2])\n\n relevant_cp = tf.gather_nd(knots, gather_nd_indices, batch_dims=1)\n reshaped_cp = tf.reshape(relevant_cp, original_shape_no_channel + [knots_ch])\n\n mixed = y_weights[:, :, :, :, tf.newaxis] * x_weights[:, :, :, tf.newaxis, :]\n mixed = mixed[Ellipsis, tf.newaxis]\n return tf.reduce_sum(reshaped_cp * mixed, axis=[-2, -3])\n\n\ndef bspline_warp(cps, image, degree, regularization=0, pano_pad=False):\n \"\"\"Differentiable 2D alignment of a stack of nearby panoramas.\n\n Entry point for regularized b-spline surface warp with appropriate handling\n for boundary padding of panoramas. Includes the image resampling operation.\n\n Args:\n cps: Control points [bsz, H_CP, W_CP, d] defining the deformations.\n image: An image tensor [bsz, H, W, 3] from which we sample deformed\n coordinates.\n degree: Defines the degree of the b-spline interpolation.\n regularization: A float ranging from [0, 1] that smooths the extremes of the\n control points. The effect is that the network has some leeway in fitting\n the original control points exactly.\n pano_pad: When true pads the image and uses a cyclical horizontal warp.\n Useful for warping panorama images.\n\n Returns:\n A warped image based on deformations specified by control points at various\n positions. Has shape [bsz, H, W, d]\n\n Raises:\n ValueError: If degree is greater than 4 or num_knots - 1, or less than 0.\n InvalidArgumentError: If positions are not in the right range.\n \"\"\"\n\n if regularization < 0 or regularization > 1:\n raise ValueError(\"b-spline regularization must be between [0, 1]\")\n\n if regularization > 0.:\n # Regularizing constraint on the local structure of control points.\n # New control points is:\n # regularization * ave_neighbor + (1-regularization) * cp\n cps_down = tf.concat([cps[:, 1:], cps[:, -1:]], axis=1)\n cps_up = tf.concat([cps[:, :1], cps[:, :-1]], axis=1)\n if pano_pad:\n cps_left = tf.roll(cps, shift=1, axis=2)\n cps_right = tf.roll(cps, shift=-1, axis=2)\n else:\n cps_left = tf.concat([cps[:, :, :1], cps[:, :, :-1]], axis=2)\n cps_right = tf.concat([cps[:, :, 1:], cps[:, :, -1:]], axis=2)\n cps_reg = (cps_left + cps_right + cps_up + cps_down) / 4.\n cps = cps * (1 - regularization) + cps_reg * (regularization)\n tf.summary.image(\"cps_h\", cps[Ellipsis, :1])\n tf.summary.image(\"cps_w\", cps[Ellipsis, 1:])\n\n batch_size, small_h, small_w, unused_d = cps.shape.as_list()\n unused_batch_size, big_h, big_w, unused_d = image.shape.as_list()\n\n # Control points are \"normalized\" in the sense that they're agnostic to the\n # resolution of the image being warped.\n cps = cps * np.array([big_h, big_w])\n\n y_coord = tf.linspace(0., small_h - 3 - 1e-4, big_h - 4)\n y_coord = tf.concat(\n [tf.zeros([2]), y_coord,\n tf.ones([2]) * (small_h - 3 - 1e-4)], axis=0)\n y_coord = y_coord[:, tf.newaxis]\n if pano_pad:\n x_coord = tf.linspace(0., small_w + 1 - 1e-4, big_w)[tf.newaxis, :]\n else:\n x_coord = tf.linspace(0., small_w - 3 - 1e-4, big_w - 4)\n x_coord = tf.concat(\n [tf.zeros([\n 2,\n ]), x_coord,\n tf.ones([\n 2,\n ]) * (small_w - 3 - 1e-4)], axis=0)\n x_coord = x_coord[tf.newaxis, :]\n y_coord += tf.zeros_like(x_coord)\n x_coord += tf.zeros_like(y_coord)\n\n stacked_coords = tf.stack([y_coord, x_coord], axis=-1)[tf.newaxis]\n stacked_coords = tf.tile(stacked_coords, [batch_size, 1, 1, 1])\n 
estimated_offsets = interpolate_2d(cps, stacked_coords, degree,\n [False, pano_pad])\n tf.summary.image(\"y_flowfield\", estimated_offsets[Ellipsis, :1])\n tf.summary.image(\"x_flowfield\", estimated_offsets[Ellipsis, 1:])\n\n y_coord_sample = tf.range(0., big_h, 1)[:, tf.newaxis]\n x_coord_sample = tf.range(0., big_w, 1)[tf.newaxis, :]\n\n y_coord_sample += tf.zeros_like(x_coord_sample)\n x_coord_sample += tf.zeros_like(y_coord_sample)\n\n y_coord_sample += estimated_offsets[Ellipsis, 0]\n x_coord_sample += estimated_offsets[Ellipsis, 1]\n y_clipped = tf.clip_by_value(y_coord_sample, 0, big_h - 1)\n if pano_pad:\n x_clipped = tf.floormod(x_coord_sample, big_w)\n image = tf.concat([image, image[:, :, :1]], axis=2)\n else:\n x_clipped = tf.clip_by_value(x_coord_sample, 0, big_w - 1)\n\n stacked_resampler_coords = tf.stack([x_clipped, y_clipped], axis=-1)\n return contrib_resampler.resampler(image, stacked_resampler_coords)\n\n\nclass ImageAlignment(object):\n \"\"\"A class for aligning a set of images using bspline warps.\"\"\"\n\n def __init__(self,\n regularization=0.3,\n clip_margin=32,\n pano_pad=True,\n spline_degree=3):\n \"\"\"Initializes a layer that warp images based on alignment parameters.\n\n Args:\n regularization: (float): A regularization, ranging from [0, 1], for\n alignment control points\n clip_margin (int): For non-panoramic padding dimensions, controls how many\n edge pixels to crop. Useful for removing warping artifacts near the\n boundary due to missing pixels.\n pano_pad (bool): If true, performs a panoramic spline warp in the\n horizontal dimension.\n spline_degree (int): Degree of the spline\n \"\"\"\n self.regularization = regularization\n\n self.clip_margin = clip_margin\n self.pano_pad = pano_pad\n self.spline_degree = spline_degree\n\n def align_images(self, image, thetas):\n \"\"\"Performs a warp on the images with the sub_theta control points.\n\n Warped images are clipped along the edges to prevent missing pixels\n from being visible as a result of warping.\n\n Args:\n image: [num_ims, H, W, d] image tensor\n thetas: [num_ims, h_cp, w_cp, 2] control points to warp\n\n Returns:\n clipped: [num_ims, new_H, new_W, d] where new_H and new_W depend on\n pano_pad and clip_margin.\n \"\"\"\n aligned_results = bspline_warp(\n thetas,\n image,\n self.spline_degree,\n regularization=self.regularization,\n pano_pad=self.pano_pad)\n if self.pano_pad:\n clipped_results = aligned_results[:, self.clip_margin:-self.clip_margin]\n else:\n clipped_results = aligned_results[:, self.clip_margin:-self.clip_margin,\n self.clip_margin:-self.clip_margin, :]\n return clipped_results\n",
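Two details in `bspline_warp` are easy to miss. First, control points are stored in resolution-agnostic units and scaled by `[big_h, big_w]` before interpolation, so the same `cps` tensor can warp images of different sizes. Second, the optional regularization replaces each control point by a convex blend with the average of its four neighbors, wrapping horizontally when `pano_pad` is set. The following NumPy re-statement of just that smoothing step is a sketch with illustrative shapes, not part of the library:

```python
import numpy as np

def regularize_cps(cps, regularization, pano_pad=False):
  """NumPy sketch of the control-point smoothing inside bspline_warp."""
  # Neighbors above/below, clamped (edge rows repeated) at the boundaries.
  cps_down = np.concatenate([cps[:, 1:], cps[:, -1:]], axis=1)
  cps_up = np.concatenate([cps[:, :1], cps[:, :-1]], axis=1)
  if pano_pad:
    # Horizontal neighbors wrap around, matching the cyclical panorama warp.
    cps_left = np.roll(cps, shift=1, axis=2)
    cps_right = np.roll(cps, shift=-1, axis=2)
  else:
    cps_left = np.concatenate([cps[:, :, :1], cps[:, :, :-1]], axis=2)
    cps_right = np.concatenate([cps[:, :, 1:], cps[:, :, -1:]], axis=2)
  ave_neighbor = (cps_left + cps_right + cps_up + cps_down) / 4.
  # Convex blend: regularization * ave_neighbor + (1 - regularization) * cp.
  return cps * (1 - regularization) + ave_neighbor * regularization

cps = np.random.randn(1, 8, 16, 2).astype(np.float32)  # [bsz, H_CP, W_CP, 2]
smoothed = regularize_cps(cps, regularization=0.3, pano_pad=True)
print(smoothed.shape)  # (1, 8, 16, 2)
```

With `regularization=0.3`, the default in `ImageAlignment`, 30% of each control point comes from its neighborhood average, which damps isolated spikes in the deformation field without forbidding them outright.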
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transformer model from \"Attention Is All You Need\".\n\nThe Transformer model consists of an encoder and a decoder. Both are stacks\nof self-attention layers followed by feed-forward layers. This model yields\ngood results on a number of problems, especially in NLP and machine translation.\n\nSee \"Attention Is All You Need\" (https://arxiv.org/abs/1706.03762) for the full\ndescription of the model and the results obtained with its early version.\n\nBranched from Tensor2Tensor implementation:\ngithub.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom six.moves import range # pylint: disable=redefined-builtin\n\nfrom tensor2tensor.layers import common_attention\nfrom tensor2tensor.layers import common_hparams\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.layers import transformer_layers\nfrom tensor2tensor.utils import beam_search\nfrom tensor2tensor.utils import mlperf_log\nfrom tensor2tensor.utils import registry\n\nimport tensorflow.compat.v1 as tf\n\nfrom state_of_sparsity.sparse_transformer.layers import common_sparse\nfrom state_of_sparsity.sparse_transformer.layers import sparse_attention\nfrom state_of_sparsity.sparse_transformer.layers import sparse_modalities\nfrom state_of_sparsity.sparse_transformer.layers import sparse_transformer_layers\nfrom state_of_sparsity.sparse_transformer.models import sparse_model\n\nfrom tensorflow.python.ops import inplace_ops # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import\n\n\n# Alias some commonly reused layers, here and elsewhere.\ntransformer_prepare_encoder = transformer_layers.transformer_prepare_encoder\ntransformer_encoder = sparse_transformer_layers.transformer_encoder\ntransformer_ffn_layer = sparse_transformer_layers.transformer_ffn_layer\n\n\[email protected]_model\nclass SparseTransformer(sparse_model.SparseModel):\n \"\"\"Attention net. 
See file docstring.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SparseTransformer, self).__init__(*args, **kwargs)\n self.attention_weights = dict() # For visualizing attention heads.\n\n def encode(self, inputs, target_space, hparams, features=None, losses=None):\n \"\"\"Encode transformer inputs.\n\n Args:\n inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which\n will be flattened along the two spatial dimensions.\n target_space: scalar, target space ID.\n hparams: hyperparameters for model.\n features: optionally pass the entire features dictionary as well.\n This is needed now for \"packed\" datasets.\n losses: optional list onto which to append extra training losses\n\n Returns:\n Tuple of:\n encoder_output: Encoder representation.\n [batch_size, input_length, hidden_dim]\n encoder_decoder_attention_bias: Bias and mask weights for\n encoder-decoder attention. [batch_size, input_length]\n \"\"\"\n inputs = common_layers.flatten4d3d(inputs)\n\n encoder_input, self_attention_bias, encoder_decoder_attention_bias = (\n transformer_prepare_encoder(\n inputs, target_space, hparams, features=features))\n\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,\n value=hparams.layer_prepostprocess_dropout)\n\n encoder_input = tf.nn.dropout(encoder_input,\n 1.0 - hparams.layer_prepostprocess_dropout)\n\n encoder_output = transformer_encoder(\n encoder_input,\n self_attention_bias,\n hparams,\n nonpadding=features_to_nonpadding(features, \"inputs\"),\n save_weights_to=self.attention_weights,\n make_image_summary=not common_layers.is_xla_compiled())\n\n return encoder_output, encoder_decoder_attention_bias\n\n def decode(self,\n decoder_input,\n encoder_output,\n encoder_decoder_attention_bias,\n decoder_self_attention_bias,\n hparams,\n cache=None,\n decode_loop_step=None,\n losses=None):\n \"\"\"Decode Transformer outputs from encoder representation.\n\n Args:\n decoder_input: inputs to bottom of the model.\n [batch_size, decoder_length, hidden_dim]\n encoder_output: Encoder representation.\n [batch_size, input_length, hidden_dim]\n encoder_decoder_attention_bias: Bias and mask weights for\n encoder-decoder attention. [batch_size, input_length]\n decoder_self_attention_bias: Bias and mask weights for decoder\n self-attention. [batch_size, decoder_length]\n hparams: hyperparameters for model.\n cache: dict, containing tensors which are the results of previous\n attentions, used for fast decoding.\n decode_loop_step: An integer, step number of the decoding loop.\n Only used for inference on TPU.\n losses: optional list onto which to append extra training losses\n\n Returns:\n Final decoder representation. 
[batch_size, decoder_length, hidden_dim]\n \"\"\"\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,\n value=hparams.layer_prepostprocess_dropout)\n decoder_input = tf.nn.dropout(decoder_input,\n 1.0 - hparams.layer_prepostprocess_dropout)\n\n decoder_output = transformer_decoder(\n decoder_input,\n encoder_output,\n decoder_self_attention_bias,\n encoder_decoder_attention_bias,\n hparams,\n cache=cache,\n decode_loop_step=decode_loop_step,\n save_weights_to=self.attention_weights,\n losses=losses)\n\n if (common_layers.is_xla_compiled() and\n hparams.mode == tf.estimator.ModeKeys.TRAIN):\n return decoder_output\n else:\n # Expand since t2t expects 4d tensors.\n return tf.expand_dims(decoder_output, axis=2)\n\n def body(self, features):\n \"\"\"Transformer main model_fn.\n\n Args:\n features: Map of features to the model. Should contain the following:\n \"inputs\": Transformer inputs.\n [batch_size, input_length, 1, hidden_dim].\n \"targets\": Target decoder outputs.\n [batch_size, decoder_length, 1, hidden_dim]\n \"target_space_id\": A scalar int from data_generators.problem.SpaceID.\n\n Returns:\n Final decoder representation. [batch_size, decoder_length, hidden_dim]\n \"\"\"\n hparams = self._hparams\n\n losses = []\n\n if self.has_input:\n inputs = features[\"inputs\"]\n target_space = features[\"target_space_id\"]\n encoder_output, encoder_decoder_attention_bias = self.encode(\n inputs, target_space, hparams, features=features, losses=losses)\n else:\n encoder_output, encoder_decoder_attention_bias = (None, None)\n\n targets = features[\"targets\"]\n targets_shape = common_layers.shape_list(targets)\n targets = common_layers.flatten4d3d(targets)\n decoder_input, decoder_self_attention_bias = transformer_prepare_decoder(\n targets, hparams, features=features)\n decoder_output = self.decode(\n decoder_input,\n encoder_output,\n encoder_decoder_attention_bias,\n decoder_self_attention_bias,\n hparams,\n losses=losses)\n\n sparsity_technique = hparams.get(\"sparsity_technique\")\n expected_attentions = features.get(\"expected_attentions\")\n if expected_attentions is not None:\n assert not sparsity_technique\n\n attention_loss = common_attention.encoder_decoder_attention_loss(\n expected_attentions, self.attention_weights,\n hparams.expected_attention_loss_type,\n hparams.expected_attention_loss_multiplier)\n return decoder_output, {\"attention_loss\": attention_loss}\n\n # Add the extra loss term needed for each sparsity technique\n if sparsity_technique == \"variational_dropout\":\n losses += common_sparse.variational_dropout_dkl_loss(\n sparsity_check=True,\n threshold=hparams.get(\"log_alpha_threshold\"),\n dkl_weight=hparams.get(\"dkl_weight\"),\n begin_step=hparams.get(\"dkl_weight_start\"),\n end_step=(hparams.get(\"dkl_weight_start\") +\n hparams.get(\"dkl_weight_diff\")),\n weight_function=hparams.get(\"dkl_weight_fn\"),\n clip_alpha=hparams.get(\"clip_log_alpha\"))\n elif sparsity_technique == \"l0_regularization\":\n losses += common_sparse.l0_regularization_term(\n sparsity_check=True,\n regularization_weight=hparams.get(\"l0_norm_weight\"),\n weight_start=hparams.get(\"l0_weight_start\"),\n weight_end=(hparams.get(\"l0_weight_start\") +\n hparams.get(\"l0_weight_diff\")),\n weight_function=hparams.get(\"dkl_weight_fn\"))\n\n ret = tf.reshape(decoder_output, targets_shape)\n if losses:\n return ret, {\"extra_loss\": tf.add_n(losses)}\n else:\n return ret\n\n def _greedy_infer(self, features, decode_length, use_tpu=False):\n \"\"\"Fast version 
of greedy decoding.\n\n    Args:\n      features: a map of string to `Tensor`\n      decode_length: an integer. How many additional timesteps to decode.\n      use_tpu: A bool. Whether to build the inference graph for TPU.\n\n    Returns:\n      A dict of decoding results {\n          \"outputs\": integer `Tensor` of decoded ids of shape\n              [batch_size, <= decode_length] if beam_size == 1 or\n              [batch_size, top_beams, <= decode_length]\n          \"scores\": decoding log probs from the beam search,\n              None if using greedy decoding (beam_size=1)\n      }\n\n    Raises:\n      NotImplementedError: If there are multiple data shards.\n    \"\"\"\n    # For real-valued modalities use the slow decode path for now.\n    if (self._target_modality_is_real or\n        self._hparams.self_attention_type != \"dot_product\"):\n      return super(SparseTransformer, self)._greedy_infer(\n          features, decode_length)\n    with tf.variable_scope(self.name):\n      return (self._fast_decode_tpu(features, decode_length) if use_tpu else\n              self._fast_decode(features, decode_length))\n\n  def _beam_decode(self,\n                   features,\n                   decode_length,\n                   beam_size,\n                   top_beams,\n                   alpha,\n                   use_tpu=False):\n    \"\"\"Beam search decoding.\n\n    Args:\n      features: a map of string to `Tensor`\n      decode_length: an integer. How many additional timesteps to decode.\n      beam_size: number of beams.\n      top_beams: an integer. How many of the beams to return.\n      alpha: Float that controls the length penalty. larger the alpha, stronger\n        the preference for longer translations.\n      use_tpu: A bool, whether to do beam decode on TPU.\n\n    Returns:\n      A dict of decoding results {\n          \"outputs\": integer `Tensor` of decoded ids of shape\n              [batch_size, <= decode_length] if beam_size == 1 or\n              [batch_size, top_beams, <= decode_length]\n          \"scores\": decoding log probs from the beam search,\n              None if using greedy decoding (beam_size=1)\n      }\n    \"\"\"\n    if self._hparams.self_attention_type != \"dot_product\":\n      # Caching is not guaranteed to work with attention types other than\n      # dot_product.\n      return self._beam_decode_slow(features, decode_length, beam_size,\n                                    top_beams, alpha, use_tpu)\n    with tf.variable_scope(self.name):\n      if use_tpu:\n        return self._fast_decode_tpu(\n            features, decode_length, beam_size, top_beams, alpha)\n      else:\n        return self._fast_decode(\n            features, decode_length, beam_size, top_beams, alpha)\n\n  def _fast_decode_tpu(self,\n                       features,\n                       decode_length,\n                       beam_size=1,\n                       top_beams=1,\n                       alpha=1.0):\n    \"\"\"Fast decoding.\n\n    Implements both greedy and beam search decoding on TPU, uses beam search\n    iff beam_size > 1, otherwise beam search related arguments are ignored.\n\n    Args:\n      features: A map of string to model features.\n      decode_length: An integer, how many additional timesteps to decode.\n      beam_size: An integer, number of beams.\n      top_beams: An integer, how many of the beams to return.\n      alpha: A float that controls the length penalty. 
Larger the alpha,\n stronger the preference for longer translations.\n\n Returns:\n A dict of decoding results {\n \"outputs\": integer `Tensor` of decoded ids of shape\n [batch_size, <= decode_length] if beam_size == 1 or\n [batch_size, top_beams, <= decode_length]\n \"scores\": decoding log probs from the beam search,\n None if using greedy decoding (beam_size=1)\n }.\n\n Raises:\n NotImplementedError: If there are multiple data shards.\n \"\"\"\n if self._num_datashards != 1:\n raise NotImplementedError(\"Fast decoding only supports a single shard.\")\n if \"targets_segmentation\" in features:\n raise NotImplementedError(\n \"Decoding not supported on packed datasets \"\n \" If you want to decode from a dataset, use the non-packed version\"\n \" of the dataset when decoding.\")\n dp = self._data_parallelism\n hparams = self._hparams\n target_modality = self._problem_hparams.modality[\"targets\"]\n target_vocab_size = self._problem_hparams.vocab_size[\"targets\"]\n if target_vocab_size is not None and hasattr(hparams, \"vocab_divisor\"):\n target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor\n\n if self.has_input:\n inputs = features[\"inputs\"]\n if target_modality == modalities.ModalityType.CLASS_LABEL:\n decode_length = 1\n else:\n decode_length = (\n common_layers.shape_list(inputs)[1] + features.get(\n \"decode_length\", decode_length))\n\n inputs = tf.expand_dims(inputs, axis=1)\n if len(inputs.shape) < 5:\n inputs = tf.expand_dims(inputs, axis=4)\n s = common_layers.shape_list(inputs)\n batch_size = s[0]\n inputs = tf.reshape(inputs, [s[0] * s[1], s[2], s[3], s[4]])\n # _shard_features called to ensure that the variable names match\n inputs = self._shard_features({\"inputs\": inputs})[\"inputs\"]\n input_modality = self._problem_hparams.modality[\"inputs\"]\n input_vocab_size = self._problem_hparams.vocab_size[\"inputs\"]\n if input_vocab_size is not None and hasattr(hparams, \"vocab_divisor\"):\n input_vocab_size += (-input_vocab_size) % hparams.vocab_divisor\n modality_name = hparams.name.get(\n \"inputs\",\n modalities.get_name(input_modality))(hparams, input_vocab_size)\n with tf.variable_scope(modality_name):\n bottom = hparams.bottom.get(\n \"inputs\", modalities.get_bottom(input_modality))\n inputs = dp(bottom, inputs, hparams, input_vocab_size)\n with tf.variable_scope(\"body\"):\n encoder_output, encoder_decoder_attention_bias = dp(\n self.encode,\n inputs,\n features[\"target_space_id\"],\n hparams,\n features=features)\n encoder_output = encoder_output[0]\n encoder_decoder_attention_bias = encoder_decoder_attention_bias[0]\n partial_targets = None\n else:\n # The problem has no inputs.\n encoder_output = None\n encoder_decoder_attention_bias = None\n\n # Prepare partial targets.\n # In either features[\"inputs\"] or features[\"targets\"].\n # We force the outputs to begin with these sequences.\n partial_targets = features.get(\"inputs\")\n if partial_targets is None:\n partial_targets = features[\"targets\"]\n assert partial_targets is not None\n partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2)\n partial_targets = tf.to_int64(partial_targets)\n partial_targets_shape = common_layers.shape_list(partial_targets)\n partial_targets_length = partial_targets_shape[1]\n decode_length = (\n partial_targets_length + features.get(\"decode_length\", decode_length))\n batch_size = partial_targets_shape[0]\n\n if hparams.pos == \"timing\":\n positional_encoding = common_attention.get_timing_signal_1d(\n decode_length + 1, hparams.hidden_size)\n 
elif hparams.pos == \"emb\":\n positional_encoding = common_attention.add_positional_embedding(\n tf.zeros([1, decode_length + 1, hparams.hidden_size]),\n hparams.max_length, \"body/targets_positional_embedding\", None)\n else:\n positional_encoding = None\n\n def preprocess_targets(targets, i):\n \"\"\"Performs preprocessing steps on the targets to prepare for the decoder.\n\n This includes:\n - Embedding the ids.\n - Flattening to 3D tensor.\n - Optionally adding timing signals.\n\n Args:\n targets: A tensor, inputs ids to the decoder. [batch_size, 1].\n i: An integer, Step number of the decoding loop.\n\n Returns:\n A tensor, processed targets [batch_size, 1, hidden_dim].\n \"\"\"\n # _shard_features called to ensure that the variable names match\n targets = self._shard_features({\"targets\": targets})[\"targets\"]\n modality_name = hparams.name.get(\n \"targets\",\n modalities.get_name(target_modality))(hparams, target_vocab_size)\n with tf.variable_scope(modality_name):\n bottom = hparams.bottom.get(\n \"targets\", modalities.get_targets_bottom(target_modality))\n targets = dp(bottom, targets, hparams, target_vocab_size)[0]\n targets = common_layers.flatten4d3d(targets)\n\n targets = tf.cond(\n tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets)\n\n if positional_encoding is not None:\n positional_encoding_shape = positional_encoding.shape.as_list()\n targets += tf.slice(\n positional_encoding, [0, i, 0],\n [positional_encoding_shape[0], 1, positional_encoding_shape[2]])\n return targets\n\n decoder_self_attention_bias = (\n common_attention.attention_bias_lower_triangle(decode_length))\n if hparams.proximity_bias:\n decoder_self_attention_bias += common_attention.attention_bias_proximal(\n decode_length)\n\n def symbols_to_logits_tpu_fn(ids, i, cache):\n \"\"\"Go from ids to logits for next symbol on TPU.\n\n Args:\n ids: A tensor, symbol IDs.\n i: An integer, step number of the decoding loop. 
Only used for inference\n on TPU.\n cache: A dict, containing tensors which are the results of previous\n attentions, used for fast decoding.\n\n Returns:\n ret: A tensor, computed logits.\n cache: A dict, containing tensors which are the results of previous\n attentions, used for fast decoding.\n \"\"\"\n ids = ids[:, -1:]\n targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)\n targets = preprocess_targets(targets, i)\n\n bias_shape = decoder_self_attention_bias.shape.as_list()\n bias = tf.slice(decoder_self_attention_bias, [0, 0, i, 0],\n [bias_shape[0], bias_shape[1], 1, bias_shape[3]])\n\n with tf.variable_scope(\"body\"):\n body_outputs = dp(\n self.decode,\n targets,\n cache.get(\"encoder_output\"),\n cache.get(\"encoder_decoder_attention_bias\"),\n bias,\n hparams,\n cache,\n i)\n\n modality_name = hparams.name.get(\n \"targets\",\n modalities.get_name(target_modality))(hparams, target_vocab_size)\n with tf.variable_scope(modality_name):\n top = hparams.top.get(\"targets\", modalities.get_top(target_modality))\n logits = dp(top, body_outputs, None, hparams, target_vocab_size)[0]\n\n ret = tf.squeeze(logits, axis=[1, 2, 3])\n if partial_targets is not None:\n # If the position is within the given partial targets, we alter the\n # logits to always return those values.\n # A faster approach would be to process the partial targets in one\n # iteration in order to fill the corresponding parts of the cache.\n # This would require broader changes, though.\n vocab_size = tf.shape(ret)[1]\n\n def forced_logits():\n return tf.one_hot(\n tf.tile(\n tf.slice(partial_targets, [0, i],\n [partial_targets.shape.as_list()[0], 1]),\n [beam_size]), vocab_size, 0.0, -1e9)\n\n ret = tf.cond(\n tf.less(i, partial_targets_length), forced_logits, lambda: ret)\n return ret, cache\n\n ret = fast_decode_tpu(\n encoder_output=encoder_output,\n encoder_decoder_attention_bias=encoder_decoder_attention_bias,\n symbols_to_logits_fn=symbols_to_logits_tpu_fn,\n hparams=hparams,\n decode_length=decode_length,\n vocab_size=target_vocab_size,\n beam_size=beam_size,\n top_beams=top_beams,\n alpha=alpha,\n batch_size=batch_size,\n force_decode_length=self._decode_hparams.force_decode_length)\n if partial_targets is not None:\n if beam_size <= 1 or top_beams <= 1:\n ret[\"outputs\"] = ret[\"outputs\"][:, partial_targets_length:]\n else:\n ret[\"outputs\"] = ret[\"outputs\"][:, :, partial_targets_length:]\n return ret\n\n def _fast_decode(self,\n features,\n decode_length,\n beam_size=1,\n top_beams=1,\n alpha=1.0):\n \"\"\"Fast decoding.\n\n Implements both greedy and beam search decoding, uses beam search iff\n beam_size > 1, otherwise beam search related arguments are ignored.\n\n Args:\n features: a map of string to model features.\n decode_length: an integer. How many additional timesteps to decode.\n beam_size: number of beams.\n top_beams: an integer. How many of the beams to return.\n alpha: Float that controls the length penalty. 
larger the alpha, stronger\n the preference for longer translations.\n\n Returns:\n A dict of decoding results {\n \"outputs\": integer `Tensor` of decoded ids of shape\n [batch_size, <= decode_length] if beam_size == 1 or\n [batch_size, top_beams, <= decode_length]\n \"scores\": decoding log probs from the beam search,\n None if using greedy decoding (beam_size=1)\n }\n\n Raises:\n NotImplementedError: If there are multiple data shards.\n \"\"\"\n if self._num_datashards != 1:\n raise NotImplementedError(\"Fast decoding only supports a single shard.\")\n dp = self._data_parallelism\n hparams = self._hparams\n target_modality = self._problem_hparams.modality[\"targets\"]\n target_vocab_size = self._problem_hparams.vocab_size[\"targets\"]\n if target_vocab_size is not None and hasattr(hparams, \"vocab_divisor\"):\n target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor\n if \"targets_segmentation\" in features:\n raise NotImplementedError(\n \"Decoding not supported on packed datasets \"\n \" If you want to decode from a dataset, use the non-packed version\"\n \" of the dataset when decoding.\")\n if self.has_input:\n inputs = features[\"inputs\"]\n if target_modality == modalities.ModalityType.CLASS_LABEL:\n decode_length = 1\n else:\n decode_length = (\n common_layers.shape_list(inputs)[1] + features.get(\n \"decode_length\", decode_length))\n\n inputs = tf.expand_dims(inputs, axis=1)\n if len(inputs.shape) < 5:\n inputs = tf.expand_dims(inputs, axis=4)\n s = common_layers.shape_list(inputs)\n batch_size = s[0]\n inputs = tf.reshape(inputs, [s[0] * s[1], s[2], s[3], s[4]])\n # _shard_features called to ensure that the variable names match\n inputs = self._shard_features({\"inputs\": inputs})[\"inputs\"]\n input_modality = self._problem_hparams.modality[\"inputs\"]\n input_vocab_size = self._problem_hparams.vocab_size[\"inputs\"]\n if input_vocab_size is not None and hasattr(hparams, \"vocab_divisor\"):\n input_vocab_size += (-input_vocab_size) % hparams.vocab_divisor\n modality_name = hparams.name.get(\n \"inputs\",\n modalities.get_name(input_modality))(hparams, input_vocab_size)\n with tf.variable_scope(modality_name):\n bottom = hparams.bottom.get(\n \"inputs\", modalities.get_bottom(input_modality))\n inputs = dp(bottom, inputs, hparams, input_vocab_size)\n with tf.variable_scope(\"body\"):\n encoder_output, encoder_decoder_attention_bias = dp(\n self.encode,\n inputs,\n features[\"target_space_id\"],\n hparams,\n features=features)\n encoder_output = encoder_output[0]\n encoder_decoder_attention_bias = encoder_decoder_attention_bias[0]\n partial_targets = None\n else:\n # The problem has no inputs.\n encoder_output = None\n encoder_decoder_attention_bias = None\n\n # Prepare partial targets.\n # In either features[\"inputs\"] or features[\"targets\"].\n # We force the outputs to begin with these sequences.\n partial_targets = features.get(\"inputs\")\n if partial_targets is None:\n partial_targets = features[\"targets\"]\n assert partial_targets is not None\n partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2)\n partial_targets = tf.to_int64(partial_targets)\n partial_targets_shape = common_layers.shape_list(partial_targets)\n partial_targets_length = partial_targets_shape[1]\n decode_length = (\n partial_targets_length + features.get(\"decode_length\", decode_length))\n batch_size = partial_targets_shape[0]\n\n if hparams.pos == \"timing\":\n positional_encoding = common_attention.get_timing_signal_1d(\n decode_length + 1, hparams.hidden_size)\n elif 
hparams.pos == \"emb\":\n positional_encoding = common_attention.add_positional_embedding(\n tf.zeros([1, decode_length, hparams.hidden_size]),\n hparams.max_length, \"body/targets_positional_embedding\", None)\n else:\n positional_encoding = None\n\n def preprocess_targets(targets, i):\n \"\"\"Performs preprocessing steps on the targets to prepare for the decoder.\n\n This includes:\n - Embedding the ids.\n - Flattening to 3D tensor.\n - Optionally adding timing signals.\n\n Args:\n targets: inputs ids to the decoder. [batch_size, 1]\n i: scalar, Step number of the decoding loop.\n\n Returns:\n Processed targets [batch_size, 1, hidden_dim]\n \"\"\"\n # _shard_features called to ensure that the variable names match\n targets = self._shard_features({\"targets\": targets})[\"targets\"]\n modality_name = hparams.name.get(\n \"targets\",\n modalities.get_name(target_modality))(hparams, target_vocab_size)\n with tf.variable_scope(modality_name):\n bottom = hparams.bottom.get(\n \"targets\", modalities.get_targets_bottom(target_modality))\n targets = dp(bottom, targets, hparams, target_vocab_size)[0]\n targets = common_layers.flatten4d3d(targets)\n\n targets = tf.cond(\n tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets)\n\n if positional_encoding is not None:\n targets += positional_encoding[:, i:i + 1]\n return targets\n\n decoder_self_attention_bias = (\n common_attention.attention_bias_lower_triangle(decode_length))\n if hparams.proximity_bias:\n decoder_self_attention_bias += common_attention.attention_bias_proximal(\n decode_length)\n\n def symbols_to_logits_fn(ids, i, cache):\n \"\"\"Go from ids to logits for next symbol.\"\"\"\n ids = ids[:, -1:]\n targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)\n targets = preprocess_targets(targets, i)\n\n bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1]\n\n with tf.variable_scope(\"body\"):\n body_outputs = dp(\n self.decode,\n targets,\n cache.get(\"encoder_output\"),\n cache.get(\"encoder_decoder_attention_bias\"),\n bias,\n hparams,\n cache)\n\n modality_name = hparams.name.get(\n \"targets\",\n modalities.get_name(target_modality))(hparams, target_vocab_size)\n with tf.variable_scope(modality_name):\n top = hparams.top.get(\"targets\", modalities.get_top(target_modality))\n logits = dp(top, body_outputs, None, hparams, target_vocab_size)[0]\n\n ret = tf.squeeze(logits, axis=[1, 2, 3])\n if partial_targets is not None:\n # If the position is within the given partial targets, we alter the\n # logits to always return those values.\n # A faster approach would be to process the partial targets in one\n # iteration in order to fill the corresponding parts of the cache.\n # This would require broader changes, though.\n vocab_size = tf.shape(ret)[1]\n\n def forced_logits():\n return tf.one_hot(\n tf.tile(partial_targets[:, i], [beam_size]), vocab_size, 0.0,\n -1e9)\n\n ret = tf.cond(\n tf.less(i, partial_targets_length), forced_logits, lambda: ret)\n return ret, cache\n\n ret = fast_decode(\n encoder_output=encoder_output,\n encoder_decoder_attention_bias=encoder_decoder_attention_bias,\n symbols_to_logits_fn=symbols_to_logits_fn,\n hparams=hparams,\n decode_length=decode_length,\n vocab_size=target_vocab_size,\n beam_size=beam_size,\n top_beams=top_beams,\n alpha=alpha,\n batch_size=batch_size,\n force_decode_length=self._decode_hparams.force_decode_length)\n if partial_targets is not None:\n if beam_size <= 1 or top_beams <= 1:\n ret[\"outputs\"] = ret[\"outputs\"][:, partial_targets_length:]\n else:\n 
ret[\"outputs\"] = ret[\"outputs\"][:, :, partial_targets_length:]\n return ret\n\n\ndef fast_decode_tpu(encoder_output,\n encoder_decoder_attention_bias,\n symbols_to_logits_fn,\n hparams,\n decode_length,\n vocab_size,\n beam_size=1,\n top_beams=1,\n alpha=1.0,\n sos_id=0,\n eos_id=beam_search.EOS_ID,\n batch_size=None,\n force_decode_length=False,\n scope_prefix=\"body/\"):\n \"\"\"Given encoder output and a symbols to logits function, does fast decoding.\n\n Implements both greedy and beam search decoding for TPU, uses beam search iff\n beam_size > 1, otherwise beam search related arguments are ignored.\n\n Args:\n encoder_output: A tensor, output from encoder.\n encoder_decoder_attention_bias: A tensor, bias for use in encoder-decoder\n attention.\n symbols_to_logits_fn: Incremental decoding, function mapping triple\n `(ids, step, cache)` to symbol logits.\n hparams: Run hyperparameters.\n decode_length: An integer, how many additional timesteps to decode.\n vocab_size: Output vocabulary size.\n beam_size: An integer, number of beams.\n top_beams: An integer, how many of the beams to return.\n alpha: A float that controls the length penalty. Larger the alpha, stronger\n the preference for longer translations.\n sos_id: Start-of-sequence symbol.\n eos_id: End-of-sequence symbol.\n batch_size: An integer, must be passed if there is no input.\n force_decode_length: A bool, whether to force the full decode length, or if\n False, stop when all beams hit eos_id.\n scope_prefix: str, prefix for decoder layer variable scopes.\n\n Returns:\n A dict of decoding results {\n \"outputs\": integer `Tensor` of decoded ids of shape\n [batch_size, <= decode_length] if top_beams == 1 or\n [batch_size, top_beams, <= decode_length] otherwise\n \"scores\": decoding log probs from the beam search,\n None if using greedy decoding (beam_size=1)\n }.\n\n Raises:\n NotImplementedError: If beam size > 1 with partial targets.\n \"\"\"\n if encoder_output is not None:\n batch_size = common_layers.shape_list(encoder_output)[0]\n\n key_channels = hparams.attention_key_channels or hparams.hidden_size\n value_channels = hparams.attention_value_channels or hparams.hidden_size\n num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers\n vars_3d_num_heads = (\n hparams.num_heads if hparams.get(\"attention_variables_3d\") else 0)\n\n cache = {\n \"layer_%d\" % layer: { # pylint: disable=g-complex-comprehension\n \"k\":\n common_attention.split_heads(\n tf.zeros([batch_size, decode_length, key_channels]),\n hparams.num_heads),\n \"v\":\n common_attention.split_heads(\n tf.zeros([batch_size, decode_length, value_channels]),\n hparams.num_heads),\n \"f\":\n tf.zeros([batch_size, decode_length, hparams.hidden_size]),\n } for layer in range(num_layers)\n }\n\n if encoder_output is not None:\n for layer in range(num_layers):\n layer_name = \"layer_%d\" % layer\n with tf.variable_scope(\n \"%sdecoder/%s/encdec_attention/multihead_attention\" % (scope_prefix,\n layer_name)):\n initial_sparsity = None\n if hparams.get(\"load_masks_from\"):\n initial_sparsity = hparams.get(\"initial_sparsity\")\n\n k_encdec = sparse_attention.compute_attention_component(\n encoder_output, key_channels, name=\"k\",\n vars_3d_num_heads=vars_3d_num_heads,\n sparsity_technique=hparams.get(\"sparsity_technique\"),\n threshold=hparams.get(\"log_alpha_threshold\"),\n training=hparams.get(\"mode\") == tf.estimator.ModeKeys.TRAIN,\n clip_alpha=hparams.get(\"clip_log_alpha\"),\n initial_sparsity=initial_sparsity,\n 
split_heads=hparams.get(\"split_heads\"),\n num_heads=hparams.num_heads)\n k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)\n v_encdec = sparse_attention.compute_attention_component(\n encoder_output, value_channels, name=\"v\",\n vars_3d_num_heads=vars_3d_num_heads,\n sparsity_technique=hparams.get(\"sparsity_technique\"),\n threshold=hparams.get(\"log_alpha_threshold\"),\n training=hparams.get(\"mode\") == tf.estimator.ModeKeys.TRAIN,\n clip_alpha=hparams.get(\"clip_log_alpha\"),\n initial_sparsity=initial_sparsity,\n split_heads=hparams.get(\"split_heads\"),\n num_heads=hparams.num_heads)\n v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)\n cache[layer_name][\"k_encdec\"] = k_encdec\n cache[layer_name][\"v_encdec\"] = v_encdec\n\n cache[\"encoder_output\"] = encoder_output\n cache[\"encoder_decoder_attention_bias\"] = encoder_decoder_attention_bias\n\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH,\n value={\n \"vocab_size\": vocab_size,\n \"batch_size\": batch_size,\n \"beam_size\": beam_size,\n \"alpha\": alpha,\n \"max_decode_length\": decode_length\n })\n if beam_size > 1: # Beam Search\n initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)\n decoded_ids, scores, _ = beam_search.beam_search(\n symbols_to_logits_fn,\n initial_ids,\n beam_size,\n decode_length,\n vocab_size,\n alpha,\n states=cache,\n eos_id=eos_id,\n stop_early=(top_beams == 1),\n use_tpu=True)\n\n if top_beams == 1:\n decoded_ids = decoded_ids[:, 0, 1:]\n scores = scores[:, 0]\n else:\n decoded_ids = decoded_ids[:, :top_beams, 1:]\n scores = scores[:, :top_beams]\n else: # Greedy\n def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob):\n \"\"\"One step of greedy decoding.\"\"\"\n logits, cache = symbols_to_logits_fn(next_id, i, cache)\n log_probs = common_layers.log_prob_from_logits(logits)\n temperature = (0.0 if hparams.sampling_method == \"argmax\" else\n hparams.sampling_temp)\n next_id = common_layers.sample_with_temperature(logits, temperature)\n hit_eos |= tf.equal(next_id, eos_id)\n\n log_prob_indices = tf.stack(\n [tf.range(tf.to_int64(batch_size)), next_id], axis=1)\n log_prob += tf.gather_nd(log_probs, log_prob_indices)\n\n next_id = tf.expand_dims(next_id, axis=1)\n decoded_ids = tf.transpose(decoded_ids)\n decoded_ids = inplace_ops.alias_inplace_update(\n decoded_ids, i, tf.squeeze(next_id, axis=1))\n decoded_ids = tf.transpose(decoded_ids)\n return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob\n\n def is_not_finished(i, hit_eos, *_):\n finished = i >= decode_length\n if not force_decode_length:\n finished |= tf.reduce_all(hit_eos)\n return tf.logical_not(finished)\n\n decoded_ids = tf.zeros([batch_size, decode_length], dtype=tf.int64)\n hit_eos = tf.fill([batch_size], False)\n next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64)\n initial_log_prob = tf.zeros([batch_size], dtype=tf.float32)\n\n def compute_cache_shape_invariants(tensor):\n return tf.TensorShape(tensor.shape.as_list())\n\n _, _, _, decoded_ids, _, log_prob = tf.while_loop(\n is_not_finished,\n inner_loop, [\n tf.constant(0), hit_eos, next_id, decoded_ids, cache,\n initial_log_prob\n ],\n shape_invariants=[\n tf.TensorShape([]),\n tf.TensorShape([batch_size]),\n tf.TensorShape([batch_size, 1]),\n tf.TensorShape([batch_size, decode_length]),\n nest.map_structure(compute_cache_shape_invariants, cache),\n tf.TensorShape([batch_size]),\n ])\n scores = log_prob\n\n return {\"outputs\": decoded_ids, \"scores\": scores}\n\n\ndef 
fast_decode(encoder_output,\n                encoder_decoder_attention_bias,\n                symbols_to_logits_fn,\n                hparams,\n                decode_length,\n                vocab_size,\n                beam_size=1,\n                top_beams=1,\n                alpha=1.0,\n                sos_id=0,\n                eos_id=beam_search.EOS_ID,\n                batch_size=None,\n                force_decode_length=False,\n                scope_prefix=\"body/\"):\n  \"\"\"Given encoder output and a symbols to logits function, does fast decoding.\n\n  Implements both greedy and beam search decoding, uses beam search iff\n  beam_size > 1, otherwise beam search related arguments are ignored.\n\n  Args:\n    encoder_output: Output from encoder.\n    encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder\n      attention\n    symbols_to_logits_fn: Incremental decoding; function mapping triple\n      `(ids, step, cache)` to symbol logits.\n    hparams: run hyperparameters\n    decode_length: an integer. How many additional timesteps to decode.\n    vocab_size: Output vocabulary size.\n    beam_size: number of beams.\n    top_beams: an integer. How many of the beams to return.\n    alpha: Float that controls the length penalty. larger the alpha, stronger\n      the preference for longer translations.\n    sos_id: Start-of-sequence symbol in beam search.\n    eos_id: End-of-sequence symbol in beam search.\n    batch_size: an integer scalar - must be passed if there is no input\n    force_decode_length: bool, whether to force the full decode length, or if\n      False, stop when all beams hit eos_id.\n    scope_prefix: str, prefix for decoder layer variable scopes.\n\n  Returns:\n    A dict of decoding results {\n        \"outputs\": integer `Tensor` of decoded ids of shape\n            [batch_size, <= decode_length] if top_beams == 1 or\n            [batch_size, top_beams, <= decode_length] otherwise\n        \"scores\": decoding log probs from the beam search,\n            None if using greedy decoding (beam_size=1)\n    }\n\n  Raises:\n    NotImplementedError: If beam size > 1 with partial targets.\n  \"\"\"\n  if encoder_output is not None:\n    batch_size = common_layers.shape_list(encoder_output)[0]\n\n  key_channels = hparams.attention_key_channels or hparams.hidden_size\n  value_channels = hparams.attention_value_channels or hparams.hidden_size\n  num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers\n  vars_3d_num_heads = (\n      hparams.num_heads if hparams.get(\"attention_variables_3d\") else 0)\n\n  cache = {\n      \"layer_%d\" % layer: {  # pylint: disable=g-complex-comprehension\n          \"k\":\n              common_attention.split_heads(\n                  tf.zeros([batch_size, 0, key_channels]), hparams.num_heads),\n          \"v\":\n              common_attention.split_heads(\n                  tf.zeros([batch_size, 0, value_channels]), hparams.num_heads),\n          \"f\":\n              tf.zeros([batch_size, 0, hparams.hidden_size]),\n      } for layer in range(num_layers)\n  }\n\n  if encoder_output is not None:\n    for layer in range(num_layers):\n      layer_name = \"layer_%d\" % layer\n      with tf.variable_scope(\n          \"%sdecoder/%s/encdec_attention/multihead_attention\" % (scope_prefix,\n                                                                 layer_name)):\n        initial_sparsity = None\n        if hparams.get(\"load_masks_from\"):\n          initial_sparsity = hparams.get(\"initial_sparsity\")\n\n        k_encdec = sparse_attention.compute_attention_component(\n            encoder_output, key_channels, name=\"k\",\n            vars_3d_num_heads=vars_3d_num_heads,\n            sparsity_technique=hparams.get(\"sparsity_technique\"),\n            threshold=hparams.get(\"log_alpha_threshold\"),\n            training=hparams.get(\"mode\") == tf.estimator.ModeKeys.TRAIN,\n            clip_alpha=hparams.get(\"clip_log_alpha\"),\n            initial_sparsity=initial_sparsity,\n            split_heads=hparams.get(\"split_heads\"),\n            num_heads=hparams.num_heads)\n        k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)\n        v_encdec = 
sparse_attention.compute_attention_component(\n encoder_output, value_channels, name=\"v\",\n vars_3d_num_heads=vars_3d_num_heads,\n sparsity_technique=hparams.get(\"sparsity_technique\"),\n threshold=hparams.get(\"log_alpha_threshold\"),\n training=hparams.get(\"mode\") == tf.estimator.ModeKeys.TRAIN,\n clip_alpha=hparams.get(\"clip_log_alpha\"),\n initial_sparsity=initial_sparsity,\n split_heads=hparams.get(\"split_heads\"),\n num_heads=hparams.num_heads)\n v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)\n cache[layer_name][\"k_encdec\"] = k_encdec\n cache[layer_name][\"v_encdec\"] = v_encdec\n\n cache[\"encoder_output\"] = encoder_output\n cache[\"encoder_decoder_attention_bias\"] = encoder_decoder_attention_bias\n\n if beam_size > 1: # Beam Search\n initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)\n decoded_ids, scores, _ = beam_search.beam_search(\n symbols_to_logits_fn,\n initial_ids,\n beam_size,\n decode_length,\n vocab_size,\n alpha,\n states=cache,\n eos_id=eos_id,\n stop_early=(top_beams == 1))\n\n if top_beams == 1:\n decoded_ids = decoded_ids[:, 0, 1:]\n scores = scores[:, 0]\n else:\n decoded_ids = decoded_ids[:, :top_beams, 1:]\n scores = scores[:, :top_beams]\n else: # Greedy\n\n def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob):\n \"\"\"One step of greedy decoding.\"\"\"\n logits, cache = symbols_to_logits_fn(next_id, i, cache)\n log_probs = common_layers.log_prob_from_logits(logits)\n temperature = (0.0 if hparams.sampling_method == \"argmax\" else\n hparams.sampling_temp)\n next_id = common_layers.sample_with_temperature(logits, temperature)\n hit_eos |= tf.equal(next_id, eos_id)\n\n log_prob_indices = tf.stack(\n [tf.range(tf.to_int64(batch_size)), next_id], axis=1)\n log_prob += tf.gather_nd(log_probs, log_prob_indices)\n\n next_id = tf.expand_dims(next_id, axis=1)\n decoded_ids = tf.concat([decoded_ids, next_id], axis=1)\n return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob\n\n def is_not_finished(i, hit_eos, *_):\n finished = i >= decode_length\n if not force_decode_length:\n finished |= tf.reduce_all(hit_eos)\n return tf.logical_not(finished)\n\n decoded_ids = tf.zeros([batch_size, 0], dtype=tf.int64)\n hit_eos = tf.fill([batch_size], False)\n next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64)\n initial_log_prob = tf.zeros([batch_size], dtype=tf.float32)\n _, _, _, decoded_ids, _, log_prob = tf.while_loop(\n is_not_finished,\n inner_loop, [\n tf.constant(0), hit_eos, next_id, decoded_ids, cache,\n initial_log_prob\n ],\n shape_invariants=[\n tf.TensorShape([]),\n tf.TensorShape([None]),\n tf.TensorShape([None, None]),\n tf.TensorShape([None, None]),\n nest.map_structure(beam_search.get_state_shape_invariants, cache),\n tf.TensorShape([None]),\n ])\n scores = log_prob\n\n return {\"outputs\": decoded_ids, \"scores\": scores}\n\n\ndef features_to_nonpadding(features, inputs_or_targets=\"inputs\"):\n key = inputs_or_targets + \"_segmentation\"\n if features and key in features:\n return tf.minimum(tf.to_float(features[key]), 1.0)\n return None\n\n\ndef transformer_prepare_decoder(targets, hparams, features=None):\n \"\"\"Prepare one shard of the model for the decoder.\n\n Args:\n targets: a Tensor.\n hparams: run hyperparameters\n features: optionally pass the entire features dictionary as well.\n This is needed now for \"packed\" datasets.\n\n Returns:\n decoder_input: a Tensor, bottom of decoder stack\n decoder_self_attention_bias: a bias tensor for use in decoder self-attention\n \"\"\"\n if 
hparams.causal_decoder_self_attention:\n # Causal attention.\n if hparams.prepend_mode == \"prepend_inputs_full_attention\":\n decoder_self_attention_bias = (\n common_attention.attention_bias_prepend_inputs_full_attention(\n common_attention.embedding_to_padding(targets)))\n else:\n decoder_self_attention_bias = (\n common_attention.attention_bias_lower_triangle(\n common_layers.shape_list(targets)[1]))\n else:\n # Full attention.\n decoder_padding = common_attention.embedding_to_padding(targets)\n decoder_self_attention_bias = (\n common_attention.attention_bias_ignore_padding(decoder_padding))\n\n if features and \"targets_segmentation\" in features:\n # \"Packed\" dataset - keep the examples from seeing each other.\n targets_segmentation = features[\"targets_segmentation\"]\n targets_position = features[\"targets_position\"]\n decoder_self_attention_bias += common_attention.attention_bias_same_segment(\n targets_segmentation, targets_segmentation)\n else:\n targets_position = None\n if hparams.proximity_bias:\n decoder_self_attention_bias += common_attention.attention_bias_proximal(\n common_layers.shape_list(targets)[1])\n decoder_input = common_layers.shift_right_3d(targets)\n if hparams.pos == \"timing\":\n if targets_position is not None:\n decoder_input = common_attention.add_timing_signal_1d_given_position(\n decoder_input, targets_position)\n else:\n decoder_input = common_attention.add_timing_signal_1d(decoder_input)\n elif hparams.pos == \"emb\":\n decoder_input = common_attention.add_positional_embedding(\n decoder_input, hparams.max_length, \"targets_positional_embedding\",\n targets_position)\n\n if hparams.activation_dtype == \"bfloat16\":\n decoder_self_attention_bias = tf.cast(decoder_self_attention_bias,\n tf.bfloat16)\n return (decoder_input, decoder_self_attention_bias)\n\n\ndef transformer_decoder(decoder_input,\n encoder_output,\n decoder_self_attention_bias,\n encoder_decoder_attention_bias,\n hparams,\n cache=None,\n decode_loop_step=None,\n name=\"decoder\",\n save_weights_to=None,\n make_image_summary=True,\n losses=None): # pylint: disable=unused-argument\n \"\"\"A stack of transformer layers.\n\n Args:\n decoder_input: a Tensor\n encoder_output: a Tensor\n decoder_self_attention_bias: bias Tensor for self-attention\n (see common_attention.attention_bias())\n encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention\n (see common_attention.attention_bias())\n hparams: hyperparameters for model\n cache: dict, containing tensors which are the results of previous\n attentions, used for fast decoding.\n decode_loop_step: An integer, step number of the decoding loop.\n Only used for inference on TPU.\n name: a string\n save_weights_to: an optional dictionary to capture attention weights\n for visualization; the weights tensor will be appended there under\n a string key created from the variable scope (including name).\n make_image_summary: Whether to make an attention image summary.\n losses: optional list onto which to append extra training losses\n\n Returns:\n y: a Tensors\n \"\"\"\n x = decoder_input\n attention_dropout_broadcast_dims = (\n common_layers.comma_separated_string_to_integer_list(\n getattr(hparams, \"attention_dropout_broadcast_dims\", \"\")))\n\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,\n value=hparams.num_decoder_layers or hparams.num_hidden_layers)\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,\n value=hparams.attention_dropout)\n mlperf_log.transformer_print(\n 
key=mlperf_log.MODEL_HP_ATTENTION_DENSE,\n value={\n \"use_bias\": \"false\",\n \"num_heads\": hparams.num_heads,\n \"hidden_size\": hparams.hidden_size\n })\n\n with tf.variable_scope(name):\n for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers):\n initial_sparsity = None\n if hparams.get(\"load_masks_from\"):\n initial_sparsity = hparams.get(\"initial_sparsity\")\n\n layer_name = \"layer_%d\" % layer\n layer_cache = cache[layer_name] if cache is not None else None\n with tf.variable_scope(layer_name):\n with tf.variable_scope(\"self_attention\"):\n y = sparse_attention.multihead_attention(\n common_layers.layer_preprocess(x, hparams),\n None,\n decoder_self_attention_bias,\n hparams.attention_key_channels or hparams.hidden_size,\n hparams.attention_value_channels or hparams.hidden_size,\n hparams.hidden_size,\n hparams.num_heads,\n hparams.attention_dropout,\n attention_type=hparams.self_attention_type,\n max_relative_position=hparams.max_relative_position,\n heads_share_relative_embedding=(\n hparams.heads_share_relative_embedding),\n add_relative_to_values=hparams.add_relative_to_values,\n save_weights_to=save_weights_to,\n cache=layer_cache,\n make_image_summary=make_image_summary,\n dropout_broadcast_dims=attention_dropout_broadcast_dims,\n max_length=hparams.get(\"max_length\"),\n decode_loop_step=decode_loop_step,\n vars_3d=hparams.get(\"attention_variables_3d\"),\n sparsity_technique=hparams.get(\"sparsity_technique\"),\n threshold=hparams.get(\"log_alpha_threshold\"),\n training=hparams.get(\"mode\") == tf.estimator.ModeKeys.TRAIN,\n clip_alpha=hparams.get(\"clip_log_alpha\"),\n initial_sparsity=initial_sparsity,\n split_heads=hparams.get(\"split_heads\"))\n x = common_layers.layer_postprocess(x, y, hparams)\n if encoder_output is not None:\n with tf.variable_scope(\"encdec_attention\"):\n y = sparse_attention.multihead_attention(\n common_layers.layer_preprocess(x, hparams),\n encoder_output,\n encoder_decoder_attention_bias,\n hparams.attention_key_channels or hparams.hidden_size,\n hparams.attention_value_channels or hparams.hidden_size,\n hparams.hidden_size,\n hparams.num_heads,\n hparams.attention_dropout,\n max_relative_position=hparams.max_relative_position,\n heads_share_relative_embedding=(\n hparams.heads_share_relative_embedding),\n add_relative_to_values=hparams.add_relative_to_values,\n save_weights_to=save_weights_to,\n cache=layer_cache,\n make_image_summary=make_image_summary,\n dropout_broadcast_dims=attention_dropout_broadcast_dims,\n max_length=hparams.get(\"max_length\"),\n vars_3d=hparams.get(\"attention_variables_3d\"),\n sparsity_technique=hparams.get(\"sparsity_technique\"),\n threshold=hparams.get(\"log_alpha_threshold\"),\n training=hparams.get(\"mode\") == tf.estimator.ModeKeys.TRAIN,\n clip_alpha=hparams.get(\"clip_log_alpha\"),\n initial_sparsity=initial_sparsity,\n split_heads=hparams.get(\"split_heads\"))\n x = common_layers.layer_postprocess(x, y, hparams)\n with tf.variable_scope(\"ffn\"):\n y = transformer_ffn_layer(\n common_layers.layer_preprocess(x, hparams),\n hparams)\n x = common_layers.layer_postprocess(x, y, hparams)\n # if normalization is done in layer_preprocess, then it should also be done\n # on the output, since the output can grow very large, being the sum of\n # a whole stack of unnormalized layer outputs.\n mlperf_log.transformer_print(\n key=mlperf_log.MODEL_HP_NORM,\n value={\"hidden_size\": hparams.hidden_size})\n return common_layers.layer_preprocess(x, hparams)\n\n\[email protected]_hparams\ndef 
sparse_transformer_base_v1():\n \"\"\"Set of hyperparameters.\"\"\"\n hparams = common_hparams.basic_params1()\n hparams.norm_type = \"layer\"\n hparams.hidden_size = 512\n hparams.batch_size = 4096\n hparams.max_length = 256\n hparams.clip_grad_norm = 0. # i.e. no gradient clipping\n hparams.optimizer_adam_epsilon = 1e-9\n hparams.learning_rate_schedule = \"legacy\"\n hparams.learning_rate_decay_scheme = \"noam\"\n hparams.learning_rate = 0.1\n hparams.learning_rate_warmup_steps = 4000\n hparams.initializer_gain = 1.0\n hparams.num_hidden_layers = 6\n hparams.initializer = \"uniform_unit_scaling\"\n hparams.weight_decay = 0.0\n hparams.optimizer_adam_beta1 = 0.9\n hparams.optimizer_adam_beta2 = 0.98\n hparams.num_sampled_classes = 0\n hparams.label_smoothing = 0.1\n hparams.shared_embedding_and_softmax_weights = True\n hparams.symbol_modality_num_shards = 16\n\n # Add new ones like this.\n hparams.add_hparam(\"filter_size\", 2048)\n # Layer-related flags. If zero, these fall back on hparams.num_hidden_layers.\n hparams.add_hparam(\"num_encoder_layers\", 0)\n hparams.add_hparam(\"num_decoder_layers\", 0)\n # Attention-related flags.\n hparams.add_hparam(\"num_heads\", 8)\n hparams.add_hparam(\"attention_key_channels\", 0)\n hparams.add_hparam(\"attention_value_channels\", 0)\n hparams.add_hparam(\"ffn_layer\", \"dense_relu_dense\")\n hparams.add_hparam(\"parameter_attention_key_channels\", 0)\n hparams.add_hparam(\"parameter_attention_value_channels\", 0)\n # All hyperparameters ending in \"dropout\" are automatically set to 0.0\n # when not in training mode.\n hparams.add_hparam(\"attention_dropout\", 0.0)\n hparams.add_hparam(\"attention_dropout_broadcast_dims\", \"\")\n hparams.add_hparam(\"relu_dropout\", 0.0)\n hparams.add_hparam(\"relu_dropout_broadcast_dims\", \"\")\n hparams.add_hparam(\"pos\", \"timing\") # timing, none\n hparams.add_hparam(\"nbr_decoder_problems\", 1)\n hparams.add_hparam(\"proximity_bias\", False)\n hparams.add_hparam(\"causal_decoder_self_attention\", True)\n hparams.add_hparam(\"use_pad_remover\", True)\n hparams.add_hparam(\"self_attention_type\", \"dot_product\")\n hparams.add_hparam(\"conv_first_kernel\", 3)\n hparams.add_hparam(\"attention_variables_3d\", False)\n hparams.add_hparam(\"use_target_space_embedding\", True)\n # These parameters are only used when ffn_layer==\"local_moe_tpu\"\n hparams.add_hparam(\"moe_overhead_train\", 1.0)\n hparams.add_hparam(\"moe_overhead_eval\", 2.0)\n hparams.moe_num_experts = 16\n hparams.moe_loss_coef = 1e-3\n\n # Sparsity hyper-parameters\n hparams.add_hparam(\"sparsity_technique\", None)\n hparams.add_hparam(\"log_alpha_threshold\", 3.0)\n\n # variational dropout & l0 parameters\n hparams.add_hparam(\"dkl_weight_fn\", \"linear\")\n\n # variational dropout parameters\n hparams.add_hparam(\"dkl_weight\", 1 / (4.5 * 10 ** 6))\n hparams.add_hparam(\"clip_log_alpha\", 8.0)\n hparams.add_hparam(\"dkl_weight_start\", 100000)\n hparams.add_hparam(\"dkl_weight_diff\", 100000)\n\n # l0-regularization parameters\n hparams.add_hparam(\"l0_norm_weight\", 1 / (4.5 * 10 ** 6))\n hparams.add_hparam(\"l0_weight_start\", 100000)\n hparams.add_hparam(\"l0_weight_diff\", 100000)\n\n # magnitude & random pruning parameters\n hparams.add_hparam(\"begin_pruning_step\", 0)\n hparams.add_hparam(\"end_pruning_step\", 200000)\n hparams.add_hparam(\"pruning_frequency\", 10000)\n hparams.add_hparam(\"target_sparsity\", .9)\n\n # whether we should prune the weights for each attention head separately\n hparams.add_hparam(\"split_heads\", False)\n\n # mp & rp parameters we don't 
really change\n hparams.add_hparam(\"threshold_decay\", 0.0)\n hparams.add_hparam(\"nbins\", 1024)\n hparams.add_hparam(\"sparsity_function_exponent\", 3.0)\n\n # use sparse embedding and softmax layer\n hparams.bottom = {\n \"targets\": sparse_modalities.targets_bottom,\n \"inputs\": sparse_modalities.bottom\n }\n hparams.top = {\n \"targets\": sparse_modalities.top,\n }\n\n # specify to load trained masks from checkpoint\n hparams.add_hparam(\"load_masks_from\", \"\")\n hparams.add_hparam(\"load_weights_from\", \"\")\n hparams.add_hparam(\"initial_sparsity\", 0.0)\n\n # If >= 0, use this sparsity level for the embedding\n # matrix instead of the target_sparsity.\n hparams.add_hparam(\"embedding_sparsity\", -1.0)\n return hparams\n\n\n@registry.register_hparams\ndef sparse_transformer_base_v2():\n \"\"\"Set of hyperparameters.\"\"\"\n hparams = sparse_transformer_base_v1()\n hparams.layer_preprocess_sequence = \"n\"\n hparams.layer_postprocess_sequence = \"da\"\n hparams.layer_prepostprocess_dropout = 0.1\n hparams.attention_dropout = 0.1\n hparams.relu_dropout = 0.1\n hparams.learning_rate_warmup_steps = 8000\n hparams.learning_rate = 0.2\n return hparams\n\n\n@registry.register_hparams\ndef sparse_transformer_base_v3():\n \"\"\"Base parameters for Transformer model.\"\"\"\n # Update parameters here, then occasionally cut a versioned set, e.g.\n # transformer_base_v2.\n hparams = sparse_transformer_base_v2()\n hparams.optimizer_adam_beta2 = 0.997\n # New way of specifying learning rate schedule.\n # Equivalent to previous version.\n hparams.learning_rate_schedule = (\n \"constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size\")\n hparams.learning_rate_constant = 2.0\n return hparams\n\n\n@registry.register_hparams\ndef sparse_transformer_base():\n \"\"\"Base parameters for Transformer model.\"\"\"\n hparams = sparse_transformer_base_v3()\n return hparams\n\n\n@registry.register_hparams\ndef sparse_transformer_tiny():\n hparams = sparse_transformer_base()\n hparams.num_hidden_layers = 2\n hparams.hidden_size = 128\n hparams.filter_size = 512\n hparams.num_heads = 4\n return hparams\n\n\n@registry.register_hparams\ndef sparse_transformer_tiny_variational_dropout():\n hparams = sparse_transformer_tiny()\n hparams.sparsity_technique = \"variational_dropout\"\n return hparams\n\n\n@registry.register_hparams\ndef sparse_transformer_tiny_l0_regularization():\n hparams = sparse_transformer_tiny()\n hparams.sparsity_technique = \"l0_regularization\"\n return hparams\n\n\n@registry.register_hparams\ndef sparse_transformer_tiny_magnitude_pruning():\n hparams = sparse_transformer_tiny()\n hparams.sparsity_technique = \"magnitude_pruning\"\n return hparams\n\n\n@registry.register_hparams\ndef sparse_transformer_tiny_shmp():\n hparams = sparse_transformer_tiny()\n hparams.sparsity_technique = \"magnitude_pruning\"\n hparams.split_heads = True\n return hparams\n\n\n@registry.register_hparams\ndef sparse_transformer_tiny_random_pruning():\n hparams = sparse_transformer_tiny()\n hparams.sparsity_technique = \"random_pruning\"\n return hparams\n\n\ndef update_hparams_for_tpu(hparams):\n \"\"\"Change hparams to be compatible with TPU training.\"\"\"\n\n # Adafactor uses less memory than Adam.\n # switch to Adafactor with its recommended learning rate scheme.\n hparams.optimizer = \"Adafactor\"\n hparams.learning_rate_schedule = \"rsqrt_decay\"\n hparams.learning_rate_warmup_steps = 10000\n\n # Avoid an expensive concat on TPU.\n # >1 shards helps with faster parameter distribution on multi-GPU machines\n 
hparams.symbol_modality_num_shards = 1\n\n # Adaptive batch sizes and sequence lengths are not supported on TPU.\n # Instead, every batch has the same sequence length and the same batch size.\n # Longer sequences are dropped and shorter ones are padded.\n #\n # It is therefore suggested to use a problem where examples have been combined\n # to a longer length, e.g. the \"_packed\" problems.\n #\n # For problems with variable sequence lengths, this parameter controls the\n # maximum sequence length. Shorter sequences are dropped and longer ones\n # are padded.\n #\n # For problems with fixed sequence lengths - e.g. the \"_packed\" problems,\n # this hyperparameter is ignored.\n hparams.max_length = 64\n\n # TPUs have less memory than GPUs, so decrease the batch size\n hparams.batch_size = 2048\n\n # Using noise broadcast in the dropout layers saves memory during training.\n hparams.attention_dropout_broadcast_dims = \"0,1\" # batch, heads\n hparams.relu_dropout_broadcast_dims = \"1\" # length\n hparams.layer_prepostprocess_dropout_broadcast_dims = \"1\" # length\n\n\[email protected]_hparams\ndef sparse_transformer_tpu():\n \"\"\"HParams for Transformer model on TPU.\"\"\"\n hparams = sparse_transformer_base()\n update_hparams_for_tpu(hparams)\n return hparams\n\n\[email protected]_hparams\ndef sparse_transformer_tiny_tpu():\n hparams = sparse_transformer_tiny()\n update_hparams_for_tpu(hparams)\n return hparams\n\n\[email protected]_hparams\ndef sparse_transformer_magnitude_pruning_tpu():\n hparams = sparse_transformer_base()\n hparams.symbol_modality_num_shards = 1\n hparams.max_length = 64\n hparams.batch_size = 2048\n\n hparams.sparsity_technique = \"magnitude_pruning\"\n return hparams\n\n\[email protected]_hparams\ndef sparse_transformer_random_pruning_tpu():\n hparams = sparse_transformer_base()\n hparams.symbol_modality_num_shards = 1\n hparams.max_length = 64\n hparams.batch_size = 2048\n\n hparams.sparsity_technique = \"random_pruning\"\n return hparams\n\n\[email protected]_hparams\ndef sparse_transformer_variational_dropout_tpu():\n hparams = sparse_transformer_base()\n hparams.symbol_modality_num_shards = 1\n hparams.max_length = 64\n hparams.batch_size = 2048\n\n hparams.sparsity_technique = \"variational_dropout\"\n return hparams\n\n\[email protected]_hparams\ndef sparse_transformer_l0_regularization_tpu():\n hparams = sparse_transformer_base()\n hparams.symbol_modality_num_shards = 1\n hparams.max_length = 64\n hparams.batch_size = 2048\n\n hparams.sparsity_technique = \"l0_regularization\"\n return hparams\n\n\[email protected]_hparams\ndef sparse_transformer_mpfc_tpu():\n \"\"\"Magnitude pruning without embedding pruning.\"\"\"\n hparams = sparse_transformer_base()\n hparams.symbol_modality_num_shards = 1\n hparams.max_length = 64\n hparams.batch_size = 4096 # double the batch size\n\n hparams.sparsity_technique = \"magnitude_pruning\"\n\n # use the default modality, i.e. 
don't prune the embedding\n # or the final linear layer before the softmax.\n hparams.modality = {}\n return hparams\n\n\[email protected]_hparams\ndef sparse_transformer_mpfc_2k_tpu():\n hparams = sparse_transformer_mpfc_tpu()\n hparams.batch_size = 2048 # use the standard batch size\n return hparams\n\n\[email protected]_hparams\ndef sparse_transformer_split_head_mpfc_tpu():\n hparams = sparse_transformer_mpfc_tpu()\n\n # prune the weights for each attention head separately\n hparams.split_heads = True\n return hparams\n\n\[email protected]_hparams\ndef sparse_transformer_magnitude_pruning_4k_tpu():\n hparams = sparse_transformer_base()\n hparams.symbol_modality_num_shards = 1\n hparams.max_length = 64\n hparams.batch_size = 4096 # double the batch size\n\n hparams.sparsity_technique = \"magnitude_pruning\"\n return hparams\n\n\[email protected]_hparams\ndef sparse_transformer_split_head_magnitude_pruning_4k_tpu():\n hparams = sparse_transformer_magnitude_pruning_4k_tpu()\n hparams.split_heads = True\n return hparams\n",
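"# Illustrative sketch, not part of the original sources: what the\n# 'magnitude_pruning' technique configured by the hparams above does to one\n# weight matrix, with and without per-head splitting (the 'split_heads'\n# hparam). The helper name prune_by_magnitude is hypothetical; real\n# training-time pruning is driven by begin_pruning_step, end_pruning_step,\n# pruning_frequency and target_sparsity.\nimport numpy as np\n\n\ndef prune_by_magnitude(weights, target_sparsity, num_heads=1):\n  \"\"\"Zeros out the smallest-magnitude entries, per attention head.\"\"\"\n  pruned = []\n  for w in np.split(weights, num_heads, axis=-1):\n    k = int(target_sparsity * w.size)\n    # The k-th smallest magnitude is the pruning threshold for this head.\n    threshold = np.sort(np.abs(w), axis=None)[k] if k > 0 else 0.0\n    pruned.append(np.where(np.abs(w) < threshold, 0.0, w))\n  return np.concatenate(pruned, axis=-1)\n\n\nw = np.random.randn(512, 512).astype(np.float32)\nprint('sparsity:', float(np.mean(prune_by_magnitude(w, 0.9, num_heads=8) == 0.0)))\n",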
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python3\n\"\"\"Adam with extra hyper parameters for l1, l2 reg and lr schedules.\"\"\"\n\nimport re\nfrom typing import Text, List, Dict, Any\nimport numpy as np\n\nfrom task_set import registry\nfrom task_set.optimizers import base\nfrom task_set.optimizers import utils\nimport tensorflow.compat.v1 as tf\n\n\nclass Adam8POptimizer(base.BaseOptimizer):\n r\"\"\"8 hyper parameter Adam.\n\n This is the Adam optimizer[1] with the addition of l1 and l2 regularization\n and a combination of linear and exponential learning rate decay.\n\n Note the l1 and l2 regularization is added to the loss. See AdamW[2] for a\n discussion of why this might be a bad idea.\n\n The update is as follows:\n\n # initialize variables\n m <- 0\n v <- 0\n beta1p <- beta1\n beta2p <- beta2\n\n # updating x \\in R^N:\n g = d/dx(f(x) + l2*||x||^2_2 + l1*||x||_1)\n\n m <- beta1 * m + (1.0 - beta1)*g\n v <- beta2 * v + (1.0 - beta2)*g^2\n\n mh <- m / (1 - beta1p)\n vh <- v / (v - beta2p)\n\n update <- mh / (sqrt(vh+1e-10) + epsilon)\n\n beta1p <- beta1 * beta1p\n beta2p <- beta2 * beta2p\n\n linear_factor <- max(1 - linear_decay * global_step, 0.0)\n exp_factor <- exp(-exponential_decay * global_step)\n lr = exp_factor * linear_factor * learning_rate\n\n x <- lr * linear_factor * exp_factor * update\n\n [1] https://arxiv.org/abs/1412.6980\n [2] https://arxiv.org/abs/1711.05101\n \"\"\"\n\n def __init__(\n self,\n learning_rate = 1e-3,\n beta1 = 0.9,\n beta2 = 0.999,\n epsilon = 1e-8,\n l1 = 1e-7,\n l2 = 1e-7,\n linear_decay = 0.0,\n exponential_decay = 0.0,\n reg_factor = 1.0,\n training_steps = 10000,\n ):\n \"\"\"Initialize the optimizer. See class documentation for equations.\"\"\"\n self._learning_rate = learning_rate\n self._beta1 = beta1\n self._beta2 = beta2\n self._epsilon = epsilon\n self._l1 = l1\n self._l2 = l2\n self._linear_decay = linear_decay\n self._exponential_decay = exponential_decay\n self._reg_factor = reg_factor\n self._training_steps = training_steps\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n\n def minimize(self, loss, global_step,\n var_list):\n \"\"\"Create op that applies Adam8p step.\"\"\"\n if not var_list:\n raise ValueError(\"Explicitly pass var_list!\")\n if not global_step:\n raise ValueError(\"Explicitly pass global_step!\")\n\n # Add regularization to the loss\n grads_and_vars = self.compute_gradients(loss, var_list=var_list)\n return self.apply_gradients(grads_and_vars, global_step=global_step)\n\n def apply_gradients(self, grads_and_vars, global_step, name=None):\n \"\"\"Perform an update with the parameters.\"\"\"\n\n # we meta-train with 10k steps. 
When applying to longer problems we want to\n # have a reasonable schedule so we rescale.\n\n rescale_global_step = float(10000) / self._training_steps * tf.to_float(\n global_step)\n\n beta1_power = tf.get_variable(\n dtype=tf.float32, name=\"beta1_power\", initializer=self._beta1)\n beta2_power = tf.get_variable(\n dtype=tf.float32, name=\"beta2_power\", initializer=self._beta2)\n\n exp_factor = tf.exp(-self._exponential_decay *\n tf.to_float(rescale_global_step))\n\n # lr reduction per step.\n linear_factor = tf.maximum(\n 1 - self._linear_decay * tf.to_float(rescale_global_step), 0.0)\n\n lr = exp_factor * linear_factor * self._learning_rate\n\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n # sparse to dense conversion\n grad = tf.convert_to_tensor(grad)\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n next_m = (self._beta1 * m + (1.0 - self._beta1) * grad)\n next_v = (self._beta2 * v + (1.0 - self._beta2) * tf.square(grad))\n next_m_hat = next_m / (1 - beta1_power)\n next_v_hat = next_v / (1 - beta2_power)\n update = next_m_hat / (tf.sqrt(next_v_hat + 1e-10) + self._epsilon)\n\n next_param = param - lr * update\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n\n # Do this after all other assignments are done to prevent a race condition.\n with tf.control_dependencies(assignments):\n assignments.extend([\n beta1_power.assign(beta1_power * self._beta1),\n beta2_power.assign(beta2_power * self._beta2),\n global_step.assign_add(1),\n ])\n return tf.group(*assignments, name=name)\n\n def compute_gradients(self, loss, var_list=None, **kwargs):\n if not var_list:\n var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n\n if self._l1:\n l1 = tf.add_n(\n [tf.reduce_sum(tf.abs(p)) * self._reg_factor for p in var_list])\n loss = loss + l1 * self._l1\n if self._l2:\n l2 = tf.add_n(\n [tf.reduce_sum(tf.square(p)) * self._reg_factor for p in var_list])\n loss = loss + l2 * self._l2\n\n grads_and_vars = zip(\n tf.gradients(loss, var_list, colocate_gradients_with_ops=True),\n var_list)\n return grads_and_vars\n\n\nAdam8PConfig = Dict[Text, Any]\n\n\[email protected]_registry.register_sampler(\"adam8p_wide_grid\")\ndef sample_adam8p_wide_grid(seed):\n \"\"\"Sample a random configuration from a wide grid for adam8p.\"\"\"\n rng = np.random.RandomState(seed)\n cfg = {\n \"learning_rate\": utils.sample_log_float(rng, 1e-8, 1e1),\n \"beta1\": 1 - utils.sample_log_float(rng, 1e-4, 1e0),\n \"beta2\": 1 - utils.sample_log_float(rng, 1e-6, 1e0),\n \"epsilon\": utils.sample_log_float(rng, 1e-10, 1e3),\n \"l1\": utils.sample_log_float(rng, 1e-8, 1e1),\n \"l2\": utils.sample_log_float(rng, 1e-8, 1e1),\n \"linear_decay\": utils.sample_log_float(rng, 1e-7, 1e-4),\n \"exponential_decay\": utils.sample_log_float(rng, 1e-3, 1e-6),\n }\n return cfg\n\n\[email protected]_registry.register_getter(\"adam8p_wide_grid\")\ndef get_adam8p(\n cfg,\n training_steps = 10000 # pylint: disable=unused-argument\n):\n return Adam8POptimizer(**cfg)\n\n\[email protected]_registry.register_sampler(\"adam6p_wide_grid\")\ndef sample_adam6p_wide_grid(seed):\n \"\"\"Sample a random configuration 
from a wide grid for adam6p.\"\"\"\n rng = np.random.RandomState(seed + 123455)\n cfg = {\n \"learning_rate\": utils.sample_log_float(rng, 1e-8, 1e1),\n \"beta1\": 1 - utils.sample_log_float(rng, 1e-4, 1e0),\n \"beta2\": 1 - utils.sample_log_float(rng, 1e-6, 1e0),\n \"epsilon\": utils.sample_log_float(rng, 1e-10, 1e3),\n \"linear_decay\": utils.sample_log_float(rng, 1e-7, 1e-4),\n \"exponential_decay\": utils.sample_log_float(rng, 1e-3, 1e-6),\n }\n return cfg\n\n\[email protected]_registry.register_getter(\"adam6p_wide_grid\")\ndef get_adam6p(cfg, training_steps = 10000):\n return Adam8POptimizer(l1=0.0, l2=0.0, training_steps=training_steps, **cfg)\n",
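"# Illustrative sketch, not part of the task_set sources: one Adam8p-style\n# update written in plain numpy, mirroring the equations in Adam8POptimizer's\n# docstring. The l1/l2 terms enter through the gradient g, as in\n# compute_gradients above; all names below are local to this example.\nimport numpy as np\n\n\ndef adam8p_step(x, g, m, v, beta1_power, beta2_power, step, lr=1e-3,\n                beta1=0.9, beta2=0.999, epsilon=1e-8, linear_decay=0.0,\n                exponential_decay=0.0):\n  m = beta1 * m + (1.0 - beta1) * g\n  v = beta2 * v + (1.0 - beta2) * g ** 2\n  m_hat = m / (1.0 - beta1_power)\n  v_hat = v / (1.0 - beta2_power)\n  update = m_hat / (np.sqrt(v_hat + 1e-10) + epsilon)\n  # Linear and exponential decay are folded into the effective learning rate.\n  schedule = max(1.0 - linear_decay * step, 0.0) * np.exp(-exponential_decay * step)\n  x = x - schedule * lr * update\n  return x, m, v, beta1_power * beta1, beta2_power * beta2\n",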
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TensorFlow optimization example.\n\nFinding the transformation between octonion multiplication table and spin(8)\nGamma matrices in Green, Schwarz, Witten conventions.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.contrib import opt as contrib_opt\n\n\ndef get_gamma_vsc():\n \"\"\"Computes SO(8) gamma-matrices.\"\"\"\n # Conventions match Green, Schwarz, Witten's.\n entries = (\n \"007+ 016- 025- 034+ 043- 052+ 061+ 070- \"\n \"101+ 110- 123- 132+ 145+ 154- 167- 176+ \"\n \"204+ 215- 226+ 237- 240- 251+ 262- 273+ \"\n \"302+ 313+ 320- 331- 346- 357- 364+ 375+ \"\n \"403+ 412- 421+ 430- 447+ 456- 465+ 474- \"\n \"505+ 514+ 527+ 536+ 541- 550- 563- 572- \"\n \"606+ 617+ 624- 635- 642+ 653+ 660- 671- \"\n \"700+ 711+ 722+ 733+ 744+ 755+ 766+ 777+\")\n ret = numpy.zeros([8, 8, 8])\n for ijkc in entries.split():\n ijk = tuple(map(int, ijkc[:-1]))\n ret[ijk] = +1 if ijkc[-1] == '+' else -1\n return ret\n\n\ndef get_octonion_mult_table():\n \"\"\"Computes the octonionic multiplication table\"\"\"\n # Cf. 
diagram at: http://math.ucr.edu/home/baez/octonions/\n ret = numpy.zeros([8, 8, 8])\n fano_lines = \"124 156 137 235 267 346 457\"\n for n in range(1, 8):\n ret[0, n, n] = -1\n ret[n, n, 0] = ret[n, 0, n] = 1\n ret[0, 0, 0] = 1\n for cijk in fano_lines.split():\n ijk = tuple(map(int, cijk)) # tuple() so the indices below work on Python 3\n for p, q, r in ((0, 1, 2), (1, 2, 0), (2, 0, 1)):\n # Note that we have to 'go against the direction of the arrows'\n # to make the correspondence work.\n ret[ijk[r], ijk[p], ijk[q]] = -1\n ret[ijk[r], ijk[q], ijk[p]] = +1\n return ret\n\n\ndef find_transforms():\n with tf.Graph().as_default():\n # Ensure reproducibility by seeding random number generators.\n tf.set_random_seed(0)\n transforms = tf.get_variable('transforms', shape=(2, 8, 8),\n dtype=tf.float64,\n trainable=True,\n initializer=tf.random_normal_initializer())\n id8 = tf.constant(numpy.eye(8), dtype=tf.float64)\n gamma = tf.constant(get_gamma_vsc(), dtype=tf.float64)\n otable = tf.constant(get_octonion_mult_table(),\n dtype=tf.float64)\n # Transform gamma matrices step-by-step, since tf.einsum() does not\n # do SQL-like query planning optimization.\n rotated_gamma = tf.einsum(\n 'vAb,bB->vAB', tf.einsum('vab,aA->vAb', gamma, transforms[0]),\n transforms[1])\n delta_mult = rotated_gamma - otable\n delta_ortho_s = tf.einsum('ab,cb->ac',\n transforms[0], transforms[0]) - id8\n delta_ortho_c = tf.einsum('ab,cb->ac',\n transforms[1], transforms[1]) - id8\n # This 'loss' function punishes deviations of the rotated gamma matrices\n # from the octonionic multiplication table, and also deviations of the\n # spinor and cospinor transformation matrices from orthogonality.\n loss = (tf.nn.l2_loss(delta_mult) +\n tf.nn.l2_loss(delta_ortho_s) + tf.nn.l2_loss(delta_ortho_c))\n opt = contrib_opt.ScipyOptimizerInterface(loss, options=dict(maxiter=1000))\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n opt.minimize(session=sess)\n return sess.run([loss, transforms])\n\n\nloss, transforms = find_transforms()\nprint('Loss: %.6g, Transforms:\\n%r\\n' % (\n loss, numpy.round(transforms, decimals=5)))\n\n## Prints:\n# Loss: 2.19694e-11, Transforms:\n# array([[[ 0.5, -0. , -0. , 0.5, 0.5, 0. , 0.5, 0. ],\n# [-0.5, 0. , 0. , 0.5, -0.5, 0. , 0.5, 0. ],\n# [-0. , -0.5, 0.5, -0. , -0. , 0.5, 0. , 0.5],\n# [ 0. , 0.5, -0.5, -0. , -0. , 0.5, -0. , 0.5],\n# [-0.5, 0. , -0. , -0.5, 0.5, 0. , 0.5, 0. ],\n# [ 0.5, -0. , 0. , -0.5, -0.5, -0. , 0.5, -0. ],\n# [ 0. , -0.5, -0.5, 0. , 0. , -0.5, -0. , 0.5],\n# [-0. , 0.5, 0.5, 0. , -0. , -0.5, -0. , 0.5]],\n#\n# [[-0. , 0.5, 0.5, 0. , 0. , -0.5, 0. , 0.5],\n# [ 0. , 0.5, 0.5, -0. , -0. , 0.5, 0. , -0.5],\n# [ 0.5, -0. , 0. , 0.5, 0.5, 0. , -0.5, 0. ],\n# [ 0.5, 0. , -0. , -0.5, 0.5, 0. , 0.5, -0. ],\n# [-0. , -0.5, 0.5, -0. , 0. , -0.5, -0. , -0.5],\n# [-0. , -0.5, 0.5, -0. , 0. , 0.5, 0. , 0.5],\n# [ 0.5, 0. , 0. , 0.5, -0.5, -0. , 0.5, -0. ],\n# [ 0.5, -0. , -0. , -0.5, -0.5, -0. , -0.5, -0. ]]])\n",
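"# Illustrative check, not part of the example above: appended to the end of\n# that script, this verifies with plain numpy that the recovered spinor and\n# cospinor transforms are orthogonal and carry the gamma matrices onto the\n# octonion multiplication table.\nimport numpy\n\ns, c = transforms  # as printed by the run above\nassert numpy.allclose(s.dot(s.T), numpy.eye(8), atol=1e-5)\nassert numpy.allclose(c.dot(c.T), numpy.eye(8), atol=1e-5)\nrotated = numpy.einsum('vab,aA,bB->vAB', get_gamma_vsc(), s, c)\nassert numpy.allclose(rotated, get_octonion_mult_table(), atol=1e-5)\n",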
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for task_set.tasks.conv_pooling.\"\"\"\nfrom task_set.tasks import conv_pooling\nfrom task_set.tasks import family_test_utils\nimport tensorflow.compat.v1 as tf\n\n\nclass ConvPoolingTest(family_test_utils.TaskFamilyTestCase):\n\n def __init__(self, *args, **kwargs):\n super(ConvPoolingTest,\n self).__init__(conv_pooling.sample_conv_pooling_family_cfg,\n conv_pooling.get_conv_pooling_family, *args, **kwargs)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Experiments with crime data.\n\nIn this example, we examine MOEW on the Communities and Crime dataset from the\nUCI Machine Learning Repository, which contains the violent crime rate of\ncommunities. The goal is to predict whether a community has violent crime rate\nper 100k population above 0.28.\n\nIn addition to obtaining an accurate classifier, we also aim to improve its\nfairness. To this end, we divided the communities into 4 groups based on\nthe quartiles of white population percentage in each community. We seek a\nclassifier with high accuracy, but that has similar false positive rates (FPR)\nacross racial groups. Therefore, we evaluate classifiers based on two metrics:\noverall accuracy across all communities and the difference between the highest\nand lowest FPR across four racial groups (fairness violation).\n\nSee the paper for a detailed explanation of the experiment.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport random\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport pandas as pd\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import ConstantKernel\nfrom sklearn.gaussian_process.kernels import RBF\nimport tensorflow.compat.v1 as tf\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('training_data_path', '', 'training dataset')\nflags.DEFINE_string('testing_data_path', '', 'testing dataset')\nflags.DEFINE_string('validation_data_path', '', 'validation dataset')\nflags.DEFINE_bool('uniform_weights', False, 'use uniform weights')\nflags.DEFINE_bool('propensity_weights', False, 'use propensity weihts')\nflags.DEFINE_bool('post_shift', True, 'apply post shift')\nflags.DEFINE_float('sampling_radius', 1.0, 'sampling radius')\n\nEMBEDDING_DIM = 4\nLEARNING_RATE = 0.001\nNUM_PARALLEL_ALPHAS = 5\nNUM_ALPHA_BATCHES = 10\nBATCH_SIZE = 100\nTRAINING_STEPS = 10000\nOUTPUT_DIM = 1\nCOVERAGE = 0.278\n\nFEATURES = [\n 'population', 'householdsize', 'agePct12t21', 'agePct12t29', 'agePct16t24',\n 'agePct65up', 'numbUrban', 'pctUrban', 'medIncome', 'pctWWage',\n 'pctWFarmSelf', 'pctWInvInc', 'pctWSocSec', 'pctWPubAsst', 'pctWRetire',\n 'medFamInc', 'perCapInc', 'NumUnderPov', 'PctPopUnderPov',\n 'PctLess9thGrade', 'PctNotHSGrad', 'PctBSorMore', 'PctUnemployed',\n 'PctEmploy', 'PctEmplManu', 'PctEmplProfServ', 'PctOccupManu',\n 'PctOccupMgmtProf', 'MalePctDivorce', 'MalePctNevMarr', 'FemalePctDiv',\n 'TotalPctDiv', 'PersPerFam', 'PctFam2Par', 'PctKids2Par',\n 'PctYoungKids2Par', 'PctTeen2Par', 'PctWorkMomYoungKids', 'PctWorkMom',\n 'NumIlleg', 'PctIlleg', 'NumImmig', 'PctImmigRecent', 'PctImmigRec5',\n 'PctImmigRec8', 'PctImmigRec10', 'PctRecentImmig', 'PctRecImmig5',\n 'PctRecImmig8', 'PctRecImmig10', 'PctSpeakEnglOnly', 'PctNotSpeakEnglWell',\n 'PctLargHouseFam', 'PctLargHouseOccup', 'PersPerOccupHous',\n 
'PersPerOwnOccHous', 'PersPerRentOccHous', 'PctPersOwnOccup',\n 'PctPersDenseHous', 'PctHousLess3BR', 'MedNumBR', 'HousVacant',\n 'PctHousOccup', 'PctHousOwnOcc', 'PctVacantBoarded', 'PctVacMore6Mos',\n 'MedYrHousBuilt', 'PctHousNoPhone', 'PctWOFullPlumb', 'OwnOccLowQuart',\n 'OwnOccMedVal', 'OwnOccHiQuart', 'RentLowQ', 'RentMedian', 'RentHighQ',\n 'MedRent', 'MedRentPctHousInc', 'MedOwnCostPctInc', 'MedOwnCostPctIncNoMtg',\n 'NumInShelters', 'NumStreet', 'PctForeignBorn', 'PctBornSameState',\n 'PctSameHouse85', 'PctSameCity85', 'PctSameState85', 'LandArea', 'PopDens',\n 'PctUsePubTrans', 'racepctblack', 'racePctAsian', 'racePctWhite',\n 'racePctHisp'\n]\n\n\ndef get_fpr(sorted_group, cut_index):\n n = sum(label == 0.0 for (_, label) in sorted_group)\n fp = sum(label == 0.0 for (_, label) in sorted_group[:cut_index])\n return float(fp) / n\n\n\ndef errors(sorted_group, cut_index):\n fp = sum(label == 0.0 for (_, label) in sorted_group[:cut_index])\n fn = sum(label == 1.0 for (_, label) in sorted_group[cut_index:])\n return fp + fn\n\n\ndef find_threshold(labels, predictions, wqs, post_shift):\n \"\"\"Finds the post shift threshold for each group.\"\"\"\n if post_shift:\n sorted_groups = []\n for q in range(1, 5):\n sorted_group = sorted(\n [(prediction[0], label)\n for (prediction, label, wq) in zip(predictions, labels, wqs)\n if wq == q],\n reverse=True)\n sorted_groups.append(sorted_group)\n\n cut_indices = [\n int(len(sorted_group) * COVERAGE) for sorted_group in sorted_groups\n ]\n\n for _ in range(1000):\n fprs = [\n get_fpr(sorted_group, cut_index)\n for (sorted_group, cut_index) in zip(sorted_groups, cut_indices)\n ]\n min_fpr_index = np.argmin(fprs)\n max_fpr_index = np.argmax(fprs)\n cut_indices[min_fpr_index] = min(cut_indices[min_fpr_index] + 1,\n len(sorted_groups[min_fpr_index]))\n cut_indices[max_fpr_index] = max(cut_indices[max_fpr_index] - 1, 0)\n\n thresholds = [sorted_groups[q][cut_indices[q]][0] for q in range(4)]\n return thresholds\n else:\n return [np.percentile(predictions, 100 - COVERAGE * 100)] * 4\n\n\ndef metrics(labels, predictions, wqs, thresholds):\n \"\"\"Metrics used for the experiment.\"\"\"\n sorted_groups = []\n cut_indices = []\n for q in range(1, 5):\n sorted_group = sorted(\n [(prediction[0], label)\n for (prediction, label, wq) in zip(predictions, labels, wqs)\n if wq == q],\n reverse=True)\n sorted_groups.append(sorted_group)\n cut_index = int(len(sorted_group) * 0.3)\n for i in range(len(sorted_group)):\n if sorted_group[i][0] <= thresholds[q - 1]:\n cut_index = i\n break\n cut_indices.append(cut_index)\n\n fprs = [\n get_fpr(sorted_group, cut_index)\n for (sorted_group, cut_index) in zip(sorted_groups, cut_indices)\n ]\n fairness_violation = max(fprs) - min(fprs)\n\n errs = [\n errors(sorted_group, cut_index)\n for (sorted_group, cut_index) in zip(sorted_groups, cut_indices)\n ]\n acc = 1.0 - np.sum(errs) / len(labels)\n\n return (acc, fairness_violation)\n\n\ndef classifier(x):\n logits = tf.layers.dense(inputs=x, units=1)\n return logits\n\n\ndef optimization(logits, y, population, embedding, alpha):\n \"\"\"Loss and optimization method.\"\"\"\n if FLAGS.uniform_weights:\n weights = tf.ones(shape=tf.shape(population))\n else:\n weights = tf.where(\n tf.greater(population, 0.01), tf.fill(tf.shape(population), 0.16),\n tf.fill(tf.shape(population), 2.5))\n if not FLAGS.propensity_weights:\n weights = tf.sigmoid(tf.matmul(embedding, alpha)) * weights\n weights /= tf.reduce_mean(weights)\n loss = tf.losses.hinge_loss(labels=y, 
logits=logits, weights=weights)\n optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)\n return optimizer, loss\n\n\ndef sample_from_ball(size=(1, 1), sampling_radius=2):\n count, dim = size\n points = np.random.normal(size=size)\n points /= np.linalg.norm(points, axis=1)[:, np.newaxis]\n scales = np.power(np.random.uniform(size=(count, 1)), 1 / dim)\n points *= scales * sampling_radius\n return points\n\n\ndef main(_):\n num_steps_autoencoder = 0 if FLAGS.uniform_weights else TRAINING_STEPS\n\n training_df = pd.read_csv(FLAGS.training_data_path, header=0, sep=',')\n testing_df = pd.read_csv(FLAGS.testing_data_path, header=0, sep=',')\n validation_df = pd.read_csv(FLAGS.validation_data_path, header=0, sep=',')\n\n train_labels = training_df['label']\n validation_labels = validation_df['label']\n test_labels = testing_df['label']\n train_population = training_df['population']\n train_features = training_df[FEATURES]\n validation_features = validation_df[FEATURES]\n test_features = testing_df[FEATURES]\n train_wqs = training_df['racePctWhite_quantile']\n validation_wqs = validation_df['racePctWhite_quantile']\n test_wqs = testing_df['racePctWhite_quantile']\n\n tf.reset_default_graph()\n x = tf.placeholder(tf.float32, shape=(None, len(FEATURES)), name='x')\n y = tf.placeholder(tf.float32, shape=(None, OUTPUT_DIM), name='y')\n population = tf.placeholder(\n tf.float32, shape=(None, OUTPUT_DIM), name='population')\n\n xy = tf.concat([x, y], axis=1)\n autoencoder_layer1 = tf.layers.dense(\n inputs=xy, units=10, activation=tf.sigmoid)\n autoencoder_embedding_layer = tf.layers.dense(\n inputs=autoencoder_layer1, units=EMBEDDING_DIM, activation=tf.sigmoid)\n autoencoder_layer3 = tf.layers.dense(\n inputs=autoencoder_embedding_layer, units=10, activation=tf.sigmoid)\n autoencoder_out_x = tf.layers.dense(\n inputs=autoencoder_layer3, units=len(FEATURES))\n autoencoder_out_y_logits = tf.layers.dense(\n inputs=autoencoder_layer3, units=OUTPUT_DIM)\n\n autoencoder_y_loss = tf.losses.hinge_loss(\n labels=y, logits=autoencoder_out_y_logits)\n autoencoder_x_loss = tf.losses.mean_squared_error(\n labels=x, predictions=autoencoder_out_x)\n autoencoder_loss = autoencoder_x_loss + autoencoder_y_loss\n autoencoder_optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(\n autoencoder_loss)\n\n parallel_logits = []\n parallel_losses = []\n parallel_optimizers = []\n\n parallel_alphas = tf.placeholder(\n tf.float32,\n shape=(NUM_PARALLEL_ALPHAS, EMBEDDING_DIM),\n name='parallel_alphas')\n unstack_parallel_alphas = tf.unstack(parallel_alphas, axis=0)\n embedding = tf.placeholder(\n tf.float32, shape=(None, EMBEDDING_DIM), name='embedding')\n\n with tf.variable_scope('classifiers'):\n for alpha_index in range(NUM_PARALLEL_ALPHAS):\n logits = classifier(x)\n alpha = tf.reshape(\n unstack_parallel_alphas[alpha_index], shape=[EMBEDDING_DIM, 1])\n optimizer, loss = optimization(logits, y, population, embedding, alpha)\n\n parallel_logits.append(logits)\n parallel_losses.append(loss)\n parallel_optimizers.append(optimizer)\n\n init = tf.global_variables_initializer()\n classifiers_init = tf.variables_initializer(\n tf.global_variables(scope='classifiers'))\n\n kernel = RBF(\n length_scale=FLAGS.sampling_radius,\n length_scale_bounds=(FLAGS.sampling_radius * 1e-3, FLAGS.sampling_radius *\n 1e3)) * ConstantKernel(1.0, (1e-3, 1e3))\n\n alphas = np.zeros(shape=(0, EMBEDDING_DIM))\n validation_metrics = []\n test_metrics = []\n\n with tf.Session() as sess:\n sess.run(init)\n # Training autoencoder\n for _ in 
range(num_steps_autoencoder):\n batch_index = random.sample(range(len(train_labels)), BATCH_SIZE)\n batch_x = train_features.iloc[batch_index, :].values\n batch_y = train_labels.iloc[batch_index].values.reshape(BATCH_SIZE, 1)\n _, _ = sess.run([autoencoder_optimizer, autoencoder_loss],\n feed_dict={\n x: batch_x,\n y: batch_y,\n })\n\n # GetCandidatesAlpha (Algorithm 2 in paper)\n for alpha_batch_index in range(NUM_ALPHA_BATCHES):\n sess.run(classifiers_init)\n if FLAGS.uniform_weights:\n alpha_batch = np.zeros(shape=(NUM_PARALLEL_ALPHAS, EMBEDDING_DIM))\n elif alpha_batch_index == 0:\n # We first start uniformly.\n alpha_batch = sample_from_ball(\n size=(NUM_PARALLEL_ALPHAS, EMBEDDING_DIM),\n sampling_radius=FLAGS.sampling_radius)\n else:\n # Use UCB to generate candidates.\n alpha_batch = np.zeros(shape=(0, EMBEDDING_DIM))\n sample_alphas = np.copy(alphas)\n sample_validation_metrics = [m[0] for m in validation_metrics]\n candidates = sample_from_ball(\n size=(10000, EMBEDDING_DIM), sampling_radius=FLAGS.sampling_radius)\n for alpha_index in range(NUM_PARALLEL_ALPHAS):\n gp = GaussianProcessRegressor(\n kernel=kernel, alpha=1e-1).fit(sample_alphas,\n sample_validation_metrics)\n\n metric_mles, metric_stds = gp.predict(candidates, return_std=True)\n metric_lcbs = metric_mles - 1.0 * metric_stds\n\n best_index = np.argmin(metric_lcbs)\n best_alpha = [candidates[best_index]]\n best_alpha_metric_ucb = metric_mles[best_index] \\\n + 1.0 * metric_stds[best_index]\n alpha_batch = np.concatenate([alpha_batch, best_alpha])\n\n # Add candidate to the GP, assuming the metric observation is the LCB.\n sample_alphas = np.concatenate([sample_alphas, best_alpha])\n sample_validation_metrics.append(best_alpha_metric_ucb)\n\n # Training classifiers\n for _ in range(TRAINING_STEPS):\n batch_index = random.sample(range(len(train_labels)), BATCH_SIZE)\n batch_x = train_features.iloc[batch_index, :].values\n batch_y = train_labels.iloc[batch_index].values.reshape(BATCH_SIZE, 1)\n batch_population = train_population.iloc[batch_index].values.reshape(\n BATCH_SIZE, 1)\n batch_embedding = sess.run(\n autoencoder_embedding_layer, feed_dict={\n x: batch_x,\n y: batch_y,\n })\n _, _ = sess.run(\n [parallel_optimizers, parallel_losses],\n feed_dict={\n x: batch_x,\n y: batch_y,\n population: batch_population,\n embedding: batch_embedding,\n parallel_alphas: alpha_batch,\n })\n\n parallel_train_logits = sess.run(\n parallel_logits,\n feed_dict={\n x: train_features.values,\n y: train_labels.values.reshape(len(train_labels), 1),\n })\n alphas = np.concatenate([alphas, alpha_batch])\n parallel_validation_logits = sess.run(\n parallel_logits,\n feed_dict={\n x: validation_features.values,\n y: validation_labels.values.reshape(len(validation_labels), 1),\n })\n parallel_test_logits = sess.run(\n parallel_logits,\n feed_dict={\n x: test_features.values,\n y: test_labels.values.reshape(len(test_labels), 1),\n })\n parallel_thresholds = [\n find_threshold(train_labels, train_logits, train_wqs,\n FLAGS.post_shift)\n for train_logits in parallel_train_logits\n ]\n logits_thresholds = zip(parallel_validation_logits, parallel_thresholds)\n parallel_validation_metrics = [\n metrics(validation_labels, logits, validation_wqs, thresholds)\n for (logits, thresholds) in logits_thresholds\n ]\n validation_metrics.extend(parallel_validation_metrics)\n parallel_test_metrics = [\n metrics(test_labels, test_logits, test_wqs, thresholds)\n for (test_logits,\n thresholds) in zip(parallel_test_logits, parallel_thresholds)\n ]\n 
test_metrics.extend(parallel_test_metrics)\n\n best_observed_index = np.argmin([m[0] for m in validation_metrics])\n print('[metric] validation_acc={}'.format(\n validation_metrics[best_observed_index][0]))\n print('[metric] validation_violation={}'.format(\n validation_metrics[best_observed_index][1]))\n print('[metric] test_acc={}'.format(test_metrics[best_observed_index][0]))\n print('[metric] test_violation={}'.format(\n test_metrics[best_observed_index][1]))\n\n return 0\n\n\nif __name__ == '__main__':\n app.run(main)\n",
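"# Illustrative sketch, not part of the experiment above: the candidate\n# selection of GetCandidatesAlpha (Algorithm 2) in isolation. A Gaussian\n# process is fit to (alpha, metric) observations; each new candidate is the\n# sampled point with the lowest lower-confidence bound, and its\n# upper-confidence bound is fed back as a pseudo-observation so that one\n# batch of candidates spreads out.\nimport numpy as np\nfrom sklearn.gaussian_process import GaussianProcessRegressor\n\n\ndef select_alpha_batch(observed_alphas, observed_metrics, candidates,\n                       batch_size=5):\n  alphas = np.copy(observed_alphas)\n  metrics = list(observed_metrics)\n  chosen = []\n  for _ in range(batch_size):\n    gp = GaussianProcessRegressor(alpha=1e-1).fit(alphas, metrics)\n    mean, std = gp.predict(candidates, return_std=True)\n    best = int(np.argmin(mean - 1.0 * std))  # lowest LCB\n    chosen.append(candidates[best])\n    alphas = np.concatenate([alphas, candidates[best:best + 1]])\n    metrics.append(mean[best] + 1.0 * std[best])  # pseudo-observe the UCB\n  return np.stack(chosen)\n",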
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint: python3\n\"\"\"Tests for jax_dft.scf.\"\"\"\n\nimport functools\nimport os\nimport shutil\nimport tempfile\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nfrom jax import random\nfrom jax import tree_util\nfrom jax.config import config\nfrom jax.experimental import stax\nimport jax.numpy as jnp\nimport numpy as np\nfrom scipy import optimize\n\nfrom jax_dft import neural_xc\nfrom jax_dft import np_utils\nfrom jax_dft import scf\nfrom jax_dft import utils\n\n\n# Set the default dtype as float64\nconfig.update('jax_enable_x64', True)\n\n\nclass ScfTest(parameterized.TestCase):\n\n def test_discrete_laplacian(self):\n np.testing.assert_allclose(\n scf.discrete_laplacian(6),\n [\n [-5. / 2, 4. / 3, -1. / 12, 0., 0., 0.],\n [4. / 3, -5. / 2, 4. / 3, -1. / 12, 0., 0.],\n [-1. / 12, 4. / 3, -5. / 2, 4. / 3, -1. / 12, 0.],\n [0., -1. / 12, 4. / 3, -5. / 2, 4. / 3, -1. / 12],\n [0., 0., -1. / 12, 4. / 3, -5. / 2, 4. / 3],\n [0., 0., 0., -1. / 12, 4. / 3, -5. / 2],\n ],\n atol=1e-6)\n\n def test_get_kinetic_matrix(self):\n np.testing.assert_allclose(\n scf.get_kinetic_matrix(grids=jnp.linspace(-10, 10, 6)),\n [\n [0.078125, -0.04166667, 0.00260417, 0., 0., 0.],\n [-0.04166667, 0.078125, -0.04166667, 0.00260417, 0., 0.],\n [0.00260417, -0.04166667, 0.078125, -0.04166667, 0.00260417, 0.],\n [0., 0.00260417, -0.04166667, 0.078125, -0.04166667, 0.00260417],\n [0., 0., 0.00260417, -0.04166667, 0.078125, -0.04166667],\n [0., 0., 0., 0.00260417, -0.04166667, 0.078125],\n ],\n atol=1e-6)\n\n @parameterized.parameters(\n # The normalized wavefunctions are\n # [[0., 0., 1. / sqrt(0.1), 0., 0.],\n # [0., -1. / sqrt(0.2), 0., 1. / sqrt(0.2), 0.]]\n # Intensities\n # [[0., 0., 10., 0., 0.],\n # [0., 5., 0., 5., 0.]]\n (1, np.array([0., 0., 10., 0., 0.])),\n (2, np.array([0., 0., 20., 0., 0.])),\n (3, np.array([0., 5., 20., 5., 0.])),\n (4, np.array([0., 10., 20., 10., 0.])),\n )\n def test_wavefunctions_to_density(self, num_electrons, expected_density):\n np.testing.assert_allclose(\n scf.wavefunctions_to_density(\n num_electrons=num_electrons,\n wavefunctions=jnp.array([\n [0., 0., 1., 0., 0.],\n [0., -1., 0., 1., 0.],\n ]),\n grids=jnp.arange(5) * 0.1),\n expected_density)\n\n @parameterized.parameters(\n (1, -1.), # total_eigen_energies = -1.\n (2, -2.), # total_eigen_energies = -1. - 1.\n (3, 0.), # total_eigen_energies = -1. - 1. + 2.\n (4, 2.), # total_eigen_energies = -1. - 1. + 2. + 2.\n (5, 7.), # total_eigen_energies = -1. - 1. + 2. + 2. + 5.\n (6, 12.), # total_eigen_energies = -1. - 1. + 2. + 2. + 5. + 5.\n )\n def test_get_total_eigen_energies(\n self, num_electrons, expected_total_eigen_energies):\n self.assertAlmostEqual(\n scf.get_total_eigen_energies(\n num_electrons=num_electrons,\n eigen_energies=jnp.array([-1., 2., 5.])),\n expected_total_eigen_energies)\n\n @parameterized.parameters(\n (1, 0.), # gap = -1. 
- (-1.)\n      (2, 3.),  # gap = 2. - (-1.)\n      (3, 0.),  # gap = 2. - 2.\n      (4, 7.),  # gap = 9. - 2.\n      (5, 0.),  # gap = 9. - 9.\n      (6, 78.),  # gap = 87. - 9.\n  )\n  def test_get_gap(self, num_electrons, expected_gap):\n    self.assertAlmostEqual(\n        scf.get_gap(\n            num_electrons=num_electrons,\n            eigen_energies=jnp.array([-1., 2., 9., 87.])),\n        expected_gap)\n\n  @parameterized.parameters(\n      (1, 0.5, 0.),  # total_eigen_energies = 0.5\n      (2, 1., 1.),  # total_eigen_energies = 0.5 + 0.5\n      (3, 2.5, 0.),  # total_eigen_energies = 0.5 + 0.5 + 1.5\n      (4, 4., 1.),  # total_eigen_energies = 0.5 + 0.5 + 1.5 + 1.5\n  )\n  def test_solve_noninteracting_system(\n      self, num_electrons, expected_total_eigen_energies, expected_gap):\n    # Quantum harmonic oscillator.\n    grids = jnp.linspace(-10, 10, 1001)\n    density, total_eigen_energies, gap = scf.solve_noninteracting_system(\n        external_potential=0.5 * grids ** 2,\n        num_electrons=num_electrons,\n        grids=grids)\n    self.assertTupleEqual(density.shape, (1001,))\n    self.assertAlmostEqual(\n        float(total_eigen_energies), expected_total_eigen_energies, places=7)\n    self.assertAlmostEqual(float(gap), expected_gap, places=7)\n\n  @parameterized.parameters(utils.soft_coulomb, utils.exponential_coulomb)\n  def test_get_hartree_energy(self, interaction_fn):\n    grids = jnp.linspace(-5, 5, 11)\n    dx = utils.get_dx(grids)\n    density = utils.gaussian(grids=grids, center=1., sigma=1.)\n\n    # Compute the expected Hartree energy by nested for loops.\n    expected_hartree_energy = 0.\n    for x_0, n_0 in zip(grids, density):\n      for x_1, n_1 in zip(grids, density):\n        expected_hartree_energy += 0.5 * n_0 * n_1 * interaction_fn(\n            x_0 - x_1) * dx ** 2\n\n    self.assertAlmostEqual(\n        float(scf.get_hartree_energy(\n            density=density, grids=grids, interaction_fn=interaction_fn)),\n        float(expected_hartree_energy))\n\n  @parameterized.parameters(utils.soft_coulomb, utils.exponential_coulomb)\n  def test_get_hartree_potential(self, interaction_fn):\n    grids = jnp.linspace(-5, 5, 11)\n    dx = utils.get_dx(grids)\n    density = utils.gaussian(grids=grids, center=1., sigma=1.)\n\n    # Compute the expected Hartree potential by nested for loops.\n    expected_hartree_potential = np.zeros_like(grids)\n    for i, x_0 in enumerate(grids):\n      for x_1, n_1 in zip(grids, density):\n        expected_hartree_potential[i] += np.sum(\n            n_1 * interaction_fn(x_0 - x_1)) * dx\n\n    np.testing.assert_allclose(\n        scf.get_hartree_potential(\n            density=density, grids=grids, interaction_fn=interaction_fn),\n        expected_hartree_potential)\n\n  def test_get_external_potential_energy(self):\n    grids = jnp.linspace(-5, 5, 10001)\n    self.assertAlmostEqual(\n        float(scf.get_external_potential_energy(\n            external_potential=-jnp.exp(-grids ** 2),\n            density=jnp.exp(-(grids - 1) ** 2),\n            grids=grids)),\n        # Analytical solution:\n        # integrate(-exp(-x^2) * exp(-(x - 1) ^ 2), {x, -inf, inf})\n        # = -sqrt(pi / (2 * e))\n        -np.sqrt(np.pi / (2 * np.e)))\n\n  def test_get_xc_energy(self):\n    grids = jnp.linspace(-5, 5, 10001)\n    # We use the form of 3d LDA exchange functional as an example. So the\n    # correlation contribution is 0.\n    # exchange energy = -0.73855 \\int n^(4 / 3) dx\n    # exchange energy density = -0.73855 n^(1 / 3)\n    # Compute the exchange energy on density exp(-(x - 1) ^ 2):\n    # -0.73855 * integrate(exp(-(x - 1) ^ 2) ^ (4 / 3), {x, -inf, inf})\n    # = -1.13367\n    xc_energy_density_fn = lambda density: -0.73855 * density ** (1 / 3)\n    density = jnp.exp(-(grids - 1) ** 2)\n    self.assertAlmostEqual(\n        float(scf.get_xc_energy(\n            density=density,\n            xc_energy_density_fn=xc_energy_density_fn,\n            grids=grids)),\n        -1.13367,\n        places=5)\n\n  def test_get_xc_potential(self):\n    grids = jnp.linspace(-5, 5, 10001)\n    # We use the form of 3d LDA exchange functional as an example. So the\n    # correlation contribution is 0.\n    # exchange energy = -0.73855 \\int n^(4 / 3) dx\n    # exchange potential should be -0.73855 * (4 / 3) n^(1 / 3)\n    # by taking functional derivative on exchange energy.\n    xc_energy_density_fn = lambda density: -0.73855 * density ** (1 / 3)\n    density = jnp.exp(-(grids - 1) ** 2)\n    np.testing.assert_allclose(\n        scf.get_xc_potential(\n            density,\n            xc_energy_density_fn=xc_energy_density_fn,\n            grids=grids),\n        -0.73855 * (4 / 3) * density ** (1 / 3))\n\n  def test_get_xc_potential_hartree(self):\n    grids = jnp.linspace(-5, 5, 10001)\n    density = utils.gaussian(grids=grids, center=1., sigma=1.)\n    def half_hartree_potential(density):\n      return 0.5 * scf.get_hartree_potential(\n          density=density,\n          grids=grids,\n          interaction_fn=utils.exponential_coulomb)\n\n    np.testing.assert_allclose(\n        scf.get_xc_potential(\n            density=density,\n            xc_energy_density_fn=half_hartree_potential,\n            grids=grids),\n        scf.get_hartree_potential(\n            density, grids=grids, interaction_fn=utils.exponential_coulomb))\n\n\nclass KohnShamStateTest(absltest.TestCase):\n\n  def setUp(self):\n    super().setUp()\n    self.test_dir = tempfile.mkdtemp()\n\n  def tearDown(self):\n    shutil.rmtree(self.test_dir)\n    super().tearDown()\n\n  def test_save_and_load_state(self):\n    # Make up a random KohnShamState.\n    state = scf.KohnShamState(\n        density=np.random.random((5, 100)),\n        total_energy=np.random.random(5,),\n        locations=np.random.random((5, 2)),\n        nuclear_charges=np.random.random((5, 2)),\n        external_potential=np.random.random((5, 100)),\n        grids=np.random.random((5, 100)),\n        num_electrons=np.random.randint(10, size=5),\n        hartree_potential=np.random.random((5, 100)))\n    save_dir = os.path.join(self.test_dir, 'test_state')\n\n    scf.save_state(save_dir, state)\n    loaded_state = scf.load_state(save_dir)\n\n    # Check fields.\n    self.assertEqual(state._fields, loaded_state._fields)\n    # Check values.\n    for field in state._fields:\n      value = getattr(state, field)\n      if value is None:\n        self.assertIsNone(getattr(loaded_state, field))\n      else:\n        np.testing.assert_allclose(value, getattr(loaded_state, field))\n\n\nclass KohnShamIterationTest(parameterized.TestCase):\n\n  def setUp(self):\n    super(KohnShamIterationTest, self).setUp()\n    self.grids = jnp.linspace(-5, 5, 101)\n    self.num_electrons = 2\n\n  def _create_testing_initial_state(self, interaction_fn):\n    locations = jnp.array([-0.5, 0.5])\n    nuclear_charges = jnp.array([1, 1])\n    return scf.KohnShamState(\n        density=self.num_electrons * utils.gaussian(\n            grids=self.grids, center=0., sigma=1.),\n        # Set initial energy as inf, the actual value is not used in Kohn-Sham\n        # calculation.\n        total_energy=jnp.inf,\n        locations=locations,\n        nuclear_charges=nuclear_charges,\n        external_potential=utils.get_atomic_chain_potential(\n            grids=self.grids,\n            locations=locations,\n            nuclear_charges=nuclear_charges,\n            interaction_fn=interaction_fn),\n        grids=self.grids,\n        num_electrons=self.num_electrons)\n\n  def _test_state(self, state, initial_state):\n    # The density in the next state should normalize to number of electrons.\n    self.assertAlmostEqual(\n        float(jnp.sum(state.density) * utils.get_dx(self.grids)),\n        self.num_electrons)\n    # The total energy should be finite after one iteration.\n    self.assertTrue(jnp.isfinite(state.total_energy))\n    self.assertLen(state.hartree_potential, len(state.grids))\n    self.assertLen(state.xc_potential, len(state.grids))\n    # locations, nuclear_charges, external_potential, grids and num_electrons\n    # remain unchanged.\n    np.testing.assert_allclose(initial_state.locations, state.locations)\n    np.testing.assert_allclose(\n        initial_state.nuclear_charges, state.nuclear_charges)\n    np.testing.assert_allclose(\n        initial_state.external_potential, state.external_potential)\n    np.testing.assert_allclose(initial_state.grids, state.grids)\n    self.assertEqual(initial_state.num_electrons, state.num_electrons)\n    self.assertGreater(state.gap, 0)\n\n  @parameterized.parameters(\n      (utils.soft_coulomb, True),\n      (utils.soft_coulomb, False),\n      (utils.exponential_coulomb, True),\n      (utils.exponential_coulomb, False),\n  )\n  def test_kohn_sham_iteration(\n      self, interaction_fn, enforce_reflection_symmetry):\n    initial_state = self._create_testing_initial_state(interaction_fn)\n    next_state = scf.kohn_sham_iteration(\n        state=initial_state,\n        num_electrons=self.num_electrons,\n        # Use 3d LDA exchange functional and zero correlation functional.\n        xc_energy_density_fn=tree_util.Partial(\n            lambda density: -0.73855 * density ** (1 / 3)),\n        interaction_fn=interaction_fn,\n        enforce_reflection_symmetry=enforce_reflection_symmetry)\n    self._test_state(next_state, initial_state)\n\n  @parameterized.parameters(\n      (utils.soft_coulomb, True),\n      (utils.soft_coulomb, False),\n      (utils.exponential_coulomb, True),\n      (utils.exponential_coulomb, False),\n  )\n  def test_kohn_sham_iteration_neural_xc(\n      self, interaction_fn, enforce_reflection_symmetry):\n    init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(\n        stax.serial(stax.Dense(8), stax.Elu, stax.Dense(1)))\n    params_init = init_fn(rng=random.PRNGKey(0))\n    initial_state = self._create_testing_initial_state(interaction_fn)\n    next_state = scf.kohn_sham_iteration(\n        state=initial_state,\n        num_electrons=self.num_electrons,\n        xc_energy_density_fn=tree_util.Partial(\n            xc_energy_density_fn, params=params_init),\n        interaction_fn=interaction_fn,\n        enforce_reflection_symmetry=enforce_reflection_symmetry)\n    self._test_state(next_state, initial_state)\n\n  def test_kohn_sham_iteration_neural_xc_energy_loss_gradient(self):\n    # The network only has one layer.\n    # The initial params contain weights with shape (1, 1) and bias (1,).\n    init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(\n        stax.serial(stax.Dense(1)))\n    init_params = init_fn(rng=random.PRNGKey(0))\n    initial_state = self._create_testing_initial_state(\n        utils.exponential_coulomb)\n    target_energy = 2.\n    spec, flatten_init_params = np_utils.flatten(init_params)\n\n    def loss(flatten_params, initial_state, target_energy):\n      state = scf.kohn_sham_iteration(\n          state=initial_state,\n          num_electrons=self.num_electrons,\n          xc_energy_density_fn=tree_util.Partial(\n              xc_energy_density_fn,\n              params=np_utils.unflatten(spec, flatten_params)),\n          interaction_fn=utils.exponential_coulomb,\n          enforce_reflection_symmetry=True)\n      return (state.total_energy - target_energy) ** 2\n\n    grad_fn = jax.grad(loss)\n\n    params_grad = grad_fn(\n        flatten_init_params,\n        initial_state=initial_state,\n        target_energy=target_energy)\n\n    # Check gradient values.\n    np.testing.assert_allclose(params_grad, [-1.40994668, -2.58881225])\n\n    # Check whether the gradient values match the numerical gradient.\n    np.testing.assert_allclose(\n        optimize.approx_fprime(\n            xk=flatten_init_params,\n            f=functools.partial(\n                loss, initial_state=initial_state, target_energy=target_energy),\n            epsilon=1e-9),\n        params_grad, atol=3e-4)\n\n  def test_kohn_sham_iteration_neural_xc_density_loss_gradient(self):\n    # The network only has one layer.\n    # The initial params contain weights with shape (1, 1) and bias (1,).\n    init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(\n        stax.serial(stax.Dense(1)))\n    init_params = init_fn(rng=random.PRNGKey(0))\n    initial_state = self._create_testing_initial_state(\n        utils.exponential_coulomb)\n    target_density = (\n        utils.gaussian(grids=self.grids, center=-0.5, sigma=1.)\n        + utils.gaussian(grids=self.grids, center=0.5, sigma=1.))\n    spec, flatten_init_params = np_utils.flatten(init_params)\n\n    def loss(flatten_params, initial_state, target_density):\n      state = scf.kohn_sham_iteration(\n          state=initial_state,\n          num_electrons=self.num_electrons,\n          xc_energy_density_fn=tree_util.Partial(\n              xc_energy_density_fn,\n              params=np_utils.unflatten(spec, flatten_params)),\n          interaction_fn=utils.exponential_coulomb,\n          enforce_reflection_symmetry=False)\n      return jnp.sum(jnp.abs(state.density - target_density)) * utils.get_dx(\n          self.grids)\n\n    grad_fn = jax.grad(loss)\n\n    params_grad = grad_fn(\n        flatten_init_params,\n        initial_state=initial_state,\n        target_density=target_density)\n\n    # Check gradient values.\n    np.testing.assert_allclose(params_grad, [0.2013181, 0.], atol=1e-7)\n\n    # Check whether the gradient values match the numerical gradient.\n    np.testing.assert_allclose(\n        optimize.approx_fprime(\n            xk=flatten_init_params,\n            f=functools.partial(\n                loss,\n                initial_state=initial_state,\n                target_density=target_density),\n            epsilon=1e-9),\n        params_grad, atol=1e-4)\n\n  def test_kohn_sham_iteration_neural_xc_density_loss_gradient_symmetry(self):\n    # The network only has one layer.\n    # The initial params contain weights with shape (1, 1) and bias (1,).\n    init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(\n        stax.serial(stax.Dense(1)))\n    init_params = init_fn(rng=random.PRNGKey(0))\n    initial_state = self._create_testing_initial_state(\n        utils.exponential_coulomb)\n    target_density = (\n        utils.gaussian(grids=self.grids, center=-0.5, sigma=1.)\n        + utils.gaussian(grids=self.grids, center=0.5, sigma=1.))\n    spec, flatten_init_params = np_utils.flatten(init_params)\n\n    def loss(flatten_params, initial_state, target_density):\n      state = scf.kohn_sham_iteration(\n          state=initial_state,\n          num_electrons=self.num_electrons,\n          xc_energy_density_fn=tree_util.Partial(\n              xc_energy_density_fn,\n              params=np_utils.unflatten(spec, flatten_params)),\n          interaction_fn=utils.exponential_coulomb,\n          enforce_reflection_symmetry=True)\n      return jnp.sum(jnp.abs(state.density - target_density)) * utils.get_dx(\n          self.grids)\n\n    grad_fn = jax.grad(loss)\n\n    params_grad = grad_fn(\n        flatten_init_params,\n        initial_state=initial_state,\n        target_density=target_density)\n\n    # Check gradient values.\n    np.testing.assert_allclose(params_grad, [0.2013181, 0.], atol=1e-7)\n\n    # Check whether the gradient values match the numerical gradient.\n    np.testing.assert_allclose(\n        optimize.approx_fprime(\n            xk=flatten_init_params,\n            f=functools.partial(\n                loss,\n                initial_state=initial_state,\n                target_density=target_density),\n            epsilon=1e-9),\n        params_grad, atol=1e-4)\n\n\nclass KohnShamTest(parameterized.TestCase):\n\n  def setUp(self):\n    super(KohnShamTest, self).setUp()\n    self.grids = jnp.linspace(-5, 5, 101)\n    self.num_electrons = 2\n    self.locations = jnp.array([-0.5, 0.5])\n    self.nuclear_charges = jnp.array([1, 1])\n\n  def _create_testing_external_potential(self, interaction_fn):\n    return utils.get_atomic_chain_potential(\n        grids=self.grids,\n        locations=self.locations,\n        nuclear_charges=self.nuclear_charges,\n        interaction_fn=interaction_fn)\n\n  def _test_state(self, state, external_potential):\n    # The density in the final state should normalize to number of electrons.\n    self.assertAlmostEqual(\n        float(jnp.sum(state.density) * utils.get_dx(self.grids)),\n        self.num_electrons)\n    # The total energy should be finite after iterations.\n    self.assertTrue(jnp.isfinite(state.total_energy))\n    self.assertLen(state.hartree_potential, len(state.grids))\n    self.assertLen(state.xc_potential, len(state.grids))\n    # locations, nuclear_charges, external_potential, grids and num_electrons\n    # remain unchanged.\n    np.testing.assert_allclose(state.locations, self.locations)\n    np.testing.assert_allclose(state.nuclear_charges, self.nuclear_charges)\n    np.testing.assert_allclose(\n        external_potential, state.external_potential)\n    np.testing.assert_allclose(state.grids, self.grids)\n    self.assertEqual(state.num_electrons, self.num_electrons)\n    self.assertGreater(state.gap, 0)\n\n  @parameterized.parameters(utils.soft_coulomb, utils.exponential_coulomb)\n  def test_kohn_sham(self, interaction_fn):\n    state = scf.kohn_sham(\n        locations=self.locations,\n        nuclear_charges=self.nuclear_charges,\n        num_electrons=self.num_electrons,\n        num_iterations=3,\n        grids=self.grids,\n        # Use 3d LDA exchange functional and zero correlation functional.\n        xc_energy_density_fn=tree_util.Partial(\n            lambda density: -0.73855 * density ** (1 / 3)),\n        interaction_fn=interaction_fn)\n    for single_state in scf.state_iterator(state):\n      self._test_state(\n          single_state,\n          self._create_testing_external_potential(interaction_fn))\n\n  @parameterized.parameters(\n      (-1., [False, False, False]),\n      (jnp.inf, [True, True, True]),\n  )\n  def test_kohn_sham_convergence(\n      self, density_mse_converge_tolerance, expected_converged):\n    state = scf.kohn_sham(\n        locations=self.locations,\n        nuclear_charges=self.nuclear_charges,\n        num_electrons=self.num_electrons,\n        num_iterations=3,\n        grids=self.grids,\n        # Use 3d LDA exchange functional and zero correlation functional.\n        xc_energy_density_fn=tree_util.Partial(\n            lambda density: -0.73855 * density ** (1 / 3)),\n        interaction_fn=utils.exponential_coulomb,\n        density_mse_converge_tolerance=density_mse_converge_tolerance)\n    np.testing.assert_allclose(state.converged, expected_converged)\n\n  @parameterized.parameters(utils.soft_coulomb, utils.exponential_coulomb)\n  def test_kohn_sham_neural_xc(self, interaction_fn):\n    init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(\n        stax.serial(stax.Dense(8), stax.Elu, stax.Dense(1)))\n    params_init = init_fn(rng=random.PRNGKey(0))\n    state = scf.kohn_sham(\n        locations=self.locations,\n        nuclear_charges=self.nuclear_charges,\n        num_electrons=self.num_electrons,\n        num_iterations=3,\n        grids=self.grids,\n        xc_energy_density_fn=tree_util.Partial(\n            xc_energy_density_fn, params=params_init),\n        interaction_fn=interaction_fn)\n    for single_state in scf.state_iterator(state):\n      self._test_state(\n          single_state,\n          self._create_testing_external_potential(interaction_fn))\n\n  def test_kohn_sham_neural_xc_energy_loss_gradient(self):\n    # The network only has one layer.\n    # The initial params contain weights with shape (1, 1) and bias (1,).\n    init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(\n        stax.serial(stax.Dense(1)))\n    init_params = init_fn(rng=random.PRNGKey(0))\n    target_energy = 2.\n    spec, flatten_init_params = np_utils.flatten(init_params)\n\n    def loss(flatten_params, target_energy):\n      state = scf.kohn_sham(\n          locations=self.locations,\n          nuclear_charges=self.nuclear_charges,\n          num_electrons=self.num_electrons,\n          num_iterations=3,\n          grids=self.grids,\n          xc_energy_density_fn=tree_util.Partial(\n              xc_energy_density_fn,\n              params=np_utils.unflatten(spec, flatten_params)),\n          interaction_fn=utils.exponential_coulomb)\n      final_state = scf.get_final_state(state)\n      return (final_state.total_energy - target_energy) ** 2\n\n    grad_fn = jax.grad(loss)\n\n    params_grad = grad_fn(flatten_init_params, target_energy=target_energy)\n\n    # Check gradient values.\n    np.testing.assert_allclose(params_grad, [-3.908153, -5.448675], atol=1e-6)\n\n    # Check whether the gradient values match the numerical gradient.\n    np.testing.assert_allclose(\n        optimize.approx_fprime(\n            xk=flatten_init_params,\n            f=functools.partial(loss, target_energy=target_energy),\n            epsilon=1e-8),\n        params_grad, atol=5e-3)\n\n  def test_kohn_sham_neural_xc_density_loss_gradient(self):\n    # The network only has one layer.\n    # The initial params contain weights with shape (1, 1) and bias (1,).\n    init_fn, xc_energy_density_fn = neural_xc.local_density_approximation(\n        stax.serial(stax.Dense(1)))\n    init_params = init_fn(rng=random.PRNGKey(0))\n    target_density = (\n        utils.gaussian(grids=self.grids, center=-0.5, sigma=1.)\n        + utils.gaussian(grids=self.grids, center=0.5, sigma=1.))\n    spec, flatten_init_params = np_utils.flatten(init_params)\n\n    def loss(flatten_params, target_density):\n      state = scf.kohn_sham(\n          locations=self.locations,\n          nuclear_charges=self.nuclear_charges,\n          num_electrons=self.num_electrons,\n          num_iterations=3,\n          grids=self.grids,\n          xc_energy_density_fn=tree_util.Partial(\n              xc_energy_density_fn,\n              params=np_utils.unflatten(spec, flatten_params)),\n          interaction_fn=utils.exponential_coulomb,\n          density_mse_converge_tolerance=-1)\n      final_state = scf.get_final_state(state)\n      return jnp.sum(\n          jnp.abs(final_state.density - target_density)) * utils.get_dx(\n              self.grids)\n\n    grad_fn = jax.grad(loss)\n\n    params_grad = grad_fn(flatten_init_params, target_density=target_density)\n\n    # Check gradient values.\n    np.testing.assert_allclose(params_grad, [0.2643006, 0.], atol=2e-6)\n    # Check whether the gradient values match the numerical gradient.\n    np.testing.assert_allclose(\n        optimize.approx_fprime(\n            xk=flatten_init_params,\n            f=functools.partial(loss, target_density=target_density),\n            epsilon=1e-9),\n        params_grad, atol=3e-4)\n\n\nclass GetInitialDensityTest(absltest.TestCase):\n\n  def setUp(self):\n    super().setUp()\n    self.states = scf.KohnShamState(\n        density=np.random.random((5, 100)),\n        total_energy=np.random.random(5,),\n        locations=np.random.random((5, 2)),\n        nuclear_charges=np.random.random((5, 2)),\n        external_potential=np.random.random((5, 100)),\n        grids=np.random.random((5, 100)),\n        num_electrons=np.random.randint(10, size=5))\n\n  def test_get_initial_density_exact(self):\n    np.testing.assert_allclose(\n        scf.get_initial_density(self.states, 'exact'),\n        self.states.density)\n\n  def test_get_initial_density_noninteracting(self):\n    initial_density = scf.get_initial_density(self.states, 'noninteracting')\n    self.assertEqual(initial_density.shape, (5, 100))\n\n  def test_get_initial_density_unknown(self):\n    with self.assertRaisesRegex(\n        ValueError, 'Unknown initialization method foo'):\n      scf.get_initial_density(self.states, 'foo')\n\n\nif __name__ == '__main__':\n  absltest.main()\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for task_set.tasks.fixed_image_conv_test.\"\"\"\nfrom absl.testing import parameterized\n\nfrom task_set import registry\nfrom task_set.tasks import family_test_utils\nfrom task_set.tasks.fixed import fixed_image_conv # pylint: disable=unused-import\nimport tensorflow.compat.v1 as tf\n\n\nclass FixedImageConvTest(family_test_utils.SingleTaskTestCase):\n\n def test_right_number_of_tasks(self):\n task_names = registry.task_registry.get_all_fixed_config_names()\n self.assertLen(task_names, 23)\n\n @parameterized.parameters(registry.task_registry.get_all_fixed_config_names())\n def test_tasks(self, task_name):\n self.task_test(registry.task_registry.get_instance(task_name))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for task_set.tasks.fixed_maf.\"\"\"\nfrom absl.testing import parameterized\n\nfrom task_set import registry\nfrom task_set.tasks import family_test_utils\nfrom task_set.tasks.fixed import fixed_maf # pylint: disable=unused-import\nimport tensorflow.compat.v1 as tf\n\n\nclass FixedMAFTest(family_test_utils.SingleTaskTestCase):\n\n def test_right_number_of_tasks(self):\n task_names = registry.task_registry.get_all_fixed_config_names()\n self.assertLen(task_names, 3)\n\n @parameterized.parameters(registry.task_registry.get_all_fixed_config_names())\n def test_tasks(self, task_name):\n self.task_test(registry.task_registry.get_instance(task_name))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.compat.v1.gfile.Open"
],
[
"tensorflow.compat.v1.not_equal",
"tensorflow.compat.v1.data.TFRecordDataset",
"tensorflow.compat.v1.matching_files",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.boolean_mask",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.data.experimental.shuffle_and_repeat",
"tensorflow.compat.v1.greater_equal",
"tensorflow.compat.v1.FixedLenSequenceFeature",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.sequence_mask",
"tensorflow.compat.v1.to_int32",
"tensorflow.compat.v1.sparse_tensor_to_dense",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.random.truncated_normal",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.multiply",
"tensorflow.compat.v1.random_uniform",
"tensorflow.compat.v1.ceil",
"tensorflow.compat.v1.data.RecordIODataset",
"tensorflow.compat.v1.assert_greater_equal",
"tensorflow.compat.v1.reduce_any",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.fill",
"tensorflow.compat.v1.minimum",
"tensorflow.compat.v1.parse_single_example",
"tensorflow.compat.v1.reduce_max",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.random.shuffle",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.gather",
"tensorflow.compat.v1.to_float",
"tensorflow.compat.v1.rank",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.range",
"tensorflow.compat.v1.size",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.pad",
"tensorflow.compat.v1.greater"
],
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.math.exp",
"numpy.log",
"tensorflow.compat.v2.Variable",
"numpy.abs",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.ones_like",
"numpy.arange",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.GradientTape",
"numpy.concatenate",
"numpy.round",
"tensorflow.compat.v2.keras.optimizers.SGD",
"numpy.float64",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.sign",
"tensorflow.compat.v2.abs",
"numpy.array"
],
[
"tensorflow.compat.v2.transpose",
"tensorflow.compat.v2.split",
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.reshape",
"tensorflow.compat.v2.nn.softmax_cross_entropy_with_logits",
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.stack",
"tensorflow.compat.v2.matmul",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.stop_gradient"
],
[
"tensorflow.compat.v1.train.polynomial_decay",
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.multiply",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.sqrt",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.gradients",
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.clip_by_global_norm",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.tpu.CrossShardOptimizer"
],
[
"tensorflow.compat.v1.get_default_graph",
"tensorflow.compat.v1.device",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.compat.v1.no_op",
"tensorflow.compat.v1.random_normal",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.identity",
"tensorflow.compat.v1.app.run"
],
[
"tensorflow.compat.v2.transpose",
"tensorflow.compat.v2.linalg.matmul",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.math.abs",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.math.reduce_sum",
"tensorflow.compat.v2.eye",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.abs",
"tensorflow.compat.v2.math.sign",
"tensorflow.compat.v2.math.greater",
"tensorflow.compat.v2.math.less",
"tensorflow.compat.v2.matmul",
"tensorflow.compat.v2.math.maximum",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.linalg.matvec",
"tensorflow.compat.v2.stop_gradient"
],
[
"numpy.var",
"tensorflow.compat.v1.random_normal",
"numpy.mean",
"tensorflow.compat.v1.test.main"
],
[
"tensorflow.data.Dataset.from_tensors",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.executing_eagerly",
"tensorflow.zeros",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.keras.optimizers.Adam",
"tensorflow.one_hot",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.metrics.Mean"
],
[
"tensorflow.compat.v1.nn.depthwise_conv2d",
"tensorflow.compat.v1.test.main",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.std",
"tensorflow.compat.v1.nn.relu",
"numpy.mean",
"tensorflow.compat.v1.random.normal"
],
[
"pandas.concat"
],
[
"numpy.max",
"tensorflow.compat.v1.keras.Sequential",
"numpy.mean",
"numpy.argmin",
"tensorflow.compat.v1.keras.layers.Dense",
"tensorflow.compat.v1.keras.optimizers.Adagrad",
"tensorflow.compat.v1.constant",
"numpy.random.randint",
"pandas.read_csv",
"numpy.arange",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.compat.v1.enable_eager_execution",
"tensorflow.compat.v1.stop_gradient",
"tensorflow.compat.v1.set_random_seed",
"pandas.concat",
"numpy.quantile",
"sklearn.model_selection.train_test_split",
"tensorflow.compat.v1.keras.Input",
"numpy.array",
"numpy.sum",
"tensorflow.compat.v1.Variable",
"numpy.random.seed",
"numpy.ones"
],
[
"tensorflow.compat.v1.logging.warning",
"tensorflow.compat.v1.gfile.Glob",
"tensorflow.compat.v1.gfile.Open"
],
[
"torch.mean",
"matplotlib.pyplot.imshow",
"numpy.log",
"torch.randint",
"numpy.linspace",
"numpy.meshgrid",
"numpy.reshape",
"torch.cat",
"torch.from_numpy",
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"numpy.binary_repr",
"torch.no_grad",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.array",
"numpy.flip"
],
[
"tensorflow.compat.v1.summary.FileWriter",
"tensorflow.compat.v1.Summary.Value",
"tensorflow.compat.v1.disable_eager_execution"
],
[
"torch.mean",
"torch.nn.functional.softmax",
"torch.LongTensor",
"torch.randint",
"torch.ones",
"numpy.random.seed",
"torch.zeros",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.index_select"
],
[
"tensorflow.contrib.metrics.streaming_false_negative_rate",
"tensorflow.compat.v1.nn.sigmoid",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.metrics.false_negatives",
"tensorflow.contrib.metrics.streaming_false_positive_rate",
"tensorflow.compat.v1.layers.Dense",
"tensorflow.compat.v1.metrics.precision",
"tensorflow.compat.v1.metrics.true_negatives",
"tensorflow.compat.v1.metrics.recall",
"tensorflow.compat.v1.feature_column.input_layer",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.estimator.EstimatorSpec",
"tensorflow.compat.v1.metrics.accuracy",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.metrics.true_positives",
"tensorflow.compat.v1.metrics.false_positives",
"tensorflow.compat.v1.metrics.auc",
"tensorflow.contrib.framework.get_global_step",
"tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.compat.v1.summary.histogram",
"tensorflow.compat.v1.greater"
],
[
"tensorflow.math.abs",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.reduce_logsumexp",
"tensorflow.Variable",
"tensorflow.keras.layers.Lambda",
"tensorflow.squeeze",
"tensorflow.stop_gradient",
"tensorflow.math.softplus",
"tensorflow.debugging.assert_rank_at_least",
"tensorflow.square",
"tensorflow.tile",
"tensorflow.shape",
"tensorflow.random.uniform",
"tensorflow.math.exp",
"tensorflow.math.sigmoid",
"tensorflow.linalg.set_diag",
"tensorflow.split",
"tensorflow.math.log_sigmoid",
"tensorflow.clip_by_value",
"tensorflow.transpose",
"tensorflow.reduce_mean",
"tensorflow.math.minimum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.math.log",
"tensorflow.math.greater",
"tensorflow.math.maximum"
],
[
"pandas.concat",
"pandas.DataFrame"
],
[
"pandas.concat",
"pandas.read_csv",
"tensorflow.io.gfile.walk",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile",
"tensorflow.gfile.GFile",
"tensorflow.io.gfile.stat",
"tensorflow.io.gfile.makedirs",
"pandas.DataFrame",
"tensorflow.io.gfile.listdir"
],
[
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.local_variables_initializer",
"tensorflow.compat.v1.test.main",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.gradients",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.constant_initializer",
"tensorflow.compat.v1.constant"
],
[
"numpy.array",
"tensorflow.constant",
"tensorflow.test.main"
],
[
"tensorflow.compat.v1.no_op",
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.greater_equal",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.abs",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.norm",
"tensorflow.compat.v1.square",
"numpy.log",
"tensorflow.compat.v1.assign",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.compat.v1.reduce_max",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.assign_add",
"tensorflow.compat.v1.sign"
],
[
"numpy.log",
"numpy.testing.assert_allclose"
],
[
"numpy.expand_dims",
"tensorflow.python.ops.summary_ops_v2.always_record_summaries",
"tensorflow.compat.v2.summary.create_file_writer",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.train.Checkpoint"
],
[
"tensorflow.nn.relu",
"tensorflow.get_variable",
"tensorflow.nn.batch_normalization",
"tensorflow.control_dependencies",
"tensorflow.nn.max_pool",
"tensorflow.nn.moments",
"tensorflow.identity",
"tensorflow.constant_initializer",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.to_float",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.variable_scope",
"tensorflow.random_normal_initializer",
"tensorflow.nn.conv2d"
],
[
"numpy.array",
"tensorflow.constant",
"tensorflow.test.main"
],
[
"tensorflow.train.latest_checkpoint",
"tensorflow.Variable",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile",
"tensorflow.io.gfile.makedirs",
"tensorflow.config.experimental_run_functions_eagerly"
],
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.reduce_max",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.reduce_mean",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.nn.moments",
"tensorflow.gradients",
"tensorflow.reduce_min",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.image.total_variation",
"tensorflow.rank",
"tensorflow.sqrt",
"tensorflow.squared_difference",
"tensorflow.abs"
],
[
"tensorflow.boolean_mask",
"tensorflow.nn.relu",
"tensorflow.math.reduce_min",
"tensorflow.constant",
"tensorflow.math.count_nonzero",
"tensorflow.shape",
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.math.reduce_mean",
"tensorflow.stop_gradient",
"tensorflow.eye",
"tensorflow.zeros_like",
"tensorflow.split"
],
[
"tensorflow.compat.v1.train.RMSPropOptimizer",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.argmax",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.random.categorical",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.variable_scope"
],
[
"tensorflow.compat.v1.ones",
"tensorflow.compat.v1.exp",
"numpy.abs",
"numpy.random.seed",
"tensorflow.compat.v1.test.main",
"numpy.sqrt",
"numpy.ones",
"numpy.random.normal",
"numpy.array",
"numpy.zeros"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"numpy.linspace",
"pandas.DataFrame",
"numpy.max",
"numpy.fill_diagonal",
"tensorflow.compat.v1.gfile.GFile",
"numpy.where",
"numpy.copy",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.random.choice",
"numpy.min",
"matplotlib.pyplot.savefig",
"tensorflow.compat.v1.io.gfile.GFile",
"matplotlib.pyplot.ylabel",
"numpy.abs",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.clf",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.xlabel"
],
[
"tensorflow.compat.v2.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.compat.v2.maximum",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.one_hot",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.compat.v2.keras.backend.ones"
],
[
"tensorflow.compat.v1.io.gfile.exists",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.gfile.Open"
],
[
"tensorflow.compat.v2.io.gfile.makedirs"
],
[
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.contrib.summary.create_file_writer",
"tensorflow.contrib.summary.all_summary_ops",
"tensorflow.compat.v1.less",
"tensorflow.contrib.summary.record_summaries_every_n_global_steps",
"tensorflow.contrib.tpu.RunConfig",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.contrib.summary.scalar",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.constant"
],
[
"numpy.random.seed",
"tensorflow.compat.v2.data.experimental.cardinality",
"numpy.random.choice",
"tensorflow.compat.v2.keras.layers.Embedding",
"numpy.ones",
"tensorflow.compat.v2.reshape",
"numpy.sum",
"tensorflow.compat.v2.keras.backend.ones"
],
[
"tensorflow.compat.v1.train.import_meta_graph",
"numpy.asarray",
"numpy.fliplr",
"numpy.flipud",
"numpy.stack",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.Graph",
"numpy.float32",
"scipy.io.savemat",
"numpy.array"
],
[
"tensorflow.compat.v2.keras.losses.categorical_crossentropy",
"tensorflow.compat.v2.split",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.one_hot",
"tensorflow.compat.v2.expand_dims",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.stop_gradient",
"tensorflow.compat.v2.random.uniform",
"tensorflow.compat.v2.tile"
],
[
"numpy.hstack",
"tensorflow.concat",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow.keras.optimizers.Adam",
"numpy.float32",
"numpy.array",
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
],
[
"numpy.unique",
"sklearn.preprocessing.OneHotEncoder",
"tensorflow.compat.v1.gfile.Open",
"numpy.concatenate",
"numpy.round",
"tensorflow.compat.v1.placeholder",
"numpy.argmax",
"sklearn.metrics.normalized_mutual_info_score",
"tensorflow.compat.v1.keras.layers.Input"
],
[
"tensorflow.clip_by_value",
"tensorflow.floormod",
"tensorflow.gather_nd",
"tensorflow.contrib.resampler.resampler",
"tensorflow.range",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.summary.image",
"tensorflow.roll",
"tensorflow.zeros",
"tensorflow.ones",
"tensorflow.zeros_like",
"tensorflow.linspace",
"numpy.array",
"tensorflow.tile"
],
[
"tensorflow.compat.v1.nn.dropout",
"tensorflow.compat.v1.reduce_all",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.logical_not",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.ones",
"tensorflow.compat.v1.add_n",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.to_int64",
"tensorflow.compat.v1.zeros_like",
"tensorflow.python.util.nest.map_structure",
"tensorflow.compat.v1.gather_nd",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.less",
"tensorflow.compat.v1.fill",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.TensorShape",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.to_float",
"tensorflow.compat.v1.slice",
"tensorflow.compat.v1.squeeze"
],
[
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.abs",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.sqrt",
"tensorflow.compat.v1.to_float",
"tensorflow.compat.v1.gradients",
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.zeros_initializer",
"numpy.random.RandomState"
],
[
"tensorflow.compat.v1.random_normal_initializer",
"numpy.eye",
"tensorflow.compat.v1.einsum",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"numpy.round",
"tensorflow.compat.v1.Graph",
"tensorflow.compat.v1.nn.l2_loss",
"tensorflow.compat.v1.set_random_seed",
"numpy.zeros"
],
[
"tensorflow.compat.v1.test.main"
],
[
"tensorflow.compat.v1.concat",
"numpy.concatenate",
"tensorflow.compat.v1.shape",
"numpy.argmin",
"sklearn.gaussian_process.kernels.ConstantKernel",
"pandas.read_csv",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.global_variables",
"tensorflow.compat.v1.reshape",
"numpy.copy",
"numpy.argmax",
"tensorflow.compat.v1.unstack",
"tensorflow.compat.v1.variable_scope",
"numpy.zeros",
"tensorflow.compat.v1.reduce_mean",
"sklearn.gaussian_process.kernels.RBF",
"numpy.sum",
"tensorflow.compat.v1.layers.dense",
"tensorflow.compat.v1.losses.mean_squared_error",
"tensorflow.compat.v1.losses.hinge_loss",
"numpy.linalg.norm",
"numpy.percentile",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.matmul",
"numpy.random.normal",
"tensorflow.compat.v1.placeholder",
"sklearn.gaussian_process.GaussianProcessRegressor",
"numpy.random.uniform",
"tensorflow.compat.v1.reset_default_graph",
"tensorflow.compat.v1.greater"
],
[
"numpy.random.random",
"numpy.sqrt",
"numpy.zeros_like",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.randint"
],
[
"tensorflow.compat.v1.test.main"
],
[
"tensorflow.compat.v1.test.main"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LuoYuanke/PrivChainer | [
"758d765c7903f6913cfd58c21db069d5f2a12203",
"758d765c7903f6913cfd58c21db069d5f2a12203",
"758d765c7903f6913cfd58c21db069d5f2a12203"
] | [
"chainer/functions/pooling/average_pooling_2d.py",
"chainer/utils/conv_nd.py",
"tests/chainer_tests/training_tests/updaters_tests/test_multiprocess_parallel_updater.py"
] | [
"import numpy\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.functions.pooling import pooling_2d\nfrom chainer.utils import conv\n\n\nclass AveragePooling2D(pooling_2d.Pooling2D):\n\n \"\"\"Average pooling over a set of 2d planes.\"\"\"\n # TODO(beam2d): Support cover_all mode.\n\n def forward_cpu(self, x):\n self._in_shape = x[0].shape\n self._in_dtype = x[0].dtype\n\n col = conv.im2col_cpu(x[0], self.kh, self.kw, self.sy, self.sx,\n self.ph, self.pw)\n y = col.mean(axis=(2, 3))\n return y,\n\n def forward_gpu(self, x):\n if chainer.should_use_cudnn('>=auto'):\n self.retain_inputs((0,))\n return super(AveragePooling2D, self).forward_gpu(x)\n\n self._in_shape = x[0].shape\n self._in_dtype = x[0].dtype\n\n n, c, h, w = x[0].shape\n y_h = conv.get_conv_outsize(h, self.kh, self.sy, self.ph)\n y_w = conv.get_conv_outsize(w, self.kw, self.sx, self.pw)\n y = cuda.cupy.empty((n, c, y_h, y_w), dtype=x[0].dtype)\n coeff = 1. / (self.kh * self.kw)\n kern = cuda.elementwise(\n 'raw T in, int32 h, int32 w,'\n 'int32 out_h, int32 out_w, int32 kh, int32 kw,'\n 'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',\n 'T out', '''\n int c0 = i / (out_h * out_w);\n int out_y = i / out_w % out_h;\n int out_x = i % out_w;\n int in_y_0 = max(0, out_y * sy - ph);\n int in_y_1 = min(h, out_y * sy + kh - ph);\n int in_x_0 = max(0, out_x * sx - pw);\n int in_x_1 = min(w, out_x * sx + kw - pw);\n\n T val = 0;\n for (int y = in_y_0; y < in_y_1; ++y) {\n int offset_y = w * (y + h * c0);\n for (int x = in_x_0; x < in_x_1; ++x) {\n val = val + in[x + offset_y];\n }\n }\n out = val * coeff;\n ''', 'avg_pool_fwd')\n kern(x[0].reduced_view(), h, w, y_h, y_w, self.kh, self.kw,\n self.sy, self.sx, self.ph, self.pw, coeff, y)\n return y,\n\n def backward(self, indexes, gy):\n return AveragePooling2DGrad(self).apply(gy)\n\n def create_pool_desc(self):\n return cuda.cudnn.create_pooling_descriptor(\n (self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),\n cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING)\n\n\nclass AveragePooling2DGrad(function_node.FunctionNode):\n\n def __init__(self, apool2d):\n self.kh = apool2d.kh\n self.kw = apool2d.kw\n self.sy = apool2d.sy\n self.sx = apool2d.sx\n self.ph = apool2d.ph\n self.pw = apool2d.pw\n self._used_cudnn = apool2d._used_cudnn\n if not self._used_cudnn:\n self._in_shape = apool2d._in_shape\n self._in_dtype = apool2d._in_dtype\n self.apool2d = apool2d\n\n def forward_cpu(self, gy):\n h, w = self._in_shape[2:]\n gcol = numpy.tile(gy[0][:, :, None, None],\n (1, 1, self.kh, self.kw, 1, 1))\n gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)\n gx /= self.kh * self.kw\n return gx,\n\n def forward_gpu(self, gy):\n if self._used_cudnn:\n x, = self.apool2d.get_retained_inputs()\n return self.apool2d.backward_gpu((x.data,), gy)\n n, c, h, w = self._in_shape\n y_h, y_w = gy[0].shape[2:]\n gx = cuda.cupy.empty(self._in_shape, self._in_dtype)\n coeff = 1. 
/ (self.kh * self.kw)\n cuda.elementwise(\n 'raw T gy, int32 h, int32 w,'\n 'int32 out_h, int32 out_w, int32 kh, int32 kw,'\n 'int32 sy, int32 sx, int32 ph, int32 pw, T coeff',\n 'T gx',\n '''\n int c0 = i / (h * w);\n int y = i / w % h + ph;\n int x = i % w + pw;\n int out_y_0 = max(0, (y - kh + sy) / sy);\n int out_y_1 = min(out_h, (y + sy) / sy);\n int out_x_0 = max(0, (x - kw + sx) / sx);\n int out_x_1 = min(out_w, (x + sx) / sx);\n int hc0 = out_h * c0;\n\n T val = 0;\n for (int out_y = out_y_0; out_y < out_y_1; ++out_y) {\n for (int out_x = out_x_0; out_x < out_x_1; ++out_x) {\n val = val + gy[out_x + out_w * (out_y + hc0)];\n }\n }\n gx = val * coeff;\n ''', 'avg_pool_bwd')(gy[0].reduced_view(),\n h, w, y_h, y_w, self.kh, self.kw,\n self.sy, self.sx, self.ph, self.pw, coeff,\n gx)\n return gx,\n\n def backward(self, indexes, grad_outputs):\n return AveragePooling2D(\n (self.kh, self.kw), (self.sy, self.sx), (self.ph, self.pw),\n False).apply(grad_outputs)\n\n\ndef average_pooling_2d(x, ksize, stride=None, pad=0):\n \"\"\"Spatial average pooling function.\n\n This function acts similarly to :class:`~functions.Convolution2D`, but\n it computes the average of input spatial patch for each channel\n without any parameter instead of computing the inner products.\n\n Args:\n x (~chainer.Variable): Input variable.\n ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and\n ``ksize=(k, k)`` are equivalent.\n stride (int or pair of ints or None): Stride of pooling applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is\n specified, then it uses same stride as the pooling window size.\n pad (int or pair of ints): Spatial padding width for the input array.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. note::\n\n This function currently does not support ``cover_all`` mode as\n :func:`max_pooling_2d`. Average pooling runs in non-cover-all mode.\n\n \"\"\"\n return AveragePooling2D(ksize, stride, pad, False).apply((x,))[0]\n",
"import itertools\nimport numpy\nimport six\n\nfrom chainer.backends import cuda\nfrom chainer.utils.conv import get_conv_outsize\nfrom chainer.utils import conv_nd_kernel\n\n\ndef as_tuple(x, n):\n if hasattr(x, '__getitem__'):\n assert len(x) == n\n return tuple(x)\n return (x,) * n\n\n\ndef im2col_nd_cpu(img, ksize, stride, pad, pval=0, cover_all=False):\n n, c = img.shape[0:2] # (n, c, d_1, d_2, ..., d_N)\n dims = img.shape[2:]\n ndim = len(dims)\n assert ndim == len(ksize) == len(stride) == len(pad)\n outs = tuple(get_conv_outsize(d, k, s, p, cover_all)\n for (d, k, s, p) in zip(dims, ksize, stride, pad))\n assert all(out > 0 for out in outs), 'Output sizes should be positive.'\n\n # Pad around image.\n pad_width = ((0, 0), (0, 0)) + tuple(\n (p, p + s - 1) for (s, p) in zip(stride, pad))\n img = numpy.pad(img, pad_width, mode='constant', constant_values=(pval,))\n\n # Make patch array with which we will compute correlation with filter.\n # shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)\n shape = (n, c) + ksize + outs\n col = numpy.ndarray(shape, dtype=img.dtype)\n\n # Fill the patch array.\n colon = slice(None)\n for kxs in itertools.product(*[six.moves.range(k) for k in ksize]):\n # col[:, :, kx_1, kx_2, ..., kx_N, :, :, ..., :]\n col_index = (colon, colon) + kxs + (colon,) * ndim\n # img[:, :, kx_1:kx_lim_1:s_1, ..., kx_N:kx_lim_N:s_N]\n kx_lims = tuple(kx + s * out\n for (kx, s, out) in zip(kxs, stride, outs))\n img_index = (colon, colon) + tuple(\n slice(kx, kx_lim, s)\n for (kx, kx_lim, s) in zip(kxs, kx_lims, stride))\n col[col_index] = img[img_index]\n\n return col\n\n\ndef im2col_nd_gpu(img, ksize, stride, pad, cover_all=False):\n n, c = img.shape[0:2] # (n, c, d_1, d_2, ..., d_N)\n dims = img.shape[2:]\n ndim = len(dims)\n assert ndim == len(ksize) == len(stride) == len(pad)\n outs = tuple(get_conv_outsize(d, k, s, p, cover_all)\n for (d, k, s, p) in zip(dims, ksize, stride, pad))\n assert all(out > 0 for out in outs), 'Output sizes should be positive.'\n\n # col_shape: (n, c, k_1, k_2, ..., k_N, out_1, out_2, ..., out_N)\n shape = (n, c) + ksize + outs\n col = cuda.cupy.empty(shape, dtype=img.dtype)\n\n in_params, out_params, operation, name = \\\n conv_nd_kernel.Im2colNDKernel.generate(ndim)\n\n cuda.elementwise(in_params, out_params, operation, name)(\n img.reduced_view(), *(dims + outs + ksize + stride + pad + (col,)))\n\n return col\n\n\ndef col2im_nd_cpu(col, stride, pad, dims):\n n, c = col.shape[:2] # (n, c, kx_1, ..., kx_N, out_1, ..., out_N)\n mid = (len(col.shape) - 2) // 2 + 2\n ksize = col.shape[2:mid]\n outs = col.shape[mid:]\n colon = slice(None)\n assert len(outs) == len(ksize) == len(stride) == len(pad) == len(dims)\n\n # Image with padded size.\n img_shape = (n, c) + tuple(d + 2 * p + s - 1\n for (d, p, s) in zip(dims, pad, stride))\n img = numpy.zeros(img_shape, dtype=col.dtype)\n for kxs in itertools.product(*[six.moves.range(k) for k in ksize]):\n # (:, :, kx_1:kx_lim_1:s_1, ..., kx_N:kx_lim_N:s_N)\n kx_lims = tuple(kx + s * out\n for (kx, s, out) in zip(kxs, stride, outs))\n img_index = (colon, colon) + tuple(\n slice(kx, kx_lim, s)\n for (kx, kx_lim, s) in zip(kxs, kx_lims, stride))\n # (:, :, kx_1, kx_2, ..., kx_N, :, :, ..., :)\n col_index = (colon, colon) + kxs + (colon,) * len(outs)\n img[img_index] += col[col_index]\n\n # (:, :, p_1:d_1 + p_1, p_2:d_2 + p_2, ..., p_N:d_N + p_N]\n img_index = (colon, colon) + tuple(\n slice(p, d + p) for (p, d) in zip(pad, dims))\n return img[img_index]\n\n\ndef col2im_nd_gpu(col, stride, pad, dims):\n 
n, c = col.shape[:2] # (n, c, k_1, ..., k_N, out_1, ..., out_N)\n mid = (len(col.shape) - 2) // 2 + 2\n ksize = col.shape[2:mid]\n outs = col.shape[mid:]\n ndim = len(dims)\n assert len(outs) == len(ksize) == len(stride) == len(pad) == ndim\n\n img_shape = (n, c) + dims # (n, c, d_1, d_2, ..., d_N)\n img = cuda.cupy.empty(img_shape, dtype=col.dtype)\n\n in_params, out_params, operation, name = \\\n conv_nd_kernel.Col2imNDKernel.generate(ndim)\n\n cuda.elementwise(in_params, out_params, operation, name)(\n col.reduced_view(), *(dims + outs + ksize + stride + pad + (img,)))\n\n return img\n",
"import unittest\n\nimport numpy\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import initializers\nfrom chainer import testing\nfrom chainer.testing import attr\nimport chainer.training.updaters.multiprocess_parallel_updater as mpu\n\nimport copy\n\n\nclass SimpleNet(chainer.Chain):\n insize = 5\n\n def __init__(self, dtype=numpy.float32):\n super(SimpleNet, self).__init__()\n self.dtype = dtype\n W = initializers.HeNormal(1 / numpy.sqrt(2), self.dtype)\n bias = initializers.Zero(self.dtype)\n with self.init_scope():\n self.conv = chainer.links.Convolution2D(2, 2, 3, initialW=W,\n initial_bias=bias)\n self.fc = chainer.links.Linear(18, 2, initialW=W,\n initial_bias=bias)\n self.train = True\n\n def clear(self):\n self.loss = None\n self.accuracy = None\n\n def __call__(self, x, t):\n h = chainer.functions.relu(self.conv(x))\n y = self.fc(h)\n\n self.loss = chainer.functions.softmax_cross_entropy(y, t)\n self.accuracy = chainer.functions.accuracy(y, t)\n\n return self.loss\n\n\[email protected](*testing.product({\n 'dtype': [numpy.float32, numpy.float16],\n}))\nclass TestGatherScatter(unittest.TestCase):\n\n def setUp(self):\n pass\n\n @attr.gpu\n def test_gather_scatter_grads(self):\n cupy = cuda.cupy\n model0 = SimpleNet(dtype=self.dtype)\n model1 = copy.deepcopy(model0)\n\n model0.to_gpu()\n model1.to_gpu()\n\n optimizer0 = chainer.optimizers.SGD(lr=1.0)\n optimizer0.setup(model0)\n\n optimizer1 = chainer.optimizers.SGD(lr=1.0)\n optimizer1.setup(model1)\n\n bsize = 8\n\n x = numpy.random.uniform(0, 1, (bsize, 2, 5, 5)).astype(self.dtype)\n t = numpy.empty(bsize, dtype=numpy.int32)\n for i in range(bsize):\n t[i] = i % 2\n\n x = chainer.Variable(chainer.cuda.to_gpu(x))\n t = chainer.Variable(chainer.cuda.to_gpu(t))\n\n loss0 = model0(x, t)\n\n model0.cleargrads()\n model1.cleargrads()\n\n loss0.backward()\n gg0 = mpu.gather_grads(model0)\n mpu.scatter_grads(model1, gg0)\n\n cupy.testing.assert_array_equal(model0.conv.W.grad, model1.conv.W.grad)\n cupy.testing.assert_array_equal(model0.conv.b.grad, model1.conv.b.grad)\n cupy.testing.assert_array_equal(model0.fc.W.grad, model1.fc.W.grad)\n cupy.testing.assert_array_equal(model0.fc.b.grad, model1.fc.b.grad)\n\n optimizer0.update()\n optimizer1.update()\n\n cupy.testing.assert_array_equal(model0.conv.W.data, model1.conv.W.data)\n cupy.testing.assert_array_equal(model0.conv.b.data, model1.conv.b.data)\n cupy.testing.assert_array_equal(model0.fc.W.data, model1.fc.W.data)\n cupy.testing.assert_array_equal(model0.fc.b.data, model1.fc.b.data)\n\n def test_gather_grads_raise_on_cpu(self):\n model = SimpleNet(dtype=self.dtype)\n with self.assertRaises(RuntimeError):\n mpu.gather_grads(model)\n\n @attr.gpu\n def test_gather_scatter_params(self):\n cupy = cuda.cupy\n model0 = SimpleNet(dtype=self.dtype)\n model1 = SimpleNet(dtype=self.dtype)\n\n model0.to_gpu()\n model1.to_gpu()\n\n gp0 = mpu.gather_params(model0)\n mpu.scatter_params(model1, gp0)\n\n cupy.testing.assert_array_equal(model0.conv.W.data, model1.conv.W.data)\n cupy.testing.assert_array_equal(model0.conv.b.data, model1.conv.b.data)\n cupy.testing.assert_array_equal(model0.fc.W.data, model1.fc.W.data)\n cupy.testing.assert_array_equal(model0.fc.b.data, model1.fc.b.data)\n\n def test_gather_params_raise_on_cpu(self):\n model = SimpleNet(dtype=self.dtype)\n with self.assertRaises(RuntimeError):\n mpu.gather_params(model)\n\n\nclass SimpleNetRawArray(chainer.Chain):\n\n def __init__(self, testcase):\n super(SimpleNetRawArray, self).__init__()\n with self.init_scope():\n 
self.conv = chainer.links.Convolution2D(2, 2, 3)\n self.fc = chainer.links.Linear(18, 2)\n\n self.train = True\n self.call_called = 0\n self.testcase = testcase\n\n def clear(self):\n self.loss = None\n self.accuracy = None\n\n def __call__(self, x, t):\n self.testcase.assertNotIsInstance(x, chainer.Variable)\n self.testcase.assertNotIsInstance(t, chainer.Variable)\n\n self.call_called += 1\n\n h = chainer.functions.relu(self.conv(x))\n y = self.fc(h)\n\n self.loss = chainer.functions.softmax_cross_entropy(y, t)\n self.accuracy = chainer.functions.accuracy(y, t)\n\n return self.loss\n\n\nclass TestRawArray(unittest.TestCase):\n\n def setUp(self):\n pass\n\n @attr.gpu\n def test_update_uses_raw_array(self):\n if mpu.MultiprocessParallelUpdater.available():\n model = SimpleNetRawArray(self)\n dataset = [((numpy.ones((2, 5, 5)) * i).astype(numpy.float32),\n numpy.int32(0)) for i in range(100)]\n\n batch_size = 5\n devices = (1,)\n iters = [chainer.iterators.SerialIterator(i, batch_size) for i in\n chainer.datasets.split_dataset_n_random(\n dataset, len(devices))]\n optimizer = chainer.optimizers.SGD(lr=1.0)\n optimizer.setup(model)\n updater = mpu.MultiprocessParallelUpdater(\n iters, optimizer, devices=devices)\n updater.update()\n\n self.assertEqual(model.call_called, 1)\n\n\ntesting.run_module(__name__, __file__)\n"
] | [
[
"numpy.tile"
],
[
"numpy.zeros",
"numpy.pad",
"numpy.ndarray"
],
[
"numpy.sqrt",
"numpy.int32",
"numpy.ones",
"numpy.random.uniform",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jwillis0720/pybody | [
"2d7c68650ac1ef5f3003ccb67171898eac1f63eb"
] | [
"src/sadie/renumbering/result.py"
] | [
"import logging\nimport pandas as pd\nfrom ast import literal_eval\n\nfrom .constants import NUMBERING_RESULTS\nfrom sadie.numbering.scheme_numbering import scheme_numbering\n\nlogger = logging.getLogger(\"NUMBERING\")\n\n\nclass NumberingResults(pd.DataFrame):\n def __init__(self, *args, scheme=\"\", region_definition=\"\", allowed_chains=[], allowed_species=[], **kwargs):\n # use the __init__ method from DataFrame to ensure\n # that we're inheriting the correct behavior\n super(NumberingResults, self).__init__(*args, **kwargs)\n # self[\"scheme\"] = scheme\n # self[\"region_definition\"] = region_definition\n # self[\"allowed_species\"] = \",\".join(allowed_species)\n # self[\"allowed_chains\"] = \",\".join(allowed_chains)\n # self._add_segment_regions()\n\n @property\n def _constructor(self):\n return NumberingResults\n\n def get_alignment_table(self) -> pd.DataFrame:\n \"\"\"Get a numbered alignment table from the numbering and insertions\n\n Returns\n -------\n pd.DataFrame\n A dataframe with Id, chain_type, scheme and numbering. Values are the amino acid sequences\n \"\"\"\n all_dataframes = []\n\n # I'm not sure if there is a more effiecient way to do this other than iterate through the df and pivot each row\n for index in range(len(self)):\n all_dataframes.append(self._pivot_alignment(self.iloc[index]))\n all_dataframes = pd.concat(all_dataframes)\n all_dataframes = all_dataframes.sort_index(axis=1, level=[0, 1])\n all_dataframes.columns = list(map(lambda x: str(x[0]) + x[1], all_dataframes.columns.values))\n all_dataframes = all_dataframes.reset_index()\n return self[[\"Id\", \"chain_type\", \"scheme\"]].merge(all_dataframes, on=\"Id\").copy()\n\n def _get_region(self, row, start, end, segment_name):\n with_segment = \"\".join(\n list(\n map(\n lambda x: x[-1],\n list(\n filter(\n lambda x: x[0] >= start and x[0] <= end,\n list(\n zip(\n row[\"Numbering\"],\n row[\"Insertion\"],\n row[\"Numbered_Sequence\"],\n )\n ),\n )\n ),\n )\n )\n )\n without_segment = with_segment.replace(\"-\", \"\")\n return pd.Series(\n {\n f\"{segment_name}_gaps\": with_segment,\n f\"{segment_name}_no_gaps\": without_segment,\n }\n )\n\n def _add_segment_regions(self) -> \"NumberingResults\":\n \"\"\"Private method to delineate the framework and cdr boundaries from the numbering\n\n Returns\n -------\n NumberingResults\n Instance of NumberingResults\n \"\"\"\n return_frames = []\n for group, sub_df in self.groupby([\"scheme\", \"region_definition\", \"Chain\"]):\n numbering = group[0]\n chain = {\"H\": \"heavy\", \"KL\": \"light\"}[group[-1]]\n boundaries = group[1]\n numbering_lookup = scheme_numbering[numbering][chain][boundaries]\n for region in [\n \"fwr1_aa\",\n \"cdr1_aa\",\n \"fwr2_aa\",\n \"cdr2_aa\",\n \"fwr3_aa\",\n \"cdr3_aa\",\n \"fwr4_aa\",\n ]:\n _start = numbering_lookup[f\"{region}_start\"]\n _end = numbering_lookup[f\"{region}_end\"]\n sub_df = sub_df.join(self.apply(lambda x: self._get_region(x, _start, _end, region), axis=1))\n return_frames.append(sub_df)\n segmented_df = pd.concat(return_frames).reset_index(drop=True)\n # everything preceding the antibody\n segmented_df[\"leader\"] = segmented_df[[\"sequence\", \"seqstart_index\"]].apply(lambda x: x[0][: x[1]], axis=1)\n\n # everything following the antibody. 
keyword tail will clash with pandas\n segmented_df[\"follow\"] = segmented_df[[\"sequence\", \"seqend_index\"]].apply(lambda x: x[0][x[1] + 1 :], axis=1)\n return segmented_df\n\n def _pivot_alignment(self, row: pd.Series) -> pd.DataFrame:\n \"\"\"Private method to pivot a segmented row into an alignment series\n\n Parameters\n ----------\n row : pd.Series\n indidual Numbering result row\n\n Returns\n -------\n pivoted dataframe\n \"\"\"\n pivoted_df = (\n pd.DataFrame(\n zip(row[\"Numbering\"], row[\"Insertion\"], row[\"Numbered_Sequence\"]),\n columns=[\"numbering\", \"insertion\", \"sequence\"],\n )\n .assign(Id=row[\"Id\"])\n .pivot(\"Id\", [\"numbering\", \"insertion\"], \"sequence\")\n )\n return pivoted_df\n\n def get_sanatized_antibodies(self):\n # drop sequences that don't start at the first amino acid and dont end at the last amino acid.\n return self[(self[\"seqstart_index\"] == 0) & (self[\"seqend_index\"] == self[\"sequence\"].str.len() - 1)]\n\n @staticmethod\n def read_csv(*args, **kwargs):\n return NumberingResults(\n pd.read_csv(\n *args,\n index_col=0,\n dtype=NUMBERING_RESULTS,\n converters={\"Numbering\": literal_eval, \"Insertion\": literal_eval, \"Numbered_Sequence\": literal_eval},\n **kwargs,\n )\n )\n\n def drop_bad_numbering(self) -> \"NumberingResults\":\n return self[(self[\"seqstart_index\"] == 0) & (self[\"seqend_index\"] == self[\"sequence\"].str.len() - 1)]\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rafaelcostafrf/UAV_3d_virtual_env | [
"bccaa52ec97fff5c0a17e1351a09f913d91c4c7b"
] | [
"environment/controller/ppo_test.py"
] | [
"import sys\nfrom quadrotor_env import quad, render, animation\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.distributions import MultivariateNormal\nimport numpy as np\nfrom quadrotor_env import quad, render, animation\nfrom model import ActorCritic\n\n\"\"\"\nMECHANICAL ENGINEERING POST-GRADUATE PROGRAM\nUNIVERSIDADE FEDERAL DO ABC - SANTO ANDRÉ, BRASIL\n\nNOME: RAFAEL COSTA FERNANDES\nRA: 21201920754\nE−MAIL: [email protected]\n\nDESCRIPTION:\n PPO testing algorithm (no training, only forward passes)\n\"\"\"\n\ntime_int_step = 0.01\nmax_timesteps = 1000\nT = 5\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nenv = quad(time_int_step, max_timesteps, euler=0, direct_control=1, deep_learning=1, T=T, debug=0)\nstate_dim = env.deep_learning_in_size\npolicy = ActorCritic(state_dim, action_dim=4, action_std=0).to(device)\n\n\n#LOAD TRAINED POLICY\ntry:\n policy.load_state_dict(torch.load('PPO_continuous_solved_drone.pth',map_location=device))\n print('Saved policy loaded')\nexcept:\n print('Could not load policy')\n sys.exit(1)\n\n#PLOTTER SETUP\nprint_states = [0, 2, 4, 6, 7, 8, 9, 10, 11, 12]\nplot_labels = ['x', 'y', 'z', 'phi', 'theta', 'psi', 'f1', 'f2', 'f3', 'f4']\nline_styles = ['-', '-', '-', '--', '--', '--', ':', ':', ':', ':',]\nplotter = render(print_states, plot_labels, line_styles, depth_plot_list=0, animate=0)\n\n\n\n# DO ONE RANDOM EPISODE\nplotter.clear()\nstate = env.reset()\nfirst_state = np.concatenate((env.previous_state[0:6],env.ang,np.zeros(4)))\nplotter.add(0,first_state)\ndone = False\nt=0\nwhile not done:\n t+=time_int_step\n action = policy.actor(torch.FloatTensor(state).to(device)).cpu().detach().numpy()\n state, _, done = env.step(action)\n plot_state = np.concatenate((env.state[0:6],env.ang,action))\n plotter.add(t,plot_state)\nprint('Env Solved, printing...')\nplotter.plot()\n# plotter.depth_plot()\nan = animation()\nan.animate(plotter.states)\nplotter.clear()\n\n"
] | [
[
"torch.load",
"numpy.concatenate",
"torch.FloatTensor",
"torch.cuda.is_available",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
techthiyanes/transformers | [
"705d65368fb28246534ef636fe62c008f4fb2682",
"705d65368fb28246534ef636fe62c008f4fb2682",
"705d65368fb28246534ef636fe62c008f4fb2682",
"705d65368fb28246534ef636fe62c008f4fb2682",
"b18dfd95e1f60ae65a959a7b255fc06522170d1b",
"705d65368fb28246534ef636fe62c008f4fb2682"
] | [
"tests/wav2vec2/test_modeling_flax_wav2vec2.py",
"src/transformers/pipelines/automatic_speech_recognition.py",
"tests/decision_transformer/test_modeling_decision_transformer.py",
"tests/dpt/test_modeling_dpt.py",
"src/transformers/models/convnext/modeling_tf_convnext.py",
"src/transformers/models/layoutlmv2/modeling_layoutlmv2.py"
] | [
"# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport math\nimport unittest\n\nimport numpy as np\nfrom datasets import load_dataset\n\nfrom transformers import Wav2Vec2Config, is_flax_available\nfrom transformers.testing_utils import (\n is_librosa_available,\n is_pyctcdecode_available,\n require_flax,\n require_librosa,\n require_pyctcdecode,\n require_soundfile,\n slow,\n)\n\nfrom ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, random_attention_mask\n\n\nif is_flax_available():\n import jax\n import jax.numpy as jnp\n import optax\n from flax.traverse_util import flatten_dict\n from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Processor\n from transformers.models.wav2vec2.modeling_flax_wav2vec2 import (\n FlaxWav2Vec2ForCTC,\n FlaxWav2Vec2ForPreTraining,\n FlaxWav2Vec2GumbelVectorQuantizer,\n FlaxWav2Vec2Model,\n _compute_mask_indices,\n _sample_negative_indices,\n )\n\n\nif is_pyctcdecode_available():\n from transformers import Wav2Vec2ProcessorWithLM\n\n\nif is_librosa_available():\n import librosa\n\n\nclass FlaxWav2Vec2ModelTester:\n def __init__(\n self,\n parent,\n batch_size=13,\n seq_length=1024, # speech is longer\n is_training=False,\n hidden_size=24,\n feat_extract_norm=\"layer\",\n feat_extract_dropout=0.0,\n feat_extract_activation=\"gelu\",\n conv_dim=(32, 32, 32),\n conv_stride=(4, 4, 4),\n conv_kernel=(8, 8, 8),\n conv_bias=False,\n num_conv_pos_embeddings=16,\n num_conv_pos_embedding_groups=2,\n num_hidden_layers=4,\n num_attention_heads=2,\n hidden_dropout_prob=0.1, # this is most likely not correctly set yet\n intermediate_size=20,\n layer_norm_eps=1e-5,\n hidden_act=\"gelu\",\n initializer_range=0.02,\n vocab_size=32,\n do_stable_layer_norm=True,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.hidden_size = hidden_size\n self.feat_extract_norm = feat_extract_norm\n self.feat_extract_dropout = feat_extract_dropout\n self.feat_extract_activation = feat_extract_activation\n self.conv_dim = conv_dim\n self.conv_stride = conv_stride\n self.conv_kernel = conv_kernel\n self.conv_bias = conv_bias\n self.num_conv_pos_embeddings = num_conv_pos_embeddings\n self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_dropout_prob = hidden_dropout_prob\n self.intermediate_size = intermediate_size\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.vocab_size = vocab_size\n self.do_stable_layer_norm = do_stable_layer_norm\n self.scope = scope\n\n output_seq_length = self.seq_length\n for kernel, stride in zip(self.conv_kernel, self.conv_stride):\n output_seq_length = (output_seq_length - (kernel - 1)) / stride\n self.output_seq_length = int(math.ceil(output_seq_length))\n 
self.encoder_seq_length = self.output_seq_length\n\n def prepare_config_and_inputs(self):\n input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size)\n attention_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n config = Wav2Vec2Config(\n do_stable_layer_norm=self.do_stable_layer_norm,\n hidden_size=self.hidden_size,\n feat_extract_norm=self.feat_extract_norm,\n feat_extract_dropout=self.feat_extract_dropout,\n feat_extract_activation=self.feat_extract_activation,\n conv_dim=self.conv_dim,\n conv_stride=self.conv_stride,\n conv_kernel=self.conv_kernel,\n conv_bias=self.conv_bias,\n num_conv_pos_embeddings=self.num_conv_pos_embeddings,\n num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n hidden_dropout_prob=self.hidden_dropout_prob,\n intermediate_size=self.intermediate_size,\n layer_norm_eps=self.layer_norm_eps,\n hidden_act=self.hidden_act,\n initializer_range=self.initializer_range,\n vocab_size=self.vocab_size,\n )\n\n return config, input_values, attention_mask\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, input_values, attention_mask = config_and_inputs\n inputs_dict = {\"input_values\": input_values, \"attention_mask\": attention_mask}\n return config, inputs_dict\n\n\n@require_flax\nclass FlaxWav2Vec2ModelTest(FlaxModelTesterMixin, unittest.TestCase):\n all_model_classes = (\n (FlaxWav2Vec2Model, FlaxWav2Vec2ForCTC, FlaxWav2Vec2ForPreTraining) if is_flax_available() else ()\n )\n\n def setUp(self):\n self.model_tester = FlaxWav2Vec2ModelTester(self)\n\n def test_train(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n input_values = inputs_dict[\"input_values\"]\n attention_mask = inputs_dict[\"attention_mask\"]\n\n model = FlaxWav2Vec2ForPreTraining(config)\n\n features_shape = (\n input_values.shape[0],\n model._get_feat_extract_output_lengths(np.array(input_values.shape[1])),\n )\n\n batch_size, sequence_length = features_shape[:2]\n\n mask_prob = 0.5\n mask_length = 4\n mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)\n\n dropout_rng, gumbel_rng = jax.random.split(jax.random.PRNGKey(0))\n\n output = model(\n input_values,\n attention_mask=attention_mask,\n mask_time_indices=mask_time_indices,\n train=True,\n dropout_rng=dropout_rng,\n gumbel_rng=gumbel_rng,\n )[0]\n\n self.assertTrue(output.shape == (batch_size, sequence_length, model.config.proj_codevector_dim))\n\n # overwrite because of `input_values`\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.__call__)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"input_values\", \"attention_mask\"]\n self.assertListEqual(arg_names[:2], expected_arg_names)\n\n # overwrite because of `input_values`\n def test_jit_compilation(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n with self.subTest(model_class.__name__):\n prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)\n model = model_class(config)\n\n @jax.jit\n def model_jitted(input_values, 
attention_mask=None, **kwargs):\n return model(input_values=input_values, attention_mask=attention_mask, **kwargs)\n\n with self.subTest(\"JIT Enabled\"):\n jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()\n\n with self.subTest(\"JIT Disabled\"):\n with jax.disable_jit():\n outputs = model_jitted(**prepared_inputs_dict).to_tuple()\n\n self.assertEqual(len(outputs), len(jitted_outputs))\n for jitted_output, output in zip(jitted_outputs, outputs):\n\n self.assertEqual(jitted_output.shape, output.shape)\n\n def test_freeze_feature_encoder(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n input_values = inputs_dict[\"input_values\"]\n attention_mask = inputs_dict[\"attention_mask\"]\n\n model = FlaxWav2Vec2ForPreTraining(config)\n params = model.params\n\n # dummy loss function\n def compute_loss(\n params, input_values, attention_mask, freeze_feature_encoder: bool = False, epsilon: float = 1e-8\n ):\n outputs = model(\n input_values,\n attention_mask=attention_mask,\n freeze_feature_encoder=freeze_feature_encoder,\n params=params,\n )\n # compute cosine similarity of projected and projected_quantized states\n cosine_sim = optax.cosine_similarity(\n outputs.projected_states, outputs.projected_quantized_states, epsilon=epsilon\n )\n loss = cosine_sim.sum()\n return loss, outputs.to_tuple()\n\n # transform the loss function to get the gradients\n grad_fn = jax.value_and_grad(compute_loss, has_aux=True)\n\n # compute loss, outputs and gradients for unfrozen model\n (loss, outputs), grads = grad_fn(params, input_values, attention_mask, freeze_feature_encoder=False)\n\n # compare to loss, outputs and gradients for frozen model\n (loss_frozen, outputs_frozen), grads_frozen = grad_fn(\n params, input_values, attention_mask, freeze_feature_encoder=True\n )\n\n # ensure that the outputs and losses remain precisely equal\n for output, output_frozen in zip(outputs, outputs_frozen):\n self.assertTrue((output == output_frozen).all())\n self.assertEqual(loss, loss_frozen)\n\n grads = flatten_dict(grads)\n grads_frozen = flatten_dict(grads_frozen)\n\n # ensure that the dicts of gradients contain the same keys\n self.assertEqual(grads.keys(), grads_frozen.keys())\n\n # ensure that the gradients of the feature extractor layers are precisely zero when frozen and contain non-zero entries when unfrozen\n feature_extractor_grads = tuple(grads[k] for k in grads if \"feature_extractor\" in k)\n feature_extractor_grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if \"feature_extractor\" in k)\n\n for feature_extractor_grad, feature_extractor_grad_frozen in zip(\n feature_extractor_grads, feature_extractor_grads_frozen\n ):\n self.assertTrue((feature_extractor_grad_frozen == 0.0).all())\n self.assertTrue((feature_extractor_grad > 0.0).any())\n\n # ensure that the gradients of all unfrozen layers remain equal, i.e. 
all layers excluding the frozen 'feature_extractor'\n grads = tuple(grads[k] for k in grads if \"feature_extractor\" not in k)\n grads_frozen = tuple(grads_frozen[k] for k in grads_frozen if \"feature_extractor\" not in k)\n\n for grad, grad_frozen in zip(grads, grads_frozen):\n self.assertTrue((grad == grad_frozen).all())\n\n @slow\n def test_model_from_pretrained(self):\n for model_class_name in self.all_model_classes:\n model = model_class_name.from_pretrained(\"facebook/wav2vec2-large-960h-lv60-self\", from_pt=True)\n outputs = model(np.ones((1, 1024), dtype=\"f4\"))\n self.assertIsNotNone(outputs)\n\n\n@require_flax\nclass FlaxWav2Vec2UtilsTest(unittest.TestCase):\n def test_compute_mask_indices(self):\n batch_size = 4\n sequence_length = 60\n mask_prob = 0.5\n mask_length = 1\n\n mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)\n\n self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)])\n\n def test_compute_mask_indices_overlap(self):\n batch_size = 4\n sequence_length = 80\n mask_prob = 0.5\n mask_length = 4\n\n mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length)\n\n # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal\n for batch_sum in mask.sum(axis=-1):\n self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)\n\n def test_compute_mask_indices_attn_mask_overlap(self):\n batch_size = 4\n sequence_length = 80\n mask_prob = 0.5\n mask_length = 4\n\n attention_mask = np.ones((batch_size, sequence_length), dtype=np.int32)\n attention_mask[:2, sequence_length // 2 :] = 0\n\n mask = _compute_mask_indices(\n (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask\n )\n\n for batch_sum in mask.sum(axis=-1):\n self.assertTrue(int(batch_sum) <= mask_prob * sequence_length)\n\n self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0)\n\n def test_compute_perplexity(self):\n probs = np.arange(100).reshape(2, 5, 10) / 100\n\n ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs)\n self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3)\n\n # mask half of the input\n mask = np.ones((2,), dtype=np.bool)\n mask[0] = 0\n\n ppl = FlaxWav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask)\n self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3)\n\n def test_sample_negatives(self):\n batch_size = 2\n sequence_length = 10\n hidden_size = 4\n num_negatives = 3\n\n features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape(\n sequence_length, hidden_size\n ) # each value in vector consits of same value\n features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size))\n\n negative_indices = _sample_negative_indices(features.shape, num_negatives)\n\n features = features.reshape(-1, hidden_size) # BTC => (BxT)C\n # take negative vectors from sampled indices\n sampled_negatives = features[negative_indices.reshape(-1)]\n negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose(\n 2, 0, 1, 3\n )\n\n self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))\n\n # make sure no negatively sampled vector is actually a positive one\n for negative in negatives:\n self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0)\n\n # make sure that full vectors are sampled and not values of vectors\n # => this means that `unique()` yields a 
single value for `hidden_size` dim\n self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1))\n\n def test_sample_negatives_with_attn_mask(self):\n batch_size = 2\n sequence_length = 10\n hidden_size = 4\n num_negatives = 3\n\n features = (np.arange(sequence_length * hidden_size) // hidden_size).reshape(\n sequence_length, hidden_size\n ) # each value in vector consits of same value\n\n # second half of last input tensor is padded\n attention_mask = np.ones((batch_size, sequence_length), dtype=np.int8)\n attention_mask[-1, sequence_length // 2 :] = 0\n\n forbidden_indices = (\n np.arange(sequence_length // 2, sequence_length, dtype=np.int32) + (batch_size - 1) * sequence_length\n ).tolist()\n\n features = np.broadcast_to(features[None, :], (batch_size, sequence_length, hidden_size))\n\n negative_indices = _sample_negative_indices(features.shape, num_negatives, attention_mask=attention_mask)\n\n # make sure that no padding tokens are sampled\n self.assertTrue(all([idx not in negative_indices for idx in forbidden_indices]))\n\n features = features.reshape(-1, hidden_size) # BTC => (BxT)C\n # take negative vectors from sampled indices\n sampled_negatives = features[negative_indices.reshape(-1)]\n negatives = sampled_negatives.reshape(batch_size, sequence_length, num_negatives, hidden_size).transpose(\n 2, 0, 1, 3\n )\n\n self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size))\n\n # make sure no negatively sampled vector is actually a positive one\n for negative in negatives:\n self.assertTrue(((negative - features.reshape(negative.shape)) == 0).sum() == 0.0)\n\n # make sure that full vectors are sampled and not just slices of vectors\n # => this means that `unique()` yields a single value for `hidden_size` dim\n self.assertEqual(np.unique(negatives, axis=-1).shape, (num_negatives, batch_size, sequence_length, 1))\n\n\n@require_flax\n@require_soundfile\n@slow\nclass FlaxWav2Vec2ModelIntegrationTest(unittest.TestCase):\n def _load_datasamples(self, num_samples):\n ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n # automatic decoding with librispeech\n speech_samples = ds.sort(\"id\").filter(\n lambda x: x[\"id\"] in [f\"1272-141231-000{i}\" for i in range(num_samples)]\n )[:num_samples][\"audio\"]\n\n return [x[\"array\"] for x in speech_samples]\n\n def test_inference_ctc_robust_batched(self):\n model = FlaxWav2Vec2ForCTC.from_pretrained(\"facebook/wav2vec2-large-960h-lv60-self\", from_pt=True)\n processor = Wav2Vec2Processor.from_pretrained(\"facebook/wav2vec2-large-960h-lv60-self\", do_lower_case=True)\n\n input_speech = self._load_datasamples(4)\n\n inputs = processor(input_speech, return_tensors=\"np\", padding=True)\n\n input_values = inputs.input_values\n attention_mask = inputs.attention_mask\n\n logits = model(input_values, attention_mask=attention_mask).logits\n\n predicted_ids = jnp.argmax(logits, axis=-1)\n predicted_trans = processor.batch_decode(predicted_ids)\n\n EXPECTED_TRANSCRIPTIONS = [\n \"a man said to the universe sir i exist\",\n \"sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore\",\n \"the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about\",\n \"his instant panic was followed by a small sharp blow high on his chest\",\n ]\n self.assertListEqual(predicted_trans, 
EXPECTED_TRANSCRIPTIONS)\n\n def test_inference_pretrained(self):\n model = FlaxWav2Vec2ForPreTraining.from_pretrained(\"facebook/wav2vec2-large-lv60\", from_pt=True)\n feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n \"facebook/wav2vec2-large-lv60\", return_attention_mask=True\n )\n input_speech = self._load_datasamples(2)\n\n inputs_dict = feature_extractor(input_speech, return_tensors=\"np\", padding=True)\n\n features_shape = (\n inputs_dict[\"input_values\"].shape[0],\n model._get_feat_extract_output_lengths(np.array(inputs_dict[\"input_values\"].shape[1])),\n )\n\n mask_time_indices = _compute_mask_indices(\n features_shape,\n model.config.mask_time_prob,\n model.config.mask_time_length,\n min_masks=2,\n )\n\n outputs = model(\n inputs_dict.input_values,\n attention_mask=inputs_dict.attention_mask,\n mask_time_indices=mask_time_indices,\n )\n\n # compute cosine similarity\n cosine_sim = optax.cosine_similarity(\n outputs.projected_states, outputs.projected_quantized_states, epsilon=1e-8\n )\n\n # retrieve cosine sim of masked features\n cosine_sim_masked = cosine_sim[mask_time_indices]\n\n # ... now compare to randomly initialized model\n\n config = Wav2Vec2Config.from_pretrained(\"facebook/wav2vec2-large-lv60\")\n model_rand = FlaxWav2Vec2ForPreTraining(config)\n\n outputs_rand = model_rand(\n inputs_dict.input_values,\n attention_mask=inputs_dict.attention_mask,\n mask_time_indices=mask_time_indices,\n )\n\n # compute cosine similarity\n cosine_sim_rand = optax.cosine_similarity(\n outputs_rand.projected_states, outputs_rand.projected_quantized_states\n )\n\n # retrieve cosine sim of masked features\n cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices]\n\n # a pretrained wav2vec2 model has learned to predict the quantized latent states\n # => the cosine similarity between quantized states and predicted states > 0.5\n # a random wav2vec2 model has not learned to predict the quantized latent states\n # => the cosine similarity between quantized states and predicted states is very likely < 0.1\n self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0)\n\n @require_pyctcdecode\n @require_librosa\n def test_wav2vec2_with_lm(self):\n ds = load_dataset(\"common_voice\", \"es\", split=\"test\", streaming=True)\n sample = next(iter(ds))\n\n resampled_audio = librosa.resample(sample[\"audio\"][\"array\"], 48_000, 16_000)\n\n model = FlaxWav2Vec2ForCTC.from_pretrained(\"patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm\")\n processor = Wav2Vec2ProcessorWithLM.from_pretrained(\"patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm\")\n\n input_values = processor(resampled_audio, return_tensors=\"np\").input_values\n\n logits = model(input_values).logits\n\n transcription = processor.batch_decode(np.array(logits)).text\n\n self.assertEqual(transcription[0], \"bien y qué regalo vas a abrir primero\")\n",
"# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import defaultdict\nfrom typing import TYPE_CHECKING, Dict, Optional, Union\n\nimport numpy as np\n\nfrom ..utils import is_torch_available, logging\nfrom .audio_utils import ffmpeg_read\nfrom .base import ChunkPipeline\n\n\nif TYPE_CHECKING:\n from ...feature_extraction_sequence_utils import SequenceFeatureExtractor\n\nlogger = logging.get_logger(__name__)\n\nif is_torch_available():\n from ..models.auto.modeling_auto import MODEL_FOR_CTC_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING\n\n\ndef rescale_stride(tokens_or_logits, stride, ratio):\n \"\"\"\n Rescales the stride values from audio space to tokens/logits space.\n\n (160_000, 16_000, 16_000) -> (2000, 200, 200) for instance.\n \"\"\"\n # Shape is [B, SEQ] for tokens\n # [B, SEQ, V] for logits\n\n new_strides = []\n for input_n, left, right in stride:\n token_n = int(round(input_n * ratio))\n left = int(round(left / input_n * token_n))\n right = int(round(right / input_n * token_n))\n new_stride = (token_n, left, right)\n new_strides.append(new_stride)\n\n return new_strides\n\n\ndef chunk_iter(inputs, feature_extractor, chunk_len, stride_left, stride_right):\n inputs_len = inputs.shape[0]\n step = chunk_len - stride_left - stride_right\n for i in range(0, inputs_len, step):\n # add start and end paddings to the chunk\n chunk = inputs[i : i + chunk_len]\n processed = feature_extractor(chunk, sampling_rate=feature_extractor.sampling_rate, return_tensors=\"pt\")\n _stride_left = 0 if i == 0 else stride_left\n is_last = i + step + stride_left >= inputs_len\n _stride_right = 0 if is_last else stride_right\n if chunk.shape[0] > _stride_left:\n yield {\"is_last\": is_last, \"stride\": (chunk.shape[0], _stride_left, _stride_right), **processed}\n\n\nclass AutomaticSpeechRecognitionPipeline(ChunkPipeline):\n \"\"\"\n Pipeline that aims at extracting spoken text contained within some audio.\n\n The input can be either a raw waveform or a audio file. In case of the audio file, ffmpeg should be installed for\n to support multiple audio formats\n\n Arguments:\n model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):\n The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from\n [`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.\n tokenizer ([`PreTrainedTokenizer`]):\n The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from\n [`PreTrainedTokenizer`].\n feature_extractor ([`SequenceFeatureExtractor`]):\n The feature extractor that will be used by the pipeline to encode waveform for the model.\n chunk_length_s (`float`, *optional*, defaults to 0):\n The input length for in each chunk. If `chunk_length_s = 0` then chunking is disabled (default). Only\n available for CTC models, e.g. 
[`Wav2Vec2ForCTC`].\n\n <Tip>\n\n For more information on how to effectively use `chunk_length_s`, please have a look at the [ASR chunking\n blog post](https://huggingface.co/blog/asr-chunking).\n\n </Tip>\n\n stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`):\n The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables\n the model to *see* more context and infer letters better than without this context but the pipeline\n discards the stride bits at the end to make the final reconstitution as perfect as possible.\n\n <Tip>\n\n For more information on how to effectively use `stride_length_s`, please have a look at the [ASR chunking\n blog post](https://huggingface.co/blog/asr-chunking).\n\n </Tip>\n\n framework (`str`, *optional*):\n The framework to use, either `\"pt\"` for PyTorch or `\"tf\"` for TensorFlow. The specified framework must be\n installed. If no framework is specified, will default to the one currently installed. If no framework is\n specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if\n no model is provided.\n device (`int`, *optional*, defaults to -1):\n Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on\n the associated CUDA device id.\n decoder (`pyctcdecode.BeamSearchDecoderCTC`, *optional*):\n [PyCTCDecode's\n BeamSearchDecoderCTC](https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180)\n can be passed for language model boosted decoding. See [`Wav2Vec2ProcessorWithLM`] for more information.\n \"\"\"\n\n def __init__(self, feature_extractor: Union[\"SequenceFeatureExtractor\", str], *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.feature_extractor = feature_extractor\n\n if self.model.__class__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.values():\n self.type = \"seq2seq\"\n elif (\n feature_extractor._processor_class\n and feature_extractor._processor_class.endswith(\"WithLM\")\n and kwargs.get(\"decoder\", None) is not None\n ):\n self.decoder = kwargs[\"decoder\"]\n self.type = \"ctc_with_lm\"\n else:\n self.type = \"ctc\"\n\n if self.framework == \"tf\":\n raise ValueError(\"The AutomaticSpeechRecognitionPipeline is only available in PyTorch.\")\n\n self.check_model_type(dict(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.items() + MODEL_FOR_CTC_MAPPING.items()))\n\n def __call__(\n self,\n inputs: Union[np.ndarray, bytes, str],\n **kwargs,\n ):\n \"\"\"\n Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more\n information.\n\n Args:\n inputs (`np.ndarray` or `bytes` or `str` or `dict`):\n The inputs is either :\n - `str` that is the filename of the audio file, the file will be read at the correct sampling rate\n to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.\n - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the\n same way.\n - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)\n Raw audio at the correct sampling rate (no further check will be done)\n - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this\n pipeline do the resampling. 
The dict must be in the format `{\"sampling_rate\": int, \"raw\":\n np.array}` with optionally a `\"stride\": (left: int, right: int)` than can ask the pipeline to\n treat the first `left` samples and last `right` samples to be ignored in decoding (but used at\n inference to provide more context to the model). Only use `stride` with CTC models.\n return_timestamps (*optional*, `str`):\n Only available for pure CTC models. If set to `\"char\"`, the pipeline will return `timestamps` along the\n text for every character in the text. For instance if you get `[{\"text\": \"h\", \"timestamps\": (0.5,0.6),\n {\"text\": \"i\", \"timestamps\": (0.7, .9)}]`, then it means the model predicts that the letter \"h\" was\n pronounced after `0.5` and before `0.6` seconds. If set to `\"word\"`, the pipeline will return\n `timestamps` along the text for every word in the text. For instance if you get `[{\"text\": \"hi \",\n \"timestamps\": (0.5,0.9), {\"text\": \"there\", \"timestamps\": (1.0, .1.5)}]`, then it means the model\n predicts that the word \"hi\" was pronounces before 0.5 and after 0.9 seconds.\n\n Return:\n `Dict`: A dictionary with the following keys:\n - **text** (`str` ) -- The recognized text.\n - **chunks** (*optional(, `List[Dict]`)\n When using `return_timestamps`, the `chunks` will become a list containing all the various text\n chunks identified by the model, *e.g.* `[{\"text\": \"hi \", \"timestamps\": (0.5,0.9), {\"text\":\n \"there\", \"timestamps\": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing\n `\"\".join(chunk[\"text\"] for chunk in output[\"chunks\"])`.\n \"\"\"\n return super().__call__(inputs, **kwargs)\n\n def _sanitize_parameters(self, **kwargs):\n # No parameters on this pipeline right now\n preprocess_params = {}\n if \"chunk_length_s\" in kwargs:\n preprocess_params[\"chunk_length_s\"] = kwargs[\"chunk_length_s\"]\n if \"stride_length_s\" in kwargs:\n preprocess_params[\"stride_length_s\"] = kwargs[\"stride_length_s\"]\n\n postprocess_params = {}\n if \"decoder_kwargs\" in kwargs:\n postprocess_params[\"decoder_kwargs\"] = kwargs[\"decoder_kwargs\"]\n if \"return_timestamps\" in kwargs:\n postprocess_params[\"return_timestamps\"] = kwargs[\"return_timestamps\"]\n\n return preprocess_params, {}, postprocess_params\n\n def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None):\n if isinstance(inputs, str):\n with open(inputs, \"rb\") as f:\n inputs = f.read()\n\n if isinstance(inputs, bytes):\n inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)\n\n stride = None\n extra = {}\n if isinstance(inputs, dict):\n stride = inputs.pop(\"stride\", None)\n _inputs = inputs.pop(\"raw\")\n in_sampling_rate = inputs.pop(\"sampling_rate\")\n extra = inputs\n inputs = _inputs\n if in_sampling_rate != self.feature_extractor.sampling_rate:\n import torch\n from torchaudio import functional as F\n\n inputs = F.resample(\n torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate\n ).numpy()\n ratio = self.feature_extractor.sampling_rate / in_sampling_rate\n else:\n ratio = 1\n if stride is not None:\n if stride[0] + stride[1] > inputs.shape[0]:\n raise ValueError(\"Stride is too large for input\")\n\n # Stride needs to get the chunk length here, it's going to get\n # swallowed by the `feature_extractor` later, and then batching\n # can add extra data in the inputs, so we need to keep track\n # of the original length in the stride so we can cut properly.\n stride = (inputs.shape[0], int(round(stride[0] * ratio)), 
int(round(stride[1] * ratio)))\n if not isinstance(inputs, np.ndarray):\n raise ValueError(f\"We expect a numpy ndarray as input, got `{type(inputs)}`\")\n if len(inputs.shape) != 1:\n raise ValueError(\"We expect a single channel audio input for AutomaticSpeechRecognitionPipeline\")\n\n if chunk_length_s:\n if stride_length_s is None:\n stride_length_s = chunk_length_s / 6\n\n if isinstance(stride_length_s, (int, float)):\n stride_length_s = [stride_length_s, stride_length_s]\n\n # XXX: Carefuly, this variable will not exist in `seq2seq` setting.\n # Currently chunking is not possible at this level for `seq2seq` so\n # it's ok.\n align_to = self.model.config.inputs_to_logits_ratio\n chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to)) * align_to\n stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to)) * align_to\n stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to)) * align_to\n\n if self.type not in {\"ctc\", \"ctc_with_lm\"}:\n raise ValueError(\n \"`chunk_length_s` is only valid for CTC models, use other chunking options for other models\"\n )\n if chunk_len < stride_left + stride_right:\n raise ValueError(\"Chunk length must be superior to stride length\")\n\n # make sure that\n for item in chunk_iter(inputs, self.feature_extractor, chunk_len, stride_left, stride_right):\n yield item\n else:\n processed = self.feature_extractor(\n inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors=\"pt\"\n )\n if stride is not None:\n if self.model.__class__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.values():\n raise ValueError(\"Stride is only usable with CTC models, try removing it\")\n\n processed[\"stride\"] = stride\n yield {\"is_last\": True, **processed, **extra}\n\n def _forward(self, model_inputs):\n is_last = model_inputs.pop(\"is_last\")\n if self.type == \"seq2seq\":\n encoder = self.model.get_encoder()\n # we need to pass `processed.get(\"attention_mask\")` here since audio encoder\n # attention mask length is different from expected text decoder `encoder_attention_mask` length\n # `generate` magic to create the mask automatically won't work, we basically need to help\n # it here.\n # Consume values so we can let extra information flow freely through\n # the pipeline (important for `partial` in microphone)\n if \"input_features\" in model_inputs:\n inputs = model_inputs.pop(\"input_features\")\n elif \"input_values\" in model_inputs:\n inputs = model_inputs.pop(\"input_values\")\n else:\n raise ValueError(\n \"Seq2Seq speech recognition model requires either a \"\n f\"`input_features` or `input_values` key, but only has {model_inputs.keys()}\"\n )\n\n attention_mask = model_inputs.pop(\"attention_mask\", None)\n tokens = self.model.generate(\n encoder_outputs=encoder(inputs, attention_mask=attention_mask),\n attention_mask=attention_mask,\n )\n out = {\"tokens\": tokens}\n else:\n stride = model_inputs.pop(\"stride\", None)\n input_values = model_inputs.pop(\"input_values\")\n attention_mask = model_inputs.pop(\"attention_mask\", None)\n outputs = self.model(input_values=input_values, attention_mask=attention_mask)\n logits = outputs.logits\n\n if self.type == \"ctc_with_lm\":\n out = {\"logits\": logits}\n else:\n out = {\"tokens\": logits.argmax(dim=-1)}\n if stride is not None:\n # Send stride to `postprocess`.\n # it needs to be handled there where\n # the pieces are to be concatenated.\n ratio = 1 / self.model.config.inputs_to_logits_ratio\n if 
isinstance(stride, tuple):\n out[\"stride\"] = rescale_stride(logits, [stride], ratio)[0]\n else:\n out[\"stride\"] = rescale_stride(logits, stride, ratio)\n # Leftover\n extra = model_inputs\n return {\"is_last\": is_last, **out, **extra}\n\n def postprocess(self, model_outputs, decoder_kwargs: Optional[Dict] = None, return_timestamps=None):\n # Optional return types\n optional = {}\n\n if return_timestamps and self.type == \"seq2seq\":\n raise ValueError(\"We cannot return_timestamps yet on non-ctc models !\")\n if return_timestamps == \"char\" and self.type == \"ctc_with_lm\":\n raise ValueError(\"CTC with LM cannot return `char` timestamps, only `words`\")\n\n final_items = []\n key = \"logits\" if self.type == \"ctc_with_lm\" else \"tokens\"\n for outputs in model_outputs:\n items = outputs[key].numpy()\n stride = outputs.pop(\"stride\", None)\n if stride is not None:\n total_n, left, right = stride\n # Total_n might be < logits.shape[1]\n # because of padding, that's why\n # we need to reconstruct this information\n # This won't work with left padding (which doesn't exist right now)\n right_n = total_n - right\n items = items[:, left:right_n]\n final_items.append(items)\n items = np.concatenate(final_items, axis=1)\n items = items.squeeze(0)\n if self.type == \"ctc_with_lm\":\n if decoder_kwargs is None:\n decoder_kwargs = {}\n beams = self.decoder.decode_beams(items, **decoder_kwargs)\n text = beams[0][0]\n if return_timestamps:\n # Simply cast from pyctcdecode format to wav2vec2 format to leverage\n # pre-existing code later\n chunk_offset = beams[0][2]\n word_offsets = []\n for word, (start_offset, end_offset) in chunk_offset:\n word_offsets.append({\"word\": word, \"start_offset\": start_offset, \"end_offset\": end_offset})\n\n else:\n skip_special_tokens = self.type != \"ctc\"\n text = self.tokenizer.decode(items, skip_special_tokens=skip_special_tokens)\n if return_timestamps:\n char_offsets = self.tokenizer.decode(\n items, skip_special_tokens=skip_special_tokens, output_char_offsets=True\n )[\"char_offsets\"]\n if return_timestamps == \"word\":\n word_offsets = self.tokenizer._get_word_offsets(\n char_offsets, self.tokenizer.replace_word_delimiter_char\n )\n\n if return_timestamps:\n if return_timestamps == \"word\":\n offsets = word_offsets\n else:\n offsets = char_offsets\n chunks = []\n for item in offsets:\n start = item[\"start_offset\"] * self.model.config.inputs_to_logits_ratio\n start /= self.feature_extractor.sampling_rate\n\n stop = item[\"end_offset\"] * self.model.config.inputs_to_logits_ratio\n stop /= self.feature_extractor.sampling_rate\n\n chunks.append({\"text\": item[return_timestamps], \"timestamp\": (start, stop)})\n optional[\"chunks\"] = chunks\n\n extra = defaultdict(list)\n for output in model_outputs:\n output.pop(\"tokens\", None)\n output.pop(\"logits\", None)\n output.pop(\"is_last\", None)\n for k, v in output.items():\n extra[k].append(v)\n return {\"text\": text, **optional, **extra}\n",
"# coding=utf-8\n# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Testing suite for the PyTorch DecisionTransformer model. \"\"\"\n\n\nimport inspect\nimport unittest\n\nfrom transformers import DecisionTransformerConfig, is_torch_available\nfrom transformers.testing_utils import require_torch, slow, torch_device\n\nfrom ..generation.test_generation_utils import GenerationTesterMixin\nfrom ..test_configuration_common import ConfigTester\nfrom ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask\n\n\nif is_torch_available():\n import torch\n\n from transformers import DecisionTransformerModel\n from transformers.models.decision_transformer.modeling_decision_transformer import (\n DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,\n )\n\n\nclass DecisionTransformerModelTester:\n def __init__(\n self,\n parent,\n batch_size=13,\n seq_length=7,\n act_dim=6,\n state_dim=17,\n hidden_size=23,\n max_length=11,\n is_training=True,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.act_dim = act_dim\n self.state_dim = state_dim\n self.hidden_size = hidden_size\n self.max_length = max_length\n self.is_training = is_training\n\n def prepare_config_and_inputs(self):\n states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))\n actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))\n rewards = floats_tensor((self.batch_size, self.seq_length, 1))\n returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))\n timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)\n attention_mask = random_attention_mask((self.batch_size, self.seq_length))\n\n config = self.get_config()\n\n return (\n config,\n states,\n actions,\n rewards,\n returns_to_go,\n timesteps,\n attention_mask,\n )\n\n def get_config(self):\n return DecisionTransformerConfig(\n batch_size=self.batch_size,\n seq_length=self.seq_length,\n act_dim=self.act_dim,\n state_dim=self.state_dim,\n hidden_size=self.hidden_size,\n max_length=self.max_length,\n )\n\n def create_and_check_model(\n self,\n config,\n states,\n actions,\n rewards,\n returns_to_go,\n timesteps,\n attention_mask,\n ):\n model = DecisionTransformerModel(config=config)\n model.to(torch_device)\n model.eval()\n result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)\n\n self.parent.assertEqual(result.state_preds.shape, states.shape)\n self.parent.assertEqual(result.action_preds.shape, actions.shape)\n self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)\n self.parent.assertEqual(\n result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)\n ) # seq length *3 as there are 3 modelities: states, returns and actions\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n states,\n actions,\n rewards,\n returns_to_go,\n 
timesteps,\n attention_mask,\n ) = config_and_inputs\n inputs_dict = {\n \"states\": states,\n \"actions\": actions,\n \"rewards\": rewards,\n \"returns_to_go\": returns_to_go,\n \"timesteps\": timesteps,\n \"attention_mask\": attention_mask,\n }\n return config, inputs_dict\n\n\n@require_torch\nclass DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):\n\n all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()\n all_generative_model_classes = ()\n\n # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids\n test_generate_without_input_ids = False\n\n # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features\n test_pruning = False\n test_resize_embeddings = False\n test_head_masking = False\n test_attention_outputs = False\n test_hidden_states_output = False\n test_inputs_embeds = False\n test_model_common_attributes = False\n test_gradient_checkpointing = False\n test_torchscript = False\n\n def setUp(self):\n self.model_tester = DecisionTransformerModelTester(self)\n self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = DecisionTransformerModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.forward)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\n \"states\",\n \"actions\",\n \"rewards\",\n \"returns_to_go\",\n \"timesteps\",\n \"attention_mask\",\n ]\n\n self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)\n\n\n@require_torch\nclass DecisionTransformerModelIntegrationTest(unittest.TestCase):\n @slow\n def test_autoregressive_prediction(self):\n \"\"\"\n An integration test that performs autoregressive prediction of state, action and return\n from a sequence of state, actions and returns. 
Test is performed over two timesteps.\n\n \"\"\"\n\n NUM_STEPS = 2 # number of steps of autoregressive prediction we will perform\n TARGET_RETURN = 10 # defined by the RL environment, may be normalized\n model = DecisionTransformerModel.from_pretrained(\"edbeeching/decision-transformer-gym-hopper-expert\")\n model = model.to(torch_device)\n config = model.config\n torch.manual_seed(0)\n state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32) # env.reset()\n\n expected_outputs = torch.tensor([[0.2384, -0.2955, 0.8741], [0.6765, -0.0793, -0.1298]], device=torch_device)\n\n returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)\n states = state\n actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)\n rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)\n timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)\n\n for step in range(NUM_STEPS):\n actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)\n rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)\n\n attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)\n\n with torch.no_grad():\n _, action_pred, _ = model(\n states=states,\n actions=actions,\n rewards=rewards,\n returns_to_go=returns_to_go,\n timesteps=timesteps,\n attention_mask=attention_mask,\n return_dict=False,\n )\n\n self.assertEqual(action_pred.shape, actions.shape)\n self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))\n state, reward, _, _ = ( # env.step(action)\n torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),\n 1.0,\n False,\n {},\n )\n\n actions[-1] = action_pred[0, -1]\n states = torch.cat([states, state], dim=1)\n pred_return = returns_to_go[0, -1] - reward\n returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)\n timesteps = torch.cat(\n [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1\n )\n",
"# coding=utf-8\n# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Testing suite for the PyTorch DPT model. \"\"\"\n\n\nimport inspect\nimport unittest\n\nfrom transformers import DPTConfig\nfrom transformers.file_utils import is_torch_available, is_vision_available\nfrom transformers.models.auto import get_values\nfrom transformers.testing_utils import require_torch, require_vision, slow, torch_device\n\nfrom ..test_configuration_common import ConfigTester\nfrom ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor\n\n\nif is_torch_available():\n import torch\n from torch import nn\n\n from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel\n from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST\n\n\nif is_vision_available():\n from PIL import Image\n\n from transformers import DPTFeatureExtractor\n\n\nclass DPTModelTester:\n def __init__(\n self,\n parent,\n batch_size=2,\n image_size=32,\n patch_size=16,\n num_channels=3,\n is_training=True,\n use_labels=True,\n hidden_size=32,\n num_hidden_layers=4,\n backbone_out_indices=[0, 1, 2, 3],\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n num_labels=3,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_channels = num_channels\n self.is_training = is_training\n self.use_labels = use_labels\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.backbone_out_indices = backbone_out_indices\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.scope = scope\n # expected sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)\n num_patches = (image_size // patch_size) ** 2\n self.expected_seq_length = num_patches + 1\n\n def prepare_config_and_inputs(self):\n pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n\n labels = None\n if self.use_labels:\n labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)\n\n config = self.get_config()\n\n return config, pixel_values, labels\n\n def get_config(self):\n return DPTConfig(\n image_size=self.image_size,\n patch_size=self.patch_size,\n num_channels=self.num_channels,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n backbone_out_indices=self.backbone_out_indices,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n hidden_act=self.hidden_act,\n 
hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n is_decoder=False,\n initializer_range=self.initializer_range,\n )\n\n def create_and_check_model(self, config, pixel_values, labels):\n model = DPTModel(config=config)\n model.to(torch_device)\n model.eval()\n result = model(pixel_values)\n self.parent.assertEqual(\n result.last_hidden_state.shape, (self.batch_size, self.expected_seq_length, self.hidden_size)\n )\n\n def create_and_check_for_depth_estimation(self, config, pixel_values, labels):\n config.num_labels = self.num_labels\n model = DPTForDepthEstimation(config)\n model.to(torch_device)\n model.eval()\n result = model(pixel_values)\n self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))\n\n def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):\n config.num_labels = self.num_labels\n model = DPTForSemanticSegmentation(config)\n model.to(torch_device)\n model.eval()\n result = model(pixel_values, labels=labels)\n self.parent.assertEqual(\n result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)\n )\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, pixel_values, labels = config_and_inputs\n inputs_dict = {\"pixel_values\": pixel_values}\n return config, inputs_dict\n\n\n@require_torch\nclass DPTModelTest(ModelTesterMixin, unittest.TestCase):\n \"\"\"\n Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds,\n attention_mask and seq_length.\n \"\"\"\n\n all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()\n\n test_pruning = False\n test_resize_embeddings = False\n test_head_masking = False\n\n def setUp(self):\n self.model_tester = DPTModelTester(self)\n self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_inputs_embeds(self):\n # DPT does not use inputs_embeds\n pass\n\n def test_model_common_attributes(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n self.assertIsInstance(model.get_input_embeddings(), (nn.Module))\n x = model.get_output_embeddings()\n self.assertTrue(x is None or isinstance(x, nn.Linear))\n\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.forward)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"pixel_values\"]\n self.assertListEqual(arg_names[:1], expected_arg_names)\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n def test_for_depth_estimation(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)\n\n def test_for_semantic_segmentation(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n 
self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)\n\n def test_attention_outputs(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.return_dict = True\n\n # in DPT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)\n seq_len = self.model_tester.expected_seq_length\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = False\n config.return_dict = True\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers)\n\n # check that output_attentions also work using config\n del inputs_dict[\"output_attentions\"]\n config.output_attentions = True\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n attentions = outputs.attentions\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_len, seq_len],\n )\n out_len = len(outputs)\n\n # Check attention is always last and order is fine\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = True\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n self.assertEqual(out_len + 1, len(outputs))\n\n self_attentions = outputs.attentions\n\n self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(self_attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_len, seq_len],\n )\n\n def test_hidden_states_output(self):\n def check_hidden_states_output(inputs_dict, config, model_class):\n model = model_class(config)\n model.to(torch_device)\n model.eval()\n\n with torch.no_grad():\n outputs = model(**self._prepare_for_class(inputs_dict, model_class))\n\n hidden_states = outputs.hidden_states\n\n expected_num_layers = getattr(\n self.model_tester, \"expected_num_hidden_layers\", self.model_tester.num_hidden_layers + 1\n )\n self.assertEqual(len(hidden_states), expected_num_layers)\n\n # DPT has a different seq_length\n seq_len = self.model_tester.expected_seq_length\n\n self.assertListEqual(\n list(hidden_states[0].shape[-2:]),\n [seq_len, self.model_tester.hidden_size],\n )\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_hidden_states\"] = True\n check_hidden_states_output(inputs_dict, config, model_class)\n\n # check that output_hidden_states also work using config\n del inputs_dict[\"output_hidden_states\"]\n config.output_hidden_states = True\n\n check_hidden_states_output(inputs_dict, config, model_class)\n\n def test_training(self):\n for model_class in self.all_model_classes:\n if model_class.__name__ == \"DPTForDepthEstimation\":\n continue\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.return_dict = True\n\n if model_class in get_values(MODEL_MAPPING):\n continue\n\n model = model_class(config)\n model.to(torch_device)\n model.train()\n inputs = self._prepare_for_class(inputs_dict, model_class, 
return_labels=True)\n loss = model(**inputs).loss\n loss.backward()\n\n def test_training_gradient_checkpointing(self):\n for model_class in self.all_model_classes:\n if model_class.__name__ == \"DPTForDepthEstimation\":\n continue\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.use_cache = False\n config.return_dict = True\n\n if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:\n continue\n model = model_class(config)\n model.to(torch_device)\n model.gradient_checkpointing_enable()\n model.train()\n inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n loss = model(**inputs).loss\n loss.backward()\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = DPTModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n\n# We will verify our results on an image of cute cats\ndef prepare_img():\n image = Image.open(\"./tests/fixtures/tests_samples/COCO/000000039769.png\")\n return image\n\n\n@require_torch\n@require_vision\n@slow\nclass DPTModelIntegrationTest(unittest.TestCase):\n def test_inference_depth_estimation(self):\n feature_extractor = DPTFeatureExtractor.from_pretrained(\"Intel/dpt-large\")\n model = DPTForDepthEstimation.from_pretrained(\"Intel/dpt-large\").to(torch_device)\n\n image = prepare_img()\n inputs = feature_extractor(images=image, return_tensors=\"pt\").to(torch_device)\n\n # forward pass\n with torch.no_grad():\n outputs = model(**inputs)\n predicted_depth = outputs.predicted_depth\n\n # verify the predicted depth\n expected_shape = torch.Size((1, 384, 384))\n self.assertEqual(predicted_depth.shape, expected_shape)\n\n expected_slice = torch.tensor(\n [[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]]\n ).to(torch_device)\n\n self.assertTrue(torch.allclose(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4))\n\n def test_inference_semantic_segmentation(self):\n feature_extractor = DPTFeatureExtractor.from_pretrained(\"Intel/dpt-large-ade\")\n model = DPTForSemanticSegmentation.from_pretrained(\"Intel/dpt-large-ade\").to(torch_device)\n\n image = prepare_img()\n inputs = feature_extractor(images=image, return_tensors=\"pt\").to(torch_device)\n\n # forward pass\n with torch.no_grad():\n outputs = model(**inputs)\n\n # verify the logits\n expected_shape = torch.Size((1, 150, 480, 480))\n self.assertEqual(outputs.logits.shape, expected_shape)\n\n expected_slice = torch.tensor(\n [[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]]\n ).to(torch_device)\n\n self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))\n",
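The shape assertions in the tests above all key off `expected_seq_length`: for a ViT-style backbone such as DPT's, the token sequence is the flattened patch grid plus one [CLS] token. A minimal sketch of that arithmetic; the helper name and the sample numbers are illustrative, not part of the test suite:

```python
def expected_seq_length(image_size: int, patch_size: int) -> int:
    # Assumes a square image and square, non-overlapping patches.
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token


# e.g. the small config a model tester typically uses: 32x32 image, 16x16 patches
assert expected_seq_length(32, 16) == 5
```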
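For reference, a usage sketch mirroring the depth-estimation integration test above, with the common extra step of resizing the predicted depth map back to the input resolution. The image path is a placeholder, and the resize step is an assumption about typical post-processing, not something the test performs:

```python
import torch
from PIL import Image
from transformers import DPTFeatureExtractor, DPTForDepthEstimation

feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")

image = Image.open("example.png")  # placeholder path
inputs = feature_extractor(images=image, return_tensors="pt")
with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # shape (1, 384, 384)

# Upsample to the original resolution; PIL's size is (width, height).
depth = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1),
    size=image.size[::-1],
    mode="bicubic",
    align_corners=False,
).squeeze()
```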
"# coding=utf-8\n# Copyright 2022 Meta Platforms Inc. and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 ConvNext model.\"\"\"\n\n\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import get_tf_activation\nfrom ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling, TFSequenceClassifierOutput\nfrom ...modeling_tf_utils import (\n TFModelInputType,\n TFPreTrainedModel,\n TFSequenceClassificationLoss,\n get_initializer,\n keras_serializable,\n unpack_inputs,\n)\nfrom ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings\nfrom .configuration_convnext import ConvNextConfig\n\n\nlogger = logging.get_logger(__name__)\n\n\n_CONFIG_FOR_DOC = \"ConvNextConfig\"\n_CHECKPOINT_FOR_DOC = \"facebook/convnext-tiny-224\"\n\n\nclass TFConvNextDropPath(tf.keras.layers.Layer):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n References:\n (1) github.com:rwightman/pytorch-image-models\n \"\"\"\n\n def __init__(self, drop_path, **kwargs):\n super().__init__(**kwargs)\n self.drop_path = drop_path\n\n def call(self, x, training=None):\n if training:\n keep_prob = 1 - self.drop_path\n shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)\n random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)\n random_tensor = tf.floor(random_tensor)\n return (x / keep_prob) * random_tensor\n return x\n\n\nclass TFConvNextEmbeddings(tf.keras.layers.Layer):\n \"\"\"This class is comparable to (and inspired by) the SwinEmbeddings class\n found in src/transformers/models/swin/modeling_swin.py.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.patch_embeddings = tf.keras.layers.Conv2D(\n filters=config.hidden_sizes[0],\n kernel_size=config.patch_size,\n strides=config.patch_size,\n name=\"patch_embeddings\",\n kernel_initializer=get_initializer(config.initializer_range),\n bias_initializer=\"zeros\",\n )\n self.layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-6, name=\"layernorm\")\n\n def call(self, pixel_values):\n if isinstance(pixel_values, dict):\n pixel_values = pixel_values[\"pixel_values\"]\n\n # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.\n # So change the input format from `NCHW` to `NHWC`.\n # shape = (batch_size, in_height, in_width, in_channels=num_channels)\n pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))\n\n embeddings = self.patch_embeddings(pixel_values)\n embeddings = self.layernorm(embeddings)\n return embeddings\n\n\nclass TFConvNextLayer(tf.keras.layers.Layer):\n \"\"\"This corresponds to the `Block` class in the original implementation.\n\n There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C,\n H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; 
Permute back\n\n The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow\n NHWC ordering, we can just apply the operations straight-away without the permutation.\n\n Args:\n config ([`ConvNextConfig`]): Model configuration class.\n dim (`int`): Number of input channels.\n drop_path (`float`): Stochastic depth rate. Default: 0.0.\n \"\"\"\n\n def __init__(self, config, dim, drop_path=0.0, **kwargs):\n super().__init__(**kwargs)\n self.dim = dim\n self.config = config\n self.dwconv = tf.keras.layers.Conv2D(\n filters=dim,\n kernel_size=7,\n padding=\"same\",\n groups=dim,\n kernel_initializer=get_initializer(config.initializer_range),\n bias_initializer=\"zeros\",\n name=\"dwconv\",\n ) # depthwise conv\n self.layernorm = tf.keras.layers.LayerNormalization(\n epsilon=1e-6,\n name=\"layernorm\",\n )\n self.pwconv1 = tf.keras.layers.Dense(\n units=4 * dim,\n kernel_initializer=get_initializer(config.initializer_range),\n bias_initializer=\"zeros\",\n name=\"pwconv1\",\n ) # pointwise/1x1 convs, implemented with linear layers\n self.act = get_tf_activation(config.hidden_act)\n self.pwconv2 = tf.keras.layers.Dense(\n units=dim,\n kernel_initializer=get_initializer(config.initializer_range),\n bias_initializer=\"zeros\",\n name=\"pwconv2\",\n )\n # Using `layers.Activation` instead of `tf.identity` to better control `training`\n # behaviour.\n self.drop_path = (\n TFConvNextDropPath(drop_path, name=\"drop_path\")\n if drop_path > 0.0\n else tf.keras.layers.Activation(\"linear\", name=\"drop_path\")\n )\n\n def build(self, input_shape: tf.TensorShape):\n # PT's `nn.Parameters` must be mapped to a TF layer weight to inherit the same name hierarchy (and vice-versa)\n self.layer_scale_parameter = (\n self.add_weight(\n shape=(self.dim,),\n initializer=tf.keras.initializers.Constant(value=self.config.layer_scale_init_value),\n trainable=True,\n name=\"layer_scale_parameter\",\n )\n if self.config.layer_scale_init_value > 0\n else None\n )\n super().build(input_shape)\n\n def call(self, hidden_states, training=False):\n input = hidden_states\n x = self.dwconv(hidden_states)\n x = self.layernorm(x)\n x = self.pwconv1(x)\n x = self.act(x)\n x = self.pwconv2(x)\n\n if self.layer_scale_parameter is not None:\n x = self.layer_scale_parameter * x\n\n x = input + self.drop_path(x, training=training)\n return x\n\n\nclass TFConvNextStage(tf.keras.layers.Layer):\n \"\"\"ConvNext stage, consisting of an optional downsampling layer + multiple residual blocks.\n\n Args:\n config ([`ConvNextConfig`]): Model configuration class.\n in_channels (`int`): Number of input channels.\n out_channels (`int`): Number of output channels.\n depth (`int`): Number of residual blocks.\n drop_path_rates(`List[float]`): Stochastic depth rates for each layer.\n \"\"\"\n\n def __init__(\n self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None, **kwargs\n ):\n super().__init__(**kwargs)\n if in_channels != out_channels or stride > 1:\n self.downsampling_layer = [\n tf.keras.layers.LayerNormalization(\n epsilon=1e-6,\n name=\"downsampling_layer.0\",\n ),\n # Inputs to this layer will follow NHWC format since we\n # transposed the inputs from NCHW to NHWC in the `TFConvNextEmbeddings`\n # layer. 
All the outputs throughout the model will be in NHWC\n                # from this point on until the output where we again change to\n                # NCHW.\n                tf.keras.layers.Conv2D(\n                    filters=out_channels,\n                    kernel_size=kernel_size,\n                    strides=stride,\n                    kernel_initializer=get_initializer(config.initializer_range),\n                    bias_initializer=\"zeros\",\n                    name=\"downsampling_layer.1\",\n                ),\n            ]\n        else:\n            self.downsampling_layer = [tf.identity]\n\n        drop_path_rates = drop_path_rates or [0.0] * depth\n        self.layers = [\n            TFConvNextLayer(\n                config,\n                dim=out_channels,\n                drop_path=drop_path_rates[j],\n                name=f\"layers.{j}\",\n            )\n            for j in range(depth)\n        ]\n\n    def call(self, hidden_states):\n        for layer in self.downsampling_layer:\n            hidden_states = layer(hidden_states)\n        for layer in self.layers:\n            hidden_states = layer(hidden_states)\n        return hidden_states\n\n\nclass TFConvNextEncoder(tf.keras.layers.Layer):\n    def __init__(self, config, **kwargs):\n        super().__init__(**kwargs)\n        self.stages = []\n        drop_path_rates = [x for x in tf.linspace(0.0, config.drop_path_rate, sum(config.depths))]\n        cur = 0\n        prev_chs = config.hidden_sizes[0]\n        for i in range(config.num_stages):\n            out_chs = config.hidden_sizes[i]\n            stage = TFConvNextStage(\n                config,\n                in_channels=prev_chs,\n                out_channels=out_chs,\n                stride=2 if i > 0 else 1,\n                depth=config.depths[i],\n                drop_path_rates=drop_path_rates[cur : cur + config.depths[i]],\n                name=f\"stages.{i}\",\n            )\n            self.stages.append(stage)\n            cur += config.depths[i]\n            prev_chs = out_chs\n\n    def call(self, hidden_states, output_hidden_states=False, return_dict=True):\n        all_hidden_states = () if output_hidden_states else None\n\n        for i, layer_module in enumerate(self.stages):\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n\n            hidden_states = layer_module(hidden_states)\n\n        if output_hidden_states:\n            all_hidden_states = all_hidden_states + (hidden_states,)\n\n        if not return_dict:\n            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)\n\n        return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states)\n\n\n@keras_serializable\nclass TFConvNextMainLayer(tf.keras.layers.Layer):\n    config_class = ConvNextConfig\n\n    def __init__(self, config: ConvNextConfig, add_pooling_layer: bool = True, **kwargs):\n        super().__init__(**kwargs)\n\n        self.config = config\n        self.embeddings = TFConvNextEmbeddings(config, name=\"embeddings\")\n        self.encoder = TFConvNextEncoder(config, name=\"encoder\")\n        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"layernorm\")\n        # We are setting the `data_format` like so because from here on we will revert to the\n        # NCHW output format\n        self.pooler = tf.keras.layers.GlobalAvgPool2D(data_format=\"channels_first\") if add_pooling_layer else None\n\n    @unpack_inputs\n    def call(\n        self,\n        pixel_values: Optional[TFModelInputType] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        training: bool = False,\n    ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:\n        output_hidden_states = (\n            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        )\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        if pixel_values is None:\n            raise ValueError(\"You have to specify pixel_values\")\n\n        embedding_output = self.embeddings(pixel_values, training=training)\n\n        encoder_outputs = self.encoder(\n            embedding_output,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n            
training=training,\n )\n\n last_hidden_state = encoder_outputs[0]\n # Change to NCHW output format have uniformity in the modules\n last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))\n pooled_output = self.layernorm(self.pooler(last_hidden_state))\n\n # Change the other hidden state outputs to NCHW as well\n if output_hidden_states:\n hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])\n\n if not return_dict:\n return (last_hidden_state, pooled_output) + encoder_outputs[1:]\n\n return TFBaseModelOutputWithPooling(\n last_hidden_state=last_hidden_state,\n pooler_output=pooled_output,\n hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,\n )\n\n\nclass TFConvNextPreTrainedModel(TFPreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = ConvNextConfig\n base_model_prefix = \"convnext\"\n main_input_name = \"pixel_values\"\n\n @property\n def dummy_inputs(self) -> Dict[str, tf.Tensor]:\n \"\"\"\n Dummy inputs to build the network.\n\n Returns:\n `Dict[str, tf.Tensor]`: The dummy inputs.\n \"\"\"\n VISION_DUMMY_INPUTS = tf.random.uniform(\n shape=(\n 3,\n self.config.num_channels,\n self.config.image_size,\n self.config.image_size,\n ),\n dtype=tf.float32,\n )\n return {\"pixel_values\": tf.constant(VISION_DUMMY_INPUTS)}\n\n @tf.function(\n input_signature=[\n {\n \"pixel_values\": tf.TensorSpec((None, None, None, None), tf.float32, name=\"pixel_values\"),\n }\n ]\n )\n def serving(self, inputs):\n \"\"\"\n Method used for serving the model.\n\n Args:\n inputs (`Dict[str, tf.Tensor]`):\n The input of the saved model as a dictionary of tensors.\n \"\"\"\n return self.call(inputs)\n\n\nCONVNEXT_START_DOCSTRING = r\"\"\"\n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n <Tip>\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n </Tip>\n\n Parameters:\n config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nCONVNEXT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ConvNextFeatureExtractor`]. 
See\n [`ConvNextFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare ConvNext model outputting raw features without any specific head on top.\",\n CONVNEXT_START_DOCSTRING,\n)\nclass TFConvNextModel(TFConvNextPreTrainedModel):\n def __init__(self, config, *inputs, add_pooling_layer=True, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.convnext = TFConvNextMainLayer(config, add_pooling_layer=add_pooling_layer, name=\"convnext\")\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)\n def call(\n self,\n pixel_values: Optional[TFModelInputType] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n training: bool = False,\n ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:\n r\"\"\"\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import ConvNextFeatureExtractor, TFConvNextModel\n >>> from PIL import Image\n >>> import requests\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> feature_extractor = ConvNextFeatureExtractor.from_pretrained(\"facebook/convnext-tiny-224\")\n >>> model = TFConvNextModel.from_pretrained(\"facebook/convnext-tiny-224\")\n\n >>> inputs = feature_extractor(images=image, return_tensors=\"tf\")\n >>> outputs = model(**inputs)\n >>> last_hidden_states = outputs.last_hidden_state\n ```\"\"\"\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if pixel_values is None:\n raise ValueError(\"You have to specify pixel_values\")\n\n outputs = self.convnext(\n pixel_values=pixel_values,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n )\n\n if not return_dict:\n return (outputs[0],) + outputs[1:]\n\n return TFBaseModelOutputWithPooling(\n last_hidden_state=outputs.last_hidden_state,\n pooler_output=outputs.pooler_output,\n hidden_states=outputs.hidden_states,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n    ImageNet.\n    \"\"\",\n    CONVNEXT_START_DOCSTRING,\n)\nclass TFConvNextForImageClassification(TFConvNextPreTrainedModel, TFSequenceClassificationLoss):\n    def __init__(self, config: ConvNextConfig, *inputs, **kwargs):\n        super().__init__(config, *inputs, **kwargs)\n\n        self.num_labels = config.num_labels\n        self.convnext = TFConvNextMainLayer(config, name=\"convnext\")\n\n        # Classifier head\n        self.classifier = tf.keras.layers.Dense(\n            units=config.num_labels,\n            kernel_initializer=get_initializer(config.initializer_range),\n            bias_initializer=\"zeros\",\n            name=\"classifier\",\n        )\n\n    @unpack_inputs\n    @add_start_docstrings_to_model_forward(CONVNEXT_INPUTS_DOCSTRING)\n    @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)\n    def call(\n        self,\n        pixel_values: Optional[TFModelInputType] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,\n        training: Optional[bool] = False,\n    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:\n        r\"\"\"\n        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):\n            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,\n            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\n        Returns:\n\n        Examples:\n\n        ```python\n        >>> from transformers import ConvNextFeatureExtractor, TFConvNextForImageClassification\n        >>> import tensorflow as tf\n        >>> from PIL import Image\n        >>> import requests\n\n        >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n        >>> image = Image.open(requests.get(url, stream=True).raw)\n\n        >>> feature_extractor = ConvNextFeatureExtractor.from_pretrained(\"facebook/convnext-tiny-224\")\n        >>> model = TFConvNextForImageClassification.from_pretrained(\"facebook/convnext-tiny-224\")\n\n        >>> inputs = feature_extractor(images=image, return_tensors=\"tf\")\n        >>> outputs = model(**inputs)\n        >>> logits = outputs.logits\n        >>> # model predicts one of the 1000 ImageNet classes\n        >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0]\n        >>> print(\"Predicted class:\", model.config.id2label[int(predicted_class_idx)])\n        ```\"\"\"\n        output_hidden_states = (\n            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        )\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        if pixel_values is None:\n            raise ValueError(\"You have to specify pixel_values\")\n\n        outputs = self.convnext(\n            pixel_values,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n            training=training,\n        )\n\n        pooled_output = outputs.pooler_output if return_dict else outputs[1]\n\n        logits = self.classifier(pooled_output)\n        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)\n\n        if not return_dict:\n            output = (logits,) + outputs[2:]\n            return ((loss,) + output) if loss is not None else output\n\n        return TFSequenceClassifierOutput(\n            loss=loss,\n            logits=logits,\n            hidden_states=outputs.hidden_states,\n        )\n",
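A standalone sanity check of the stochastic-depth trick implemented in `TFConvNextDropPath` above: each sample's residual branch is zeroed out with probability `drop_path`, and the survivors are rescaled by `1/keep_prob` so the expected activation stays unchanged. The batch size and seed are arbitrary:

```python
import tensorflow as tf

tf.random.set_seed(0)
drop_path = 0.2
keep_prob = 1.0 - drop_path
x = tf.ones((10000, 8))  # dummy residual-branch outputs, one row per sample

# Per-sample 0/1 mask: floor(keep_prob + U[0, 1)) equals 1 with probability keep_prob.
mask_shape = (x.shape[0],) + (1,) * (x.shape.rank - 1)
mask = tf.floor(keep_prob + tf.random.uniform(mask_shape, 0, 1))
out = (x / keep_prob) * mask

print(float(tf.reduce_mean(out)))  # ~1.0: the rescaling preserves the expectation
```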
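`TFConvNextEncoder` builds one stochastic-depth rate per block with `tf.linspace(0.0, config.drop_path_rate, sum(config.depths))`, and each `TFConvNextStage` then hands one rate to each `TFConvNextLayer` (hence the per-stage slice of the schedule). A plain-Python sketch of that partitioning; the `depths` values are a ConvNeXt-tiny-like assumption:

```python
depths, drop_path_rate = [3, 3, 9, 3], 0.1  # assumed ConvNeXt-tiny-like config
total = sum(depths)

# Equivalent of linspace(0, drop_path_rate, total): rates grow linearly with depth.
rates = [drop_path_rate * i / (total - 1) for i in range(total)]

per_stage, cur = [], 0
for depth in depths:
    per_stage.append(rates[cur : cur + depth])  # one rate per block in this stage
    cur += depth

assert [len(r) for r in per_stage] == depths
assert abs(per_stage[-1][-1] - drop_path_rate) < 1e-12  # deepest block gets the full rate
```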
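The TF port keeps every convolution in NHWC because `tf.keras.layers.Conv2D` does not support NCHW on CPU: inputs are transposed once in `TFConvNextEmbeddings`, and `TFConvNextMainLayer` transposes the final hidden states back so the outputs line up with the PyTorch implementation. A minimal shape check of that round trip:

```python
import tensorflow as tf

pixel_values = tf.zeros((2, 3, 224, 224))             # NCHW, as the PyTorch model expects
nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))  # -> (2, 224, 224, 3) for Conv2D
nchw = tf.transpose(nhwc, perm=(0, 3, 1, 2))          # -> (2, 3, 224, 224) at the output

assert nchw.shape == pixel_values.shape
```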
"# coding=utf-8\n# Copyright 2021 Microsoft Research The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch LayoutLMv2 model.\"\"\"\n\n\nimport math\nfrom typing import Optional, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPooling,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import apply_chunking_to_forward\nfrom ...utils import (\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n is_detectron2_available,\n logging,\n replace_return_docstrings,\n requires_backends,\n)\nfrom .configuration_layoutlmv2 import LayoutLMv2Config\n\n\n# soft dependency\nif is_detectron2_available():\n import detectron2\n from detectron2.modeling import META_ARCH_REGISTRY\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"microsoft/layoutlmv2-base-uncased\"\n_CONFIG_FOR_DOC = \"LayoutLMv2Config\"\n_TOKENIZER_FOR_DOC = \"LayoutLMv2Tokenizer\"\n\nLAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/layoutlmv2-base-uncased\",\n \"microsoft/layoutlmv2-large-uncased\",\n # See all LayoutLMv2 models at https://huggingface.co/models?filter=layoutlmv2\n]\n\n\nclass LayoutLMv2Embeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super(LayoutLMv2Embeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n\n self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)\n self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)\n self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)\n self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def _calc_spatial_position_embeddings(self, bbox):\n try:\n left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])\n upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])\n right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])\n lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])\n except IndexError as e:\n raise IndexError(\"The `bbox` coordinate values should 
be within 0-1000 range.\") from e\n\n h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])\n w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])\n\n spatial_position_embeddings = torch.cat(\n [\n left_position_embeddings,\n upper_position_embeddings,\n right_position_embeddings,\n lower_position_embeddings,\n h_position_embeddings,\n w_position_embeddings,\n ],\n dim=-1,\n )\n return spatial_position_embeddings\n\n\nclass LayoutLMv2SelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n self.fast_qkv = config.fast_qkv\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.has_relative_attention_bias = config.has_relative_attention_bias\n self.has_spatial_attention_bias = config.has_spatial_attention_bias\n\n if config.fast_qkv:\n self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)\n self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))\n self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))\n else:\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def compute_qkv(self, hidden_states):\n if self.fast_qkv:\n qkv = self.qkv_linear(hidden_states)\n q, k, v = torch.chunk(qkv, 3, dim=-1)\n if q.ndimension() == self.q_bias.ndimension():\n q = q + self.q_bias\n v = v + self.v_bias\n else:\n _sz = (1,) * (q.ndimension() - 1) + (-1,)\n q = q + self.q_bias.view(*_sz)\n v = v + self.v_bias.view(*_sz)\n else:\n q = self.query(hidden_states)\n k = self.key(hidden_states)\n v = self.value(hidden_states)\n return q, k, v\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n rel_pos=None,\n rel_2d_pos=None,\n ):\n q, k, v = self.compute_qkv(hidden_states)\n\n # (B, L, H*D) -> (B, H, L, D)\n query_layer = self.transpose_for_scores(q)\n key_layer = self.transpose_for_scores(k)\n value_layer = self.transpose_for_scores(v)\n\n query_layer = query_layer / math.sqrt(self.attention_head_size)\n # [BSZ, NAT, L, L]\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n if self.has_relative_attention_bias:\n attention_scores += rel_pos\n if self.has_spatial_attention_bias:\n attention_scores += rel_2d_pos\n attention_scores = attention_scores.float().masked_fill_(attention_mask.to(torch.bool), float(\"-inf\"))\n attention_probs = nn.functional.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = 
attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n\nclass LayoutLMv2Attention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = LayoutLMv2SelfAttention(config)\n self.output = LayoutLMv2SelfOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n rel_pos=None,\n rel_2d_pos=None,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions,\n rel_pos=rel_pos,\n rel_2d_pos=rel_2d_pos,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass LayoutLMv2SelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->LayoutLMv2\nclass LayoutLMv2Intermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLM\nclass LayoutLMv2Output(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass LayoutLMv2Layer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = LayoutLMv2Attention(config)\n self.intermediate = LayoutLMv2Intermediate(config)\n self.output = LayoutLMv2Output(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n rel_pos=None,\n rel_2d_pos=None,\n ):\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n rel_pos=rel_pos,\n rel_2d_pos=rel_2d_pos,\n )\n attention_output = self_attention_outputs[0]\n\n outputs = 
self_attention_outputs[1:] # add self attentions if we output attention weights\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\ndef relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):\n \"\"\"\n Adapted from Mesh Tensorflow:\n https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593\n Translate relative position to a bucket number for relative attention. The relative position is defined as\n memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to\n position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small\n absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions\n >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should\n allow for more graceful generalization to longer sequences than the model has been trained on.\n\n Args:\n relative_position: an int32 Tensor\n bidirectional: a boolean - whether the attention is bidirectional\n num_buckets: an integer\n max_distance: an integer\n\n Returns:\n a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)\n \"\"\"\n\n ret = 0\n if bidirectional:\n num_buckets //= 2\n ret += (relative_position > 0).long() * num_buckets\n n = torch.abs(relative_position)\n else:\n n = torch.max(-relative_position, torch.zeros_like(relative_position))\n # now n is in the range [0, inf)\n\n # half of the buckets are for exact increments in positions\n max_exact = num_buckets // 2\n is_small = n < max_exact\n\n # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance\n val_if_large = max_exact + (\n torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)\n ).to(torch.long)\n val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))\n\n ret += torch.where(is_small, n, val_if_large)\n return ret\n\n\nclass LayoutLMv2Encoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])\n\n self.has_relative_attention_bias = config.has_relative_attention_bias\n self.has_spatial_attention_bias = config.has_spatial_attention_bias\n\n if self.has_relative_attention_bias:\n self.rel_pos_bins = config.rel_pos_bins\n self.max_rel_pos = config.max_rel_pos\n self.rel_pos_onehot_size = config.rel_pos_bins\n self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)\n\n if self.has_spatial_attention_bias:\n self.max_rel_2d_pos = config.max_rel_2d_pos\n self.rel_2d_pos_bins = config.rel_2d_pos_bins\n self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins\n self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)\n self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)\n\n 
self.gradient_checkpointing = False\n\n def _calculate_1d_position_embeddings(self, hidden_states, position_ids):\n rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)\n rel_pos = relative_position_bucket(\n rel_pos_mat,\n num_buckets=self.rel_pos_bins,\n max_distance=self.max_rel_pos,\n )\n rel_pos = nn.functional.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)\n rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)\n rel_pos = rel_pos.contiguous()\n return rel_pos\n\n def _calculate_2d_position_embeddings(self, hidden_states, bbox):\n position_coord_x = bbox[:, :, 0]\n position_coord_y = bbox[:, :, 3]\n rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)\n rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)\n rel_pos_x = relative_position_bucket(\n rel_pos_x_2d_mat,\n num_buckets=self.rel_2d_pos_bins,\n max_distance=self.max_rel_2d_pos,\n )\n rel_pos_y = relative_position_bucket(\n rel_pos_y_2d_mat,\n num_buckets=self.rel_2d_pos_bins,\n max_distance=self.max_rel_2d_pos,\n )\n rel_pos_x = nn.functional.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)\n rel_pos_y = nn.functional.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)\n rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)\n rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)\n rel_pos_x = rel_pos_x.contiguous()\n rel_pos_y = rel_pos_y.contiguous()\n rel_2d_pos = rel_pos_x + rel_pos_y\n return rel_2d_pos\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n bbox=None,\n position_ids=None,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n rel_pos = (\n self._calculate_1d_position_embeddings(hidden_states, position_ids)\n if self.has_relative_attention_bias\n else None\n )\n rel_2d_pos = (\n self._calculate_2d_position_embeddings(hidden_states, bbox) if self.has_spatial_attention_bias else None\n )\n\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n\n if self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n rel_pos=rel_pos,\n rel_2d_pos=rel_2d_pos,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n output_attentions,\n rel_pos=rel_pos,\n rel_2d_pos=rel_2d_pos,\n )\n\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n all_hidden_states,\n all_self_attentions,\n ]\n if v is not None\n )\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\nclass LayoutLMv2PreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple 
interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = LayoutLMv2Config\n pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST\n base_model_prefix = \"layoutlmv2\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, LayoutLMv2Encoder):\n module.gradient_checkpointing = value\n\n\ndef my_convert_sync_batchnorm(module, process_group=None):\n # same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d`\n if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):\n return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)\n module_output = module\n if isinstance(module, detectron2.layers.FrozenBatchNorm2d):\n module_output = torch.nn.SyncBatchNorm(\n num_features=module.num_features,\n eps=module.eps,\n affine=True,\n track_running_stats=True,\n process_group=process_group,\n )\n module_output.weight = torch.nn.Parameter(module.weight)\n module_output.bias = torch.nn.Parameter(module.bias)\n module_output.running_mean = module.running_mean\n module_output.running_var = module.running_var\n module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)\n for name, child in module.named_children():\n module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))\n del module\n return module_output\n\n\nclass LayoutLMv2VisualBackbone(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.cfg = config.get_detectron2_config()\n meta_arch = self.cfg.MODEL.META_ARCHITECTURE\n model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)\n assert isinstance(model.backbone, detectron2.modeling.backbone.FPN)\n self.backbone = model.backbone\n\n assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)\n num_channels = len(self.cfg.MODEL.PIXEL_MEAN)\n self.register_buffer(\n \"pixel_mean\",\n torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1),\n )\n self.register_buffer(\"pixel_std\", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1))\n self.out_feature_key = \"p2\"\n if torch.are_deterministic_algorithms_enabled():\n logger.warning(\"using `AvgPool2d` instead of `AdaptiveAvgPool2d`\")\n input_shape = (224, 224)\n backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride\n self.pool = nn.AvgPool2d(\n (\n math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]),\n math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]),\n )\n )\n else:\n self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])\n if len(config.image_feature_pool_shape) == 2:\n 
config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)\n        assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]\n\n    def forward(self, images):\n        images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std\n        features = self.backbone(images_input)\n        features = features[self.out_feature_key]\n        features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous()\n        return features\n\n    def synchronize_batch_norm(self):\n        if not (\n            torch.distributed.is_available()\n            and torch.distributed.is_initialized()\n            and torch.distributed.get_rank() > -1\n        ):\n            raise RuntimeError(\"Make sure torch.distributed is set up properly.\")\n\n        self_rank = torch.distributed.get_rank()\n        node_size = torch.cuda.device_count()\n        world_size = torch.distributed.get_world_size()\n        if not (world_size % node_size == 0):\n            raise RuntimeError(\"Make sure the number of processes can be divided by the number of nodes\")\n\n        node_global_ranks = [list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)]\n        sync_bn_groups = [\n            torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)\n        ]\n        node_rank = self_rank // node_size\n\n        self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])\n\n\nLAYOUTLMV2_START_DOCSTRING = r\"\"\"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`LayoutLMv2Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nLAYOUTLMV2_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (`torch.LongTensor` of shape `{0}`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`LayoutLMv2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n\n        bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):\n            Bounding boxes of each input sequence tokens. Selected in the range `[0,\n            config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)\n            format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,\n            y1) represents the position of the lower right corner.\n\n        image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron2.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`):\n            Batch of document images.\n\n        attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `{0}`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\nclass LayoutLMv2Pooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\n@add_start_docstrings(\n \"The bare LayoutLMv2 Model transformer outputting raw hidden-states without any specific head on top.\",\n LAYOUTLMV2_START_DOCSTRING,\n)\nclass LayoutLMv2Model(LayoutLMv2PreTrainedModel):\n def __init__(self, config):\n requires_backends(self, \"detectron2\")\n super().__init__(config)\n self.config = config\n self.has_visual_segment_embedding = config.has_visual_segment_embedding\n self.embeddings = LayoutLMv2Embeddings(config)\n\n self.visual = LayoutLMv2VisualBackbone(config)\n self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size)\n if self.has_visual_segment_embedding:\n self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0])\n self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.visual_dropout = nn.Dropout(config.hidden_dropout_prob)\n\n self.encoder = LayoutLMv2Encoder(config)\n self.pooler = LayoutLMv2Pooler(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = 
torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n if inputs_embeds is None:\n inputs_embeds = self.embeddings.word_embeddings(input_ids)\n position_embeddings = self.embeddings.position_embeddings(position_ids)\n spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)\n token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + position_embeddings + spatial_position_embeddings + token_type_embeddings\n embeddings = self.embeddings.LayerNorm(embeddings)\n embeddings = self.embeddings.dropout(embeddings)\n return embeddings\n\n def _calc_img_embeddings(self, image, bbox, position_ids):\n visual_embeddings = self.visual_proj(self.visual(image))\n position_embeddings = self.embeddings.position_embeddings(position_ids)\n spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)\n embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings\n if self.has_visual_segment_embedding:\n embeddings += self.visual_segment_embedding\n embeddings = self.visual_LayerNorm(embeddings)\n embeddings = self.visual_dropout(embeddings)\n return embeddings\n\n def _calc_visual_bbox(self, image_feature_pool_shape, bbox, device, final_shape):\n visual_bbox_x = (\n torch.arange(\n 0,\n 1000 * (image_feature_pool_shape[1] + 1),\n 1000,\n device=device,\n dtype=bbox.dtype,\n )\n // self.config.image_feature_pool_shape[1]\n )\n visual_bbox_y = (\n torch.arange(\n 0,\n 1000 * (self.config.image_feature_pool_shape[0] + 1),\n 1000,\n device=device,\n dtype=bbox.dtype,\n )\n // self.config.image_feature_pool_shape[0]\n )\n visual_bbox = torch.stack(\n [\n visual_bbox_x[:-1].repeat(image_feature_pool_shape[0], 1),\n visual_bbox_y[:-1].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),\n visual_bbox_x[1:].repeat(image_feature_pool_shape[0], 1),\n visual_bbox_y[1:].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),\n ],\n dim=-1,\n ).view(-1, bbox.size(-1))\n\n visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1)\n\n return visual_bbox\n\n @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format(\"(batch_size, sequence_length)\"))\n @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n bbox: Optional[torch.LongTensor] = None,\n image: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPooling]:\n r\"\"\"\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import LayoutLMv2Processor, LayoutLMv2Model\n >>> from PIL import Image\n\n >>> processor = LayoutLMv2Processor.from_pretrained(\"microsoft/layoutlmv2-base-uncased\")\n >>> model = LayoutLMv2Model.from_pretrained(\"microsoft/layoutlmv2-base-uncased\")\n\n >>> image = Image.open(\"name_of_your_document - can be a png file, pdf, etc.\").convert(\"RGB\")\n\n >>> encoding = processor(image, return_tensors=\"pt\")\n\n >>> outputs = 
model(**encoding)\n >>> last_hidden_states = outputs.last_hidden_state\n ```\"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n visual_shape = list(input_shape)\n visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]\n visual_shape = torch.Size(visual_shape)\n final_shape = list(input_shape)\n final_shape[1] += visual_shape[1]\n final_shape = torch.Size(final_shape)\n\n visual_bbox = self._calc_visual_bbox(self.config.image_feature_pool_shape, bbox, device, final_shape)\n final_bbox = torch.cat([bbox, visual_bbox], dim=1)\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n\n visual_attention_mask = torch.ones(visual_shape, device=device)\n final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n if position_ids is None:\n seq_length = input_shape[1]\n position_ids = self.embeddings.position_ids[:, :seq_length]\n position_ids = position_ids.expand(input_shape)\n\n visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(\n input_shape[0], 1\n )\n final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)\n\n if bbox is None:\n bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)\n\n text_layout_emb = self._calc_text_embeddings(\n input_ids=input_ids,\n bbox=bbox,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n inputs_embeds=inputs_embeds,\n )\n\n visual_emb = self._calc_img_embeddings(\n image=image,\n bbox=visual_bbox,\n position_ids=visual_position_ids,\n )\n final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)\n\n extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)\n\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.to(dtype=next(self.parameters()).dtype)\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n encoder_outputs = self.encoder(\n final_emb,\n extended_attention_mask,\n bbox=final_bbox,\n position_ids=final_position_ids,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output)\n\n if not return_dict:\n return 
(sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the\n final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual\n embeddings, e.g. for document image classification tasks such as the\n [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.\n \"\"\",\n LAYOUTLMV2_START_DOCSTRING,\n)\nclass LayoutLMv2ForSequenceClassification(LayoutLMv2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.layoutlmv2 = LayoutLMv2Model(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.layoutlmv2.embeddings.word_embeddings\n\n @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n bbox: Optional[torch.LongTensor] = None,\n image: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, SequenceClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import LayoutLMv2Processor, LayoutLMv2ForSequenceClassification\n >>> from PIL import Image\n >>> import torch\n\n >>> processor = LayoutLMv2Processor.from_pretrained(\"microsoft/layoutlmv2-base-uncased\")\n >>> model = LayoutLMv2ForSequenceClassification.from_pretrained(\"microsoft/layoutlmv2-base-uncased\")\n\n >>> image = Image.open(\"name_of_your_document - can be a png file, pdf, etc.\").convert(\"RGB\")\n\n >>> encoding = processor(image, return_tensors=\"pt\")\n >>> sequence_label = torch.tensor([1])\n\n >>> outputs = model(**encoding, labels=sequence_label)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n visual_shape = list(input_shape)\n visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]\n visual_shape = torch.Size(visual_shape)\n final_shape = list(input_shape)\n final_shape[1] += visual_shape[1]\n final_shape = torch.Size(final_shape)\n\n visual_bbox = self.layoutlmv2._calc_visual_bbox(\n self.config.image_feature_pool_shape, bbox, device, final_shape\n )\n\n visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(\n input_shape[0], 1\n )\n\n initial_image_embeddings = self.layoutlmv2._calc_img_embeddings(\n image=image,\n bbox=visual_bbox,\n position_ids=visual_position_ids,\n )\n\n outputs = self.layoutlmv2(\n input_ids=input_ids,\n bbox=bbox,\n image=image,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n sequence_output, final_image_embeddings = outputs[0][:, :seq_length], outputs[0][:, seq_length:]\n\n cls_final_output = sequence_output[:, 0, :]\n\n # average-pool the visual embeddings\n pooled_initial_image_embeddings = initial_image_embeddings.mean(dim=1)\n pooled_final_image_embeddings = final_image_embeddings.mean(dim=1)\n # concatenate with cls_final_output\n sequence_output = torch.cat(\n [cls_final_output, pooled_initial_image_embeddings, pooled_final_image_embeddings], dim=1\n )\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if 
self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden\n states) e.g. for sequence labeling (information extraction) tasks such as\n [FUNSD](https://guillaumejaume.github.io/FUNSD/), [SROIE](https://rrc.cvc.uab.es/?ch=13),\n [CORD](https://github.com/clovaai/cord) and [Kleister-NDA](https://github.com/applicaai/kleister-nda).\n \"\"\",\n LAYOUTLMV2_START_DOCSTRING,\n)\nclass LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.layoutlmv2 = LayoutLMv2Model(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.layoutlmv2.embeddings.word_embeddings\n\n @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n bbox: Optional[torch.LongTensor] = None,\n image: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TokenClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the token classification loss. 
Indices should be in `[0, ..., config.num_labels - 1]`.\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import LayoutLMv2Processor, LayoutLMv2ForTokenClassification\n >>> from PIL import Image\n\n >>> processor = LayoutLMv2Processor.from_pretrained(\"microsoft/layoutlmv2-base-uncased\", revision=\"no_ocr\")\n >>> model = LayoutLMv2ForTokenClassification.from_pretrained(\"microsoft/layoutlmv2-base-uncased\")\n\n >>> image = Image.open(\"name_of_your_document - can be a png file, pdf, etc.\").convert(\"RGB\")\n >>> words = [\"hello\", \"world\"]\n >>> boxes = [[1, 2, 3, 4], [5, 6, 7, 8]] # make sure to normalize your bounding boxes\n >>> word_labels = [0, 1]\n\n >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors=\"pt\")\n\n >>> outputs = model(**encoding)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.layoutlmv2(\n input_ids=input_ids,\n bbox=bbox,\n image=image,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n # only take the text part of the output representations\n sequence_output = outputs[0][:, :seq_length]\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n LayoutLMv2 Model with a span classification head on top for extractive question-answering tasks such as\n [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to\n compute `span start logits` and `span end logits`).\n \"\"\",\n LAYOUTLMV2_START_DOCSTRING,\n)\nclass LayoutLMv2ForQuestionAnswering(LayoutLMv2PreTrainedModel):\n def __init__(self, config, has_visual_segment_embedding=True):\n super().__init__(config)\n self.num_labels = config.num_labels\n config.has_visual_segment_embedding = has_visual_segment_embedding\n self.layoutlmv2 = LayoutLMv2Model(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.layoutlmv2.embeddings.word_embeddings\n\n @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n bbox: Optional[torch.LongTensor] = None,\n image: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: 
Optional[torch.FloatTensor] = None,\n start_positions: Optional[torch.LongTensor] = None,\n end_positions: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, QuestionAnsweringModelOutput]:\n r\"\"\"\n start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import LayoutLMv2Processor, LayoutLMv2ForQuestionAnswering\n >>> from PIL import Image\n >>> import torch\n\n >>> processor = LayoutLMv2Processor.from_pretrained(\"microsoft/layoutlmv2-base-uncased\")\n >>> model = LayoutLMv2ForQuestionAnswering.from_pretrained(\"microsoft/layoutlmv2-base-uncased\")\n\n >>> image = Image.open(\"name_of_your_document - can be a png file, pdf, etc.\").convert(\"RGB\")\n >>> question = \"what's his name?\"\n\n >>> encoding = processor(image, question, return_tensors=\"pt\")\n >>> start_positions = torch.tensor([1])\n >>> end_positions = torch.tensor([3])\n\n >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions)\n >>> loss = outputs.loss\n >>> start_scores = outputs.start_logits\n >>> end_scores = outputs.end_logits\n ```\"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.layoutlmv2(\n input_ids=input_ids,\n bbox=bbox,\n image=image,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n # only take the text part of the output representations\n sequence_output = outputs[0][:, :seq_length]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not 
return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
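Note on the masking step in the LayoutLMv2 forward pass above: the text and visual attention masks are concatenated, broadcast to (batch, 1, 1, seq_len), and turned into an additive mask in which blocked positions receive -10000.0. A minimal PyTorch sketch of that convention, with illustrative shapes (a 4-token text stream plus a 7x7 visual feature pool; none of these tensor names come from the transformers API):

import torch

# two sequences: 4 text tokens (with padding) plus 49 visual tokens
text_mask = torch.tensor([[1., 1., 1., 0.], [1., 1., 0., 0.]])
visual_mask = torch.ones(2, 49)            # visual tokens are always attended
final_mask = torch.cat([text_mask, visual_mask], dim=1)   # shape (2, 53)

# broadcast to (batch, 1, 1, seq) and convert to additive form:
# 0.0 keeps a position, -10000.0 drives its softmax weight to ~0
extended = final_mask[:, None, None, :]
additive = (1.0 - extended) * -10000.0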
] | [
[
"numpy.unique",
"numpy.arange",
"numpy.ones",
"numpy.broadcast_to",
"numpy.array"
],
[
"numpy.concatenate",
"torch.from_numpy"
],
[
"torch.ones",
"torch.zeros",
"torch.cat",
"torch.manual_seed",
"torch.randn",
"torch.tensor",
"torch.no_grad",
"torch.allclose"
],
[
"torch.allclose",
"torch.Size",
"torch.no_grad",
"torch.tensor"
],
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.keras.initializers.Constant",
"tensorflow.transpose",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.GlobalAvgPool2D",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.random.uniform",
"tensorflow.floor",
"tensorflow.TensorSpec"
],
[
"torch.abs",
"torch.nn.functional.softmax",
"torch.cat",
"torch.zeros",
"torch.nn.modules.SyncBatchNorm.convert_sync_batchnorm",
"torch.nn.functional.one_hot",
"torch.nn.Embedding",
"torch.nn.SyncBatchNorm",
"torch.nn.BCEWithLogitsLoss",
"torch.where",
"torch.are_deterministic_algorithms_enabled",
"torch.full_like",
"torch.distributed.get_rank",
"torch.nn.Dropout",
"torch.Size",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.tensor",
"torch.arange",
"torch.nn.Parameter",
"torch.zeros_like",
"torch.distributed.is_initialized",
"torch.is_tensor",
"torch.nn.Linear",
"torch.distributed.is_available",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.Tensor",
"torch.nn.LayerNorm",
"torch.nn.Tanh",
"torch.matmul",
"torch.nn.AdaptiveAvgPool2d",
"torch.distributed.new_group",
"torch.chunk",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
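The LayoutLMv2ForSequenceClassification record above infers config.problem_type from num_labels and the label dtype, then dispatches to MSELoss, CrossEntropyLoss, or BCEWithLogitsLoss. A minimal sketch of that dispatch with made-up tensors (shapes and values are illustrative only):

import torch
from torch.nn import MSELoss, CrossEntropyLoss, BCEWithLogitsLoss

num_labels = 3
logits = torch.randn(4, num_labels)

# single_label_classification: integer class ids
labels = torch.tensor([0, 2, 1, 0])
ce = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))

# multi_label_classification: float multi-hot targets
multi_hot = torch.tensor([[1., 0., 1.]] * 4)
bce = BCEWithLogitsLoss()(logits, multi_hot)

# regression with num_labels == 1: squeeze both sides before MSE
reg_logits = torch.randn(4, 1)
targets = torch.randn(4)
mse = MSELoss()(reg_logits.squeeze(), targets.squeeze())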
templeblock/automl | [
"0a73e836fd4a9d22919cb1ff5af9ca30082fa4b2"
] | [
"efficientdet/det_model_fn.py"
] | [
"# Copyright 2020 Google Research. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Model function definition, including both architecture and loss.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport re\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nimport coco_metric\nimport efficientdet_arch\nimport hparams_config\nimport iou_utils\nimport nms_np\nimport retinanet_arch\nimport utils\nfrom keras import anchors\nfrom keras import postprocess\n\n_DEFAULT_BATCH_SIZE = 64\n\n\ndef update_learning_rate_schedule_parameters(params):\n \"\"\"Updates params that are related to the learning rate schedule.\"\"\"\n # params['batch_size'] is per-shard within model_fn if strategy=tpu.\n batch_size = (\n params['batch_size'] * params['num_shards']\n if params['strategy'] == 'tpu' else params['batch_size'])\n # Learning rate is proportional to the batch size\n params['adjusted_learning_rate'] = (\n params['learning_rate'] * batch_size / _DEFAULT_BATCH_SIZE)\n steps_per_epoch = params['num_examples_per_epoch'] / batch_size\n params['lr_warmup_step'] = int(params['lr_warmup_epoch'] * steps_per_epoch)\n params['first_lr_drop_step'] = int(params['first_lr_drop_epoch'] *\n steps_per_epoch)\n params['second_lr_drop_step'] = int(params['second_lr_drop_epoch'] *\n steps_per_epoch)\n params['total_steps'] = int(params['num_epochs'] * steps_per_epoch)\n params['steps_per_epoch'] = steps_per_epoch\n\n\ndef stepwise_lr_schedule(adjusted_learning_rate, lr_warmup_init, lr_warmup_step,\n first_lr_drop_step, second_lr_drop_step, global_step):\n \"\"\"Handles linear scaling rule, gradual warmup, and LR decay.\"\"\"\n # lr_warmup_init is the starting learning rate; the learning rate is linearly\n # scaled up to the full learning rate after `lr_warmup_step` before decaying.\n logging.info('LR schedule method: stepwise')\n linear_warmup = (\n lr_warmup_init +\n (tf.cast(global_step, dtype=tf.float32) / lr_warmup_step *\n (adjusted_learning_rate - lr_warmup_init)))\n learning_rate = tf.where(global_step < lr_warmup_step, linear_warmup,\n adjusted_learning_rate)\n lr_schedule = [[1.0, lr_warmup_step], [0.1, first_lr_drop_step],\n [0.01, second_lr_drop_step]]\n for mult, start_global_step in lr_schedule:\n learning_rate = tf.where(global_step < start_global_step, learning_rate,\n adjusted_learning_rate * mult)\n return learning_rate\n\n\ndef cosine_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, total_steps,\n step):\n logging.info('LR schedule method: cosine')\n linear_warmup = (\n lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *\n (adjusted_lr - lr_warmup_init)))\n decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)\n cosine_lr = 0.5 * adjusted_lr * (\n 1 + tf.cos(np.pi * tf.cast(step, tf.float32) / decay_steps))\n return tf.where(step < 
lr_warmup_step, linear_warmup, cosine_lr)\n\n\ndef polynomial_lr_schedule(adjusted_lr, lr_warmup_init, lr_warmup_step, power,\n total_steps, step):\n logging.info('LR schedule method: polynomial')\n linear_warmup = (\n lr_warmup_init + (tf.cast(step, dtype=tf.float32) / lr_warmup_step *\n (adjusted_lr - lr_warmup_init)))\n polynomial_lr = adjusted_lr * tf.pow(\n 1 - (tf.cast(step, tf.float32) / total_steps), power)\n return tf.where(step < lr_warmup_step, linear_warmup, polynomial_lr)\n\n\ndef learning_rate_schedule(params, global_step):\n \"\"\"Learning rate schedule based on global step.\"\"\"\n lr_decay_method = params['lr_decay_method']\n if lr_decay_method == 'stepwise':\n return stepwise_lr_schedule(params['adjusted_learning_rate'],\n params['lr_warmup_init'],\n params['lr_warmup_step'],\n params['first_lr_drop_step'],\n params['second_lr_drop_step'], global_step)\n\n if lr_decay_method == 'cosine':\n return cosine_lr_schedule(params['adjusted_learning_rate'],\n params['lr_warmup_init'],\n params['lr_warmup_step'], params['total_steps'],\n global_step)\n\n if lr_decay_method == 'polynomial':\n return polynomial_lr_schedule(params['adjusted_learning_rate'],\n params['lr_warmup_init'],\n params['lr_warmup_step'],\n params['poly_lr_power'],\n params['total_steps'], global_step)\n\n if lr_decay_method == 'constant':\n return params['adjusted_learning_rate']\n\n raise ValueError('unknown lr_decay_method: {}'.format(lr_decay_method))\n\n\ndef focal_loss(y_pred, y_true, alpha, gamma, normalizer, label_smoothing=0.0):\n \"\"\"Compute the focal loss between `logits` and the golden `target` values.\n\n Focal loss = -(1-pt)^gamma * log(pt)\n where pt is the probability of being classified to the true class.\n\n Args:\n y_pred: A float32 tensor of size [batch, height_in, width_in,\n num_predictions].\n y_true: A float32 tensor of size [batch, height_in, width_in,\n num_predictions].\n alpha: A float32 scalar multiplying alpha to the loss from positive examples\n and (1-alpha) to the loss from negative examples.\n gamma: A float32 scalar modulating loss from hard and easy examples.\n normalizer: Divide loss by this value.\n label_smoothing: Float in [0, 1]. 
If > `0` then smooth the labels.\n\n Returns:\n loss: A float32 scalar representing normalized total loss.\n \"\"\"\n with tf.name_scope('focal_loss'):\n alpha = tf.convert_to_tensor(alpha, dtype=y_pred.dtype)\n gamma = tf.convert_to_tensor(gamma, dtype=y_pred.dtype)\n\n # compute focal loss multipliers before label smoothing, such that it will\n # not blow up the loss.\n pred_prob = tf.sigmoid(y_pred)\n p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n modulating_factor = (1.0 - p_t) ** gamma\n\n # apply label smoothing for cross_entropy for each entry.\n y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing\n ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n\n # compute the final loss and return\n return alpha_factor * modulating_factor * ce / normalizer\n\n\ndef _box_loss(box_outputs, box_targets, num_positives, delta=0.1):\n \"\"\"Computes box regression loss.\"\"\"\n # delta is typically around the mean value of regression target.\n # for instances, the regression targets of 512x512 input with 6 anchors on\n # P3-P7 pyramid is about [0.1, 0.1, 0.2, 0.2].\n normalizer = num_positives * 4.0\n mask = tf.not_equal(box_targets, 0.0)\n box_loss = tf.losses.huber_loss(\n box_targets,\n box_outputs,\n weights=mask,\n delta=delta,\n reduction=tf.losses.Reduction.SUM)\n box_loss /= normalizer\n return box_loss\n\n\ndef _box_iou_loss(box_outputs, box_targets, num_positives, iou_loss_type):\n \"\"\"Computes box iou loss.\"\"\"\n normalizer = num_positives * 4.0\n box_iou_loss = iou_utils.iou_loss(box_outputs, box_targets, iou_loss_type)\n box_iou_loss = tf.reduce_sum(box_iou_loss) / normalizer\n return box_iou_loss\n\n\ndef detection_loss(cls_outputs, box_outputs, labels, params):\n \"\"\"Computes total detection loss.\n\n Computes total detection loss including box and class loss from all levels.\n Args:\n cls_outputs: an OrderDict with keys representing levels and values\n representing logits in [batch_size, height, width, num_anchors].\n box_outputs: an OrderDict with keys representing levels and values\n representing box regression targets in [batch_size, height, width,\n num_anchors * 4].\n labels: the dictionary that returned from dataloader that includes\n groundtruth targets.\n params: the dictionary including training parameters specified in\n default_haprams function in this file.\n\n Returns:\n total_loss: an integer tensor representing total loss reducing from\n class and box losses from all levels.\n cls_loss: an integer tensor representing total class loss.\n box_loss: an integer tensor representing total box regression loss.\n box_iou_loss: an integer tensor representing total box iou loss.\n \"\"\"\n # Sum all positives in a batch for normalization and avoid zero\n # num_positives_sum, which would lead to inf loss during training\n num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0\n levels = cls_outputs.keys()\n\n cls_losses = []\n box_losses = []\n for level in levels:\n # Onehot encoding for classification labels.\n cls_targets_at_level = tf.one_hot(labels['cls_targets_%d' % level],\n params['num_classes'])\n\n if params['data_format'] == 'channels_first':\n bs, _, width, height, _ = cls_targets_at_level.get_shape().as_list()\n cls_targets_at_level = tf.reshape(cls_targets_at_level,\n [bs, -1, width, height])\n else:\n bs, width, height, _, _ = cls_targets_at_level.get_shape().as_list()\n cls_targets_at_level = tf.reshape(cls_targets_at_level,\n 
[bs, width, height, -1])\n box_targets_at_level = labels['box_targets_%d' % level]\n\n cls_loss = focal_loss(\n cls_outputs[level],\n cls_targets_at_level,\n params['alpha'],\n params['gamma'],\n normalizer=num_positives_sum,\n label_smoothing=params['label_smoothing'])\n\n if params['data_format'] == 'channels_first':\n cls_loss = tf.reshape(cls_loss,\n [bs, -1, width, height, params['num_classes']])\n else:\n cls_loss = tf.reshape(cls_loss,\n [bs, width, height, -1, params['num_classes']])\n cls_loss *= tf.cast(\n tf.expand_dims(tf.not_equal(labels['cls_targets_%d' % level], -2), -1),\n tf.float32)\n cls_losses.append(tf.reduce_sum(cls_loss))\n\n if params['box_loss_weight']:\n box_losses.append(\n _box_loss(\n box_outputs[level],\n box_targets_at_level,\n num_positives_sum,\n delta=params['delta']))\n\n if params['iou_loss_type']:\n input_anchors = anchors.Anchors(params['min_level'], params['max_level'],\n params['num_scales'],\n params['aspect_ratios'],\n params['anchor_scale'],\n params['image_size'])\n box_output_list = [tf.reshape(box_outputs[i], [-1, 4]) for i in levels]\n box_outputs = tf.concat(box_output_list, axis=0)\n box_target_list = [\n tf.reshape(labels['box_targets_%d' % level], [-1, 4])\n for level in levels\n ]\n box_targets = tf.concat(box_target_list, axis=0)\n anchor_boxes = tf.tile(input_anchors.boxes, [params['batch_size'], 1])\n box_outputs = anchors.decode_box_outputs(box_outputs, anchor_boxes)\n box_targets = anchors.decode_box_outputs(box_targets, anchor_boxes)\n box_iou_loss = _box_iou_loss(box_outputs, box_targets, num_positives_sum,\n params['iou_loss_type'])\n\n else:\n box_iou_loss = 0\n\n # Sum per level losses to total loss.\n cls_loss = tf.add_n(cls_losses)\n box_loss = tf.add_n(box_losses) if box_losses else 0\n\n total_loss = (\n cls_loss +\n params['box_loss_weight'] * box_loss +\n params['iou_loss_weight'] * box_iou_loss)\n\n return total_loss, cls_loss, box_loss, box_iou_loss\n\n\ndef reg_l2_loss(weight_decay, regex=r'.*(kernel|weight):0$'):\n \"\"\"Return regularization l2 loss loss.\"\"\"\n var_match = re.compile(regex)\n return weight_decay * tf.add_n([\n tf.nn.l2_loss(v)\n for v in tf.trainable_variables()\n if var_match.match(v.name)\n ])\n\n\ndef _model_fn(features, labels, mode, params, model, variable_filter_fn=None):\n \"\"\"Model definition entry.\n\n Args:\n features: the input image tensor with shape [batch_size, height, width, 3].\n The height and width are fixed and equal.\n labels: the input labels in a dictionary. The labels include class targets\n and box targets which are dense label maps. The labels are generated from\n get_input_fn function in data/dataloader.py\n mode: the mode of TPUEstimator including TRAIN, EVAL, and PREDICT.\n params: the dictionary defines hyperparameters of model. 
The default\n settings are in default_hparams function in this file.\n model: the model outputs class logits and box regression outputs.\n variable_filter_fn: the filter function that takes trainable_variables and\n returns the variable list after applying the filter rule.\n\n Returns:\n tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.\n\n Raises:\n RuntimeError: if both ckpt and backbone_ckpt are set.\n \"\"\"\n utils.image('input_image', features)\n training_hooks = []\n\n def _model_outputs(inputs):\n # Convert params (dict) to Config for easier access.\n return model(inputs, config=hparams_config.Config(params))\n\n precision = utils.get_precision(params['strategy'], params['mixed_precision'])\n cls_outputs, box_outputs = utils.build_model_with_precision(\n precision, _model_outputs, features, params['is_training_bn'])\n\n levels = cls_outputs.keys()\n for level in levels:\n cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)\n box_outputs[level] = tf.cast(box_outputs[level], tf.float32)\n\n # First check if it is in PREDICT mode.\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'image': features,\n }\n for level in levels:\n predictions['cls_outputs_%d' % level] = cls_outputs[level]\n predictions['box_outputs_%d' % level] = box_outputs[level]\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Set up training loss and learning rate.\n update_learning_rate_schedule_parameters(params)\n global_step = tf.train.get_or_create_global_step()\n learning_rate = learning_rate_schedule(params, global_step)\n\n # cls_loss and box_loss are for logging. only total_loss is optimized.\n det_loss, cls_loss, box_loss, box_iou_loss = detection_loss(\n cls_outputs, box_outputs, labels, params)\n reg_l2loss = reg_l2_loss(params['weight_decay'])\n total_loss = det_loss + reg_l2loss\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n utils.scalar('lrn_rate', learning_rate)\n utils.scalar('trainloss/cls_loss', cls_loss)\n utils.scalar('trainloss/box_loss', box_loss)\n utils.scalar('trainloss/det_loss', det_loss)\n utils.scalar('trainloss/reg_l2_loss', reg_l2loss)\n utils.scalar('trainloss/loss', total_loss)\n if params['iou_loss_type']:\n utils.scalar('trainloss/box_iou_loss', box_iou_loss)\n train_epochs = tf.cast(global_step, tf.float32) / params['steps_per_epoch']\n utils.scalar('train_epochs', train_epochs)\n\n moving_average_decay = params['moving_average_decay']\n if moving_average_decay:\n ema = tf.train.ExponentialMovingAverage(\n decay=moving_average_decay, num_updates=global_step)\n ema_vars = utils.get_ema_vars()\n if params['strategy'] == 'horovod':\n import horovod.tensorflow as hvd # pylint: disable=g-import-not-at-top\n learning_rate = learning_rate * hvd.size()\n if mode == tf.estimator.ModeKeys.TRAIN:\n if params['optimizer'].lower() == 'sgd':\n optimizer = tf.train.MomentumOptimizer(\n learning_rate, momentum=params['momentum'])\n elif params['optimizer'].lower() == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate)\n else:\n raise ValueError('optimizers should be adam or sgd')\n\n if params['strategy'] == 'tpu':\n optimizer = tf.tpu.CrossShardOptimizer(optimizer)\n elif params['strategy'] == 'horovod':\n optimizer = hvd.DistributedOptimizer(optimizer)\n training_hooks = [hvd.BroadcastGlobalVariablesHook(0)]\n\n # Batch norm requires update_ops to be added as a train_op dependency.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n var_list = tf.trainable_variables()\n if variable_filter_fn:\n var_list = 
variable_filter_fn(var_list)\n\n if params.get('clip_gradients_norm', 0) > 0:\n logging.info('clip gradients norm by %f', params['clip_gradients_norm'])\n grads_and_vars = optimizer.compute_gradients(total_loss, var_list)\n with tf.name_scope('clip'):\n grads = [gv[0] for gv in grads_and_vars]\n tvars = [gv[1] for gv in grads_and_vars]\n clipped_grads, gnorm = tf.clip_by_global_norm(\n grads, params['clip_gradients_norm'])\n utils.scalar('gnorm', gnorm)\n grads_and_vars = list(zip(clipped_grads, tvars))\n\n with tf.control_dependencies(update_ops):\n train_op = optimizer.apply_gradients(grads_and_vars, global_step)\n else:\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(\n total_loss, global_step, var_list=var_list)\n\n if moving_average_decay:\n with tf.control_dependencies([train_op]):\n train_op = ema.apply(ema_vars)\n\n else:\n train_op = None\n\n eval_metrics = None\n if mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(**kwargs):\n \"\"\"Returns a dictionary that has the evaluation metrics.\"\"\"\n if params['nms_configs'].get('pyfunc', True):\n detections_bs = []\n for index in range(kwargs['boxes'].shape[0]):\n nms_configs = params['nms_configs']\n detections = tf.numpy_function(\n functools.partial(nms_np.per_class_nms, nms_configs=nms_configs),\n [\n kwargs['boxes'][index],\n kwargs['scores'][index],\n kwargs['classes'][index],\n tf.slice(kwargs['image_ids'], [index], [1]),\n tf.slice(kwargs['image_scales'], [index], [1]),\n params['num_classes'],\n nms_configs['max_output_size'],\n ], tf.float32)\n detections_bs.append(detections)\n else:\n # These two branches should be equivalent, but currently they are not.\n # TODO(tanmingxing): enable the non_pyfun path after bug fix.\n nms_boxes, nms_scores, nms_classes, _ = postprocess.per_class_nms(\n params, kwargs['boxes'], kwargs['scores'], kwargs['classes'],\n kwargs['image_scales'])\n img_ids = tf.cast(\n tf.expand_dims(kwargs['image_ids'], -1), nms_scores.dtype)\n detections_bs = [\n img_ids * tf.ones_like(nms_scores),\n nms_boxes[:, :, 1],\n nms_boxes[:, :, 0],\n nms_boxes[:, :, 3] - nms_boxes[:, :, 1],\n nms_boxes[:, :, 2] - nms_boxes[:, :, 0],\n nms_scores,\n nms_classes,\n ]\n detections_bs = tf.stack(detections_bs, axis=-1, name='detnections')\n\n if params.get('testdev_dir', None):\n logging.info('Eval testdev_dir %s', params['testdev_dir'])\n eval_metric = coco_metric.EvaluationMetric(\n testdev_dir=params['testdev_dir'])\n coco_metrics = eval_metric.estimator_metric_fn(detections_bs,\n tf.zeros([1]))\n else:\n logging.info('Eval val with groudtruths %s.', params['val_json_file'])\n eval_metric = coco_metric.EvaluationMetric(\n filename=params['val_json_file'])\n coco_metrics = eval_metric.estimator_metric_fn(\n detections_bs, kwargs['groundtruth_data'])\n\n # Add metrics to output.\n cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])\n box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])\n output_metrics = {\n 'cls_loss': cls_loss,\n 'box_loss': box_loss,\n }\n output_metrics.update(coco_metrics)\n return output_metrics\n\n cls_loss_repeat = tf.reshape(\n tf.tile(tf.expand_dims(cls_loss, 0), [\n params['batch_size'],\n ]), [params['batch_size'], 1])\n box_loss_repeat = tf.reshape(\n tf.tile(tf.expand_dims(box_loss, 0), [\n params['batch_size'],\n ]), [params['batch_size'], 1])\n\n cls_outputs = postprocess.to_list(cls_outputs)\n box_outputs = postprocess.to_list(box_outputs)\n params['nms_configs']['max_nms_inputs'] = anchors.MAX_DETECTION_POINTS\n boxes, scores, classes = 
postprocess.pre_nms(params, cls_outputs,\n box_outputs)\n metric_fn_inputs = {\n 'cls_loss_repeat': cls_loss_repeat,\n 'box_loss_repeat': box_loss_repeat,\n 'image_ids': labels['source_ids'],\n 'groundtruth_data': labels['groundtruth_data'],\n 'image_scales': labels['image_scales'],\n 'boxes': boxes,\n 'scores': scores,\n 'classes': classes,\n }\n eval_metrics = (metric_fn, metric_fn_inputs)\n\n checkpoint = params.get('ckpt') or params.get('backbone_ckpt')\n\n if checkpoint and mode == tf.estimator.ModeKeys.TRAIN:\n # Initialize the model from an EfficientDet or backbone checkpoint.\n if params.get('ckpt') and params.get('backbone_ckpt'):\n raise RuntimeError(\n '--backbone_ckpt and --checkpoint are mutually exclusive')\n\n if params.get('backbone_ckpt'):\n var_scope = params['backbone_name'] + '/'\n if params['ckpt_var_scope'] is None:\n # Use backbone name as default checkpoint scope.\n ckpt_scope = params['backbone_name'] + '/'\n else:\n ckpt_scope = params['ckpt_var_scope'] + '/'\n else:\n # Load every var in the given checkpoint\n var_scope = ckpt_scope = '/'\n\n def scaffold_fn():\n \"\"\"Loads pretrained model through scaffold function.\"\"\"\n logging.info('restore variables from %s', checkpoint)\n\n var_map = utils.get_ckpt_var_map(\n ckpt_path=checkpoint,\n ckpt_scope=ckpt_scope,\n var_scope=var_scope,\n skip_mismatch=params['skip_mismatch'])\n\n tf.train.init_from_checkpoint(checkpoint, var_map)\n\n return tf.train.Scaffold()\n elif mode == tf.estimator.ModeKeys.EVAL and moving_average_decay:\n\n def scaffold_fn():\n \"\"\"Load moving average variables for eval.\"\"\"\n logging.info('Load EMA vars with ema_decay=%f', moving_average_decay)\n restore_vars_dict = ema.variables_to_restore(ema_vars)\n saver = tf.train.Saver(restore_vars_dict)\n return tf.train.Scaffold(saver=saver)\n else:\n scaffold_fn = None\n\n if params['strategy'] != 'tpu':\n # Profile every 1K steps.\n profile_hook = tf.train.ProfilerHook(\n save_steps=1000, output_dir=params['model_dir'])\n training_hooks.append(profile_hook)\n\n # Report memory allocation if OOM\n class OomReportingHook(tf.estimator.SessionRunHook):\n\n def before_run(self, run_context):\n return tf.estimator.SessionRunArgs(\n fetches=[],\n options=tf.RunOptions(report_tensor_allocations_upon_oom=True))\n\n training_hooks.append(OomReportingHook())\n\n return tf.estimator.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n eval_metrics=eval_metrics,\n host_call=utils.get_tpu_host_call(global_step, params),\n scaffold_fn=scaffold_fn,\n training_hooks=training_hooks)\n\n\ndef retinanet_model_fn(features, labels, mode, params):\n \"\"\"RetinaNet model.\"\"\"\n variable_filter_fn = functools.partial(\n retinanet_arch.remove_variables, resnet_depth=params['resnet_depth'])\n return _model_fn(\n features,\n labels,\n mode,\n params,\n model=retinanet_arch.retinanet,\n variable_filter_fn=variable_filter_fn)\n\n\ndef efficientdet_model_fn(features, labels, mode, params):\n \"\"\"EfficientDet model.\"\"\"\n variable_filter_fn = functools.partial(\n efficientdet_arch.freeze_vars, pattern=params['var_freeze_expr'])\n return _model_fn(\n features,\n labels,\n mode,\n params,\n model=efficientdet_arch.efficientdet,\n variable_filter_fn=variable_filter_fn)\n\n\ndef get_model_arch(model_name='efficientdet-d0'):\n \"\"\"Get model architecture for a given model name.\"\"\"\n if 'retinanet' in model_name:\n return retinanet_arch.retinanet\n\n if 'efficientdet' in model_name:\n return efficientdet_arch.efficientdet\n\n raise 
ValueError('Invalid model name {}'.format(model_name))\n\n\ndef get_model_fn(model_name='efficientdet-d0'):\n \"\"\"Get model fn for a given model name.\"\"\"\n if 'retinanet' in model_name:\n return retinanet_model_fn\n\n if 'efficientdet' in model_name:\n return efficientdet_model_fn\n\n raise ValueError('Invalid model name {}'.format(model_name))\n"
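The focal_loss docstring in det_model_fn.py above gives the formula -(1-pt)^gamma * log(pt), scaled by an alpha factor and a normalizer. A NumPy sketch of the same arithmetic, using the numerically stable sigmoid cross-entropy identity max(x, 0) - x*z + log(1 + exp(-|x|)); the sample logits and targets are made up:

import numpy as np

def focal_loss_np(y_pred, y_true, alpha=0.25, gamma=1.5, normalizer=1.0):
    # per-element focal loss on logits, mirroring the TF version above
    pred_prob = 1.0 / (1.0 + np.exp(-y_pred))                  # sigmoid
    p_t = y_true * pred_prob + (1 - y_true) * (1 - pred_prob)
    alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
    modulating = (1.0 - p_t) ** gamma
    # stable sigmoid cross-entropy with logits
    ce = np.maximum(y_pred, 0) - y_pred * y_true + np.log1p(np.exp(-np.abs(y_pred)))
    return alpha_factor * modulating * ce / normalizer

print(focal_loss_np(np.array([2.0, -1.0, 0.5]), np.array([1.0, 0.0, 1.0])))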
] | [
[
"tensorflow.compat.v1.not_equal",
"tensorflow.compat.v1.metrics.mean",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.train.ExponentialMovingAverage",
"tensorflow.compat.v1.train.Scaffold",
"tensorflow.compat.v1.train.Saver",
"tensorflow.compat.v1.tpu.CrossShardOptimizer",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.add_n",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.sigmoid",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.where",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.estimator.EstimatorSpec",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.RunOptions",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.losses.huber_loss",
"tensorflow.compat.v1.train.init_from_checkpoint",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.train.ProfilerHook",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.slice",
"tensorflow.compat.v1.train.MomentumOptimizer",
"tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.compat.v1.nn.l2_loss",
"tensorflow.compat.v1.clip_by_global_norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
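The efficientdet record above defines cosine_lr_schedule as a linear warmup from lr_warmup_init to adjusted_lr over lr_warmup_step steps, followed by 0.5 * adjusted_lr * (1 + cos(pi * step / decay_steps)). A NumPy sketch of that curve; the step counts and rates here are illustrative, not defaults from the repo:

import numpy as np

def cosine_lr(step, adjusted_lr=0.08, lr_warmup_init=0.008,
              lr_warmup_step=1000, total_steps=10000):
    warmup = lr_warmup_init + step / lr_warmup_step * (adjusted_lr - lr_warmup_init)
    decay_steps = total_steps - lr_warmup_step
    cosine = 0.5 * adjusted_lr * (1 + np.cos(np.pi * step / decay_steps))
    return np.where(step < lr_warmup_step, warmup, cosine)

steps = np.array([0., 500., 1000., 5500., 10000.])
print(cosine_lr(steps))   # ramps up linearly, then decays toward ~0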
holajoa/keras-YOLOv3-model-set | [
"c15b8a2f48371c063f6482b25593dc70d5956323",
"c15b8a2f48371c063f6482b25593dc70d5956323",
"c15b8a2f48371c063f6482b25593dc70d5956323"
] | [
"yolo3/models/yolo3_resnet50.py",
"yolo3/data.py",
"tracking/eval/tools/mot16_annotation.py"
] | [
"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"YOLO_v3 ResNet50 Model Defined in Keras.\"\"\"\r\n\r\nfrom tensorflow.keras.layers import UpSampling2D, Concatenate\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.applications.resnet import ResNet50\r\n\r\nfrom yolo3.models.layers import yolo3_predictions, yolo3lite_predictions, tiny_yolo3_predictions, tiny_yolo3lite_predictions\r\n\r\n\r\ndef yolo3_resnet50_body(inputs, num_anchors, num_classes):\r\n \"\"\"Create YOLO_V3 ResNet50 model CNN body in Keras.\"\"\"\r\n resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)\r\n print('backbone layers number: {}'.format(len(resnet50.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # conv5_block3_out: 13 x 13 x 2048\r\n # conv4_block6_out: 26 x 26 x 1024\r\n # conv3_block4_out: 52 x 52 x 512\r\n\r\n # f1 :13 x 13 x 2048\r\n f1 = resnet50.get_layer('conv5_block3_out').output\r\n # f2: 26 x 26 x 1024\r\n f2 = resnet50.get_layer('conv4_block6_out').output\r\n # f3 : 52 x 52 x 512\r\n f3 = resnet50.get_layer('conv3_block4_out').output\r\n\r\n f1_channel_num = 1024\r\n f2_channel_num = 512\r\n f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs = inputs, outputs=[y1,y2,y3])\r\n\r\n\r\ndef yolo3lite_resnet50_body(inputs, num_anchors, num_classes):\r\n '''Create YOLO_v3 Lite ResNet50 model CNN body in keras.'''\r\n resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)\r\n print('backbone layers number: {}'.format(len(resnet50.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # conv5_block3_out: 13 x 13 x 2048\r\n # conv4_block6_out: 26 x 26 x 1024\r\n # conv3_block4_out: 52 x 52 x 512\r\n\r\n # f1 :13 x 13 x 2048\r\n f1 = resnet50.get_layer('conv5_block3_out').output\r\n # f2: 26 x 26 x 1024\r\n f2 = resnet50.get_layer('conv4_block6_out').output\r\n # f3 : 52 x 52 x 512\r\n f3 = resnet50.get_layer('conv3_block4_out').output\r\n\r\n f1_channel_num = 1024\r\n f2_channel_num = 512\r\n f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs = inputs, outputs=[y1,y2,y3])\r\n\r\n\r\ndef yolo3lite_spp_resnet50_body(inputs, num_anchors, num_classes):\r\n '''Create YOLO_v3 Lite SPP ResNet50 model CNN body in keras.'''\r\n resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)\r\n print('backbone layers number: {}'.format(len(resnet50.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # conv5_block3_out: 13 x 13 x 2048\r\n # conv4_block6_out: 26 x 26 x 1024\r\n # conv3_block4_out: 52 x 52 x 512\r\n\r\n # f1 :13 x 13 x 2048\r\n f1 = resnet50.get_layer('conv5_block3_out').output\r\n # f2: 26 x 26 x 1024\r\n f2 = resnet50.get_layer('conv4_block6_out').output\r\n # f3 : 52 x 52 x 512\r\n f3 = resnet50.get_layer('conv3_block4_out').output\r\n\r\n f1_channel_num = 1024\r\n f2_channel_num = 512\r\n f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo3lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes, use_spp=True)\r\n\r\n return Model(inputs = inputs, outputs=[y1,y2,y3])\r\n\r\n\r\ndef tiny_yolo3_resnet50_body(inputs, num_anchors, num_classes):\r\n '''Create Tiny YOLO_v3 ResNet50 model CNN body in keras.'''\r\n resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)\r\n print('backbone layers number: 
{}'.format(len(resnet50.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # conv5_block3_out: 13 x 13 x 2048\r\n # conv4_block6_out: 26 x 26 x 1024\r\n # conv3_block4_out: 52 x 52 x 512\r\n\r\n # f1 :13 x 13 x 2048\r\n f1 = resnet50.get_layer('conv5_block3_out').output\r\n # f2: 26 x 26 x 1024\r\n f2 = resnet50.get_layer('conv4_block6_out').output\r\n\r\n f1_channel_num = 1024\r\n f2_channel_num = 512\r\n\r\n y1, y2 = tiny_yolo3_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs, [y1,y2])\r\n\r\n\r\ndef tiny_yolo3lite_resnet50_body(inputs, num_anchors, num_classes):\r\n '''Create Tiny YOLO_v3 Lite ResNet50 model CNN body in keras.'''\r\n resnet50 = ResNet50(input_tensor=inputs, weights='imagenet', include_top=False)\r\n print('backbone layers number: {}'.format(len(resnet50.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # conv5_block3_out: 13 x 13 x 2048\r\n # conv4_block6_out: 26 x 26 x 1024\r\n # conv3_block4_out: 52 x 52 x 512\r\n\r\n # f1 :13 x 13 x 2048\r\n f1 = resnet50.get_layer('conv5_block3_out').output\r\n # f2: 26 x 26 x 1024\r\n f2 = resnet50.get_layer('conv4_block6_out').output\r\n\r\n f1_channel_num = 1024\r\n f2_channel_num = 512\r\n\r\n y1, y2 = tiny_yolo3lite_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs, [y1,y2])\r\n\r\n",
"#!/usr/bin/python3\n# -*- coding=utf-8 -*-\n\"\"\"training data generation functions.\"\"\"\nimport numpy as np\nimport random, math\nfrom PIL import Image\nfrom tensorflow.keras.utils import Sequence\nfrom common.data_utils import normalize_image, letterbox_resize, random_resize_crop_pad, reshape_boxes, random_hsv_distort, random_horizontal_flip, random_vertical_flip, random_grayscale, random_brightness, random_chroma, random_contrast, random_sharpness, random_blur, random_motion_blur, random_rotate, random_gridmask, random_mosaic_augment, random_mosaic_augment_v5\nfrom common.utils import get_multiscale_list\n\n\ndef get_ground_truth_data(annotation_line, input_shape, augment=True, max_boxes=100):\n '''random preprocessing for real-time data augmentation'''\n line = annotation_line.split()\n image = Image.open(line[0])\n image_size = image.size\n model_input_size = tuple(reversed(input_shape))\n boxes = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])\n\n if not augment:\n new_image, padding_size, offset = letterbox_resize(image, target_size=model_input_size, return_padding_info=True)\n image_data = np.array(new_image)\n image_data = normalize_image(image_data)\n\n # reshape boxes\n boxes = reshape_boxes(boxes, src_shape=image_size, target_shape=model_input_size, padding_shape=padding_size, offset=offset)\n if len(boxes)>max_boxes:\n boxes = boxes[:max_boxes]\n\n # fill in box data\n box_data = np.zeros((max_boxes,5))\n if len(boxes)>0:\n box_data[:len(boxes)] = boxes\n\n return image_data, box_data\n\n # random resize image and crop|padding to target size\n image, padding_size, padding_offset = random_resize_crop_pad(image, target_size=model_input_size)\n\n # random horizontal flip image\n image, horizontal_flip = random_horizontal_flip(image)\n\n # random adjust brightness\n image = random_brightness(image)\n\n # random adjust color level\n image = random_chroma(image)\n\n # random adjust contrast\n image = random_contrast(image)\n\n # random adjust sharpness\n image = random_sharpness(image)\n\n # random convert image to grayscale\n image = random_grayscale(image)\n\n # random do normal blur to image\n #image = random_blur(image)\n\n # random do motion blur to image\n #image = random_motion_blur(image, prob=0.2)\n\n # random vertical flip image\n image, vertical_flip = random_vertical_flip(image)\n\n # random distort image in HSV color space\n # NOTE: will cost more time for preprocess\n # and slow down training speed\n #image = random_hsv_distort(image)\n\n # reshape boxes based on augment\n boxes = reshape_boxes(boxes, src_shape=image_size, target_shape=model_input_size, padding_shape=padding_size, offset=padding_offset, horizontal_flip=horizontal_flip, vertical_flip=vertical_flip)\n\n # random rotate image and boxes\n image, boxes = random_rotate(image, boxes)\n\n # random add gridmask augment for image and boxes\n image, boxes = random_gridmask(image, boxes)\n\n if len(boxes)>max_boxes:\n boxes = boxes[:max_boxes]\n\n # prepare image & box data\n image_data = np.array(image)\n image_data = normalize_image(image_data)\n box_data = np.zeros((max_boxes,5))\n if len(boxes)>0:\n box_data[:len(boxes)] = boxes\n\n return image_data, box_data\n\n\ndef preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes, multi_anchor_assign, iou_thresh=0.2):\n '''Preprocess true boxes to training input format\n\n Parameters\n ----------\n true_boxes: array, shape=(m, T, 5)\n Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.\n input_shape: 
array-like, hw, multiples of 32\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n multi_anchor_assign: boolean, whether to use iou_thresh to assign multiple\n anchors for a single ground truth\n\n Returns\n -------\n y_true: list of array, shape like yolo_outputs, xywh are reletive value\n\n '''\n assert (true_boxes[..., 4] < num_classes).all(), 'class id must be less than num_classes'\n num_layers = len(anchors)//3 # default setting\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [0,1,2]]\n\n #Transform box info to (x_center, y_center, box_width, box_height, cls_id)\n #and image relative coordinate.\n true_boxes = np.array(true_boxes, dtype='float32')\n input_shape = np.array(input_shape, dtype='int32')\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\n true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]\n true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]\n\n batch_size = true_boxes.shape[0]\n grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]\n y_true = [np.zeros((batch_size, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + num_classes),\n dtype='float32') for l in range(num_layers)]\n\n # Expand dim to apply broadcasting.\n anchors = np.expand_dims(anchors, 0)\n anchor_maxes = anchors / 2.\n anchor_mins = -anchor_maxes\n valid_mask = boxes_wh[..., 0] > 0\n\n for b in range(batch_size):\n # Discard zero rows.\n wh = boxes_wh[b, valid_mask[b]]\n if len(wh) == 0:\n continue\n\n # Expand dim to apply broadcasting.\n wh = np.expand_dims(wh, -2)\n box_maxes = wh / 2.\n box_mins = -box_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n box_area = wh[..., 0] * wh[..., 1]\n anchor_area = anchors[..., 0] * anchors[..., 1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n\n # Sort anchors according to IoU score\n # to find out best assignment\n best_anchors = np.argsort(iou, axis=-1)[..., ::-1]\n\n if not multi_anchor_assign:\n best_anchors = best_anchors[..., 0]\n # keep index dim for the loop in following\n best_anchors = np.expand_dims(best_anchors, -1)\n\n for t, row in enumerate(best_anchors):\n for l in range(num_layers):\n for n in row:\n # use different matching policy for single & multi anchor assign\n if multi_anchor_assign:\n matching_rule = (iou[t, n] > iou_thresh and n in anchor_mask[l])\n else:\n matching_rule = (n in anchor_mask[l])\n\n if matching_rule:\n i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')\n j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')\n k = anchor_mask[l].index(n)\n c = true_boxes[b, t, 4].astype('int32')\n y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]\n y_true[l][b, j, i, k, 4] = 1\n y_true[l][b, j, i, k, 5+c] = 1\n\n return y_true\n\n\nclass Yolo3DataGenerator(Sequence):\n def __init__(self, annotation_lines, batch_size, input_shape, anchors, num_classes, enhance_augment=None, rescale_interval=-1, multi_anchor_assign=False, shuffle=True, **kwargs):\n self.annotation_lines = annotation_lines\n self.batch_size = batch_size\n self.input_shape = input_shape\n self.anchors = anchors\n self.num_classes = num_classes\n self.enhance_augment = enhance_augment\n self.multi_anchor_assign = multi_anchor_assign\n self.indexes = np.arange(len(self.annotation_lines))\n 
self.shuffle = shuffle\n # prepare multiscale config\n # TODO: error happens when using Sequence data generator with\n # multiscale input shape, disable multiscale first\n if rescale_interval != -1:\n raise ValueError(\"tf.keras.Sequence generator doesn't support multiscale input, pls remove related config\")\n #self.rescale_interval = rescale_interval\n self.rescale_interval = -1\n\n self.rescale_step = 0\n self.input_shape_list = get_multiscale_list()\n\n def __len__(self):\n # get iteration loops on each epoch\n return max(1, math.ceil(len(self.annotation_lines) / float(self.batch_size)))\n\n def __getitem__(self, index):\n # generate annotation indexes for every batch\n batch_indexs = self.indexes[index*self.batch_size : (index+1)*self.batch_size]\n # fetch annotation lines based on index\n batch_annotation_lines = [self.annotation_lines[i] for i in batch_indexs]\n\n if self.rescale_interval > 0:\n # Do multi-scale training on different input shape\n self.rescale_step = (self.rescale_step + 1) % self.rescale_interval\n if self.rescale_step == 0:\n self.input_shape = self.input_shape_list[random.randint(0, len(self.input_shape_list)-1)]\n\n image_data = []\n box_data = []\n for b in range(self.batch_size):\n image, box = get_ground_truth_data(batch_annotation_lines[b], self.input_shape, augment=True)\n image_data.append(image)\n box_data.append(box)\n image_data = np.array(image_data)\n box_data = np.array(box_data)\n\n if self.enhance_augment == 'mosaic':\n # add random mosaic augment on batch ground truth data\n image_data, box_data = random_mosaic_augment(image_data, box_data, prob=0.2)\n #elif self.enhance_augment == 'mosaic_v5':\n # mosaic augment from YOLOv5\n #image_data, box_data = random_mosaic_augment_v5(image_data, box_data, prob=0.2)\n\n y_true = preprocess_true_boxes(box_data, self.input_shape, self.anchors, self.num_classes, self.multi_anchor_assign)\n\n return [image_data, *y_true], np.zeros(self.batch_size)\n\n def on_epoch_end(self):\n # shuffle annotation data on epoch end\n if self.shuffle == True:\n np.random.shuffle(self.annotation_lines)\n\n\n\ndef yolo3_data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, enhance_augment, rescale_interval, multi_anchor_assign):\n '''data generator for fit_generator'''\n n = len(annotation_lines)\n i = 0\n # prepare multiscale config\n rescale_step = 0\n input_shape_list = get_multiscale_list()\n while True:\n if rescale_interval > 0:\n # Do multi-scale training on different input shape\n rescale_step = (rescale_step + 1) % rescale_interval\n if rescale_step == 0:\n input_shape = input_shape_list[random.randint(0, len(input_shape_list)-1)]\n\n image_data = []\n box_data = []\n for b in range(batch_size):\n if i==0:\n np.random.shuffle(annotation_lines)\n image, box = get_ground_truth_data(annotation_lines[i], input_shape, augment=True)\n image_data.append(image)\n box_data.append(box)\n i = (i+1) % n\n image_data = np.array(image_data)\n box_data = np.array(box_data)\n\n if enhance_augment == 'mosaic':\n # add random mosaic augment on batch ground truth data\n image_data, box_data = random_mosaic_augment(image_data, box_data, prob=0.2)\n #elif enhance_augment == 'mosaic_v5':\n # mosaic augment from YOLOv5\n #image_data, box_data = random_mosaic_augment_v5(image_data, box_data, prob=0.2)\n\n y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes, multi_anchor_assign)\n yield [image_data, *y_true], np.zeros(batch_size)\n\ndef yolo3_data_generator_wrapper(annotation_lines, batch_size, 
input_shape, anchors, num_classes, enhance_augment=None, rescale_interval=-1, multi_anchor_assign=False, **kwargs):\n n = len(annotation_lines)\n if n==0 or batch_size<=0: return None\n return yolo3_data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, enhance_augment, rescale_interval, multi_anchor_assign)\n\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nscript to convert MOT16 ground truth and detection txt annotation file\nto pymot json format file\n'''\nimport os, argparse\nimport json\nimport numpy as np\nfrom tqdm import tqdm\n#from collections import OrderedDict\n\n\ndef convert_mot16_annotation(annotation_file, output_json, ground_truth):\n \"\"\"\n MOT16 txt annotation file line format\n ground truth (gt.txt):\n <frame_id>,<track_id>,<bbox_left>,<bbox_top>,<bbox_width>,<bbox_height>,<ignore>,<classes>\n box are grouped with track id, like\n 1,1,912,484,97,109,0,7,1\n 2,1,912,484,97,109,0,7,1\n ...\n 1,2,1338,418,167,379,1,1,1\n 2,2,1342,417,168,380,1,1,1\n ...\n\n detection (det.txt):\n <frame_id>,<track_id(-1)>,<bbox_left>,<bbox_top>,<bbox_width>,<bbox_height>,<confidence>,<mot3d_x(-1)>,<mot3d_y(-1)>,<mot3d_z(-1)>\n box are grouped with frame id, like\n 1,-1,1359.1,413.27,120.26,362.77,2.3092,-1,-1,-1\n 1,-1,571.03,402.13,104.56,315.68,1.5028,-1,-1,-1\n ...\n 2,-1,1359.1,413.27,120.26,362.77,2.4731,-1,-1,-1\n 2,-1,584.04,446.86,84.742,256.23,1.2369,-1,-1,-1\n ...\n \"\"\"\n # init json dict for final output\n output_dict = {\n 'filename': annotation_file,\n 'class': 'video',\n 'frames': []\n }\n\n # track statistic count\n total_track = []\n ignored_track = []\n\n # load annotation file\n with open(annotation_file, 'r') as f:\n annotation_lines = f.readlines()\n\n # get sorted frame id list\n frame_ids = [int(annotation.split(',')[0]) for annotation in annotation_lines]\n frame_ids = list(np.unique(frame_ids))\n\n pbar = tqdm(total=len(frame_ids), desc='annotation convert')\n for frame_id in frame_ids:\n pbar.update(1)\n\n # init json dict for 1 frame\n annotation_key = 'annotations' if ground_truth else 'hypotheses'\n frame_dict = {\n 'timestamp': float(frame_id), # just use frame id as timestamp\n 'num': int(frame_id), # just use frame id as num\n 'class': 'frame',\n annotation_key: []\n }\n\n # walk through annotation lines to pick frame boxes\n for annotation in annotation_lines:\n annotation = annotation.split(',')\n annotation_frame_id = int(annotation[0])\n\n if annotation_frame_id == frame_id:\n # prepare json dict for 1 box\n track_dict = {\n 'height': float(annotation[5]),\n 'width': float(annotation[4]),\n 'id': annotation[1],\n 'y': float(annotation[3]),\n 'x': float(annotation[2]),\n }\n\n # set dco flag by 'ignore' for ground truth\n if ground_truth:\n track_dict['dco'] = not bool(int(annotation[6]))\n\n # count track\n if track_dict['id'] not in total_track:\n total_track.append(track_dict['id'])\n if (track_dict['dco'] == True) and (track_dict['id'] not in ignored_track):\n ignored_track.append(track_dict['id'])\n\n frame_dict[annotation_key].append(track_dict)\n\n output_dict['frames'].append(frame_dict)\n pbar.close()\n\n # save output json\n with open(output_json, 'w') as fp:\n json.dump([output_dict], fp, indent=4)\n\n # print out track statistic\n print('\\nDone for %s. 
Related statistic:'%(annotation_file))\n    print('frame number: %d'%(len(frame_ids)))\n    print('bbox number: %d'%(len(annotation_lines)))\n    print('total track number: %d'%(len(total_track)))\n    print('ignored track number: %d'%(len(ignored_track)))\n\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='convert MOT16 annotations to pymot json format')\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument('--ground_truth_file', type=str, default=None, help=\"converted ground truth annotation file\")\n    group.add_argument('--detection_file', type=str, default=None, help=\"converted detection file\")\n\n    parser.add_argument('--output_json', type=str, required=True, help='Output json file')\n\n    args = parser.parse_args()\n\n    # specify annotation_file and output_json\n    annotation_file = args.ground_truth_file if args.ground_truth_file else args.detection_file\n    # a trick: using args.ground_truth_file as flag to check if we're converting a ground truth annotation\n    convert_mot16_annotation(annotation_file, args.output_json, args.ground_truth_file)\n\n\nif __name__ == \"__main__\":\n    main()\n"
] | [
[
"tensorflow.keras.applications.resnet.ResNet50",
"tensorflow.keras.models.Model"
],
[
"numpy.expand_dims",
"numpy.maximum",
"numpy.minimum",
"numpy.random.shuffle",
"numpy.floor",
"numpy.argsort",
"numpy.array",
"numpy.zeros"
],
[
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
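Aside on the anchor assignment used in preprocess_true_boxes above: ground-truth boxes are matched to anchors by width/height IoU alone, i.e. both rectangles are treated as co-centred at the origin, so the intersection is simply min(w) * min(h). A minimal self-contained sketch of that step (pure numpy; the name wh_iou is illustrative, not from the repo):

import numpy as np

def wh_iou(box_wh, anchors_wh):
    # IoU between one (w, h) box and N anchors, all centred at the origin
    box_wh = np.asarray(box_wh, dtype=np.float32)          # shape (2,)
    anchors_wh = np.asarray(anchors_wh, dtype=np.float32)  # shape (N, 2)
    inter = np.minimum(box_wh, anchors_wh).prod(axis=1)    # overlap of co-centred boxes
    union = box_wh.prod() + anchors_wh.prod(axis=1) - inter
    return inter / union

anchors = np.array([[10, 13], [16, 30], [33, 23]], dtype=np.float32)
iou = wh_iou([15, 25], anchors)
best_first = np.argsort(iou)[::-1]   # best-first ranking, as the training code sorts on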
ruthlorenz/ESMValTool | [
"c3c61b5341037d01c776c3524c0dd4c767507a3d",
"c3c61b5341037d01c776c3524c0dd4c767507a3d"
] | [
"esmvaltool/diag_scripts/ocean/diagnostic_profiles.py",
"esmvaltool/diag_scripts/ocean/diagnostic_transects.py"
] | [
"\"\"\"\nDiagnostic:\n\nDiagnostic to produce images of the profile over time from a cube.\nThese plost show cube value (ie temperature) on the x-axis, and depth/height\non the y axis. The colour scale is the annual mean of the cube data.\n\nNote that this diagnostic assumes that the preprocessors do the bulk of the\nhard work, and that the cube received by this diagnostic (via the settings.yml\nand metadata.yml files) has a time component, and depth component, but no\nlatitude or longitude coordinates.\n\nAn approproate preprocessor for a 3D+time field would be:\npreprocessors:\n prep_profile:\n extract_volume:\n long1: 0.\n long2: 20.\n lat1: -30.\n lat2: 30.\n z_min: 0.\n z_max: 3000.\n average_region:\n coord1: longitude\n coord2: latitude\n\nThis tool is part of the ocean diagnostic tools package in the ESMValTool.\n\nAuthor: Lee de Mora (PML)\n [email protected]\n\"\"\"\nimport logging\nimport os\nimport sys\nimport matplotlib\nmatplotlib.use('Agg') # noqa\nimport matplotlib.pyplot as plt\n\nimport iris\nimport iris.quickplot as qplt\n\nimport diagnostic_tools as diagtools\nfrom esmvaltool.diag_scripts.shared import run_diagnostic\n\n# This part sends debug statements to stdout\nlogger = logging.getLogger(os.path.basename(__file__))\nlogging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n\n\ndef determine_profiles_str(cube):\n \"\"\"\n Determine a string from the cube, to describe the profile.\n\n Used in image titles, descriptions and filenames.\n \"\"\"\n options = ['latitude', 'longitude']\n for option in options:\n coord = cube.coord(option)\n if len(coord.points) > 1:\n continue\n value = coord.points.mean()\n if option == 'latitude':\n return str(value) + ' N'\n if option == 'longitude':\n if value > 180.:\n return str(value - 360.) 
+ ' W'\n            return str(value) + ' E'\n    return ''\n\n\ndef make_profiles_plots(\n        cfg,\n        metadata,\n        filename,\n):\n    \"\"\"\n    Make a simple profile plot for an individual model.\n\n    The cfg is the opened global config,\n    metadata is the metadata dictionary\n    filename is the preprocessed model file.\n    \"\"\"\n    # Load cube and set up units\n    cube = iris.load_cube(filename)\n    cube = diagtools.bgc_units(cube, metadata['short_name'])\n\n    # Make annual means:\n    cube = cube.aggregated_by('year', iris.analysis.MEAN)\n\n    # Is this data a multi-model dataset?\n    multi_model = metadata['dataset'].find('MultiModel') > -1\n\n    #\n    times = cube.coord('time')\n    times_float = diagtools.timecoord_to_float(times)\n    time_0 = times_float[0]\n\n    cmap = plt.cm.get_cmap('jet')\n\n    plot_details = {}\n    for time_index, time in enumerate(times_float):\n\n        color = cmap((time - time_0) / (times_float[-1] - time_0))\n\n        qplt.plot(cube[time_index, :], cube[time_index, :].coord('depth'),\n                  c=color)\n\n        plot_details[time_index] = {'c': color, 'ls': '-', 'lw': 1,\n                                    'label': str(int(time))}\n\n    # Add title to plot\n    title = ' '.join([\n        metadata['dataset'],\n        metadata['long_name'],\n    ])\n    plt.title(title)\n\n    # Add Legend outside right.\n    diagtools.add_legend_outside_right(plot_details, plt.gca())\n\n    # Load image format extension\n    image_extension = diagtools.get_image_format(cfg)\n\n    # Determine image filename:\n    if multi_model:\n        path = diagtools.folder(\n            cfg['plot_dir']) + os.path.basename(filename).replace(\n                '.nc', '_profile' + image_extension)\n    else:\n        path = diagtools.get_image_path(\n            cfg,\n            metadata,\n            suffix='profile' + image_extension,\n        )\n\n    # Saving files:\n    if cfg['write_plots']:\n        logger.info('Saving plots to %s', path)\n        plt.savefig(path)\n\n    plt.close()\n\n\ndef main(cfg):\n    \"\"\"\n    Load the config file, and send it to the plot maker.\n\n    The cfg is the opened global config.\n    \"\"\"\n    for index, metadata_filename in enumerate(cfg['input_files']):\n        logger.info(\n            'metadata filename:\\t%s',\n            metadata_filename\n        )\n\n        metadatas = diagtools.get_input_files(cfg, index=index)\n        for filename in sorted(metadatas.keys()):\n\n            logger.info('-----------------')\n            logger.info(\n                'model filenames:\\t%s',\n                filename,\n            )\n\n            ######\n            # Time series of individual model\n            make_profiles_plots(cfg, metadatas[filename], filename)\n\n    logger.info('Success')\n\n\nif __name__ == '__main__':\n    with run_diagnostic() as config:\n        main(config)\n",
"\"\"\"\nDiagnostic transect:\n\nDiagnostic to produce images of a transect.\nThese plots show either latitude or longitude against depth, and the cube value\nis used as the colour scale.\n\nNote that this diagnostic assumes that the preprocessors do the bulk of the\nhard work, and that the cube received by this diagnostic (via the settings.yml\nand metadata.yml files) has no time component, and one of the latitude or\nlongitude coordinates has been reduced to a single value.\n\nAn appropriate preprocessor for a 3D+time field would be:\npreprocessors:\n  prep_transect:\n    time_average:\n    extract_slice: # Atlantic Meridional Transect\n      latitude: [-50.,50.]\n      longitude: 332.\n\nThis tool is part of the ocean diagnostic tools package in the ESMValTool.\n\nAuthor: Lee de Mora (PML)\n    [email protected]\n\"\"\"\nimport logging\nimport os\nimport sys\nimport matplotlib\nmatplotlib.use('Agg')  # noqa\nimport iris\n\nimport iris.quickplot as qplt\nimport matplotlib.pyplot as plt\n\nimport diagnostic_tools as diagtools\nfrom esmvaltool.diag_scripts.shared import run_diagnostic\n\n# This part sends debug statements to stdout\nlogger = logging.getLogger(os.path.basename(__file__))\nlogging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n\n\ndef determine_transect_str(cube):\n    \"\"\"\n    Determine the Transect String\n\n    Takes a guess at a string to describe the transect.\n    \"\"\"\n    options = ['latitude', 'longitude']\n    for option in options:\n        coord = cube.coord(option)\n        if len(coord.points) > 1:\n            continue\n        value = coord.points.mean()\n        if option == 'latitude':\n            return str(value) + ' N'\n        if option == 'longitude':\n            if value > 180.:\n                return str(value - 360.) + ' W'\n            return str(value) + ' E'\n    return ''\n\n\ndef make_transects_plots(\n        cfg,\n        metadata,\n        filename,\n):\n    \"\"\"\n    Make a simple plot of the transect for an individual model.\n\n    The cfg is the opened global config,\n    metadata is the metadata dictionary\n    filename is the preprocessed model file.\n    \"\"\"\n    # Load cube and set up units\n    cube = iris.load_cube(filename)\n    cube = diagtools.bgc_units(cube, metadata['short_name'])\n\n    # Is this data a multi-model dataset?\n    multi_model = metadata['dataset'].find('MultiModel') > -1\n\n    qplt.contourf(cube, 25, linewidth=0, rasterized=True)\n    plt.gca().set_yscale('log')\n\n    # Add title to plot\n    title = ' '.join(\n        [metadata['dataset'], metadata['long_name'],\n         determine_transect_str(cube)])\n    plt.title(title)\n\n    # Load image format extension\n    image_extension = diagtools.get_image_format(cfg)\n\n    # Determine image filename:\n    if multi_model:\n        path = diagtools.folder(\n            cfg['plot_dir']) + os.path.basename(filename).replace(\n                '.nc', '_transect' + image_extension)\n    else:\n        path = diagtools.get_image_path(\n            cfg,\n            metadata,\n            suffix='transect' + image_extension,\n        )\n\n    # Saving files:\n    if cfg['write_plots']:\n        logger.info('Saving plots to %s', path)\n        plt.savefig(path)\n\n    plt.close()\n\n\ndef main(cfg):\n    \"\"\"\n    Load the config file, and send it to the plot maker.\n\n    The cfg is the opened global config.\n    \"\"\"\n    #####\n    for index, metadata_filename in enumerate(cfg['input_files']):\n        logger.info(\n            'metadata filename:\\t%s',\n            metadata_filename,\n        )\n\n        metadata = diagtools.get_input_files(cfg, index=index)\n        for filename in sorted(metadata.keys()):\n\n            logger.info('-----------------')\n            logger.info(\n                'model filenames:\\t%s',\n                filename,\n            )\n\n            ######\n            # Time series of individual model\n            make_transects_plots(cfg, metadata[filename], filename)\n\n    logger.info('Success')\n\n\nif __name__ == '__main__':\n    with run_diagnostic() as config:\n        main(config)\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close"
],
[
"matplotlib.pyplot.title",
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
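A note on the colour handling in make_profiles_plots above: each annual profile is drawn with a colour taken from a colormap at the normalised position (time - t0) / (t_end - t0) of that year in the record. A stripped-down sketch with synthetic data (no iris dependency; all variable names here are illustrative):

import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

years = np.arange(2000, 2010, dtype=float)
depth = np.linspace(0., 3000., 50)
cmap = plt.get_cmap('jet')

fig, ax = plt.subplots()
for year in years:
    frac = (year - years[0]) / (years[-1] - years[0])        # 0..1 position in the record
    profile = 20. - depth / 200. + 0.1 * (year - years[0])   # fake temperature profile
    ax.plot(profile, depth, color=cmap(frac), label=str(int(year)))
ax.invert_yaxis()  # depth increases downwards, as in the diagnostic
fig.savefig('profiles.png')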
vlad17/BlitzML | [
"f13e089acf7435416bec17e87e5b3130426fc2cd",
"f13e089acf7435416bec17e87e5b3130426fc2cd"
] | [
"test/python/test_problem_options.py",
"test/python/test_sparse_logreg.py"
] | [
"import unittest\nimport blitzml\nimport numpy as np\n\nfrom common import captured_output\n\nclass TestProblemOptions(unittest.TestCase):\n def setUp(self):\n A = np.arange(20).reshape(5, 4)\n b = np.arange(5).astype(np.float64)\n self.prob = blitzml.LassoProblem(A, b)\n\n def tearDown(self):\n del self.prob\n\n def test_min_time(self):\n self.assertLessEqual(self.prob._min_time, 0.)\n self.prob._min_time = 2.0\n self.assertEqual(self.prob._min_time, 2.0)\n\n def test_max_time(self):\n self.assertGreaterEqual(self.prob._max_time, 3600.)\n self.prob._max_time = 5.0\n self.assertEqual(self.prob._max_time, 5.0)\n\n def test_max_iterations(self):\n self.assertGreaterEqual(self.prob._max_iterations, 100)\n self.prob._max_iterations = 10\n self.assertEqual(self.prob._max_iterations, 10)\n\n def test_tolerance(self):\n self.assertGreater(self.prob._stopping_tolerance, 0.)\n self.prob._stopping_tolerance = 0.\n self.assertEqual(self.prob._stopping_tolerance, 0.)\n self.prob._stopping_tolerance = 0.1\n self.assertEqual(self.prob._stopping_tolerance, 0.1)\n\n def test_verbose(self):\n self.assertEqual(self.prob._verbose, False)\n self.prob._verbose = True\n self.assertEqual(self.prob._verbose, True)\n\n def test_use_screening(self):\n self.assertEqual(self.prob._use_screening, True)\n self.prob._use_screening = False\n self.assertEqual(self.prob._use_screening, False)\n\n def test_use_working_sets(self):\n self.assertEqual(self.prob._use_working_sets, True)\n self.prob._use_working_sets = False\n self.assertEqual(self.prob._use_working_sets, False)\n\n def test_suppress_warnings(self):\n bad_log_dir = \"path/to/bad_log/dir/zxc8aj3n\"\n with captured_output() as out:\n self.prob.solve(self.prob.compute_max_l1_penalty(),\n log_directory=bad_log_dir)\n self.assertIn(\"Warning\", out[0])\n\n blitzml.suppress_warnings()\n\n with captured_output() as out:\n self.prob.solve(self.prob.compute_max_l1_penalty(),\n log_directory=bad_log_dir)\n self.assertNotIn(\"Warning\", out[0])\n\n blitzml.unsuppress_warnings()\n\n",
"import unittest\nimport blitzml\nimport numpy as np\nfrom scipy import sparse as sp\n\nfrom common import captured_output\nfrom common import matrix_vector_product\nfrom common import normalize_labels\n\ndef is_solution(sol, A, b, lam, tol=1e-3):\n Aomega = sol.bias + matrix_vector_product(A, sol.weights)\n exp_bAomega = np.exp(b * Aomega)\n grads = matrix_vector_product(A.T, -b / (1 + exp_bAomega))\n max_grads = np.max(abs(grads))\n if max_grads > lam * (1 + tol):\n return False\n pos_grads_diff = grads[sol.weights > 0] + lam\n if len(pos_grads_diff) and max(abs(pos_grads_diff)) > lam * tol:\n return False\n neg_grads_diff = grads[sol.weights < 0] - lam\n if len(neg_grads_diff) and max(abs(neg_grads_diff)) > lam * tol:\n return False\n return True\n\n\n\nclass TestSparseLogRegInitialConditions(unittest.TestCase):\n def test_sparse_logreg_bad_initial_conditions(self):\n n = 7\n d = 3\n A = np.arange(n * d).reshape(n, d)\n b = normalize_labels(np.arange(n), True)\n prob = blitzml.SparseLogisticRegressionProblem(A, b)\n lammax = prob.compute_max_l1_penalty()\n weights0 = -1 * np.arange(d)\n lam = 0.02 * lammax\n sol = prob.solve(lam, initial_weights=weights0, stopping_tolerance=1e-6)\n self.assertEqual(is_solution(sol, A, b, lam), True)\n\n def test_sparse_logreg_good_initial_conditions(self):\n n = 9\n d = 21\n np.random.seed(0)\n A = np.random.randn(n, d)\n b = normalize_labels(np.random.randn(n), True)\n prob = blitzml.SparseLogisticRegressionProblem(A, b)\n lammax = prob.compute_max_l1_penalty()\n lam = 0.03 * lammax\n sol0 = prob.solve(lam, stopping_tolerance=1e-4, max_time=1.0)\n sol = prob.solve(lam, initial_weights=sol0.weights, max_time=-1.0)\n self.assertEqual(is_solution(sol, A, b, lam), True)\n\n\nclass TestSparseLogRegBadLabels(unittest.TestCase):\n def test_sparse_logreg_non_pm1_labels(self):\n b = np.array([-1., 0., 1.])\n A = np.zeros((3, 3))\n with captured_output() as out:\n prob = blitzml.SparseLogisticRegressionProblem(A, b)\n message = out[0]\n self.assertIn(\"Warning\", message)\n\n def test_sparse_logreg_bad_label_too_large(self):\n b = np.array([-1., 0., 2.])\n A = np.zeros((3, 3))\n with captured_output() as out:\n prob = blitzml.SparseLogisticRegressionProblem(A, b)\n message = out[0]\n self.assertIn(\"Warning\", message)\n\n def test_sparse_logreg_bad_label_too_small(self):\n b = np.array([-1., 0., -2.])\n A = np.zeros((3, 3))\n with captured_output() as out:\n prob = blitzml.SparseLogisticRegressionProblem(A, b)\n message = out[0]\n self.assertIn(\"Warning\", message)\n\n def test_sparse_logreg_dimension_mismatch(self):\n b = np.array([-1., 0., -2.])\n A = np.zeros((2, 3))\n def make_prob():\n prob = blitzml.SparseLogisticRegressionProblem(A, b)\n self.assertRaises(ValueError, make_prob)\n\n def test_sparse_logreg_all_positive_labels_warning(self):\n b = np.array([0., 1.0, 0.5])\n A = np.zeros((3, 3))\n with captured_output() as out:\n prob = blitzml.SparseLogisticRegressionProblem(A, b)\n message = out[0]\n self.assertIn(\"Warning\", message)\n\n\n"
] | [
[
"numpy.arange"
],
[
"numpy.random.seed",
"numpy.arange",
"numpy.random.randn",
"numpy.exp",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
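The is_solution helper in test_sparse_logreg.py is a KKT check for L1-regularised logistic regression: every coordinate of the loss gradient must lie in [-lam, lam], and coordinates carrying a nonzero weight must sit on that boundary with the opposite sign. Restated compactly (pure numpy, dense A, bias omitted; kkt_violation is an illustrative name, not a BlitzML API):

import numpy as np

def kkt_violation(A, b, w, lam):
    # objective: sum(log(1 + exp(-b * (A @ w)))) + lam * ||w||_1
    grad = A.T @ (-b / (1.0 + np.exp(b * (A @ w))))  # gradient of the logistic loss
    v = max(np.abs(grad).max() - lam, 0.0)           # |grad_j| <= lam must hold everywhere
    support = w != 0
    if support.any():
        # on the support, grad_j must equal -lam * sign(w_j)
        v = max(v, np.abs(grad[support] + lam * np.sign(w[support])).max())
    return v  # approximately 0 at an exact optimum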
5joono/Swin-Transformer | [
"b5b7e85aa11ad72b2bec2d458fa78066e4c3d0f2"
] | [
"multiprune_plusone/multiprune_plusone.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\nos.environ['MKL_THREADING_LAYER'] = 'GNU'\n\n# df = pd.DataFrame(columns=['multiprune', 'headstr', 'pluslayer', 'plushead', 'acc1'])\n# df.to_csv(\"multiprune_plusone.csv\",index=False)\n\nprevheadlist = [set([7]),set([11]),set([0]),set([7]),set([9]),set([9])]\nplusheadlist = [set(range(12))-{7},set(range(12))-{11},set(range(12))-{0},set(range(12))-{7},set(range(12))-{9},set(range(12))-{9}]\n\nfor multiprune in range(1,12):\n \n headstr = []\n for oneset in prevheadlist:\n setstr = [str(int(s)) for s in oneset]\n setstr = '+'.join(setstr)\n headstr.append(setstr)\n headstr = '.'.join(headstr)\n \n for pluslayer in range(6):\n for plushead in plusheadlist[pluslayer]:\n os.system(f'python -m torch.distributed.launch --nproc_per_node 1 --master_port 12345 main.py --eval --cfg configs/swin_tiny_patch4_window7_224.yaml --resume swin_tiny_patch4_window7_224.pth --data-path data/imagenet/ --prune {multiprune}_{headstr}_{pluslayer}_{plushead}')\n \n df = pd.read_csv(\"multiprune_plusone.csv\")\n df = df[(df.multiprune == multiprune) & (df.pluslayer == pluslayer)]\n df = df.apply(pd.to_numeric, errors = 'coerce')\n max_acc1_idx = df.idxmax().acc1\n plusheadlist[pluslayer].remove(df.loc[max_acc1_idx].plushead)\n prevheadlist[pluslayer].add(df.loc[max_acc1_idx].plushead)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
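The multiprune script above is a greedy forward selection: for every layer it evaluates each remaining candidate head, reads the resulting acc1 rows back from multiprune_plusone.csv, and permanently promotes the best head from the candidate set to the kept set. The selection step in isolation looks roughly like this (column names follow the CSV written by main.py; select_best_head is an illustrative wrapper, and the actual evaluation happens in the os.system launch):

import pandas as pd

def select_best_head(csv_path, multiprune, pluslayer, kept, candidates):
    df = pd.read_csv(csv_path)
    df = df[(df.multiprune == multiprune) & (df.pluslayer == pluslayer)]
    df = df.apply(pd.to_numeric, errors='coerce')
    best_row = df.loc[df['acc1'].idxmax()]  # row with the highest top-1 accuracy
    head = int(best_row.plushead)
    candidates.discard(head)   # drop from the candidate pool
    kept.add(head)             # keep permanently for this layer
    return head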
duanzhiihao/mycv | [
"184b52f7a5c1b6f603122d4f4050952b65ba0ead",
"184b52f7a5c1b6f603122d4f4050952b65ba0ead",
"184b52f7a5c1b6f603122d4f4050952b65ba0ead",
"184b52f7a5c1b6f603122d4f4050952b65ba0ead"
] | [
"mycv/train.py",
"scripts/divide_images.py",
"mycv/utils/aug.py",
"mycv/models/probabilistic/logmixture.py"
] | [
"from mycv.utils.general import disable_multithreads\ndisable_multithreads()\nimport os\nfrom pathlib import Path\nimport argparse\nfrom tqdm import tqdm\nimport math\nimport torch\nimport torch.cuda.amp as amp\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nimport wandb\n\nfrom mycv.utils.general import increment_dir\nfrom mycv.utils.torch_utils import set_random_seeds, ModelEMA\nfrom mycv.datasets.imagenet import ImageNetCls, imagenet_val\n\n\ndef cal_acc(p: torch.Tensor, labels: torch.LongTensor):\n assert not p.requires_grad and p.device == labels.device\n assert p.dim() == 2 and p.shape[0] == labels.shape[0]\n _, p_cls = torch.max(p, dim=1)\n tp = (p_cls == labels)\n acc = tp.sum() / len(tp)\n return acc\n\n\ndef train():\n # ====== set the run settings ======\n parser = argparse.ArgumentParser()\n parser.add_argument('--project', type=str, default='imagenet')\n parser.add_argument('--group', type=str, default='mini200')\n parser.add_argument('--model', type=str, default='csp_s')\n parser.add_argument('--resume', type=str, default='')\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--amp', type=bool, default=True)\n parser.add_argument('--ema', type=bool, default=True)\n parser.add_argument('--optimizer', type=str, default='SGD', choices=['Adam', 'SGD'])\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--metric', type=str, default='top1', choices=['top1'])\n parser.add_argument('--device', type=int, default=0)\n parser.add_argument('--workers', type=int, default=4)\n parser.add_argument('--local_rank', type=int, default=-1, help='DDP arg, do not modify')\n parser.add_argument('--wbmode', action='store_true')\n cfg = parser.parse_args()\n # model\n cfg.img_size = 224\n cfg.input_norm = False\n cfg.sync_bn = False\n # optimizer\n cfg.lr = 0.01\n cfg.momentum = 0.9\n cfg.weight_decay = 0.0001\n cfg.nesterov = False\n # lr scheduler\n cfg.lrf = 0.2 # min lr factor\n cfg.lr_warmup_epochs = 1\n # EMA\n # cfg.ema_decay = 0.999\n cfg.ema_warmup_epochs = 4\n # Main process\n IS_MAIN = (cfg.local_rank in [-1, 0])\n\n # check arguments\n metric: str = cfg.metric.lower()\n epochs: int = cfg.epochs\n local_rank: int = cfg.local_rank\n world_size: int = int(os.environ.get('WORLD_SIZE', 1))\n assert local_rank == int(os.environ.get('RANK', -1)), 'Only support single node'\n assert cfg.batch_size % world_size == 0, 'batch_size must be multiple of device count'\n batch_size: int = cfg.batch_size // world_size\n if IS_MAIN:\n print(cfg, '\\n')\n print('Batch size on each single GPU =', batch_size, '\\n')\n # fix random seeds for reproducibility\n set_random_seeds(1)\n torch.backends.cudnn.benchmark = True\n # device setting\n assert torch.cuda.is_available()\n if local_rank == -1: # Single GPU\n device = torch.device(f'cuda:{cfg.device}')\n else: # DDP mode\n assert torch.cuda.device_count() > local_rank and torch.distributed.is_available()\n torch.cuda.set_device(local_rank)\n device = torch.device('cuda', local_rank)\n torch.distributed.init_process_group(\n backend='nccl', init_method='env://', world_size=world_size, rank=local_rank\n )\n print(f'Local rank: {local_rank}, using device {device}:', 'device property:',\n torch.cuda.get_device_properties(device))\n\n # Dataset\n if IS_MAIN:\n print('Initializing Datasets and Dataloaders...')\n if cfg.group == 'default':\n train_split = 'train'\n val_split = 'val'\n cfg.num_class = 1000\n elif cfg.group == 'mini200':\n 
train_split = 'train200_600'\n val_split = 'val200_600'\n cfg.num_class = 200\n else:\n raise ValueError()\n # training set\n trainset = ImageNetCls(train_split, img_size=cfg.img_size, input_norm=cfg.input_norm)\n sampler = torch.utils.data.distributed.DistributedSampler(\n trainset, num_replicas=world_size, rank=local_rank, shuffle=True\n ) if local_rank != -1 else None\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=batch_size, shuffle=(sampler is None), sampler=sampler,\n num_workers=cfg.workers, pin_memory=True\n )\n # test set\n testloader = torch.utils.data.DataLoader(\n ImageNetCls(split=val_split, img_size=cfg.img_size, input_norm=cfg.input_norm),\n batch_size=batch_size, shuffle=False, num_workers=cfg.workers//2,\n pin_memory=True, drop_last=False\n )\n\n # Initialize model\n if cfg.model == 'res50':\n from mycv.models.cls.resnet import resnet50\n model = resnet50(num_classes=cfg.num_class)\n elif cfg.model == 'res101':\n from mycv.models.cls.resnet import resnet101\n model = resnet101(num_classes=cfg.num_class)\n elif cfg.model.startswith('yolov5'):\n from mycv.models.yolov5.cls import YOLOv5Cls\n assert cfg.model[-1] in ['s', 'm', 'l']\n model = YOLOv5Cls(model=cfg.model[-1], num_class=cfg.num_class)\n elif cfg.model.startswith('csp'):\n from mycv.models.yolov5.cls import CSP\n assert cfg.model[-1] in ['s', 'm', 'l']\n model = CSP(model=cfg.model[-1], num_class=cfg.num_class)\n else:\n raise NotImplementedError()\n model = model.to(device)\n # loss function\n loss_func = torch.nn.CrossEntropyLoss(reduction='mean')\n\n # different optimization setting for different layers\n pgb, pgw = [], []\n for k, v in model.named_parameters():\n if ('.bn' in k) or ('.bias' in k): # batchnorm or bias\n pgb.append(v)\n else: # conv weights\n assert '.weight' in k\n pgw.append(v)\n parameters = [\n {'params': pgb, 'lr': cfg.lr, 'weight_decay': 0.0},\n {'params': pgw, 'lr': cfg.lr, 'weight_decay': cfg.weight_decay}\n ]\n if IS_MAIN:\n print('Parameter groups:', [len(pg['params']) for pg in parameters])\n del pgb, pgw\n\n # optimizer\n if cfg.optimizer == 'SGD':\n optimizer = torch.optim.SGD(parameters, lr=cfg.lr,\n momentum=cfg.momentum, nesterov=cfg.nesterov)\n elif cfg.optimizer == 'Adam':\n optimizer = torch.optim.Adam(parameters, lr=cfg.lr)\n else:\n raise ValueError()\n # AMP\n scaler = amp.GradScaler(enabled=cfg.amp)\n\n log_parent = Path(f'runs/{cfg.project}')\n wb_id = None\n results = {metric: 0}\n if cfg.resume:\n # resume\n run_name = cfg.resume\n log_dir = log_parent / run_name\n assert log_dir.is_dir()\n checkpoint = torch.load(log_dir / 'last.pt')\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scaler.load_state_dict(checkpoint['scaler'])\n start_epoch = checkpoint['epoch'] + 1\n cur_fitness = best_fitness = checkpoint.get(metric, 0)\n if IS_MAIN:\n wb_id = open(log_dir / 'wandb_id.txt', 'r').read()\n else:\n # new experiment\n run_name = increment_dir(dir_root=log_parent, name=cfg.model)\n log_dir = log_parent / run_name # wandb logging dir\n if IS_MAIN:\n os.makedirs(log_dir, exist_ok=False)\n print(str(model), file=open(log_dir / 'model.txt', 'w'))\n start_epoch = 0\n cur_fitness = best_fitness = 0\n\n # initialize wandb\n if IS_MAIN:\n wbrun = wandb.init(project=cfg.project, group=cfg.group, name=run_name, config=cfg,\n dir='runs/', resume='allow', id=wb_id, mode=cfg.wbmode)\n cfg = wbrun.config\n cfg.log_dir = log_dir\n cfg.wandb_id = wbrun.id\n if not (log_dir / 'wandb_id.txt').exists():\n with 
open(log_dir / 'wandb_id.txt', 'w') as f:\n f.write(wbrun.id)\n else:\n wbrun = None\n\n # lr scheduler\n def warmup_cosine(x):\n warmup_iter = cfg.lr_warmup_epochs * len(trainloader)\n if x < warmup_iter:\n factor = x / warmup_iter\n else:\n _cur = x - warmup_iter + 1\n _total = epochs * len(trainloader)\n factor = cfg.lrf + 0.5 * (1 - cfg.lrf) * (1 + math.cos(_cur * math.pi / _total))\n return factor\n scheduler = LambdaLR(optimizer, lr_lambda=warmup_cosine, last_epoch=start_epoch - 1)\n\n # SyncBatchNorm\n if local_rank != -1 and cfg.sync_bn:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n\n # Exponential moving average\n if IS_MAIN and cfg.ema:\n emas = [\n ModelEMA(model, decay=0.99),\n ModelEMA(model, decay=0.999),\n ModelEMA(model, decay=0.9999)\n ]\n for ema in emas:\n ema.updates = start_epoch * len(trainloader) # set EMA updates\n ema.warmup = cfg.ema_warmup_epochs * len(trainloader) # set EMA warmup\n else:\n emas = None\n\n # DDP mode\n if local_rank != -1:\n model = DDP(model, device_ids=[local_rank], output_device=local_rank)\n\n # ======================== start training ========================\n niter = s = None\n for epoch in range(start_epoch, epochs):\n model.train()\n if local_rank != -1:\n trainloader.sampler.set_epoch(epoch)\n optimizer.zero_grad()\n\n pbar = enumerate(trainloader)\n train_loss, train_acc = 0.0, 0.0\n if IS_MAIN:\n pbar_title = ('%-10s' * 6) % (\n 'Epoch', 'GPU_mem', 'lr', 'tr_loss', 'tr_acc', metric\n )\n print('\\n' + pbar_title) # title\n pbar = tqdm(pbar, total=len(trainloader))\n for i, (imgs, labels) in pbar:\n # debugging\n # if True:\n # import matplotlib.pyplot as plt\n # from mycv.datasets.food101 import CLASS_NAMES\n # for im, lbl in zip(imgs, labels):\n # im = im * trainset._input_std + trainset._input_mean\n # im = im.permute(1,2,0).numpy()\n # print(CLASS_NAMES[lbl])\n # plt.imshow(im); plt.show()\n imgs = imgs.to(device=device)\n labels = labels.to(device=device)\n\n # forward\n with amp.autocast(enabled=cfg.amp):\n p = model(imgs)\n loss = loss_func(p, labels) * imgs.shape[0]\n if local_rank != -1:\n loss = loss * world_size\n # loss is averaged within image, sumed over batch, and sumed over gpus\n # backward, update\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n if emas:\n for ema in emas:\n ema.update(model)\n # Scheduler\n scheduler.step()\n\n # logging\n if IS_MAIN:\n niter = epoch * len(trainloader) + i\n cur_lr = optimizer.param_groups[0]['lr']\n loss = loss.detach().cpu().item()\n acc = cal_acc(p.detach(), labels)\n train_loss = (train_loss*i + loss) / (i+1)\n train_acc = (train_acc*i + acc) / (i+1)\n mem = torch.cuda.max_memory_allocated(device) / 1e9\n s = ('%-10s' * 2 + '%-10.4g' * 4) % (\n f'{epoch}/{epochs-1}', f'{mem:.3g}G',\n cur_lr, train_loss, 100*train_acc, 100*cur_fitness\n )\n pbar.set_description(s)\n torch.cuda.reset_peak_memory_stats()\n # Weights & Biases logging\n if niter % 100 == 0:\n wbrun.log({\n 'general/lr': cur_lr,\n 'metric/train_loss': train_loss,\n 'metric/train_acc': train_acc,\n 'ema/n_updates': emas[0].updates if emas is not None else 0,\n 'ema0/decay': emas[0].get_decay() if emas is not None else 0,\n 'ema1/decay': emas[1].get_decay() if emas is not None else 0,\n 'ema2/decay': emas[2].get_decay() if emas is not None else 0,\n }, step=niter)\n # logging end\n # ----Mini batch end\n # ----Epoch end\n # If DDP mode, synchronize model parameters on all gpus\n if local_rank != -1:\n 
model._sync_params_and_buffers(authoritative_rank=0)\n\n # Evaluation\n if IS_MAIN:\n # results is like {'top1': xxx, 'top5': xxx}\n _log_dic = {'general/epoch': epoch}\n results = imagenet_val(model, split=val_split, testloader=testloader)\n _log_dic.update({'metric/plain_val_'+k: v for k,v in results.items()})\n\n res_emas = torch.zeros(len(emas))\n if emas is not None:\n for ei, ema in enumerate(emas):\n results = imagenet_val(ema.ema, split=val_split, testloader=testloader)\n _log_dic.update({f'metric/ema{ei}_val_'+k: v for k,v in results.items()})\n res_emas[ei] = results[metric]\n # select best result among all emas\n _idx = torch.argmax(res_emas)\n cur_fitness = res_emas[_idx]\n _save_model = emas[_idx].ema\n best_decay = emas[_idx].final_decay\n else:\n cur_fitness = results[metric]\n _save_model = model\n best_decay = 0\n # wandb log\n wbrun.log(_log_dic, step=niter)\n # Write evaluation results\n res = s + '||' + '%10.4g' * 1 % (results[metric])\n with open(log_dir / 'results.txt', 'a') as f:\n f.write(res + '\\n')\n # save last checkpoint\n checkpoint = {\n 'model' : _save_model.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n 'scaler' : scaler.state_dict(),\n 'epoch' : epoch,\n metric : cur_fitness,\n 'best_decay': best_decay\n }\n torch.save(checkpoint, log_dir / 'last.pt')\n # save best checkpoint\n if cur_fitness > best_fitness:\n best_fitness = cur_fitness\n torch.save(checkpoint, log_dir / 'best.pt')\n del checkpoint\n # ----Epoch end\n # ----Training end\n\n\nif __name__ == '__main__':\n train()\n\n # from mycv.models.cls.resnet import resnet50\n # model = resnet50(num_classes=1000)\n # weights = torch.load('weights/resnet50-19c8e357.pth')\n # model.load_state_dict(weights)\n # model = model.cuda()\n # model.eval()\n # results = imagenet_val(model, img_size=224, batch_size=64, workers=4)\n # print(results['top1'])\n",
"import argparse\nimport os\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport numpy as np\nimport cv2\n\nfrom mycv.paths import IMPROC_DIR\nfrom mycv.utils.image import scale\n\n\ndef divide(args):\n img_dir = Path(args.img_dir)\n assert img_dir.is_dir()\n\n patch_size = args.size # patch size\n step = args.step\n new_dir = img_dir.parent / f'{img_dir.stem}_hw{patch_size}s{step}'\n new_dir.mkdir(exist_ok=False)\n print(f'Saving to {new_dir}...')\n\n pbar = tqdm(os.listdir(img_dir))\n skipped = 0\n for imname in pbar:\n assert imname.endswith('.png'), f'Expect {imname} to be a png file.'\n impath = str(img_dir / imname)\n im = cv2.imread(impath)\n assert im is not None, impath\n\n imh, imw = im.shape[:2]\n # skip the image if it's too small\n if min(imh, imw) < patch_size:\n skipped += 1\n pbar.set_description(f'Copy-paste {imname} ({imh}x{imw}), total={skipped}')\n save_path = new_dir / imname\n cv2.imwrite(str(save_path), im)\n continue\n\n h_starts = np.arange(0, imh, step=step)[:-1]\n w_starts = np.arange(0, imw, step=step)[:-1]\n for y1 in h_starts:\n for x1 in w_starts:\n y2 = y1 + patch_size\n if y2 > imh:\n assert y1 == h_starts[-1], f'{y1}, {h_starts}'\n y2 = imh\n x2 = x1 + patch_size\n if x2 > imw:\n assert x1 == w_starts[-1], f'{x1}, {w_starts}'\n x2 = imw\n window = im[y1:y2, x1:x2, :]\n if min(window.shape[:2]) < step:\n print(f'window shape={window.shape} < step={step}, skipped')\n # import matplotlib.pyplot as plt\n # plt.imshow(window); plt.show()\n continue\n # assert window.shape == (sz, sz, 3)\n save_path = new_dir / (f'{imname[:-4]}_{y1}_{x1}.png')\n cv2.imwrite(str(save_path), window)\n debug = 1\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--img_dir', type=str, default=str(IMPROC_DIR/'clic/train'))\n parser.add_argument('--size', type=int, default=512)\n parser.add_argument('--step', type=int, default=257)\n args = parser.parse_args()\n divide(args)\n",
"from typing import Union\nimport random\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport torch\nimport torchvision.transforms.functional as tvf\n\nimport mycv.utils.image as imgUtils\n\n\ndef rand_aug_cls(im: np.ndarray):\n ''' Data augmentation for image classification\n\n Args:\n im: BGR\n '''\n assert imgUtils.is_image(im)\n # horizontal flip\n if torch.rand(1) > 0.5:\n im = cv2.flip(im, 1)\n # color\n im = augment_hsv(im, hgain=0.004, sgain=0.4, vgain=0.2)\n # Additive Gaussian\n # im = im.astype(np.float32)\n # im = im + np.random.randn(im.shape)\n return im\n\n\ndef random_scale(im: np.ndarray, low: int, high: int):\n ''' random scale\n '''\n assert imgUtils.is_image(im)\n size = random.randint(low, high)\n im = imgUtils.scale(im, size, side='shorter')\n return im\n\n\ndef random_crop(im: np.ndarray, crop_hw: tuple):\n \"\"\" random crop\n\n Args:\n im (np.ndarray): [description]\n crop_hw (tuple): [description]\n \"\"\"\n if imgUtils.is_image(im, cv2_ok=True, pil_ok=False):\n im = _random_crop_cv2(im, crop_hw)\n elif imgUtils.is_image(im, cv2_ok=False, pil_ok=True):\n im = _random_crop_pil(im, crop_hw)\n else:\n raise ValueError('Input is neither a valid cv2 or PIL image.')\n return im\n\n\ndef _random_crop_cv2(im: np.ndarray, crop_hw: tuple):\n \"\"\" random crop for cv2\n \"\"\"\n height, width = im.shape[:2]\n if (height, width) == crop_hw:\n return im\n ch, cw = crop_hw\n assert height >= ch and width >= cw\n y1 = random.randint(0, height-ch)\n x1 = random.randint(0, width-cw)\n im = im[y1:y1+ch, x1:x1+cw, :]\n return im\n\n\ndef _random_crop_pil(img: Image.Image, crop_hw: tuple):\n \"\"\" random crop for cv2\n \"\"\"\n height, width = img.height, img.width\n if (height, width) == crop_hw:\n return img\n ch, cw = crop_hw\n assert height >= ch and width >= cw\n y1 = random.randint(0, height-ch)\n x1 = random.randint(0, width-cw)\n img = img.crop(box=(x1, y1, x1+cw, y1+ch))\n return img\n\n\ndef augment_hsv(im, hgain=0.1, sgain=0.5, vgain=0.5):\n '''\n HSV space augmentation\n '''\n raise DeprecationWarning()\n\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))\n assert im.dtype == np.uint8\n\n x = np.arange(0, 256, dtype=np.int16)\n lut_hue = ((x * r[0]) % 180).astype(np.uint8)\n lut_sat = np.clip(x * r[1], 0, 255).astype(np.uint8)\n lut_val = np.clip(x * r[2], 0, 255).astype(np.uint8)\n\n img_hsv = cv2.merge(\n (cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))\n ).astype(np.uint8)\n im = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)\n return im\n\n\ndef random_hflip(im: Union[np.ndarray, Image.Image]):\n '''\n Random horizontal flip with probability 0.5\n\n Args:\n im: cv2 or PIL image\n '''\n flag = (torch.rand(1) > 0.5).item()\n if flag:\n return im\n\n if isinstance(im, np.ndarray):\n im = cv2.flip(im, 1) # horizontal flip\n elif isinstance(im, Image.Image):\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n else:\n raise ValueError()\n\n return im\n\n\nif __name__ == \"__main__\":\n from mycv.paths import MYCV_DIR\n img_path = MYCV_DIR / 'images/bus.jpg'\n assert img_path.exists()\n\n im = cv2.imread(str(img_path))\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n\n # import albumentations as album\n # import matplotlib.pyplot as plt\n # plt.figure(); plt.axis('off'); plt.imshow(im)\n \n # transform = album.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.7, hue=0.06, p=1)\n # for _ in range(8):\n # # imaug = augment_hsv(im, hgain=0.1, sgain=0, vgain=0)\n # 
imaug = transform(image=im)['image']\n # plt.figure(); plt.imshow(imaug)\n\n # plt.show()\n",
"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as tnf\n\n\ndef get_conv(in_ch, out_ch, kernel_size, stride, padding, zero_bias=True, zero_weights=False):\n conv = nn.Conv2d(in_ch, out_ch, kernel_size, stride, padding)\n if zero_bias:\n conv.bias.data.mul_(0.0)\n if zero_weights:\n conv.weight.data.mul_(0.0)\n return conv\n\n\ndef const_max(t, constant):\n other = torch.ones_like(t) * constant\n return torch.max(t, other)\n\n\ndef const_min(t, constant):\n other = torch.ones_like(t) * constant\n return torch.min(t, other)\n\n\ndef log_prob_from_logits(x):\n \"\"\" numerically stable log_softmax implementation that prevents overflow \"\"\"\n axis = len(x.shape) - 1\n m = x.max(dim=axis, keepdim=True)[0]\n return x - m - torch.log(torch.exp(x - m).sum(dim=axis, keepdim=True))\n\n\ndef discretized_mix_logistic_loss(x, l, low_bit=False):\n \"\"\" log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval \"\"\"\n # Adapted from https://github.com/openai/pixel-cnn/blob/master/pixel_cnn_pp/nn.py\n xs = [s for s in x.shape] # true image (i.e. labels) to regress to, e.g. (B,32,32,3)\n ls = [s for s in l.shape] # predicted distribution, e.g. (B,32,32,100)\n nr_mix = int(ls[-1] / 10) # here and below: unpacking the params of the mixture of logistics\n logit_probs = l[:, :, :, :nr_mix]\n l = torch.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])\n means = l[:, :, :, :, :nr_mix]\n log_scales = const_max(l[:, :, :, :, nr_mix:2 * nr_mix], -7.)\n coeffs = torch.tanh(l[:, :, :, :, 2 * nr_mix:3 * nr_mix])\n x = torch.reshape(x, xs + [1]) + torch.zeros(xs + [nr_mix]).to(x.device) # here and below: getting the means and adjusting them based on preceding sub-pixels\n m2 = torch.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :] * x[:, :, :, 0, :], [xs[0], xs[1], xs[2], 1, nr_mix])\n m3 = torch.reshape(means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * x[:, :, :, 0, :] + coeffs[:, :, :, 2, :] * x[:, :, :, 1, :], [xs[0], xs[1], xs[2], 1, nr_mix])\n means = torch.cat([torch.reshape(means[:, :, :, 0, :], [xs[0], xs[1], xs[2], 1, nr_mix]), m2, m3], dim=3)\n centered_x = x - means\n inv_stdv = torch.exp(-log_scales)\n if low_bit:\n plus_in = inv_stdv * (centered_x + 1. / 31.)\n cdf_plus = torch.sigmoid(plus_in)\n min_in = inv_stdv * (centered_x - 1. / 31.)\n else:\n plus_in = inv_stdv * (centered_x + 1. / 255.)\n cdf_plus = torch.sigmoid(plus_in)\n min_in = inv_stdv * (centered_x - 1. / 255.)\n cdf_min = torch.sigmoid(min_in)\n log_cdf_plus = plus_in - tnf.softplus(plus_in) # log probability for edge case of 0 (before scaling)\n log_one_minus_cdf_min = -tnf.softplus(min_in) # log probability for edge case of 255 (before scaling)\n cdf_delta = cdf_plus - cdf_min # probability for all other cases\n mid_in = inv_stdv * centered_x\n log_pdf_mid = mid_in - log_scales - 2. 
* tnf.softplus(mid_in) # log probability in the center of the bin, to be used in extreme cases (not actually used in our code)\n\n # now select the right output: left edge case, right edge case, normal case, extremely low prob case (doesn't actually happen for us)\n\n # this is what we are really doing, but using the robust version below for extreme cases in other applications and to avoid NaN issue with tf.select()\n # log_probs = tf.select(x < -0.999, log_cdf_plus, tf.select(x > 0.999, log_one_minus_cdf_min, tf.log(cdf_delta)))\n\n # robust version, that still works if probabilities are below 1e-5 (which never happens in our code)\n # tensorflow backpropagates through tf.select() by multiplying with zero instead of selecting: this requires use to use some ugly tricks to avoid potential NaNs\n # the 1e-12 in tf.maximum(cdf_delta, 1e-12) is never actually used as output, it's purely there to get around the tf.select() gradient issue\n # if the probability on a sub-pixel is below 1e-5, we use an approximation based on the assumption that the log-density is constant in the bin of the observed sub-pixel value\n if low_bit:\n log_probs = torch.where(x < -0.999,\n log_cdf_plus,\n torch.where(x > 0.999,\n log_one_minus_cdf_min,\n torch.where(cdf_delta > 1e-5,\n torch.log(const_max(cdf_delta, 1e-12)),\n log_pdf_mid - math.log(15.5))))\n else:\n log_probs = torch.where(x < -0.999,\n log_cdf_plus,\n torch.where(x > 0.999,\n log_one_minus_cdf_min,\n torch.where(cdf_delta > 1e-5,\n torch.log(const_max(cdf_delta, 1e-12)),\n log_pdf_mid - math.log(127.5))))\n log_probs = log_probs.sum(dim=3) + log_prob_from_logits(logit_probs)\n mixture_probs = torch.logsumexp(log_probs, -1)\n xnB, xnH, xnW, xnC, xnMixture = x.shape\n return -1. * mixture_probs.sum(dim=[1, 2]) / (xnC * xnH * xnW)\n\n\ndef sample_from_discretized_mix_logistic(l, nr_mix):\n ls = [s for s in l.shape]\n xs = ls[:-1] + [3]\n # unpack parameters\n logit_probs = l[:, :, :, :nr_mix]\n l = torch.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])\n # sample mixture indicator from softmax\n eps = torch.empty(logit_probs.shape, device=l.device).uniform_(1e-5, 1. - 1e-5)\n amax = torch.argmax(logit_probs - torch.log(-torch.log(eps)), dim=3)\n sel = tnf.one_hot(amax, num_classes=nr_mix).float()\n sel = torch.reshape(sel, xs[:-1] + [1, nr_mix])\n # select logistic parameters\n means = (l[:, :, :, :, :nr_mix] * sel).sum(dim=4)\n log_scales = const_max((l[:, :, :, :, nr_mix:nr_mix * 2] * sel).sum(dim=4), -7.)\n coeffs = (torch.tanh(l[:, :, :, :, nr_mix * 2:nr_mix * 3]) * sel).sum(dim=4)\n # sample from logistic & clip to interval\n # we don't actually round to the nearest 8bit value when sampling\n u = torch.empty(means.shape, device=means.device).uniform_(1e-5, 1. - 1e-5)\n x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. 
- u))\n x0 = const_min(const_max(x[:, :, :, 0], -1.), 1.)\n x1 = const_min(const_max(x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)\n x2 = const_min(const_max(x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)\n return torch.cat([torch.reshape(x0, xs[:-1] + [1]), torch.reshape(x1, xs[:-1] + [1]), torch.reshape(x2, xs[:-1] + [1])], dim=3)\n\n\nclass DmolNet(nn.Module):\n def __init__(self, hyp=None, width=None, num_mixtures=None, low_bit=False):\n super().__init__()\n if hasattr(hyp, 'dec_configs'):\n _cfg = hyp.dec_configs[-1]\n width = _cfg.get('out_ch', _cfg.get('width'))\n num_mixtures = hyp.num_mixtures\n low_bit = hyp.get('low_bit', False)\n self.num_mixtures = num_mixtures\n self.low_bit = low_bit\n self.out_conv = get_conv(width, num_mixtures * 10, kernel_size=1, stride=1, padding=0)\n\n def nll(self, pred_features, x_tgt):\n # return discretized_mix_logistic_loss(x=x, l=self.forward(pred_features), low_bit=self.H.dataset in ['ffhq_256'])\n params = self.forward(pred_features)\n return discretized_mix_logistic_loss(x=x_tgt, l=params, low_bit=self.low_bit)\n\n def bpp(self, pred_features, x_tgt):\n nC = x_tgt.shape[1]\n assert nC == 3\n nll = self.nll(pred_features, x_tgt)\n bpp = nll * nC * math.log2(math.e)\n return bpp\n\n def forward(self, px_z):\n xhat = self.out_conv(px_z)\n return xhat.permute(0, 2, 3, 1)\n\n def sample(self, px_z):\n xhat = sample_from_discretized_mix_logistic(self.forward(px_z), self.num_mixtures)\n imgs = (xhat + 1.0) / 2\n imgs = imgs.detach().cpu().clone().clamp_(min=0, max=1).permute(0, 3, 1, 2)\n # xhat = xhat.mul_(255.0).round_().to(dtype=torch.uint8).numpy()\n # xhat = np.minimum(np.maximum(0.0, xhat), 255.0).astype(np.uint8)\n return imgs\n\n\nclass MyDmolNet(nn.Module):\n def __init__(self, in_ch, num_mixtures=10, loss='bits'):\n super().__init__()\n self.num_mixtures = num_mixtures\n self.out_conv = get_conv(in_ch, num_mixtures * 10, kernel_size=1, stride=1, padding=0)\n self.loss = loss # bits or nats\n\n def forward(self, px_z):\n xhat = self.out_conv(px_z)\n return xhat.permute(0, 2, 3, 1)\n\n def nll(self, pred_features, x_tgt):\n # return discretized_mix_logistic_loss(x=x, l=self.forward(pred_features), low_bit=self.H.dataset in ['ffhq_256'])\n params = self.forward(pred_features)\n return discretized_mix_logistic_loss(x=x_tgt, l=params, low_bit=False)\n\n def bpp(self, pred_features, x_tgt):\n nC = x_tgt.shape[1]\n assert nC == 3\n nll = self.nll(pred_features, x_tgt)\n bpp = nll * nC * math.log2(math.e)\n return bpp\n\n def loss_func(self, pred_features, x_tgt):\n if self.loss == 'nats':\n nll = self.nll(pred_features, x_tgt)\n stats = {\n 'loss': nll,\n 'nll': nll.detach().item()\n }\n elif self.loss == 'bits':\n bpp = self.bpp(pred_features, x_tgt)\n stats = {\n 'loss': bpp,\n 'bpp': bpp.detach().item()\n }\n return stats\n\n def sample(self, px_z):\n xhat = sample_from_discretized_mix_logistic(self.forward(px_z), self.num_mixtures)\n imgs = (xhat + 1.0) / 2\n imgs = imgs.detach().cpu().clone().clamp_(min=0, max=1).permute(0, 3, 1, 2)\n # xhat = xhat.mul_(255.0).round_().to(dtype=torch.uint8).numpy()\n # xhat = np.minimum(np.maximum(0.0, xhat), 255.0).astype(np.uint8)\n return imgs\n\n\n# class DiscreteLogisticMixture(nn.Module):\n# def __init__(self):\n# super().__init__()\n\n# def forward(self, pred, x_tgt):\n# # compute nll\n# pred = pred.permute(0, 2, 3, 1)\n# loss = discretized_mix_logistic_loss(x=x_tgt, l=pred, low_bit=False)\n# return loss\n\n# def sample(self, px_z):\n# xhat = 
sample_from_discretized_mix_logistic(self.forward(px_z), self.num_mixtures)\n# imgs = (xhat + 1.0) / 2\n# imgs = imgs.detach().cpu().clone().clamp_(min=0, max=1).permute(0, 3, 1, 2)\n# # xhat = xhat.mul_(255.0).round_().to(dtype=torch.uint8).numpy()\n# # xhat = np.minimum(np.maximum(0.0, xhat), 255.0).astype(np.uint8)\n# return imgs\n"
] | [
[
"torch.cuda.get_device_properties",
"torch.optim.lr_scheduler.LambdaLR",
"torch.max",
"torch.load",
"torch.utils.data.DataLoader",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.cuda.amp.autocast",
"torch.cuda.is_available",
"torch.device",
"torch.save",
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.optim.SGD",
"torch.optim.Adam",
"torch.cuda.amp.GradScaler",
"torch.distributed.is_available",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.set_device",
"torch.cuda.max_memory_allocated",
"torch.cuda.reset_peak_memory_stats",
"torch.argmax"
],
[
"numpy.arange"
],
[
"numpy.arange",
"torch.rand",
"numpy.random.uniform",
"numpy.clip"
],
[
"torch.sigmoid",
"torch.max",
"torch.empty",
"torch.zeros",
"torch.reshape",
"torch.min",
"torch.nn.Conv2d",
"torch.nn.functional.softplus",
"torch.tanh",
"torch.exp",
"torch.log",
"torch.nn.functional.one_hot",
"torch.logsumexp",
"torch.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
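The heart of discretized_mix_logistic_loss in logmixture.py is the per-sub-pixel bin probability: with intensities rescaled to [-1, 1] and 8-bit bins of width 2/255, a logistic component with mean mu and scale s assigns mass sigma((x - mu + 1/255)/s) - sigma((x - mu - 1/255)/s) to the bin containing x, and the two edge bins integrate out to plus or minus infinity. A scalar sketch for a single component (no mixture weights, no channel coupling, no tensor batching):

import math

def sigmoid(z):
    return 1.0 / (1.0 + math.exp(-z))

def bin_log_prob(x, mu, log_scale):
    # log P(bin containing x) under one logistic(mu, exp(log_scale)) component, x in [-1, 1]
    inv_s = math.exp(-log_scale)
    plus_in = inv_s * (x - mu + 1.0 / 255.0)
    min_in = inv_s * (x - mu - 1.0 / 255.0)
    if x < -0.999:                         # left edge bin: integrate down to -inf
        return math.log(sigmoid(plus_in))
    if x > 0.999:                          # right edge bin: integrate up to +inf
        return math.log(1.0 - sigmoid(min_in))
    return math.log(max(sigmoid(plus_in) - sigmoid(min_in), 1e-12))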
geophysics-ubonn/crtomo_tools | [
"a01b4d31d7250bc729605ae4dc035f108168128e"
] | [
"examples/01_modelling/plot_06_synthetic_4d.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nGenerating a 4D synthetic data set with noise.\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nA 2D space, time and frequency data set is generated for testing purposes in\nreda.\n\"\"\"\n###############################################################################\n# imports\nimport os\nfrom glob import glob\n\nimport numpy as np\n\nimport crtomo\nimport reda\n\n###############################################################################\n# Generate the forward models\nfrequencies = np.logspace(-3, 3, 5)\ngrid = crtomo.crt_grid(\n 'data_synthetic_4d/elem.dat', 'data_synthetic_4d/elec.dat'\n)\n\n# this context manager makes sure that all output is relative to the given\n# directory\nwith reda.CreateEnterDirectory('output_synthetic_4d'):\n for nr, anomaly_z_pos in enumerate(range(0, -10, -3)):\n outdir = 'modV_{:02}'.format(nr)\n if os.path.isdir(outdir):\n continue\n sinv = crtomo.eitMan(grid=grid, frequencies=frequencies)\n sinv.add_homogeneous_model(100, 0)\n sinv.set_area_to_single_colecole(\n 18, 22, anomaly_z_pos -2.0, anomaly_z_pos,\n [100, 0.1, 0.04, 0.6]\n )\n r = sinv.plot_forward_models()\n r['rmag']['fig'].savefig('forward_rmag_{:02}.pdf'.format(nr))\n r['rpha']['fig'].savefig('forward_rpha_{:02}.pdf'.format(nr))\n for f, td in sinv.tds.items():\n td.configs.gen_dipole_dipole(skipc=0, nr_voltage_dipoles=40)\n td.configs.gen_reciprocals(append=True)\n r = sinv.measurements()\n\n sinv.save_measurements_to_directory(outdir)\n\n # plot pseudosections\n Vdirs = sorted(glob('modV*'))\n for nr, Vdir in enumerate(Vdirs):\n seit = reda.sEIT()\n seit.import_crtomo(Vdir)\n seit.compute_K_analytical(spacing=1)\n seit.plot_pseudosections(\n 'r', return_fig=True\n ).savefig('ps_r_{:02}.jpg'.format(nr), dpi=300)\n seit.plot_pseudosections(\n 'rho_a', return_fig=True\n ).savefig('ps_rho_a_{:02}.jpg'.format(nr), dpi=300)\n seit.plot_pseudosections(\n 'rpha', return_fig=True\n ).savefig('ps_rpha_{:02}.jpg'.format(nr), dpi=300)\n\n\n###############################################################################\n# now generate noisy data\n\n# this context manager makes sure that all output is relative to the given\n# directory\nwith reda.CreateEnterDirectory('output_synthetic_4d'):\n Vdirs = sorted(glob('modV*'))\n for nr, Vdir in enumerate(Vdirs):\n seit = reda.sEIT()\n seit.import_crtomo(Vdir)\n seit.compute_K_analytical(spacing=1)\n # use different seeds for different time steps\n np.random.seed(34 + nr)\n noise = np.random.normal(loc=0, scale=1, size=seit.data.shape[0])\n r_save = seit.data['r'].values.copy()\n seit.data['r'] = r_save + noise * r_save / 8000.0 * np.log(seit.data['k'])\n seit.data['rho_a'] = seit.data['r'] * seit.data['k']\n seit.plot_pseudosections(\n 'rho_a', return_fig=True\n ).savefig('noisy_ps_rho_a_{:02}.jpg'.format(nr), dpi=300)\n rpha_save = seit.data['rpha'].values.copy()\n noise_rpha = np.random.normal(loc=0, scale=1, size=seit.data.shape[0])\n seit.data['rpha'] = rpha_save + noise_rpha * rpha_save / 10.0\n seit.plot_pseudosections(\n 'rpha', return_fig=True\n ).savefig('ps_rpha_{:02}.jpg'.format(nr), dpi=300)\n seit.export_to_crtomo_multi_frequency(Vdir + '_noisy')\n"
] | [
[
"numpy.logspace",
"numpy.random.normal",
"numpy.log",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
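The noise model in plot_06_synthetic_4d.py perturbs each resistance with a relative error that grows with the logarithm of the geometric factor K (the divisor 8000 and the 10 % phase error are the script's own scaling choices). Reduced to plain numpy with made-up values:

import numpy as np

rng = np.random.default_rng(34)
r = np.array([10.0, 5.0, 1.0])     # measured resistances [Ohm]
k = np.array([5.0, 50.0, 500.0])   # analytical geometric factors

r_noisy = r + rng.normal(size=r.size) * r / 8000.0 * np.log(k)
rho_a = r_noisy * k                # noisy apparent resistivities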
singh-karanpal/Capstone | [
"807ca3f70276a0dd17244a123a759a914d358424"
] | [
"src/models/model_evaluate.py"
] | [
"# author: Carlina Kim, Karanpal Singh, Sukriti Trehan, Victor Cuspinera\n# date: 2020-06-21\n\n'''This script will read the saved theme/subtheme model(s), padded validation sets and y validation sets for model evaluation, \nand will save the evaluation results in the specified directory.\n\nThere are 2 parameters Input Path and Output Path where you want to save the evaluation results.\n\nUsage: model_evaluate.py --level='theme' --output_dir=<destination_dir_path>\n\nExample:\n python src/models/model_evaluate.py --level='theme' --output_dir=reports/\n python src/models/model_evaluate.py --level='subtheme' --output_dir=reports/\n\nOptions:\n--input_dir=<input_dir_path> Directory name for the padded documents and embeddings\n--output_dir=<destination_dir_path> Directory for saving evaluated results\n'''\n\nimport pandas as pd\nimport numpy as np\nfrom docopt import docopt\n\nfrom sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score, precision_recall_curve\nimport matplotlib.pyplot as plt\n\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\nopt = docopt(__doc__)\n\nprint(\"\\n-----START: model_evaluate.py-----\\n\")\n\ndef main(level, output_dir):\n \"\"\"\n Takes the input level and calls model_evaluate class with \n output_dir as argument \n \"\"\"\n me = model_evaluate()\n me.get_evaluations(level=level, output_dir=output_dir)\n print('Thanks for your patience, the evaluation process has finished!\\n')\n print('----END: model_evaluate.py----\\n')\n return\n\nclass model_evaluate:\n # Loads data and evaluates saved theme model and subtheme models on validation set\n \n def eval_metrics(self, model_name, x_valid, y_valid, level='theme'):\n \"\"\"\n Evaluates model results on different threshold levels and produces data table/\n precision recall curves\n\n Parameters\n -----------\n model_name: (TensforFlow Saved model)\n x_valid: (pandas dataframe) dataframe with validation comments\n y_valid: (numpy array) array with labels\n level: (string) Takes value 'theme' or 'subtheme' to evaluate accordingly\n\n Returns\n -------\n Pandas DataFrame or matplotlib plot\n dataframe with evaluation metrics including precision, recall, f1 score at\n different threshold values\n \"\"\"\n pred_values = model_name.predict(x_valid)\n\n if level == 'theme':\n precision_dict = dict()\n recall_dict = dict()\n thresh_dict = dict()\n\n precision_dict[\"BiGRU + Fasttext\"], recall_dict[\"BiGRU + Fasttext\"], thresh_dict[\"BiGRU + Fasttext\"] = precision_recall_curve(y_valid.ravel(), pred_values.ravel())\n\n labels = []\n labels = list(precision_dict.keys())\n\n plt.figure()\n plt.step(recall_dict['BiGRU + Fasttext'], precision_dict['BiGRU + Fasttext'], where='post', color='orange')\n\n plt.xlabel('Recall', fontsize=18)\n plt.ylabel('Precision', fontsize=18)\n plt.axhline(y=0.743643, xmin=0, xmax=0.71, ls='--', color=\"cornflowerblue\")\n plt.axvline(x=0.705382, ymin=0, ymax=0.71, ls='--', color=\"cornflowerblue\")\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n plt.legend(labels, loc=(1.01, .79), prop=dict(size=14))\n plt.title('Precision Recall Curves for best performing model', fontsize = 18)\n plt.savefig('reports/figures/pr_curve_valid_theme.png')\n\n # PRECISION & RECALL\n predictions_results = []\n\n thresholds=[0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n\n for val in thresholds:\n pred=pred_values.copy()\n pred[pred>=val]=1\n pred[pred<val]=0\n\n accuracy = accuracy_score(y_valid, pred, normalize=True, 
sample_weight=None)\n precision = precision_score(y_valid, pred, average='micro')\n recall = recall_score(y_valid, pred, average='micro')\n f1 = f1_score(y_valid, pred, average='micro')\n case= {'Threshold': val,\n 'Accuracy': accuracy,\n 'Precision': precision,\n 'Recall': recall,\n 'F1-measure': f1}\n predictions_results.append(case)\n\n return pd.DataFrame(predictions_results)\n \n def get_evaluations(self, level, output_dir):\n \"\"\"\n Evaluates models by using eval_metrics function\n \"\"\"\n if level == 'theme':\n print(\"**Loading data**\")\n x_valid = np.load('data/interim/question1_models/advance/X_valid_padded.npy')\n y_valid = np.load('data/interim/question1_models/advance/y_valid.npy')\n print(\"**Loading the saved theme model**\")\n model = tf.keras.models.load_model('models/Theme_Model/theme_model')\n print(\"**Predicting on validation set using saved model and evaluating metrics**\")\n results = self.eval_metrics(model_name = model, x_valid = x_valid, y_valid = y_valid)\n print(\"**Saving results**\")\n results.to_csv(output_dir + '/tables/theme_tables/theme_valid_eval.csv')\n print(\"Evaluations saved to reports/\")\n\n else:\n print(\"Loading data and evaluating the subthemes model on validation set\")\n themes = ['CPD', 'CB', 'EWC', 'Exec', 'FWE',\n 'SP', 'RE', 'Sup', 'SW', 'TEPE', 'VMG', 'OTH']\n\n for label in themes:\n print(\"****Label:\", label, \"****\")\n print(\"**Loading data**\")\n x_valid = np.load('data/interim/subthemes/' + str(label) + '/X_valid_padded.npy')\n # self.x_valids.append(x_valid)\n y_valid = np.load('data/interim/subthemes/' + str(label) + '/y_valid.npy')\n # self.y_valids.append(y_valid)\n print(\"**Loading the saved subtheme model**\")\n model = tf.keras.models.load_model('models/Subtheme_Models/' + str(label).lower() + '_model')\n # self.models.append(model)\n print(\"**Predicting on validation set using saved model and evaluating metrics**\")\n results = self.eval_metrics(model_name = model, x_valid = x_valid, y_valid = y_valid, level = 'subtheme')\n print(\"**Saving results**\")\n results.to_csv(output_dir + '/tables/subtheme_tables' + str(label).lower() + '_valid_eval.csv')\n print(\"Process of subtheme\", label, \"model completed\\n\")\n print(\"Evaluations saved to reports/tables\")\n\nif __name__ == \"__main__\":\n main(opt[\"--level\"], opt[\"--output_dir\"])\n"
] | [
[
"matplotlib.pyplot.step",
"pandas.DataFrame",
"sklearn.metrics.f1_score",
"numpy.load",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"sklearn.metrics.precision_score",
"matplotlib.pyplot.savefig",
"sklearn.metrics.recall_score",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.axvline",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.keras.models.load_model",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ImageMarkup/isic-archive | [
"d221af3368baf3a06ecab67e69e9d0077426c8f9"
] | [
"isic_archive/models/segmentation_helpers/scikit.py"
] | [
"import collections\nimport io\nfrom typing import BinaryIO, Tuple, Union\nimport warnings\n\nimport numpy\nimport skimage.io\nimport skimage.measure\nimport skimage.morphology\nimport skimage.segmentation\nimport skimage.transform\n\nfrom .base import BaseSegmentationHelper\n\n\nclass ScikitSegmentationHelper(BaseSegmentationHelper):\n @classmethod\n def loadImage(cls, imageDataStream: Union[BinaryIO, str]) -> numpy.ndarray:\n \"\"\"\n Load an image into an RGB array.\n\n :param imageDataStream: A file-like object containing the encoded\n (JPEG, etc.) image data or a file path.\n :return: A Numpy array with the RGB image data.\n \"\"\"\n imageData = skimage.io.imread(imageDataStream, plugin='pil')\n\n if len(imageData.shape) == 1 and imageData.shape[0] > 1:\n # Some images seem to have a 2nd (or 3rd+) layer, which should be ignored\n # https://github.com/scikit-image/scikit-image/issues/2154\n # The first element within the result should be the main image\n imageData = imageData[0]\n\n if len(imageData.shape) == 3 and imageData.shape[2] == 4:\n # cv2.floodFill doesn't work correctly with array views, so copy\n imageData = imageData[:, :, :3].copy()\n return imageData\n\n @classmethod\n def writeImage(cls, image, encoding='png', width=None):\n if width is not None:\n factor = float(width) / image.shape[1]\n image = skimage.transform.rescale(image, factor)\n\n imageStream = io.BytesIO()\n with warnings.catch_warnings():\n # Ignore warnings about low contrast images, as masks are often empty\n warnings.filterwarnings('ignore', r'^.* is a low contrast image$', UserWarning)\n # The 'pil' plugin is about 40% faster than the default 'imageio' plugin\n # The 'pil' plugin uses 'format_str' as an argument, not 'format'\n skimage.io.imsave(imageStream, image, plugin='pil', format_str=encoding)\n imageStream.seek(0)\n return imageStream\n\n @classmethod\n def segment(cls, image: numpy.ndarray, seedCoord: Tuple[int, int], tolerance: int\n ) -> numpy.ndarray:\n \"\"\"\n Do a flood-fill segmentation of an image, yielding a single contiguous region with no holes.\n\n :param image: A Numpy array with the image to be segmented.\n :param seedCoord: (X, Y) coordinates of the segmentation seed point.\n :param tolerance: The intensity tolerance value for the segmentation.\n :return: The mask image of the segmented region, with values 0 or 255.\n \"\"\"\n maskImage = cls._floodFill(\n image,\n seedCoord,\n tolerance)\n\n # Now, fill in any holes in the maskImage\n # First, add a padded border, allowing the next operation to reach\n # around edge-touching components\n maskImage = numpy.pad(maskImage, 1, 'constant', constant_values=1)\n maskImageBackground = cls._floodFill(\n maskImage,\n # The seed point is a part of the padded border of maskImage\n seedCoord=(0, 0),\n # The seed point and border will have a value of 1, but we want to\n # also include the actual mask background, which has a value of 0\n tolerance=1)\n # Remove the extra padding\n maskImageBackground = maskImageBackground[1:-1, 1:-1]\n # Flip the background, to get the mask with holes removed\n maskImage = numpy.invert(maskImageBackground)\n\n return maskImage\n\n @classmethod\n def _clippedAdd(cls, array, value):\n typeInfo = numpy.iinfo(array.dtype)\n newArray = array.astype(int)\n newArray += value\n return newArray.clip(typeInfo.min, typeInfo.max).astype(array.dtype)\n\n @classmethod\n def _floodFill(\n cls, image: numpy.ndarray, seedCoord: Tuple[int, int], tolerance: int,\n connectivity: int = 8) -> numpy.ndarray:\n \"\"\"\n Segment an image 
into a region connected to a seed point, using OpenCV.\n\n :param image: The image to be segmented.\n :param seedCoord: The point inside the connected region where the\n segmentation will start.\n :param tolerance: The maximum color/intensity difference between the\n seed point and a point in the connected region.\n :param connectivity: (optional) The number of allowed connectivity\n propagation directions. Allowed values are:\n * 4 for edge pixels\n * 8 for edge and corner pixels\n :returns: A binary label mask, with an extra 1-pixel wide padded border.\n The values are either ``0`` or ``fillValue``.\n \"\"\"\n seedValue = image[seedCoord[1], seedCoord[0]]\n seedValueMin = cls._clippedAdd(seedValue, -tolerance)\n seedValueMax = cls._clippedAdd(seedValue, tolerance)\n\n if connectivity == 4:\n connectivityArg = 1\n elif connectivity == 8:\n connectivityArg = 2\n else:\n raise ValueError('Unknown connectivity value.')\n\n binaryImage = numpy.logical_and(\n image >= seedValueMin,\n image <= seedValueMax\n )\n if len(image.shape) == 3:\n # Reduce RGB components, requiring all to be within threshold\n binaryImage = numpy.all(binaryImage, 2)\n\n labelImage = skimage.measure.label(\n binaryImage.astype(int),\n return_num=False,\n connectivity=connectivityArg\n )\n del binaryImage\n\n maskImage = numpy.equal(\n labelImage, labelImage[seedCoord[1], seedCoord[0]])\n del labelImage\n maskImage = maskImage.astype(numpy.uint8) * 255\n\n return maskImage\n\n @classmethod\n def _structuringElement(cls, shape, radius, elementType=bool):\n size = (radius * 2) + 1\n\n if shape == 'circle':\n element = skimage.morphology.disk(radius, elementType)\n elif shape == 'cross':\n element = numpy.zeros((size, size), elementType)\n element[:, size // 2] = elementType(True)\n element[size // 2, :] = elementType(True)\n elif shape == 'square':\n element = skimage.morphology.square(size, elementType)\n else:\n raise ValueError('Unknown element shape value.')\n\n return element\n\n @classmethod\n def _binaryOpening(cls, image, elementShape='circle', elementRadius=5):\n element = cls._structuringElement(elementShape, elementRadius, bool)\n\n morphedImage = skimage.morphology.binary_opening(\n image=image,\n selem=element\n )\n return morphedImage\n\n @classmethod\n def _collapseCoords(cls, coords):\n collapsedCoords = [coords[0]]\n collapsedCoords.extend([\n coord\n for prevCoord, coord, nextCoord in zip(\n coords[0:], coords[1:], coords[2:])\n if numpy.cross(nextCoord - prevCoord, coord - prevCoord) != 0\n ])\n collapsedCoords.append(coords[-1])\n collapsedCoords = numpy.array(collapsedCoords)\n return collapsedCoords\n\n @classmethod\n def maskToContour(cls, maskImage: numpy.ndarray) -> numpy.ndarray:\n \"\"\"\n Extract the contour line within a segmented label mask, using Scikit-Image.\n\n :param maskImage: A binary label mask of numpy.uint8.\n :return: An array of point pairs.\n \"\"\"\n if maskImage.dtype != numpy.uint8:\n raise TypeError('maskImage must be an array of uint8.')\n\n coords = skimage.measure.find_contours(\n # TODO: threshold image more efficiently\n array=maskImage.astype(bool).astype(numpy.double),\n level=0.5,\n fully_connected='low',\n positive_orientation='low'\n )\n coords = numpy.fliplr(coords[0])\n coords = cls._collapseCoords(coords)\n return coords\n\n @classmethod\n def contourToMask(cls, imageShape: Tuple[int, int], coords: numpy.ndarray) -> numpy.ndarray:\n \"\"\"\n Convert a contour line to a label mask.\n\n :param imageShape: The [Y, X] shape of the image.\n :param coords: An array of 
point pairs.\n :return: A binary label mask of numpy.uint8.\n \"\"\"\n maskImage = skimage.measure.grid_points_in_poly(\n shape=imageShape,\n verts=numpy.fliplr(coords)\n ).astype(numpy.uint8)\n maskImage *= 255\n return maskImage\n\n @classmethod\n def _slic(cls, image, numSegments=None, segmentSize=None):\n compactness = 0.01 # make superpixels highly deformable\n maxIter = 10\n sigma = 2.0\n\n if numSegments and segmentSize:\n raise ValueError(\n 'Only one of numSegments or segmentSize may be set.')\n elif numSegments:\n pass\n elif segmentSize:\n numSegments = (image.shape[0] * image.shape[1]) / (segmentSize ** 2)\n else:\n raise ValueError('One of numSegments or segmentSize must be set.')\n\n labelImage = skimage.segmentation.slic(\n image,\n n_segments=numSegments,\n compactness=compactness,\n max_iter=maxIter,\n sigma=sigma,\n enforce_connectivity=True,\n min_size_factor=0.5,\n slic_zero=True\n )\n return labelImage\n\n class _PersistentCounter(object):\n def __init__(self):\n self.value = 0\n\n def __call__(self):\n ret = self.value\n self.value += 1\n return ret\n\n @classmethod\n def _uint64ToRGB(cls, val):\n return numpy.dstack((\n val.astype(numpy.uint8),\n (val >> numpy.uint64(8)).astype(numpy.uint8),\n (val >> numpy.uint64(16)).astype(numpy.uint8)\n ))\n\n @classmethod\n def _RGBTounit64(cls, val: numpy.ndarray) -> numpy.ndarray:\n \"\"\"\n Decode an RGB representation of a superpixel label into its native scalar value.\n\n :param val: A single pixel, or a 3-channel image.\n This is an numpy.ndarray of uint8, with a shape [3] or [n, m, 3].\n \"\"\"\n return \\\n (val[..., 0].astype(numpy.uint64)) + \\\n (val[..., 1].astype(numpy.uint64) << numpy.uint64(8)) + \\\n (val[..., 2].astype(numpy.uint64) << numpy.uint64(16))\n\n @classmethod\n def superpixels(cls, image):\n superpixelLabels = cls._slic(image, numSegments=1000)\n superpixels = cls._uint64ToRGB(superpixelLabels)\n return superpixels\n\n @classmethod\n def superpixels_legacy(cls, image, coords):\n maskImage = cls.contourToMask(image.shape[:2], coords)\n\n from .opencv import OpenCVSegmentationHelper\n # This operation is much faster in OpenCV\n maskImage = OpenCVSegmentationHelper._binaryOpening(\n maskImage.astype(numpy.uint8),\n elementShape='circle',\n elementRadius=5\n ).astype(bool)\n\n insideImage = image.copy()\n insideImage[numpy.logical_not(maskImage)] = 0\n insideSuperpixelLabels = cls._slic(insideImage, segmentSize=20)\n\n outsideImage = image.copy()\n outsideImage[maskImage] = 0\n outsideSuperpixelLabels = cls._slic(outsideImage, segmentSize=60)\n\n # https://stackoverflow.com/questions/16210738/implementation-of-numpy-in1d-for-2d-arrays\n insideSuperpixelMask = numpy.in1d(\n insideSuperpixelLabels.flat,\n numpy.unique(insideSuperpixelLabels[maskImage])\n ).reshape(insideSuperpixelLabels.shape)\n\n combinedSuperpixelLabels = outsideSuperpixelLabels.copy()\n combinedSuperpixelLabels[insideSuperpixelMask] = \\\n insideSuperpixelLabels[insideSuperpixelMask] + \\\n outsideSuperpixelLabels.max() + 10000\n\n labelValues = collections.defaultdict(cls._PersistentCounter())\n for value in numpy.nditer(combinedSuperpixelLabels,\n op_flags=['readwrite']):\n value[...] = labelValues[value.item()]\n\n combinedSuperpixels = cls._uint64ToRGB(combinedSuperpixelLabels)\n return combinedSuperpixels\n"
] | [
[
"numpy.logical_not",
"numpy.pad",
"numpy.invert",
"numpy.nditer",
"numpy.fliplr",
"numpy.unique",
"numpy.all",
"numpy.uint64",
"numpy.iinfo",
"numpy.equal",
"numpy.cross",
"numpy.array",
"numpy.logical_and",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yypurpose/mmdetection | [
"ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c",
"ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c",
"ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c",
"ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c"
] | [
"mmdet/models/backbones/res2net.py",
"mmdet/models/roi_heads/mask_scoring_roi_head.py",
"mmdet/models/roi_heads/test_mixins.py",
"mmdet/models/necks/nas_fpn.py"
] | [
"import math\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.utils.checkpoint as cp\r\nfrom mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,\r\n kaiming_init)\r\nfrom mmcv.runner import load_checkpoint\r\nfrom torch.nn.modules.batchnorm import _BatchNorm\r\n\r\nfrom mmdet.utils import get_root_logger\r\nfrom ..builder import BACKBONES\r\nfrom .resnet import Bottleneck as _Bottleneck\r\nfrom .resnet import ResNet\r\n\r\n\r\nclass Bottle2neck(_Bottleneck):\r\n expansion = 4\r\n\r\n def __init__(self,\r\n inplanes,\r\n planes,\r\n scales=4,\r\n base_width=26,\r\n base_channels=64,\r\n stage_type='normal',\r\n **kwargs):\r\n \"\"\"Bottle2neck block for Res2Net.\r\n\r\n If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\r\n it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\r\n \"\"\"\r\n super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)\r\n assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'\r\n width = int(math.floor(self.planes * (base_width / base_channels)))\r\n\r\n self.norm1_name, norm1 = build_norm_layer(\r\n self.norm_cfg, width * scales, postfix=1)\r\n self.norm3_name, norm3 = build_norm_layer(\r\n self.norm_cfg, self.planes * self.expansion, postfix=3)\r\n\r\n self.conv1 = build_conv_layer(\r\n self.conv_cfg,\r\n self.inplanes,\r\n width * scales,\r\n kernel_size=1,\r\n stride=self.conv1_stride,\r\n bias=False)\r\n self.add_module(self.norm1_name, norm1)\r\n\r\n if stage_type == 'stage' and self.conv2_stride != 1:\r\n self.pool = nn.AvgPool2d(\r\n kernel_size=3, stride=self.conv2_stride, padding=1)\r\n convs = []\r\n bns = []\r\n\r\n fallback_on_stride = False\r\n if self.with_dcn:\r\n fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\r\n if not self.with_dcn or fallback_on_stride:\r\n for i in range(scales - 1):\r\n convs.append(\r\n build_conv_layer(\r\n self.conv_cfg,\r\n width,\r\n width,\r\n kernel_size=3,\r\n stride=self.conv2_stride,\r\n padding=self.dilation,\r\n dilation=self.dilation,\r\n bias=False))\r\n bns.append(\r\n build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])\r\n self.convs = nn.ModuleList(convs)\r\n self.bns = nn.ModuleList(bns)\r\n else:\r\n assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\r\n for i in range(scales - 1):\r\n convs.append(\r\n build_conv_layer(\r\n self.dcn,\r\n width,\r\n width,\r\n kernel_size=3,\r\n stride=self.conv2_stride,\r\n padding=self.dilation,\r\n dilation=self.dilation,\r\n bias=False))\r\n bns.append(\r\n build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])\r\n self.convs = nn.ModuleList(convs)\r\n self.bns = nn.ModuleList(bns)\r\n\r\n self.conv3 = build_conv_layer(\r\n self.conv_cfg,\r\n width * scales,\r\n self.planes * self.expansion,\r\n kernel_size=1,\r\n bias=False)\r\n self.add_module(self.norm3_name, norm3)\r\n\r\n self.stage_type = stage_type\r\n self.scales = scales\r\n self.width = width\r\n delattr(self, 'conv2')\r\n delattr(self, self.norm2_name)\r\n\r\n def forward(self, x):\r\n \"\"\"Forward function.\"\"\"\r\n\r\n def _inner_forward(x):\r\n identity = x\r\n\r\n out = self.conv1(x)\r\n out = self.norm1(out)\r\n out = self.relu(out)\r\n\r\n if self.with_plugins:\r\n out = self.forward_plugin(out, self.after_conv1_plugin_names)\r\n\r\n spx = torch.split(out, self.width, 1)\r\n sp = self.convs[0](spx[0].contiguous())\r\n sp = self.relu(self.bns[0](sp))\r\n out = sp\r\n for i in range(1, self.scales - 1):\r\n if self.stage_type == 'stage':\r\n sp = spx[i]\r\n else:\r\n sp = sp + 
spx[i]\r\n sp = self.convs[i](sp.contiguous())\r\n sp = self.relu(self.bns[i](sp))\r\n out = torch.cat((out, sp), 1)\r\n\r\n if self.stage_type == 'normal' or self.conv2_stride == 1:\r\n out = torch.cat((out, spx[self.scales - 1]), 1)\r\n elif self.stage_type == 'stage':\r\n out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)\r\n\r\n if self.with_plugins:\r\n out = self.forward_plugin(out, self.after_conv2_plugin_names)\r\n\r\n out = self.conv3(out)\r\n out = self.norm3(out)\r\n\r\n if self.with_plugins:\r\n out = self.forward_plugin(out, self.after_conv3_plugin_names)\r\n\r\n if self.downsample is not None:\r\n identity = self.downsample(x)\r\n\r\n out += identity\r\n\r\n return out\r\n\r\n if self.with_cp and x.requires_grad:\r\n out = cp.checkpoint(_inner_forward, x)\r\n else:\r\n out = _inner_forward(x)\r\n\r\n out = self.relu(out)\r\n\r\n return out\r\n\r\n\r\nclass Res2Layer(nn.Sequential):\r\n \"\"\"Res2Layer to build Res2Net style backbone.\r\n\r\n Args:\r\n block (nn.Module): block used to build ResLayer.\r\n inplanes (int): inplanes of block.\r\n planes (int): planes of block.\r\n num_blocks (int): number of blocks.\r\n stride (int): stride of the first block. Default: 1\r\n avg_down (bool): Use AvgPool instead of stride conv when\r\n downsampling in the bottle2neck. Default: False\r\n conv_cfg (dict): dictionary to construct and config conv layer.\r\n Default: None\r\n norm_cfg (dict): dictionary to construct and config norm layer.\r\n Default: dict(type='BN')\r\n scales (int): Scales used in Res2Net. Default: 4\r\n base_width (int): Basic width of each scale. Default: 26\r\n \"\"\"\r\n\r\n def __init__(self,\r\n block,\r\n inplanes,\r\n planes,\r\n num_blocks,\r\n stride=1,\r\n avg_down=True,\r\n conv_cfg=None,\r\n norm_cfg=dict(type='BN'),\r\n scales=4,\r\n base_width=26,\r\n **kwargs):\r\n self.block = block\r\n\r\n downsample = None\r\n if stride != 1 or inplanes != planes * block.expansion:\r\n downsample = nn.Sequential(\r\n nn.AvgPool2d(\r\n kernel_size=stride,\r\n stride=stride,\r\n ceil_mode=True,\r\n count_include_pad=False),\r\n build_conv_layer(\r\n conv_cfg,\r\n inplanes,\r\n planes * block.expansion,\r\n kernel_size=1,\r\n stride=1,\r\n bias=False),\r\n build_norm_layer(norm_cfg, planes * block.expansion)[1],\r\n )\r\n\r\n layers = []\r\n layers.append(\r\n block(\r\n inplanes=inplanes,\r\n planes=planes,\r\n stride=stride,\r\n downsample=downsample,\r\n conv_cfg=conv_cfg,\r\n norm_cfg=norm_cfg,\r\n scales=scales,\r\n base_width=base_width,\r\n stage_type='stage',\r\n **kwargs))\r\n inplanes = planes * block.expansion\r\n for i in range(1, num_blocks):\r\n layers.append(\r\n block(\r\n inplanes=inplanes,\r\n planes=planes,\r\n stride=1,\r\n conv_cfg=conv_cfg,\r\n norm_cfg=norm_cfg,\r\n scales=scales,\r\n base_width=base_width,\r\n **kwargs))\r\n super(Res2Layer, self).__init__(*layers)\r\n\r\n\r\[email protected]_module()\r\nclass Res2Net(ResNet):\r\n \"\"\"Res2Net backbone.\r\n\r\n Args:\r\n scales (int): Scales used in Res2Net. Default: 4\r\n base_width (int): Basic width of each scale. Default: 26\r\n depth (int): Depth of res2net, from {50, 101, 152}.\r\n in_channels (int): Number of input image channels. Default: 3.\r\n num_stages (int): Res2net stages. Default: 4.\r\n strides (Sequence[int]): Strides of the first block of each stage.\r\n dilations (Sequence[int]): Dilation of each stage.\r\n out_indices (Sequence[int]): Output from which stages.\r\n style (str): `pytorch` or `caffe`. 
If set to \"pytorch\", the stride-two\r\n layer is the 3x3 conv layer, otherwise the stride-two layer is\r\n the first 1x1 conv layer.\r\n deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv\r\n avg_down (bool): Use AvgPool instead of stride conv when\r\n downsampling in the bottle2neck.\r\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\r\n -1 means not freezing any parameters.\r\n norm_cfg (dict): Dictionary to construct and config norm layer.\r\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\r\n freeze running stats (mean and var). Note: Effect on Batch Norm\r\n and its variants only.\r\n plugins (list[dict]): List of plugins for stages, each dict contains:\r\n\r\n - cfg (dict, required): Cfg dict to build plugin.\r\n - position (str, required): Position inside block to insert\r\n plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.\r\n - stages (tuple[bool], optional): Stages to apply plugin, length\r\n should be same as 'num_stages'.\r\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\r\n memory while slowing down the training speed.\r\n zero_init_residual (bool): Whether to use zero init for last norm layer\r\n in resblocks to let them behave as identity.\r\n\r\n Example:\r\n >>> from mmdet.models import Res2Net\r\n >>> import torch\r\n >>> self = Res2Net(depth=50, scales=4, base_width=26)\r\n >>> self.eval()\r\n >>> inputs = torch.rand(1, 3, 32, 32)\r\n >>> level_outputs = self.forward(inputs)\r\n >>> for level_out in level_outputs:\r\n ... print(tuple(level_out.shape))\r\n (1, 256, 8, 8)\r\n (1, 512, 4, 4)\r\n (1, 1024, 2, 2)\r\n (1, 2048, 1, 1)\r\n \"\"\"\r\n\r\n arch_settings = {\r\n 50: (Bottle2neck, (3, 4, 6, 3)),\r\n 101: (Bottle2neck, (3, 4, 23, 3)),\r\n 152: (Bottle2neck, (3, 8, 36, 3))\r\n }\r\n\r\n def __init__(self,\r\n scales=4,\r\n base_width=26,\r\n style='pytorch',\r\n deep_stem=True,\r\n avg_down=True,\r\n **kwargs):\r\n self.scales = scales\r\n self.base_width = base_width\r\n super(Res2Net, self).__init__(\r\n style='pytorch', deep_stem=True, avg_down=True, **kwargs)\r\n\r\n def make_res_layer(self, **kwargs):\r\n return Res2Layer(\r\n scales=self.scales,\r\n base_width=self.base_width,\r\n base_channels=self.base_channels,\r\n **kwargs)\r\n\r\n def init_weights(self, pretrained=None):\r\n \"\"\"Initialize the weights in backbone.\r\n\r\n Args:\r\n pretrained (str, optional): Path to pre-trained weights.\r\n Defaults to None.\r\n \"\"\"\r\n if isinstance(pretrained, str):\r\n logger = get_root_logger()\r\n load_checkpoint(self, pretrained, strict=False, logger=logger)\r\n elif pretrained is None:\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n kaiming_init(m)\r\n elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\r\n constant_init(m, 1)\r\n\r\n if self.dcn is not None:\r\n for m in self.modules():\r\n if isinstance(m, Bottle2neck):\r\n # dcn in Res2Net bottle2neck is in ModuleList\r\n for n in m.convs:\r\n if hasattr(n, 'conv_offset'):\r\n constant_init(n.conv_offset, 0)\r\n\r\n if self.zero_init_residual:\r\n for m in self.modules():\r\n if isinstance(m, Bottle2neck):\r\n constant_init(m.norm3, 0)\r\n else:\r\n raise TypeError('pretrained must be a str or None')\r\n",
"import torch\r\n\r\nfrom mmdet.core import bbox2roi\r\nfrom ..builder import HEADS, build_head\r\nfrom .standard_roi_head import StandardRoIHead\r\n\r\n\r\[email protected]_module()\r\nclass MaskScoringRoIHead(StandardRoIHead):\r\n \"\"\"Mask Scoring RoIHead for Mask Scoring RCNN.\r\n\r\n https://arxiv.org/abs/1903.00241\r\n \"\"\"\r\n\r\n def __init__(self, mask_iou_head, **kwargs):\r\n assert mask_iou_head is not None\r\n super(MaskScoringRoIHead, self).__init__(**kwargs)\r\n self.mask_iou_head = build_head(mask_iou_head)\r\n\r\n def init_weights(self, pretrained):\r\n \"\"\"Initialize the weights in head.\r\n\r\n Args:\r\n pretrained (str, optional): Path to pre-trained weights.\r\n Defaults to None.\r\n \"\"\"\r\n super(MaskScoringRoIHead, self).init_weights(pretrained)\r\n self.mask_iou_head.init_weights()\r\n\r\n def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,\r\n img_metas):\r\n \"\"\"Run forward function and calculate loss for Mask head in\r\n training.\"\"\"\r\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\r\n mask_results = super(MaskScoringRoIHead,\r\n self)._mask_forward_train(x, sampling_results,\r\n bbox_feats, gt_masks,\r\n img_metas)\r\n if mask_results['loss_mask'] is None:\r\n return mask_results\r\n\r\n # mask iou head forward and loss\r\n pos_mask_pred = mask_results['mask_pred'][\r\n range(mask_results['mask_pred'].size(0)), pos_labels]\r\n mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],\r\n pos_mask_pred)\r\n pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),\r\n pos_labels]\r\n\r\n mask_iou_targets = self.mask_iou_head.get_targets(\r\n sampling_results, gt_masks, pos_mask_pred,\r\n mask_results['mask_targets'], self.train_cfg)\r\n loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred,\r\n mask_iou_targets)\r\n mask_results['loss_mask'].update(loss_mask_iou)\r\n return mask_results\r\n\r\n def simple_test_mask(self,\r\n x,\r\n img_metas,\r\n det_bboxes,\r\n det_labels,\r\n rescale=False):\r\n \"\"\"Obtain mask prediction without augmentation.\"\"\"\r\n # image shapes of images in the batch\r\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\r\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\r\n\r\n num_imgs = len(det_bboxes)\r\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\r\n num_classes = self.mask_head.num_classes\r\n segm_results = [[[] for _ in range(num_classes)]\r\n for _ in range(num_imgs)]\r\n mask_scores = [[[] for _ in range(num_classes)]\r\n for _ in range(num_imgs)]\r\n else:\r\n # if det_bboxes is rescaled to the original image size, we need to\r\n # rescale it back to the testing scale to obtain RoIs.\r\n if rescale and not isinstance(scale_factors[0], float):\r\n scale_factors = [\r\n torch.from_numpy(scale_factor).to(det_bboxes[0].device)\r\n for scale_factor in scale_factors\r\n ]\r\n _bboxes = [\r\n det_bboxes[i][:, :4] *\r\n scale_factors[i] if rescale else det_bboxes[i]\r\n for i in range(num_imgs)\r\n ]\r\n mask_rois = bbox2roi(_bboxes)\r\n mask_results = self._mask_forward(x, mask_rois)\r\n concat_det_labels = torch.cat(det_labels)\r\n # get mask scores with mask iou head\r\n mask_feats = mask_results['mask_feats']\r\n mask_pred = mask_results['mask_pred']\r\n mask_iou_pred = self.mask_iou_head(\r\n mask_feats, mask_pred[range(concat_det_labels.size(0)),\r\n concat_det_labels])\r\n # split batch mask prediction back to each image\r\n num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes)\r\n mask_preds = 
mask_pred.split(num_bboxes_per_img, 0)\r\n mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0)\r\n\r\n # apply mask post-processing to each image individually\r\n segm_results = []\r\n mask_scores = []\r\n for i in range(num_imgs):\r\n if det_bboxes[i].shape[0] == 0:\r\n segm_results.append(\r\n [[] for _ in range(self.mask_head.num_classes)])\r\n mask_scores.append(\r\n [[] for _ in range(self.mask_head.num_classes)])\r\n else:\r\n segm_result = self.mask_head.get_seg_masks(\r\n mask_preds[i], _bboxes[i], det_labels[i],\r\n self.test_cfg, ori_shapes[i], scale_factors[i],\r\n rescale)\r\n # get mask scores with mask iou head\r\n mask_score = self.mask_iou_head.get_mask_scores(\r\n mask_iou_preds[i], det_bboxes[i], det_labels[i])\r\n segm_results.append(segm_result)\r\n mask_scores.append(mask_score)\r\n return list(zip(segm_results, mask_scores))\r\n",
"import logging\r\nimport sys\r\n\r\nimport torch\r\n\r\nfrom mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,\r\n merge_aug_masks, multiclass_nms)\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nif sys.version_info >= (3, 7):\r\n from mmdet.utils.contextmanagers import completed\r\n\r\n\r\nclass BBoxTestMixin(object):\r\n\r\n if sys.version_info >= (3, 7):\r\n\r\n async def async_test_bboxes(self,\r\n x,\r\n img_metas,\r\n proposals,\r\n rcnn_test_cfg,\r\n rescale=False,\r\n bbox_semaphore=None,\r\n global_lock=None):\r\n \"\"\"Asynchronized test for box head without augmentation.\"\"\"\r\n rois = bbox2roi(proposals)\r\n roi_feats = self.bbox_roi_extractor(\r\n x[:len(self.bbox_roi_extractor.featmap_strides)], rois)\r\n if self.with_shared_head:\r\n roi_feats = self.shared_head(roi_feats)\r\n sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)\r\n\r\n async with completed(\r\n __name__, 'bbox_head_forward',\r\n sleep_interval=sleep_interval):\r\n cls_score, bbox_pred = self.bbox_head(roi_feats)\r\n\r\n img_shape = img_metas[0]['img_shape']\r\n scale_factor = img_metas[0]['scale_factor']\r\n det_bboxes, det_labels = self.bbox_head.get_bboxes(\r\n rois,\r\n cls_score,\r\n bbox_pred,\r\n img_shape,\r\n scale_factor,\r\n rescale=rescale,\r\n cfg=rcnn_test_cfg)\r\n return det_bboxes, det_labels\r\n\r\n def simple_test_bboxes(self,\r\n x,\r\n img_metas,\r\n proposals,\r\n rcnn_test_cfg,\r\n rescale=False):\r\n \"\"\"Test only det bboxes without augmentation.\"\"\"\r\n rois = bbox2roi(proposals)\r\n bbox_results = self._bbox_forward(x, rois)\r\n img_shapes = tuple(meta['img_shape'] for meta in img_metas)\r\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\r\n\r\n # split batch bbox prediction back to each image\r\n cls_score = bbox_results['cls_score']\r\n bbox_pred = bbox_results['bbox_pred']\r\n num_proposals_per_img = tuple(len(p) for p in proposals)\r\n rois = rois.split(num_proposals_per_img, 0)\r\n cls_score = cls_score.split(num_proposals_per_img, 0)\r\n\r\n # some detector with_reg is False, bbox_pred will be None\r\n if bbox_pred is not None:\r\n # the bbox prediction of some detectors like SABL is not Tensor\r\n if isinstance(bbox_pred, torch.Tensor):\r\n bbox_pred = bbox_pred.split(num_proposals_per_img, 0)\r\n else:\r\n bbox_pred = self.bbox_head.bbox_pred_split(\r\n bbox_pred, num_proposals_per_img)\r\n else:\r\n bbox_pred = (None, ) * len(proposals)\r\n\r\n # apply bbox post-processing to each image individually\r\n det_bboxes = []\r\n det_labels = []\r\n for i in range(len(proposals)):\r\n det_bbox, det_label = self.bbox_head.get_bboxes(\r\n rois[i],\r\n cls_score[i],\r\n bbox_pred[i],\r\n img_shapes[i],\r\n scale_factors[i],\r\n rescale=rescale,\r\n cfg=rcnn_test_cfg)\r\n det_bboxes.append(det_bbox)\r\n det_labels.append(det_label)\r\n return det_bboxes, det_labels\r\n\r\n def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):\r\n \"\"\"Test det bboxes with test time augmentation.\"\"\"\r\n aug_bboxes = []\r\n aug_scores = []\r\n for x, img_meta in zip(feats, img_metas):\r\n # only one image in the batch\r\n img_shape = img_meta[0]['img_shape']\r\n scale_factor = img_meta[0]['scale_factor']\r\n flip = img_meta[0]['flip']\r\n flip_direction = img_meta[0]['flip_direction']\r\n # TODO more flexible\r\n proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\r\n scale_factor, flip, flip_direction)\r\n rois = bbox2roi([proposals])\r\n bbox_results = self._bbox_forward(x, rois)\r\n bboxes, scores = 
self.bbox_head.get_bboxes(\r\n rois,\r\n bbox_results['cls_score'],\r\n bbox_results['bbox_pred'],\r\n img_shape,\r\n scale_factor,\r\n rescale=False,\r\n cfg=None)\r\n aug_bboxes.append(bboxes)\r\n aug_scores.append(scores)\r\n # after merging, bboxes will be rescaled to the original image size\r\n merged_bboxes, merged_scores = merge_aug_bboxes(\r\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\r\n det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\r\n rcnn_test_cfg.score_thr,\r\n rcnn_test_cfg.nms,\r\n rcnn_test_cfg.max_per_img)\r\n return det_bboxes, det_labels\r\n\r\n\r\nclass MaskTestMixin(object):\r\n\r\n if sys.version_info >= (3, 7):\r\n\r\n async def async_test_mask(self,\r\n x,\r\n img_metas,\r\n det_bboxes,\r\n det_labels,\r\n rescale=False,\r\n mask_test_cfg=None):\r\n \"\"\"Asynchronized test for mask head without augmentation.\"\"\"\r\n # image shape of the first image in the batch (only one)\r\n ori_shape = img_metas[0]['ori_shape']\r\n scale_factor = img_metas[0]['scale_factor']\r\n if det_bboxes.shape[0] == 0:\r\n segm_result = [[] for _ in range(self.mask_head.num_classes)]\r\n else:\r\n if rescale and not isinstance(scale_factor,\r\n (float, torch.Tensor)):\r\n scale_factor = det_bboxes.new_tensor(scale_factor)\r\n _bboxes = (\r\n det_bboxes[:, :4] *\r\n scale_factor if rescale else det_bboxes)\r\n mask_rois = bbox2roi([_bboxes])\r\n mask_feats = self.mask_roi_extractor(\r\n x[:len(self.mask_roi_extractor.featmap_strides)],\r\n mask_rois)\r\n\r\n if self.with_shared_head:\r\n mask_feats = self.shared_head(mask_feats)\r\n if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):\r\n sleep_interval = mask_test_cfg['async_sleep_interval']\r\n else:\r\n sleep_interval = 0.035\r\n async with completed(\r\n __name__,\r\n 'mask_head_forward',\r\n sleep_interval=sleep_interval):\r\n mask_pred = self.mask_head(mask_feats)\r\n segm_result = self.mask_head.get_seg_masks(\r\n mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,\r\n scale_factor, rescale)\r\n return segm_result\r\n\r\n def simple_test_mask(self,\r\n x,\r\n img_metas,\r\n det_bboxes,\r\n det_labels,\r\n rescale=False):\r\n \"\"\"Simple test for mask head without augmentation.\"\"\"\r\n # image shapes of images in the batch\r\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\r\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\r\n num_imgs = len(det_bboxes)\r\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\r\n segm_results = [[[] for _ in range(self.mask_head.num_classes)]\r\n for _ in range(num_imgs)]\r\n else:\r\n # if det_bboxes is rescaled to the original image size, we need to\r\n # rescale it back to the testing scale to obtain RoIs.\r\n if rescale and not isinstance(scale_factors[0], float):\r\n scale_factors = [\r\n torch.from_numpy(scale_factor).to(det_bboxes[0].device)\r\n for scale_factor in scale_factors\r\n ]\r\n if torch.onnx.is_in_onnx_export():\r\n # avoid mask_pred.split with static number of prediction\r\n mask_preds = []\r\n _bboxes = []\r\n for i, boxes in enumerate(det_bboxes):\r\n boxes = boxes[:, :4]\r\n if rescale:\r\n boxes *= scale_factors[i]\r\n _bboxes.append(boxes)\r\n img_inds = boxes[:, :1].clone() * 0 + i\r\n mask_rois = torch.cat([img_inds, boxes], dim=-1)\r\n mask_result = self._mask_forward(x, mask_rois)\r\n mask_preds.append(mask_result['mask_pred'])\r\n else:\r\n _bboxes = [\r\n det_bboxes[i][:, :4] *\r\n scale_factors[i] if rescale else det_bboxes[i][:, :4]\r\n for i in range(len(det_bboxes))\r\n ]\r\n 
mask_rois = bbox2roi(_bboxes)\r\n mask_results = self._mask_forward(x, mask_rois)\r\n mask_pred = mask_results['mask_pred']\r\n # split batch mask prediction back to each image\r\n num_mask_roi_per_img = [\r\n det_bbox.shape[0] for det_bbox in det_bboxes\r\n ]\r\n mask_preds = mask_pred.split(num_mask_roi_per_img, 0)\r\n\r\n # apply mask post-processing to each image individually\r\n segm_results = []\r\n for i in range(num_imgs):\r\n if det_bboxes[i].shape[0] == 0:\r\n segm_results.append(\r\n [[] for _ in range(self.mask_head.num_classes)])\r\n else:\r\n segm_result = self.mask_head.get_seg_masks(\r\n mask_preds[i], _bboxes[i], det_labels[i],\r\n self.test_cfg, ori_shapes[i], scale_factors[i],\r\n rescale)\r\n segm_results.append(segm_result)\r\n return segm_results\r\n\r\n def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):\r\n \"\"\"Test for mask head with test time augmentation.\"\"\"\r\n if det_bboxes.shape[0] == 0:\r\n segm_result = [[] for _ in range(self.mask_head.num_classes)]\r\n else:\r\n aug_masks = []\r\n for x, img_meta in zip(feats, img_metas):\r\n img_shape = img_meta[0]['img_shape']\r\n scale_factor = img_meta[0]['scale_factor']\r\n flip = img_meta[0]['flip']\r\n flip_direction = img_meta[0]['flip_direction']\r\n _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\r\n scale_factor, flip, flip_direction)\r\n mask_rois = bbox2roi([_bboxes])\r\n mask_results = self._mask_forward(x, mask_rois)\r\n # convert to numpy array to save memory\r\n aug_masks.append(\r\n mask_results['mask_pred'].sigmoid().cpu().numpy())\r\n merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)\r\n\r\n ori_shape = img_metas[0][0]['ori_shape']\r\n segm_result = self.mask_head.get_seg_masks(\r\n merged_masks,\r\n det_bboxes,\r\n det_labels,\r\n self.test_cfg,\r\n ori_shape,\r\n scale_factor=1.0,\r\n rescale=False)\r\n return segm_result\r\n",
"import torch.nn as nn\r\nfrom mmcv.cnn import ConvModule, caffe2_xavier_init\r\nfrom mmcv.ops.merge_cells import GlobalPoolingCell, SumCell\r\n\r\nfrom ..builder import NECKS\r\n\r\n\r\[email protected]_module()\r\nclass NASFPN(nn.Module):\r\n \"\"\"NAS-FPN.\r\n\r\n Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture\r\n for Object Detection <https://arxiv.org/abs/1904.07392>`_\r\n\r\n Args:\r\n in_channels (List[int]): Number of input channels per scale.\r\n out_channels (int): Number of output channels (used at each scale)\r\n num_outs (int): Number of output scales.\r\n stack_times (int): The number of times the pyramid architecture will\r\n be stacked.\r\n start_level (int): Index of the start input backbone level used to\r\n build the feature pyramid. Default: 0.\r\n end_level (int): Index of the end input backbone level (exclusive) to\r\n build the feature pyramid. Default: -1, which means the last level.\r\n add_extra_convs (bool): It decides whether to add conv\r\n layers on top of the original feature maps. Default to False.\r\n If True, its actual mode is specified by `extra_convs_on_inputs`.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n in_channels,\r\n out_channels,\r\n num_outs,\r\n stack_times,\r\n start_level=0,\r\n end_level=-1,\r\n add_extra_convs=False,\r\n norm_cfg=None):\r\n super(NASFPN, self).__init__()\r\n assert isinstance(in_channels, list)\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.num_ins = len(in_channels) # num of input feature levels\r\n self.num_outs = num_outs # num of output feature levels\r\n self.stack_times = stack_times\r\n self.norm_cfg = norm_cfg\r\n\r\n if end_level == -1:\r\n self.backbone_end_level = self.num_ins\r\n assert num_outs >= self.num_ins - start_level\r\n else:\r\n # if end_level < inputs, no extra level is allowed\r\n self.backbone_end_level = end_level\r\n assert end_level <= len(in_channels)\r\n assert num_outs == end_level - start_level\r\n self.start_level = start_level\r\n self.end_level = end_level\r\n self.add_extra_convs = add_extra_convs\r\n\r\n # add lateral connections\r\n self.lateral_convs = nn.ModuleList()\r\n for i in range(self.start_level, self.backbone_end_level):\r\n l_conv = ConvModule(\r\n in_channels[i],\r\n out_channels,\r\n 1,\r\n norm_cfg=norm_cfg,\r\n act_cfg=None)\r\n self.lateral_convs.append(l_conv)\r\n\r\n # add extra downsample layers (stride-2 pooling or conv)\r\n extra_levels = num_outs - self.backbone_end_level + self.start_level\r\n self.extra_downsamples = nn.ModuleList()\r\n for i in range(extra_levels):\r\n extra_conv = ConvModule(\r\n out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)\r\n self.extra_downsamples.append(\r\n nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))\r\n\r\n # add NAS FPN connections\r\n self.fpn_stages = nn.ModuleList()\r\n for _ in range(self.stack_times):\r\n stage = nn.ModuleDict()\r\n # gp(p6, p4) -> p4_1\r\n stage['gp_64_4'] = GlobalPoolingCell(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n out_norm_cfg=norm_cfg)\r\n # sum(p4_1, p4) -> p4_2\r\n stage['sum_44_4'] = SumCell(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n out_norm_cfg=norm_cfg)\r\n # sum(p4_2, p3) -> p3_out\r\n stage['sum_43_3'] = SumCell(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n out_norm_cfg=norm_cfg)\r\n # sum(p3_out, p4_2) -> p4_out\r\n stage['sum_34_4'] = SumCell(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n out_norm_cfg=norm_cfg)\r\n # sum(p5, gp(p4_out, 
p3_out)) -> p5_out\r\n stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)\r\n stage['sum_55_5'] = SumCell(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n out_norm_cfg=norm_cfg)\r\n # sum(p7, gp(p5_out, p4_2)) -> p7_out\r\n stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)\r\n stage['sum_77_7'] = SumCell(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n out_norm_cfg=norm_cfg)\r\n # gp(p7_out, p5_out) -> p6_out\r\n stage['gp_75_6'] = GlobalPoolingCell(\r\n in_channels=out_channels,\r\n out_channels=out_channels,\r\n out_norm_cfg=norm_cfg)\r\n self.fpn_stages.append(stage)\r\n\r\n def init_weights(self):\r\n \"\"\"Initialize the weights of module.\"\"\"\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n caffe2_xavier_init(m)\r\n\r\n def forward(self, inputs):\r\n \"\"\"Forward function.\"\"\"\r\n # build P3-P5\r\n feats = [\r\n lateral_conv(inputs[i + self.start_level])\r\n for i, lateral_conv in enumerate(self.lateral_convs)\r\n ]\r\n # build P6-P7 on top of P5\r\n for downsample in self.extra_downsamples:\r\n feats.append(downsample(feats[-1]))\r\n\r\n p3, p4, p5, p6, p7 = feats\r\n\r\n for stage in self.fpn_stages:\r\n # gp(p6, p4) -> p4_1\r\n p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])\r\n # sum(p4_1, p4) -> p4_2\r\n p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])\r\n # sum(p4_2, p3) -> p3_out\r\n p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])\r\n # sum(p3_out, p4_2) -> p4_out\r\n p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])\r\n # sum(p5, gp(p4_out, p3_out)) -> p5_out\r\n p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])\r\n p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])\r\n # sum(p7, gp(p5_out, p4_2)) -> p7_out\r\n p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])\r\n p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])\r\n # gp(p7_out, p5_out) -> p6_out\r\n p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])\r\n\r\n return p3, p4, p5, p6, p7\r\n"
] | [
[
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.AvgPool2d",
"torch.utils.checkpoint.checkpoint",
"torch.split"
],
[
"torch.from_numpy",
"torch.cat"
],
[
"torch.onnx.is_in_onnx_export",
"torch.from_numpy",
"torch.cat"
],
[
"torch.nn.MaxPool2d",
"torch.nn.ModuleList",
"torch.nn.ModuleDict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
untzag/WrightTools | [
"05480d2f91ceeca422d9e5ac381fce1840207cb0",
"05480d2f91ceeca422d9e5ac381fce1840207cb0",
"05480d2f91ceeca422d9e5ac381fce1840207cb0"
] | [
"WrightTools/data/_data.py",
"WrightTools/data/_brunold.py",
"WrightTools/data/_tensor27.py"
] | [
"\"\"\"Central data class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport collections\nimport operator\nimport functools\nimport warnings\n\nimport numpy as np\n\nimport h5py\n\nimport scipy\nfrom scipy.interpolate import griddata, interp1d\n\nfrom .._group import Group\nfrom .. import collection as wt_collection\nfrom .. import exceptions as wt_exceptions\nfrom .. import kit as wt_kit\nfrom .. import units as wt_units\nfrom ._axis import Axis, identifier_to_operator\nfrom ._channel import Channel\nfrom ._constant import Constant\nfrom ._variable import Variable\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"Data\"]\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Data(Group):\n \"\"\"Multidimensional dataset.\"\"\"\n\n class_name = \"Data\"\n\n def __init__(self, *args, **kwargs):\n self._axes = []\n self._constants = []\n Group.__init__(self, *args, **kwargs)\n # populate axes, constants from attrs string\n for identifier in self.attrs.get(\"axes\", []):\n if hasattr(identifier, \"decode\"):\n identifier = identifier.decode()\n expression, units = identifier.split(\"{\")\n units = units.replace(\"}\", \"\").strip()\n if units == \"None\":\n units = None\n # Should not be needed for wt5 >= 1.0.3, kept for opening older wt5 files.\n for i in identifier_to_operator.keys():\n expression = expression.replace(i, identifier_to_operator[i])\n expression = expression.replace(\" \", \"\") # remove all whitespace\n axis = Axis(self, expression, units)\n self._axes.append(axis)\n for identifier in self.attrs.get(\"constants\", []):\n if hasattr(identifier, \"decode\"):\n identifier = identifier.decode()\n expression, units = identifier.split(\"{\")\n units = units.replace(\"}\", \"\").strip()\n if units == \"None\":\n units = None\n for i in identifier_to_operator.keys():\n expression = expression.replace(i, identifier_to_operator[i])\n expression = expression.replace(\" \", \"\") # remove all whitespace\n const = Constant(self, expression, units)\n self._constants.append(const)\n self._current_axis_identities_in_natural_namespace = []\n if self.file.mode is not None and self.file.mode != \"r\":\n self._on_constants_updated()\n self._on_axes_updated()\n # the following are populated if not already recorded\n self.channel_names\n self.source\n self.variable_names\n\n def __repr__(self) -> str:\n return \"<WrightTools.Data '{0}' {1} at {2}>\".format(\n self.natural_name, str(self.axis_names), \"::\".join([self.filepath, self.name])\n )\n\n @property\n def axes(self) -> tuple:\n return tuple(self._axes)\n\n @property\n def axis_expressions(self) -> tuple:\n \"\"\"Axis expressions.\"\"\"\n return tuple(a.expression for a in self._axes)\n\n @property\n def axis_names(self) -> tuple:\n \"\"\"Axis names.\"\"\"\n return tuple(a.natural_name for a in self._axes)\n\n @property\n def constants(self) -> tuple:\n return tuple(self._constants)\n\n @property\n def constant_expressions(self) -> tuple:\n \"\"\"Axis expressions.\"\"\"\n return tuple(a.expression for a in self._constants)\n\n @property\n def constant_names(self) -> tuple:\n \"\"\"Axis names.\"\"\"\n return tuple(a.natural_name for a in self._constants)\n\n @property\n def channel_names(self) -> tuple:\n \"\"\"Channel names.\"\"\"\n if \"channel_names\" not in self.attrs.keys():\n self.attrs[\"channel_names\"] = 
np.array([], dtype=\"S\")\n return tuple(s.decode() for s in self.attrs[\"channel_names\"])\n\n @channel_names.setter\n def channel_names(self, value):\n \"\"\"Set channel names.\"\"\"\n self.attrs[\"channel_names\"] = np.array(value, dtype=\"S\")\n\n @property\n def channels(self) -> tuple:\n \"\"\"Channels.\"\"\"\n return tuple(self[n] for n in self.channel_names)\n\n @property\n def datasets(self) -> tuple:\n \"\"\"Datasets.\"\"\"\n return tuple(v for _, v in self.items() if isinstance(v, h5py.Dataset))\n\n @property\n def kind(self):\n \"\"\"Kind.\"\"\"\n if \"kind\" not in self.attrs.keys():\n self.attrs[\"kind\"] = \"None\"\n value = self.attrs[\"kind\"]\n return value if not value == \"None\" else None\n\n @property\n def ndim(self) -> int:\n \"\"\"Get number of dimensions.\"\"\"\n try:\n assert self._ndim is not None\n except (AssertionError, AttributeError):\n if len(self.variables) == 0:\n self._ndim = 0\n else:\n self._ndim = self.variables[0].ndim\n finally:\n return self._ndim\n\n @property\n def shape(self) -> tuple:\n \"\"\"Shape.\"\"\"\n try:\n assert self._shape is not None\n except (AssertionError, AttributeError):\n self._shape = wt_kit.joint_shape(*self.variables)\n finally:\n return self._shape\n\n @property\n def size(self) -> int:\n \"\"\"Size.\"\"\"\n return functools.reduce(operator.mul, self.shape)\n\n @property\n def source(self):\n \"\"\"Source.\"\"\"\n if \"source\" not in self.attrs.keys():\n self.attrs[\"source\"] = \"None\"\n value = self.attrs[\"source\"]\n return value if not value == \"None\" else None\n\n @property\n def units(self) -> tuple:\n \"\"\"All axis units.\"\"\"\n return tuple(a.units for a in self._axes)\n\n @property\n def constant_units(self) -> tuple:\n \"\"\"All constant units.\"\"\"\n return tuple(a.units for a in self._constants)\n\n @property\n def variable_names(self) -> tuple:\n \"\"\"Variable names.\"\"\"\n if \"variable_names\" not in self.attrs.keys():\n self.attrs[\"variable_names\"] = np.array([], dtype=\"S\")\n return tuple(s.decode() for s in self.attrs[\"variable_names\"])\n\n @variable_names.setter\n def variable_names(self, value):\n \"\"\"Set variable names.\"\"\"\n self.attrs[\"variable_names\"] = np.array(value, dtype=\"S\")\n\n @property\n def variables(self) -> tuple:\n \"\"\"Variables.\"\"\"\n try:\n assert self._variables is not None\n except (AssertionError, AttributeError):\n self._variables = [self[n] for n in self.variable_names]\n finally:\n return tuple(self._variables)\n\n @property\n def _leaf(self):\n return \"{0} {1}\".format(self.natural_name, self.shape)\n\n def _on_axes_updated(self):\n \"\"\"Method to run when axes are changed in any way.\n\n Propagates updated axes properly.\n \"\"\"\n # update attrs\n self.attrs[\"axes\"] = np.array([a.identity.encode() for a in self._axes], dtype=\"S\")\n # remove old attributes\n while len(self._current_axis_identities_in_natural_namespace) > 0:\n key = self._current_axis_identities_in_natural_namespace.pop(0)\n try:\n delattr(self, key)\n except AttributeError:\n pass # already gone\n # populate new attributes\n for a in self._axes:\n key = a.natural_name\n setattr(self, key, a)\n self._current_axis_identities_in_natural_namespace.append(key)\n\n def _on_constants_updated(self):\n \"\"\"Method to run when constants are changed in any way.\n\n Propagates updated constants properly.\n \"\"\"\n # update attrs\n self.attrs[\"constants\"] = np.array(\n [a.identity.encode() for a in self._constants], dtype=\"S\"\n )\n\n def _print_branch(self, prefix, depth, verbose):\n 
def print_leaves(prefix, lis, vline=True):\n for i, item in enumerate(lis):\n if vline:\n a = \"│ \"\n else:\n a = \" \"\n if i + 1 == len(lis):\n b = \"└── \"\n else:\n b = \"├── \"\n s = prefix + a + b + \"{0}: {1}\".format(i, item._leaf)\n print(s)\n\n if verbose:\n # axes\n print(prefix + \"├── axes\")\n print_leaves(prefix, self.axes)\n # constants\n print(prefix + \"├── constants\")\n print_leaves(prefix, self.constants)\n # variables\n print(prefix + \"├── variables\")\n print_leaves(prefix, self.variables)\n # channels\n print(prefix + \"└── channels\")\n print_leaves(prefix, self.channels, vline=False)\n else:\n # axes\n s = \"axes: \"\n s += \", \".join([\"{0} ({1})\".format(a.expression, a.units) for a in self.axes])\n print(prefix + \"├── \" + s)\n # constants\n s = \"constants: \"\n s += \", \".join(\n [\"{0} ({1} {2})\".format(a.expression, a.value, a.units) for a in self.constants]\n )\n print(prefix + \"├── \" + s)\n # channels\n s = \"channels: \"\n s += \", \".join(self.channel_names)\n print(prefix + \"└── \" + s)\n\n def bring_to_front(self, channel):\n \"\"\"Bring a specific channel to the zero-indexed position in channels.\n\n All other channels get pushed back but remain in order.\n\n Parameters\n ----------\n channel : int or str\n Channel index or name.\n \"\"\"\n channel_index = wt_kit.get_index(self.channel_names, channel)\n new = list(self.channel_names)\n new.insert(0, new.pop(channel_index))\n self.channel_names = new\n\n def chop(self, *args, at={}, parent=None, verbose=True) -> wt_collection.Collection:\n \"\"\"Divide the dataset into its lower-dimensionality components.\n\n Parameters\n ----------\n axis : str or int (args)\n Axes of the returned data objects. Strings refer to the names of\n axes in this object, integers refer to their index. Provide multiple\n axes to return multidimensional data objects.\n at : dict (optional)\n Choice of position along an axis. Keys are axis names, values are lists\n ``[position, input units]``. If exact position does not exist,\n the closest valid position is used.\n parent : WrightTools Collection instance (optional)\n Collection to place the new \"chop\" collection within. Default is\n None (new parent).\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n Returns\n -------\n WrightTools Collection\n Collection of chopped data objects.\n\n Examples\n --------\n >>> data.axis_names\n ['d2', 'w1', 'w2']\n\n Get all w1 wigners.\n\n >>> datas = data.chop('d2', 'w1')\n >>> len(datas)\n 51\n\n Get 2D frequency at d2=0 fs.\n\n >>> datas = data.chop('w1', 'w2', at={'d2': [0, 'fs']})\n >>> len(datas)\n 0\n >>> datas[0].axis_names\n ['w1', 'w2']\n >>> datas[0].d2[:]\n 0.\n\n See Also\n --------\n collapse\n Collapse the dataset along one axis.\n split\n Split the dataset while maintaining its dimensionality.\n \"\"\"\n from ._axis import operators, operator_to_identifier\n\n # parse args\n args = list(args)\n for i, arg in enumerate(args):\n if isinstance(arg, int):\n args[i] = self._axes[arg].natural_name\n elif isinstance(arg, str):\n # same normalization that occurs in the natural_name @property\n arg = arg.strip()\n for op in operators:\n arg = arg.replace(op, operator_to_identifier[op])\n args[i] = wt_kit.string2identifier(arg)\n\n # normalize the at keys to the natural name\n for k in [ak for ak in at.keys() if type(ak) == str]:\n for op in operators:\n if op in k:\n nk = k.replace(op, operator_to_identifier[op])\n at[nk] = at[k]\n at.pop(k)\n k = nk\n\n # get output collection\n out = wt_collection.Collection(name=\"chop\", parent=parent)\n # get output shape\n kept = args + [ak for ak in at.keys() if type(ak) == str]\n kept_axes = [self._axes[self.axis_names.index(a)] for a in kept]\n removed_axes = [a for a in self._axes if a not in kept_axes]\n removed_shape = wt_kit.joint_shape(*removed_axes)\n if removed_shape == ():\n removed_shape = (1,) * self.ndim\n removed_shape = list(removed_shape)\n for i in at.keys():\n if type(i) == int:\n removed_shape[i] = 1\n for ax in kept_axes:\n if ax.shape.count(1) == ax.ndim - 1:\n removed_shape[ax.shape.index(ax.size)] = 1\n removed_shape = tuple(removed_shape)\n # iterate\n i = 0\n for idx in np.ndindex(removed_shape):\n idx = np.array(idx, dtype=object)\n idx[np.array(removed_shape) == 1] = slice(None)\n for axis, point in at.items():\n if type(axis) == int:\n idx[axis] = point\n continue\n point, units = point\n destination_units = self._axes[self.axis_names.index(axis)].units\n point = wt_units.converter(point, units, destination_units)\n axis_index = self.axis_names.index(axis)\n axis = self._axes[axis_index]\n idx_index = np.array(axis.shape) > 1\n if np.sum(idx_index) > 1:\n raise wt_exceptions.MultidimensionalAxisError(\"chop\", axis.natural_name)\n idx_index = list(idx_index).index(True)\n idx[idx_index] = np.argmin(np.abs(axis[tuple(idx)] - point))\n data = out.create_data(name=\"chop%03i\" % i)\n for v in self.variables:\n kwargs = {}\n kwargs[\"name\"] = v.natural_name\n kwargs[\"values\"] = v[idx]\n kwargs[\"units\"] = v.units\n kwargs[\"label\"] = v.label\n kwargs.update(v.attrs)\n data.create_variable(**kwargs)\n for c in self.channels:\n kwargs = {}\n kwargs[\"name\"] = c.natural_name\n kwargs[\"values\"] = c[idx]\n kwargs[\"units\"] = c.units\n kwargs[\"label\"] = c.label\n kwargs[\"signed\"] = c.signed\n kwargs.update(c.attrs)\n data.create_channel(**kwargs)\n new_axes = [a.expression for a in kept_axes if a.expression not in at.keys()]\n new_axis_units = [a.units for a in kept_axes if a.expression not in at.keys()]\n data.transform(*new_axes)\n for const in self.constant_expressions:\n data.create_constant(const, verbose=False)\n for ax in self.axis_expressions:\n if ax not in new_axes:\n data.create_constant(ax, verbose=False)\n for j, units in 
enumerate(new_axis_units):\n data.axes[j].convert(units)\n i += 1\n out.flush()\n # return\n if verbose:\n print(\"chopped data into %d piece(s)\" % len(out), \"in\", new_axes)\n return out\n\n def gradient(self, axis, *, channel=0):\n \"\"\"\n Compute the gradient along one axis.\n\n New channels have names ``<channel name>_<axis name>_gradient``.\n\n Parameters\n ----------\n axis : int or str\n The axis to differentiate along.\n If given as an integer, the axis in the underlying array is used,\n and unitary spacing is assumed.\n If given as a string, the axis must exist, and be a 1D array-aligned axis.\n (i.e. have a shape with a single value which is not ``1``)\n The axis to collapse along is inferred from the shape of the axis.\n channel : int or str\n The channel to differentiate.\n Default is the first channel.\n \"\"\"\n # get axis index --------------------------------------------------------------------------\n if isinstance(axis, int):\n axis_index = axis\n elif isinstance(axis, str):\n index = self.axis_names.index(axis)\n axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]\n if len(axes) > 1:\n raise wt_exceptions.MultidimensionalAxisError(axis, \"collapse\")\n elif len(axes) == 0:\n raise wt_exceptions.ValueError(\n \"Axis '{}' is a single point, cannot compute gradient\".format(axis)\n )\n axis_index = axes[0]\n else:\n raise wt_exceptions.TypeError(\"axis: expected {int, str}, got %s\" % type(axis))\n\n channel_index = wt_kit.get_index(self.channel_names, channel)\n channel = self.channel_names[channel_index]\n\n if self[channel].shape[axis_index] == 1:\n raise wt_exceptions.ValueError(\n \"Channel '{}' has a single point along Axis '{}', cannot compute gradient\".format(\n channel, axis\n )\n )\n rtype = np.result_type(self[channel].dtype, float)\n new = self.create_channel(\n \"{}_{}_gradient\".format(channel, axis),\n values=np.empty(self[channel].shape, dtype=rtype),\n )\n\n channel = self[channel]\n if axis == axis_index:\n new[:] = np.gradient(channel[:], axis=axis_index)\n else:\n new[:] = np.gradient(channel[:], self[axis].points, axis=axis_index)\n\n def moment(self, axis, channel=0, moment=1, *, resultant=None):\n \"\"\"Take the nth moment the dataset along one axis, adding lower rank channels.\n\n New channels have names ``<channel name>_<axis name>_moment_<moment num>``.\n\n Moment 0 is the integral of the slice.\n Moment 1 is the weighted average or \"Center of Mass\", normalized by the integral\n Moment 2 is the variance, the central moment about the center of mass,\n normalized by the integral\n Moments 3+ are central moments about the center of mass, normalized by the integral\n and by the standard deviation to the power of the moment.\n\n Moments, especially higher order moments, are susceptible to noise and baseline.\n It is recommended when used with real data to use :meth:`WrightTools.data.Channel.clip`\n in conjunction with moments to reduce effects of noise.\n\n Parameters\n ----------\n axis : int or str\n The axis to take the moment along.\n If given as an integer, the axis with that index is used.\n If given as a string, the axis with that name is used.\n The axis must exist, and be a 1D array-aligned axis.\n (i.e. 
have a shape with a single value which is not ``1``)\n The collapsed axis must be monotonic to produce correct results.\n The axis to collapse along is inferred from the shape of the axis.\n channel : int or str\n The channel to take the moment.\n If given as an integer, the channel with that index is used.\n If given as a string, the channel with that name is used.\n The channel must have values along the axis\n (i.e. its shape must not be ``1`` in the dimension for which the axis is not ``1``)\n Default is 0, the first channel.\n moment : int or tuple of int\n The moments to take.\n One channel will be created for each number given.\n Default is 1, the center of mass.\n resultant : tuple of int\n The resultant shape after the moment operation.\n By default, it is intuited by the axis along which the moment is being taken.\n This default only works if that axis is 1D, so resultant is required if a\n multidimensional axis is passed as the first argument.\n The requirement of monotonicity applies on a per pixel basis.\n\n See Also\n --------\n collapse\n Reduce dimensionality by some mathematical operation\n clip\n Set values above/below a threshold to a particular value\n WrightTools.kit.joint_shape\n Useful for setting `resultant` kwarg based off of axes not collapsed.\n \"\"\"\n # get axis index --------------------------------------------------------------------------\n axis_index = None\n if resultant is not None:\n for i, (s, r) in enumerate(zip(wt_kit.joint_shape(*self.axes), resultant)):\n if s != r and r == 1 and axis_index is None:\n axis_index = i\n elif s == r:\n continue\n else:\n raise wt_exceptions.ValueError(\n f\"Invalid resultant shape '{resultant}' for shape {wt_kit.joint_shape(*self.axes)}. \"\n + \"Consider using `wt.kit.joint_shape` to join non-collapsed axes.\"\n )\n\n index = wt_kit.get_index(self.axis_names, axis)\n if axis_index is None:\n axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]\n if len(axes) > 1:\n raise wt_exceptions.MultidimensionalAxisError(axis, \"moment\")\n elif len(axes) == 0:\n raise wt_exceptions.ValueError(\n \"Axis {} is a single point, cannot compute moment\".format(axis)\n )\n axis_index = axes[0]\n\n warnings.warn(\"moment\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n\n channel_index = wt_kit.get_index(self.channel_names, channel)\n channel = self.channel_names[channel_index]\n\n if self[channel].shape[axis_index] == 1:\n raise wt_exceptions.ValueError(\n \"Channel '{}' has a single point along Axis '{}', cannot compute moment\".format(\n channel, axis\n )\n )\n\n new_shape = list(self[channel].shape)\n new_shape[axis_index] = 1\n\n channel = self[channel]\n axis_inp = axis\n axis = self.axes[index]\n x = axis[:]\n if np.any(np.isnan(x)):\n raise wt_exceptions.ValueError(\"Axis '{}' includes NaN\".format(axis_inp))\n y = np.nan_to_num(channel[:])\n\n try:\n moments = tuple(moment)\n except TypeError:\n moments = (moment,)\n\n multiplier = 1\n if 0 in moments:\n # May be possible to optimize, probably doesn't need the sum\n # only matters for integral, all others normalize by integral\n multiplier = np.sign(\n np.sum(np.diff(x, axis=axis_index), axis=axis_index, keepdims=True)\n )\n\n for moment in moments:\n about = 0\n norm = 1\n if moment > 0:\n norm = np.trapz(y, x, axis=axis_index)\n norm = np.array(norm)\n norm.shape = new_shape\n if moment > 1:\n about = np.trapz(x * y, x, axis=axis_index)\n about = np.array(about)\n about.shape = new_shape\n about /= norm\n if moment > 2:\n sigma = np.trapz((x - 
about) ** 2 * y, x, axis=axis_index)\n sigma = np.array(sigma)\n sigma.shape = new_shape\n sigma /= norm\n sigma **= 0.5\n norm *= sigma ** moment\n\n values = np.trapz((x - about) ** moment * y, x, axis=axis_index)\n values = np.array(values)\n values.shape = new_shape\n values /= norm\n if moment == 0:\n values *= multiplier\n self.create_channel(\n \"{}_{}_{}_{}\".format(channel.natural_name, axis_inp, \"moment\", moment),\n values=values,\n )\n\n def collapse(self, axis, method=\"sum\"):\n \"\"\"Collapse the dataset along one axis, adding lower rank channels.\n\n New channels have names ``<channel name>_<axis name>_<method>``.\n\n Parameters\n ----------\n axis : int or str\n The axis to collapse along.\n If given as an integer, the axis in the underlying array is used.\n If given as a string, the axis must exist, and be a 1D array-aligned axis.\n (i.e. have a shape with a single value which is not ``1``)\n The axis to collapse along is inferred from the shape of the axis.\n method : {'average', 'sum', 'max', 'min'} (optional)\n The method of collapsing the given axis. Method may also be list\n of methods corresponding to the channels of the object. Default\n is sum. NaNs are ignored.\n Can also be a list, allowing for different treatment for varied channels.\n In this case, None indicates that no change to that channel should occur.\n\n See Also\n --------\n chop\n Divide the dataset into its lower-dimensionality components.\n split\n Split the dataset while maintaining its dimensionality.\n moment\n Take the moment along a particular axis\n \"\"\"\n if method in (\"int\", \"integrate\"):\n warnings.warn(\n \"integrate method of collapse is deprecated, use moment(moment=0) instead\",\n wt_exceptions.VisibleDeprecationWarning,\n )\n for channel in self.channel_names:\n try:\n self.moment(axis, channel, moment=0)\n self.rename_channels(\n **{self.channel_names[-1]: f\"{channel}_{axis}_{method}\"}, verbose=False\n )\n except wt_exceptions.ValueError:\n pass # may have some channels which fail, do so silently\n return\n # get axis index --------------------------------------------------------------------------\n if isinstance(axis, int):\n axis_index = axis\n elif isinstance(axis, str):\n index = self.axis_names.index(axis)\n axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]\n if len(axes) > 1:\n raise wt_exceptions.MultidimensionalAxisError(axis, \"collapse\")\n elif len(axes) == 0:\n raise wt_exceptions.ValueError(\n \"Axis {} is a single point, cannot collapse\".format(axis)\n )\n axis_index = axes[0]\n else:\n raise wt_exceptions.TypeError(\"axis: expected {int, str}, got %s\" % type(axis))\n\n new_shape = list(self.shape)\n new_shape[axis_index] = 1\n func = {\n \"sum\": np.nansum,\n \"max\": np.nanmax,\n \"maximum\": np.nanmax,\n \"min\": np.nanmin,\n \"minimum\": np.nanmin,\n \"ave\": np.nanmean,\n \"average\": np.nanmean,\n \"mean\": np.nanmean,\n }\n\n # methods ---------------------------------------------------------------------------------\n if isinstance(method, str):\n methods = [method for _ in self.channels]\n if isinstance(method, list):\n if len(method) == len(self.channels):\n methods = method\n else:\n raise wt_exceptions.ValueError(\n \"method argument must have same number of elements as there are channels\"\n )\n for m in methods:\n if m not in func.keys():\n raise wt_exceptions.ValueError(\"method '{}' not recognized\".format(m))\n\n warnings.warn(\"collapse\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n\n # collapse 
--------------------------------------------------------------------------------\n for method, channel in zip(methods, self.channel_names):\n if method is None:\n continue\n\n if self[channel].shape[axis_index] == 1:\n continue # Cannot collapse any further, don't clutter data object\n\n new_shape = list(self[channel].shape)\n new_shape[axis_index] = 1\n rtype = self[channel].dtype\n if method in [\"ave\", \"average\", \"mean\"]:\n rtype = np.result_type(self[channel].dtype, float)\n\n new = self.create_channel(\n \"{}_{}_{}\".format(channel, axis, method),\n values=np.empty(new_shape, dtype=rtype),\n units=self[channel].units,\n )\n\n new[:] = func[method](self[channel], axis=axis_index, keepdims=True)\n\n def convert(self, destination_units, *, convert_variables=False, verbose=True):\n \"\"\"Convert all compatable axes and constants to given units.\n\n Parameters\n ----------\n destination_units : str\n Destination units.\n convert_variables : boolean (optional)\n Toggle conversion of stored arrays. Default is False\n verbose : bool (optional)\n Toggle talkback. Default is True.\n\n See Also\n --------\n Axis.convert\n Convert a single axis object to compatable units. Call on an\n axis object in data.axes.\n \"\"\"\n # apply to all compatible axes\n for axis in self.axes:\n if wt_units.is_valid_conversion(axis.units, destination_units):\n orig = axis.units\n axis.convert(destination_units, convert_variables=convert_variables)\n if verbose:\n print(\n \"axis {} converted from {} to {}\".format(\n axis.expression, orig, destination_units\n )\n )\n # apply to all compatible constants\n for constant in self.constants:\n if wt_units.is_valid_conversion(constant.units, destination_units):\n orig = constant.units\n constant.convert(destination_units, convert_variables=convert_variables)\n if verbose:\n print(\n \"constant {} converted from {} to {}\".format(\n constant.expression, orig, destination_units\n )\n )\n if convert_variables:\n for var in self.variables:\n if wt_units.is_valid_conversion(var.units, destination_units):\n orig = var.units\n var.convert(destination_units)\n if verbose:\n print(\n \"variable {} converted from {} to {}\".format(\n var.natural_name, orig, destination_units\n )\n )\n self._on_axes_updated()\n self._on_constants_updated()\n\n def create_channel(\n self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs\n ) -> Channel:\n \"\"\"Append a new channel.\n\n Parameters\n ----------\n name : string\n Unique name for this channel.\n values : array (optional)\n Array. If None, an empty array equaling the data shape is\n created. Default is None.\n shape : tuple of int\n Shape to use. Must broadcast with the full shape.\n Only used if `values` is None.\n Default is the full shape of self.\n units : string (optional)\n Channel units. 
Default is None.\n dtype : numpy.dtype (optional)\n dtype to use for dataset, default is np.float64.\n Only used if `values` is None.\n kwargs : dict\n Additional keyword arguments passed to Channel instantiation.\n\n Returns\n -------\n Channel\n Created channel.\n \"\"\"\n if name in self.channel_names:\n warnings.warn(name, wt_exceptions.ObjectExistsWarning)\n return self[name]\n elif name in self.variable_names:\n raise wt_exceptions.NameNotUniqueError(name)\n\n require_kwargs = {\"chunks\": True}\n if values is None:\n if shape is None:\n require_kwargs[\"shape\"] = self.shape\n else:\n require_kwargs[\"shape\"] = shape\n if dtype is None:\n require_kwargs[\"dtype\"] = np.dtype(np.float64)\n else:\n require_kwargs[\"dtype\"] = dtype\n if require_kwargs[\"dtype\"].kind in \"fcmM\":\n require_kwargs[\"fillvalue\"] = np.nan\n else:\n require_kwargs[\"fillvalue\"] = 0\n else:\n require_kwargs[\"data\"] = values\n require_kwargs[\"shape\"] = values.shape\n require_kwargs[\"dtype\"] = values.dtype\n if np.prod(require_kwargs[\"shape\"]) == 1:\n require_kwargs[\"chunks\"] = None\n # create dataset\n dataset_id = self.require_dataset(name=name, **require_kwargs).id\n channel = Channel(self, dataset_id, units=units, **kwargs)\n # finish\n self.attrs[\"channel_names\"] = np.append(self.attrs[\"channel_names\"], name.encode())\n return channel\n\n def create_variable(\n self, name, values=None, *, shape=None, units=None, dtype=None, **kwargs\n ) -> Variable:\n \"\"\"Add new child variable.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n values : array-like (optional)\n Array to populate variable with. If None, an variable will be filled with NaN.\n Default is None.\n shape : tuple of int\n Shape to use. must broadcast with the full shape.\n Only used if `values` is None.\n Default is the full shape of self.\n units : string (optional)\n Variable units. Default is None.\n dtype : numpy.dtype (optional)\n dtype to use for dataset, default is np.float64.\n Only used if `values` is None.\n kwargs\n Additional kwargs to variable instantiation.\n\n Returns\n -------\n WrightTools Variable\n New child variable.\n \"\"\"\n if name in self.variable_names:\n warnings.warn(name, wt_exceptions.ObjectExistsWarning)\n return self[name]\n elif name in self.channel_names:\n raise wt_exceptions.NameNotUniqueError(name)\n if values is None:\n if shape is None:\n shape = self.shape\n if dtype is None:\n dtype = np.dtype(np.float64)\n if dtype.kind in \"fcmM\":\n fillvalue = np.nan\n else:\n fillvalue = 0\n else:\n shape = values.shape\n dtype = values.dtype\n fillvalue = None\n # create dataset\n id = self.require_dataset(\n name=name, data=values, shape=shape, dtype=dtype, fillvalue=fillvalue\n ).id\n variable = Variable(self, id, units=units, **kwargs)\n # finish\n self._variables = None\n self.attrs[\"variable_names\"] = np.append(self.attrs[\"variable_names\"], name.encode())\n return variable\n\n def get_nadir(self, channel=0) -> tuple:\n \"\"\"Get the coordinates, in units, of the minimum in a channel.\n\n Parameters\n ----------\n channel : int or str (optional)\n Channel. 
Default is 0.\n\n Returns\n -------\n generator of numbers\n Coordinates in units for each axis.\n \"\"\"\n # get channel\n if isinstance(channel, int):\n channel_index = channel\n elif isinstance(channel, str):\n channel_index = self.channel_names.index(channel)\n else:\n raise TypeError(\"channel: expected {int, str}, got %s\" % type(channel))\n channel = self.channels[channel_index]\n # get indicies\n idx = channel.argmin()\n # finish\n return tuple(a[idx] for a in self._axes)\n\n def get_zenith(self, channel=0) -> tuple:\n \"\"\"Get the coordinates, in units, of the maximum in a channel.\n\n Parameters\n ----------\n channel : int or str (optional)\n Channel. Default is 0.\n\n Returns\n -------\n generator of numbers\n Coordinates in units for each axis.\n \"\"\"\n # get channel\n if isinstance(channel, int):\n channel_index = channel\n elif isinstance(channel, str):\n channel_index = self.channel_names.index(channel)\n else:\n raise TypeError(\"channel: expected {int, str}, got %s\" % type(channel))\n channel = self.channels[channel_index]\n # get indicies\n idx = channel.argmax()\n # finish\n return tuple(a[idx] for a in self._axes)\n\n def heal(self, channel=0, method=\"linear\", fill_value=np.nan, verbose=True):\n \"\"\"\n Remove nans from channel using interpolation.\n\n Parameters\n ----------\n channel : int or str (optional)\n Channel to heal. Default is 0.\n method : {'linear', 'nearest', 'cubic'} (optional)\n The interpolation method. Note that cubic interpolation is only\n possible for 1D and 2D data. See `griddata`__ for more information.\n Default is linear.\n fill_value : number-like (optional)\n The value written to pixels that cannot be filled by interpolation.\n Default is nan.\n verbose : bool (optional)\n Toggle talkback. Default is True.\n\n\n __ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html\n\n\n .. note:: Healing may take several minutes for large datasets.\n Interpolation time goes as nearest, linear, then cubic.\n\n\n \"\"\"\n warnings.warn(\"heal\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n timer = wt_kit.Timer(verbose=False)\n with timer:\n # channel\n if isinstance(channel, int):\n channel_index = channel\n elif isinstance(channel, str):\n channel_index = self.channel_names.index(channel)\n else:\n raise TypeError(\"channel: expected {int, str}, got %s\" % type(channel))\n channel = self.channels[channel_index]\n values = self.channels[channel_index][:]\n points = [axis[:] for axis in self._axes]\n xi = tuple(np.meshgrid(*points, indexing=\"ij\"))\n # 'undo' gridding\n arr = np.zeros((len(self._axes) + 1, values.size))\n for i in range(len(self._axes)):\n arr[i] = xi[i].flatten()\n arr[-1] = values.flatten()\n # remove nans\n arr = arr[:, ~np.isnan(arr).any(axis=0)]\n # grid data wants tuples\n tup = tuple([arr[i] for i in range(len(arr) - 1)])\n # grid data\n out = griddata(tup, arr[-1], xi, method=method, fill_value=fill_value)\n self.channels[channel_index][:] = out\n # print\n if verbose:\n print(\n \"channel {0} healed in {1} seconds\".format(\n channel.name, np.around(timer.interval, decimals=3)\n )\n )\n\n def level(self, channel, axis, npts, *, verbose=True):\n \"\"\"Subtract the average value of npts at the edge of a given axis.\n\n Parameters\n ----------\n channel : int or str\n Channel to level.\n axis : int\n Axis to level along.\n npts : int\n Number of points to average for each slice. 
Positive numbers\n take points at leading indicies and negative numbers take points\n at trailing indicies.\n verbose : bool (optional)\n Toggle talkback. Default is True.\n \"\"\"\n warnings.warn(\"level\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n channel_index = wt_kit.get_index(self.channel_names, channel)\n channel = self.channels[channel_index]\n # verify npts not zero\n npts = int(npts)\n if npts == 0:\n raise wt_exceptions.ValueError(\"npts must not be zero\")\n # get subtrahend\n ss = [slice(None)] * self.ndim\n if npts > 0:\n ss[axis] = slice(0, npts, None)\n else:\n ss[axis] = slice(npts, None, None)\n subtrahend = np.nanmean(channel[ss], axis=axis)\n if self.ndim > 1:\n subtrahend = np.expand_dims(subtrahend, axis=axis)\n # level\n channel -= subtrahend\n # finish\n channel._null = 0\n if verbose:\n print(\"channel {0} leveled along axis {1}\".format(channel.natural_name, axis))\n\n def map_variable(\n self, variable, points, input_units=\"same\", *, name=None, parent=None, verbose=True\n ) -> \"Data\":\n \"\"\"Map points of an axis to new points using linear interpolation.\n\n Out-of-bounds points are written nan.\n\n Parameters\n ----------\n variable : string\n The variable to map onto.\n points : array-like or int\n If array, the new points. If int, new points will have the same\n limits, with int defining the number of evenly spaced points\n between.\n input_units : str (optional)\n The units of the new points. Default is same, which assumes\n the new points have the same units as the axis.\n name : string (optional)\n The name of the new data object. If None, generated from\n natural_name. Default is None.\n parent : WrightTools.Collection (optional)\n Parent of new data object. If None, data is made at root of a\n new temporary file.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n Returns\n -------\n WrightTools.Data\n New data object.\n \"\"\"\n # get variable index\n variable_index = wt_kit.get_index(self.variable_names, variable)\n variable = self.variables[variable_index]\n # get points\n if isinstance(points, int):\n points = np.linspace(variable.min(), variable.max(), points)\n points = np.array(points)\n # points dimensionality\n if points.ndim < variable.ndim:\n for i, d in enumerate(variable.shape):\n if d == 1:\n points = np.expand_dims(points, axis=i)\n # convert points\n if input_units == \"same\":\n pass\n else:\n points = wt_units.converter(points, input_units, variable.units)\n # construct new data object\n special = [\"name\", \"axes\", \"constants\", \"channel_names\", \"variable_names\"]\n kwargs = {k: v for k, v in self.attrs.items() if k not in special}\n if name is None:\n name = \"{0}_{1}_mapped\".format(self.natural_name, variable.natural_name)\n kwargs[\"name\"] = name\n kwargs[\"parent\"] = parent\n out = Data(**kwargs)\n # mapped variable\n values = points\n out.create_variable(values=values, **variable.attrs)\n # orthogonal variables\n for v in self.variables:\n if wt_kit.orthogonal(v.shape, variable.shape):\n out.create_variable(values=v[:], **v.attrs)\n out.transform(*self.axis_expressions)\n # interpolate\n if self.ndim == 1:\n\n def interpolate(dataset, points):\n function = scipy.interpolate.interp1d(variable[:], dataset[:], bounds_error=False)\n return function(points)\n\n else:\n pts = np.array([a.full.flatten() for a in self.axes]).T\n out_pts = np.array([a.full.flatten() for a in out.axes]).T\n\n def interpolate(dataset, points):\n values = dataset.full.flatten()\n function = scipy.interpolate.LinearNDInterpolator(pts, values, rescale=True)\n new = function(out_pts)\n new.shape = out.shape\n return new\n\n for v in self.variables:\n if v.natural_name not in out.variable_names:\n out.create_variable(values=interpolate(v, points), **v.attrs)\n out.variable_names = self.variable_names # enforce old order\n out._variables = None # force regeneration of variables @property\n for channel in self.channels:\n out.create_channel(values=interpolate(channel, points), **channel.attrs)\n # finish\n if verbose:\n print(\"data mapped from {0} to {1}\".format(self.shape, out.shape))\n return out\n\n def offset(\n self,\n points,\n offsets,\n along,\n offset_axis,\n units=\"same\",\n offset_units=\"same\",\n mode=\"valid\",\n method=\"linear\",\n verbose=True,\n ):\n \"\"\"Offset one axis based on another axis' values.\n\n Useful for correcting instrumental artifacts such as zerotune.\n\n Parameters\n ----------\n points : 1D array-like\n Points.\n offsets : 1D array-like\n Offsets.\n along : str or int\n Axis that points array lies along.\n offset_axis : str or int\n Axis to offset using offsets.\n units : str (optional)\n Units of points array.\n offset_units : str (optional)\n Units of offsets aray.\n mode : {'valid', 'full', 'old'} (optional)\n Define how far the new axis will extend. Points outside of valid\n interpolation range will be written nan.\n method : {'linear', 'nearest', 'cubic'} (optional)\n The interpolation method. Note that cubic interpolation is only\n possible for 1D and 2D data. See `griddata`__ for more information.\n Default is linear.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n\n __ http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html\n\n >>> points # an array of w1 points\n >>> offsets # an array of d1 corrections\n >>> data.offset(points, offsets, 'w1', 'd1')\n\n \"\"\"\n raise NotImplementedError\n # axis ------------------------------------------------------------------------------------\n if isinstance(along, int):\n axis_index = along\n elif isinstance(along, str):\n axis_index = self.axis_names.index(along)\n else:\n raise TypeError(\"along: expected {int, str}, got %s\" % type(along))\n axis = self._axes[axis_index]\n # values & points -------------------------------------------------------------------------\n # get values, points, units\n if units == \"same\":\n input_units = axis.units\n else:\n input_units = units\n # check offsets is 1D or 0D\n if len(offsets.shape) == 1:\n pass\n else:\n raise RuntimeError(\"values must be 1D or 0D in offset!\")\n # check if units is compatible, convert\n dictionary = getattr(wt_units, axis.units_kind)\n if input_units in dictionary.keys():\n pass\n else:\n raise RuntimeError(\"units incompatible in offset!\")\n points = wt_units.converter(points, input_units, axis.units)\n # create correction array\n function = interp1d(points, offsets, bounds_error=False)\n corrections = function(axis[:])\n # remove nans\n finite_indicies = np.where(np.isfinite(corrections))[0]\n left_pad_width = finite_indicies[0]\n right_pad_width = len(corrections) - finite_indicies[-1] - 1\n corrections = np.pad(\n corrections[np.isfinite(corrections)],\n (int(left_pad_width), int(right_pad_width)),\n mode=\"edge\",\n )\n # do correction ---------------------------------------------------------------------------\n # transpose so axis is last\n transpose_order = np.arange(len(self._axes))\n transpose_order[axis_index] = len(self._axes) - 1\n transpose_order[-1] = axis_index\n self.transpose(transpose_order, verbose=False)\n # get offset axis index\n if isinstance(offset_axis, int):\n offset_axis_index = offset_axis\n elif isinstance(offset_axis, str):\n offset_axis_index = self.axis_names.index(offset_axis)\n else:\n raise TypeError(\"offset_axis: expected {int, str}, got %s\" % type(offset_axis))\n # new points\n new_points = [a[:] for a in self._axes]\n old_offset_axis_points = self._axes[offset_axis_index][:]\n spacing = abs(\n (old_offset_axis_points.max() - old_offset_axis_points.min())\n / float(len(old_offset_axis_points))\n )\n if mode == \"old\":\n new_offset_axis_points = old_offset_axis_points\n elif mode == \"valid\":\n _max = old_offset_axis_points.max() + corrections.min()\n _min = old_offset_axis_points.min() + corrections.max()\n n = int(abs(np.ceil((_max - _min) / spacing)))\n new_offset_axis_points = np.linspace(_min, _max, n)\n elif mode == \"full\":\n _max = old_offset_axis_points.max() + corrections.max()\n _min = old_offset_axis_points.min() + corrections.min()\n n = np.ceil((_max - _min) / spacing)\n new_offset_axis_points = np.linspace(_min, _max, n)\n new_points[offset_axis_index] = new_offset_axis_points\n new_xi = tuple(np.meshgrid(*new_points, indexing=\"ij\"))\n xi = tuple(np.meshgrid(*[a[:] for a in self._axes], indexing=\"ij\"))\n for channel in self.channels:\n # 'undo' gridding\n arr = np.zeros((len(self._axes) + 1, channel[:].size))\n for i in range(len(self._axes)):\n arr[i] = xi[i].flatten()\n arr[-1] = channel[:].flatten()\n # do corrections\n corrections = list(corrections)\n corrections = corrections * int((len(arr[0]) / len(corrections)))\n 
arr[offset_axis_index] += corrections\n # grid data\n tup = tuple([arr[i] for i in range(len(arr) - 1)])\n # note that rescale is crucial in this operation\n out = griddata(tup, arr[-1], new_xi, method=method, fill_value=np.nan, rescale=True)\n channel[:] = out\n self._axes[offset_axis_index][:] = new_offset_axis_points\n # transpose out\n self.transpose(transpose_order, verbose=False)\n\n def print_tree(self, *, verbose=True):\n \"\"\"Print a ascii-formatted tree representation of the data contents.\"\"\"\n print(\"{0} ({1})\".format(self.natural_name, self.filepath))\n self._print_branch(\"\", depth=0, verbose=verbose)\n\n def prune(self, keep_channels=True, *, verbose=True):\n \"\"\"Remove unused variables and (optionally) channels from the Data object.\n\n Unused variables are those that are not included in either axes or constants.\n Unused channels are those not specified in keep_channels, or the first channel.\n\n Parameters\n ----------\n keep_channels : boolean or int or str or tuple\n If False, removes all but the first channel.\n If int or str, removes all but that index/name channel.\n If tuple, removes all channels except those in the tuple by index or name.\n Default is True: do not delete channels\n verbose : boolean\n Toggle talkback. Default is True.\n \"\"\"\n for v in self.variables:\n for var in wt_kit.flatten_list([ax.variables for ax in self._axes + self._constants]):\n if v == var:\n break\n else:\n self.remove_variable(v.natural_name, implied=False, verbose=verbose)\n if keep_channels is not True:\n try:\n if isinstance(keep_channels, str):\n raise TypeError\n indexes = tuple(keep_channels)\n except TypeError:\n indexes = (keep_channels,)\n\n for i, ch in enumerate(self.channels):\n if i not in indexes and not ch.natural_name in indexes:\n self.remove_channel(ch.natural_name, verbose=verbose)\n\n def remove_channel(self, channel, *, verbose=True):\n \"\"\"Remove channel from data.\n\n Parameters\n ----------\n channel : int or str\n Channel index or name to remove.\n verbose : boolean (optional)\n Toggle talkback. Default is True.\n \"\"\"\n channel_index = wt_kit.get_index(self.channel_names, channel)\n new = list(self.channel_names)\n name = new.pop(channel_index)\n del self[name]\n self.channel_names = new\n if verbose:\n print(\"channel {0} removed\".format(name))\n\n def remove_variable(self, variable, *, implied=True, verbose=True):\n \"\"\"Remove variable from data.\n\n Parameters\n ----------\n variable : int or str\n Variable index or name to remove.\n implied : boolean (optional)\n Toggle deletion of other variables that start with the same\n name. Default is True.\n verbose : boolean (optional)\n Toggle talkback. 
Default is True.\n \"\"\"\n if isinstance(variable, int):\n variable = self.variable_names[variable]\n # find all of the implied variables\n removed = []\n if implied:\n for n in self.variable_names:\n if n.startswith(variable):\n removed.append(n)\n else:\n removed = [variable]\n # check that axes will not be ruined\n for n in removed:\n for a in self._axes:\n if n in [v.natural_name for v in a.variables]:\n message = \"{0} is contained in axis {1}\".format(n, a.expression)\n raise RuntimeError(message)\n for c in self._constants:\n if n in [v.natural_name for v in c.variables]:\n warnings.warn(\n \"Variable being removed used in a constant\",\n wt_exceptions.WrightToolsWarning,\n )\n\n # do removal\n for n in removed:\n variable_index = wt_kit.get_index(self.variable_names, n)\n new = list(self.variable_names)\n name = new.pop(variable_index)\n del self[name]\n self.variable_names = new\n self._variables = None\n # finish\n if verbose:\n print(\"{0} variable(s) removed:\".format(len(removed)))\n for n in removed:\n print(\" {0}\".format(n))\n\n def rename_channels(self, *, verbose=True, **kwargs):\n \"\"\"Rename a set of channels.\n\n Parameters\n ----------\n kwargs\n Keyword arguments of the form current:'new'.\n verbose : boolean (optional)\n Toggle talkback. Default is True\n \"\"\"\n # ensure that items will remain unique\n changed = kwargs.keys()\n for k, v in kwargs.items():\n if v not in changed and v in self.keys():\n raise wt_exceptions.NameNotUniqueError(v)\n # compile references to items that are changing\n new = {}\n for k, v in kwargs.items():\n obj = self[k]\n index = self.channel_names.index(k)\n # rename\n new[v] = obj, index\n Group._instances.pop(obj.fullpath, None)\n obj.natural_name = str(v)\n # remove old references\n del self[k]\n # apply new references\n names = list(self.channel_names)\n for v, value in new.items():\n obj, index = value\n self[v] = obj\n names[index] = v\n self.channel_names = names\n # finish\n if verbose:\n print(\"{0} channel(s) renamed:\".format(len(kwargs)))\n for k, v in kwargs.items():\n print(\" {0} --> {1}\".format(k, v))\n\n def rename_variables(self, *, implied=True, verbose=True, **kwargs):\n \"\"\"Rename a set of variables.\n\n Parameters\n ----------\n kwargs\n Keyword arguments of the form current:'new'.\n implied : boolean (optional)\n Toggle inclusion of other variables that start with the same\n name. Default is True.\n verbose : boolean (optional)\n Toggle talkback. 
Default is True\n \"\"\"\n # find all of the implied variables\n kwargs = collections.OrderedDict(kwargs)\n if implied:\n new = collections.OrderedDict()\n for k, v in kwargs.items():\n for n in self.variable_names:\n if n.startswith(k):\n new[n] = n.replace(k, v, 1)\n kwargs = new\n # ensure that items will remain unique\n changed = kwargs.keys()\n for k, v in kwargs.items():\n if v not in changed and v in self.keys():\n raise wt_exceptions.NameNotUniqueError(v)\n # compile references to items that are changing\n new = {}\n for k, v in kwargs.items():\n obj = self[k]\n index = self.variable_names.index(k)\n # rename\n new[v] = obj, index\n Group._instances.pop(obj.fullpath, None)\n obj.natural_name = str(v)\n # remove old references\n del self[k]\n # apply new references\n names = list(self.variable_names)\n for v, value in new.items():\n obj, index = value\n self[v] = obj\n names[index] = v\n self.variable_names = names\n units = self.units\n new = list(self.axis_expressions)\n for i, v in enumerate(kwargs.keys()):\n for j, n in enumerate(new):\n new[j] = n.replace(v, \"{%i}\" % i)\n for i, n in enumerate(new):\n new[i] = n.format(*kwargs.values())\n self.transform(*new)\n for a, u in zip(self._axes, units):\n a.convert(u)\n units = self.constant_units\n new = list(self.constant_expressions)\n for i, v in enumerate(kwargs.keys()):\n for j, n in enumerate(new):\n new[j] = n.replace(v, \"{%i}\" % i)\n for i, n in enumerate(new):\n new[i] = n.format(*kwargs.values())\n self.set_constants(*new)\n for c, u in zip(self._constants, units):\n c.convert(u)\n # finish\n if verbose:\n print(\"{0} variable(s) renamed:\".format(len(kwargs)))\n for k, v in kwargs.items():\n print(\" {0} --> {1}\".format(k, v))\n\n def share_nans(self):\n \"\"\"Share not-a-numbers between all channels.\n\n If any channel is nan at a given index, all channels will be nan\n at that index after this operation.\n\n Uses the share_nans method found in wt.kit.\n \"\"\"\n\n def f(_, s, channels):\n outs = wt_kit.share_nans(*[c[s] for c in channels])\n for c, o in zip(channels, outs):\n c[s] = o\n\n self.channels[0].chunkwise(f, self.channels)\n\n def smooth(self, factors, channel=None, verbose=True) -> \"Data\":\n \"\"\"Smooth a channel using an n-dimenional kaiser window.\n\n Note, all arrays are loaded into memory.\n\n For more info see `Kaiser_window`__ wikipedia entry.\n\n __ https://en.wikipedia.org/wiki/Kaiser_window\n\n Parameters\n ----------\n factors : int or list of int\n The smoothing factor. You may provide a list of smoothing factors\n for each axis.\n channel : int or str or None (optional)\n The channel to smooth. If None, all channels will be smoothed.\n Default is None.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n \"\"\"\n warnings.warn(\"smooth\", category=wt_exceptions.EntireDatasetInMemoryWarning)\n # get factors -----------------------------------------------------------------------------\n\n if isinstance(factors, list):\n pass\n else:\n dummy = np.zeros(len(self._axes))\n dummy[::] = factors\n factors = list(dummy)\n # get channels ----------------------------------------------------------------------------\n if channel is None:\n channels = self.channels\n else:\n if isinstance(channel, int):\n channel_index = channel\n elif isinstance(channel, str):\n channel_index = self.channel_names.index(channel)\n else:\n raise TypeError(\"channel: expected {int, str}, got %s\" % type(channel))\n channels = [self.channels[channel_index]]\n # smooth ----------------------------------------------------------------------------------\n for channel in channels:\n values = channel[:]\n for axis_index in range(len(factors)):\n factor = factors[axis_index]\n # transpose so the axis of interest is last\n transpose_order = range(len(values.shape))\n # replace axis_index with zero\n transpose_order = [\n len(values.shape) - 1 if i == axis_index else i for i in transpose_order\n ]\n transpose_order[len(values.shape) - 1] = axis_index\n values = values.transpose(transpose_order)\n # get kaiser window\n beta = 5.0\n w = np.kaiser(2 * factor + 1, beta)\n # for all slices...\n for index in np.ndindex(values[..., 0].shape):\n current_slice = values[index]\n temp_slice = np.pad(current_slice, int(factor), mode=str(\"edge\"))\n values[index] = np.convolve(temp_slice, w / w.sum(), mode=str(\"valid\"))\n # transpose out\n values = values.transpose(transpose_order)\n # return array to channel object\n channel[:] = values\n if verbose:\n print(\"smoothed data\")\n\n def split(\n self, expression, positions, *, units=None, parent=None, verbose=True\n ) -> wt_collection.Collection:\n \"\"\"\n Split the data object along a given expression, in units.\n\n Parameters\n ----------\n expression : int or str\n The expression to split along. If given as an integer, the axis at that index\n is used.\n positions : number-type or 1D array-type\n The position(s) to split at, in units.\n units : str (optional)\n The units of the given positions. Default is same, which assumes\n input units are identical to first variable units.\n parent : WrightTools.Collection (optional)\n The parent collection in which to place the 'split' collection.\n Default is a new Collection.\n verbose : bool (optional)\n Toggle talkback. 
Default is True.\n\n Returns\n -------\n WrightTools.collection.Collection\n A Collection of data objects.\n The order of the objects is such that the axis points retain their original order.\n\n See Also\n --------\n chop\n Divide the dataset into its lower-dimensionality components.\n collapse\n Collapse the dataset along one axis.\n \"\"\"\n # axis ------------------------------------------------------------------------------------\n old_expr = self.axis_expressions\n old_units = self.units\n out = wt_collection.Collection(name=\"split\", parent=parent)\n if isinstance(expression, int):\n if units is None:\n units = self._axes[expression].units\n expression = self._axes[expression].expression\n elif isinstance(expression, str):\n pass\n else:\n raise TypeError(\"expression: expected {int, str}, got %s\" % type(expression))\n\n self.transform(expression)\n if units:\n self.convert(units, verbose=False)\n\n try:\n positions = [-np.inf] + sorted(list(positions)) + [np.inf]\n except TypeError:\n positions = [-np.inf, positions, np.inf]\n\n values = self._axes[0].full\n masks = [(values >= lo) & (values < hi) for lo, hi in wt_kit.pairwise(positions)]\n omasks = []\n cuts = []\n for mask in masks:\n try:\n omasks.append(wt_kit.mask_reduce(mask))\n cuts.append([i == 1 for i in omasks[-1].shape])\n # Ensure at least one axis is kept\n if np.all(cuts[-1]):\n cuts[-1][0] = False\n except ValueError:\n omasks.append(None)\n cuts.append(None)\n for i in range(len(positions) - 1):\n out.create_data(\"split%03i\" % i)\n\n for var in self.variables:\n for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):\n if omask is None:\n # Zero length split\n continue\n omask = wt_kit.enforce_mask_shape(omask, var.shape)\n omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])\n out_arr = np.full(omask.shape, np.nan)\n imask = wt_kit.enforce_mask_shape(imask, var.shape)\n out_arr[omask] = var[:][imask]\n out[i].create_variable(values=out_arr, **var.attrs)\n\n for ch in self.channels:\n for i, (imask, omask, cut) in enumerate(zip(masks, omasks, cuts)):\n if omask is None:\n # Zero length split\n continue\n omask = wt_kit.enforce_mask_shape(omask, ch.shape)\n omask.shape = tuple([s for s, c in zip(omask.shape, cut) if not c])\n out_arr = np.full(omask.shape, np.nan)\n imask = wt_kit.enforce_mask_shape(imask, ch.shape)\n out_arr[omask] = ch[:][imask]\n out[i].create_channel(values=out_arr, **ch.attrs)\n\n if verbose:\n for d in out.values():\n try:\n d.transform(expression)\n except IndexError:\n continue\n\n print(\"split data into {0} pieces along <{1}>:\".format(len(positions) - 1, expression))\n for i, (lo, hi) in enumerate(wt_kit.pairwise(positions)):\n new_data = out[i]\n if new_data.shape == ():\n print(\" {0} : None\".format(i))\n else:\n new_axis = new_data.axes[0]\n print(\n \" {0} : {1:0.2f} to {2:0.2f} {3} {4}\".format(\n i, lo, hi, self.axes[0].units, new_axis.shape\n )\n )\n\n for d in out.values():\n try:\n d.transform(*old_expr)\n keep = []\n keep_units = []\n for ax, u in zip(d.axes, old_units):\n if ax.size > 1:\n keep.append(ax.expression)\n keep_units.append(u)\n else:\n d.create_constant(ax.expression, verbose=False)\n d.transform(*keep)\n for ax, u in zip(d.axes, keep_units):\n ax.convert(u)\n except IndexError:\n continue\n tempax = Axis(d, expression)\n if all(\n np.all(\n np.sum(~np.isnan(tempax.masked), axis=tuple(set(range(tempax.ndim)) - {j}))\n <= 1\n )\n for j in range(tempax.ndim)\n ):\n d.create_constant(expression, verbose=False)\n 
self.transform(*old_expr)\n for ax, u in zip(self.axes, old_units):\n ax.convert(u)\n\n return out\n\n def transform(self, *axes, verbose=True):\n \"\"\"Transform the data.\n\n Parameters\n ----------\n axes : strings\n Expressions for the new set of axes.\n verbose : boolean (optional)\n Toggle talkback. Default is True\n\n See Also\n --------\n set_constants\n Similar method except for constants\n \"\"\"\n # TODO: ensure that transform does not break data\n # create\n new = []\n newt = \"newt\" in self.axis_expressions\n current = {a.expression: a for a in self._axes}\n for expression in axes:\n axis = current.get(expression, Axis(self, expression))\n new.append(axis)\n self._axes = new\n # units\n for a in self._axes:\n if a.units is None:\n a.convert(a.variables[0].units)\n # finish\n self.flush()\n self._on_axes_updated()\n nownewt = \"newt\" in self.axis_expressions\n if verbose and nownewt and not newt:\n print(\"Look she turned me into a newt\")\n elif verbose and newt and not nownewt:\n print(\"I got better\")\n\n def set_constants(self, *constants, verbose=True):\n \"\"\"Set the constants associated with the data.\n\n Parameters\n ----------\n constants : str\n Expressions for the new set of constants.\n verbose : boolean (optional)\n Toggle talkback. Default is True\n\n See Also\n --------\n transform\n Similar method except for axes.\n create_constant\n Add an individual constant.\n remove_constant\n Remove an individual constant.\n \"\"\"\n # create\n new = []\n current = {c.expression: c for c in self._constants}\n for expression in constants:\n constant = current.get(expression, Constant(self, expression))\n new.append(constant)\n self._constants = new\n # units\n for c in self._constants:\n if c.units is None:\n c.convert(c.variables[0].units)\n # finish\n self.flush()\n self._on_constants_updated()\n\n def create_constant(self, expression, *, verbose=True):\n \"\"\"Append a constant to the stored list.\n\n Parameters\n ----------\n expression : str\n Expression for the new constant.\n verbose : boolean (optional)\n Toggle talkback. Default is True\n\n See Also\n --------\n set_constants\n Remove and replace all constants.\n remove_constant\n Remove an individual constant.\n \"\"\"\n if expression in self.constant_expressions:\n wt_exceptions.ObjectExistsWarning.warn(expression)\n return self.constants[self.constant_expressions.index(expression)]\n constant = Constant(self, expression)\n if constant.units is None:\n constant.convert(constant.variables[0].units)\n self._constants.append(constant)\n self.flush()\n self._on_constants_updated()\n if verbose:\n print(\"Constant '{}' added\".format(constant.expression))\n return constant\n\n def remove_constant(self, constant, *, verbose=True):\n \"\"\"Remove a constant from the stored list.\n\n Parameters\n ----------\n constant : str or Constant or int\n Expression for the new constant.\n verbose : boolean (optional)\n Toggle talkback. 
Default is True\n\n See Also\n --------\n set_constants\n Remove and replace all constants.\n create_constant\n Add an individual constant.\n \"\"\"\n if isinstance(constant, (str, int)):\n constant_index = wt_kit.get_index(self.constant_expressions, constant)\n elif isinstance(constant, Constant):\n constant_index = wt_kit.get_index(self.constants, constant)\n constant = self._constants[constant_index]\n self._constants.pop(constant_index)\n self.flush()\n self._on_constants_updated()\n if verbose:\n print(\"Constant '{}' removed\".format(constant.expression))\n\n def zoom(self, factor, order=1, verbose=True):\n \"\"\"Zoom the data array using spline interpolation of the requested order.\n\n The number of points along each axis is increased by factor.\n See `scipy ndimage`__ for more info.\n\n __ http://docs.scipy.org/doc/scipy/reference/\n generated/scipy.ndimage.interpolation.zoom.html\n\n Parameters\n ----------\n factor : float\n The number of points along each axis will increase by this factor.\n order : int (optional)\n The order of the spline used to interpolate onto new points.\n verbose : bool (optional)\n Toggle talkback. Default is True.\n \"\"\"\n raise NotImplementedError\n import scipy.ndimage\n\n # axes\n for axis in self._axes:\n axis[:] = scipy.ndimage.interpolation.zoom(axis[:], factor, order=order)\n # channels\n for channel in self.channels:\n channel[:] = scipy.ndimage.interpolation.zoom(channel[:], factor, order=order)\n # return\n if verbose:\n print(\"data zoomed to new shape:\", self.shape)\n",
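# --- illustrative usage sketch ---------------------------------------------
# A minimal, hedged example exercising the Data methods defined in _data.py
# above (create_variable, create_channel, transform, moment, collapse, chop,
# split). The names "w1", "d2", "signal" and all array contents are
# hypothetical placeholders, not taken from the record above.
import numpy as np
import WrightTools as wt

data = wt.data.Data(name="sketch")  # new data object in a temporary file
# two orthogonal 1D variables that broadcast to the full (51, 21) shape
data.create_variable("w1", values=np.linspace(1.0, 2.0, 51)[:, None], units="eV")
data.create_variable("d2", values=np.linspace(-1.0, 1.0, 21)[None, :], units="ps")
data.create_channel("signal", values=np.random.rand(51, 21))
data.transform("w1", "d2")                     # declare the axes
data.moment("d2", channel="signal", moment=0)  # integral along d2
data.collapse("d2", method="average")          # adds channel "signal_d2_average"
wigners = data.chop("w1")                      # collection of 1D slices along w1
halves = data.split("w1", [1.5], units="eV")   # two pieces, split at w1 = 1.5 eV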
"\"\"\"Brunold.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\nimport pathlib\n\nimport numpy as np\n\nfrom ._data import Data\nfrom .. import exceptions as wt_exceptions\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"from_BrunoldrRaman\"]\n\n\n# --- from function -------------------------------------------------------------------------------\n\n\ndef from_BrunoldrRaman(filepath, name=None, parent=None, verbose=True) -> Data:\n \"\"\"Create a data object from the Brunold rRaman instrument.\n\n Expects one energy (in wavenumbers) and one counts value.\n\n Parameters\n ----------\n filepath : path-like\n Path to .txt file.\n Can be either a local or remote file (http/ftp).\n Can be compressed with gz/bz2, decompression based on file name.\n name : string (optional)\n Name to give to the created data object. If None, filename is used.\n Default is None.\n parent : WrightTools.Collection (optional)\n Collection to place new data object within. Default is None.\n verbose : boolean (optional)\n Toggle talkback. Default is True.\n\n Returns\n -------\n data\n New data object(s).\n \"\"\"\n # parse filepath\n filestr = os.fspath(filepath)\n filepath = pathlib.Path(filepath)\n\n if not \".txt\" in filepath.suffixes:\n wt_exceptions.WrongFileTypeWarning.warn(filepath, \".txt\")\n # parse name\n if not name:\n name = filepath.name.split(\".\")[0]\n # create data\n kwargs = {\"name\": name, \"kind\": \"BrunoldrRaman\", \"source\": filestr}\n if parent is None:\n data = Data(**kwargs)\n else:\n data = parent.create_data(**kwargs)\n # array\n ds = np.DataSource(None)\n f = ds.open(filestr, \"rt\")\n arr = np.genfromtxt(f, delimiter=\"\\t\").T\n f.close()\n # chew through all scans\n data.create_variable(name=\"energy\", values=arr[0], units=\"wn\")\n data.create_channel(name=\"signal\", values=arr[1])\n data.transform(\"energy\")\n # finish\n if verbose:\n print(\"data created at {0}\".format(data.fullpath))\n print(\" range: {0} to {1} (wn)\".format(data.energy[0], data.energy[-1]))\n print(\" size: {0}\".format(data.size))\n return data\n",
"\"\"\"Tensor 27.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\nimport pathlib\n\nimport numpy as np\n\nfrom ._data import Data\nfrom .. import exceptions as wt_exceptions\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"from_Tensor27\"]\n\n\n# --- from function -------------------------------------------------------------------------------\n\n\ndef from_Tensor27(filepath, name=None, parent=None, verbose=True) -> Data:\n \"\"\"Create a data object from a Tensor27 FTIR file.\n\n .. plot::\n\n >>> import WrightTools as wt\n >>> import matplotlib.pyplot as plt\n >>> from WrightTools import datasets\n >>> p = datasets.Tensor27.CuPCtS_powder_ATR\n >>> data = wt.data.from_Tensor27(p)\n >>> artist = wt.artists.quick1D(data)\n >>> plt.xlim(1300,1700)\n >>> plt.ylim(-0.005,.02)\n\n Parameters\n ----------\n filepath : path-like\n Path to Tensor27 output file (.dpt).\n Can be either a local or remote file (http/ftp).\n Can be compressed with gz/bz2, decompression based on file name.\n name : string (optional)\n Name to give to the created data object. If None, filename is used.\n Default is None.\n parent : WrightTools.Collection (optional)\n Collection to place new data object within. Default is None.\n verbose : boolean (optional)\n Toggle talkback. Default is True.\n\n Returns\n -------\n data\n New data object.\n \"\"\"\n # parse filepath\n filestr = os.fspath(filepath)\n filepath = pathlib.Path(filepath)\n\n if not \".dpt\" in filepath.suffixes:\n wt_exceptions.WrongFileTypeWarning.warn(filepath, \".dpt\")\n # parse name\n if not name:\n name = filepath.name.split(\".\")[0]\n # create data\n kwargs = {\"name\": name, \"kind\": \"Tensor27\", \"source\": filestr}\n if parent is None:\n data = Data(**kwargs)\n else:\n data = parent.create_data(**kwargs)\n # array\n ds = np.DataSource(None)\n f = ds.open(filestr, \"rt\")\n arr = np.genfromtxt(f, skip_header=0).T\n f.close()\n # chew through all scans\n data.create_variable(name=\"energy\", values=arr[0], units=\"wn\")\n data.create_channel(name=\"signal\", values=arr[1])\n data.transform(\"energy\")\n # finish\n if verbose:\n print(\"data created at {0}\".format(data.fullpath))\n print(\" range: {0} to {1} (wn)\".format(data.energy[0], data.energy[-1]))\n print(\" size: {0}\".format(data.size))\n return data\n"
] | [
[
"numpy.expand_dims",
"numpy.kaiser",
"numpy.linspace",
"numpy.around",
"numpy.nan_to_num",
"numpy.dtype",
"numpy.all",
"numpy.nanmean",
"scipy.interpolate.griddata",
"scipy.ndimage.interpolation.zoom",
"numpy.trapz",
"numpy.full",
"numpy.ceil",
"scipy.interpolate.interp1d",
"numpy.diff",
"numpy.isnan",
"scipy.interpolate.LinearNDInterpolator",
"numpy.meshgrid",
"numpy.array",
"numpy.sum",
"numpy.isfinite",
"numpy.gradient",
"numpy.result_type",
"numpy.prod",
"numpy.ndindex",
"numpy.empty"
],
[
"numpy.DataSource",
"numpy.genfromtxt"
],
[
"numpy.DataSource",
"numpy.genfromtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ahmedsabie/tensorflow | [
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd",
"be084bd7a4dd241eb781fc704f57bcacc5c9b6dd"
] | [
"tensorflow/python/framework/func_graph.py",
"tensorflow/python/keras/keras_parameterized_test.py",
"tensorflow/python/keras/layers/preprocessing/benchmarks/hashing_benchmark.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"FuncGraph and related functionality.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections as py_collections\nimport itertools\nimport weakref\n\nimport numpy as np\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.eager.graph_only_ops import graph_placeholder\nfrom tensorflow.python.framework import auto_control_deps\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import custom_gradient\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import memory\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util import tf_decorator\n\nALLOWLIST_COLLECTIONS = [\n ops.GraphKeys.GLOBAL_VARIABLES,\n ops.GraphKeys.LOCAL_VARIABLES,\n ops.GraphKeys.TRAINABLE_VARIABLES,\n variable_scope._VARSTORE_KEY, # pylint: disable=protected-access\n variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access\n]\n\n\n_EAGER_CONST_THRESHOLD = 128\n\n\nclass UnknownArgument(object):\n \"\"\"Signifies an argument which is not currently handled.\"\"\"\n pass\n\n\ndef convert_structure_to_signature(structure, arg_names=None):\n \"\"\"Convert a potentially nested structure to a signature.\n\n Args:\n structure: Structure to convert, where top level collection is a list or a\n tuple.\n arg_names: Optional list of arguments that has equal number of elements as\n `structure` and is used for naming corresponding TensorSpecs.\n\n Returns:\n Identical structure that has TensorSpec objects instead of Tensors and\n UnknownArgument instead of any unsupported types.\n \"\"\"\n def encode_arg(arg, path):\n \"\"\"A representation for this argument, for converting into signatures.\"\"\"\n if isinstance(arg, ops.Tensor):\n user_specified_name = None\n try:\n user_specified_name = compat.as_str(\n arg.op.get_attr(\"_user_specified_name\"))\n except ValueError:\n pass\n\n if path and user_specified_name and user_specified_name != path[0]:\n # The user has explicitly named 
the argument differently than the name\n # of the function argument.\n name = user_specified_name\n else:\n name = \"/\".join(str(p) for p in path)\n return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)\n if isinstance(arg, composite_tensor.CompositeTensor):\n # TODO(b/133606651) Do we need to inject arg_name?\n return arg._type_spec # pylint: disable=protected-access\n if isinstance(arg, resource_variable_ops.BaseResourceVariable):\n name = \"/\".join(str(p) for p in path)\n return resource_variable_ops.VariableSpec(arg.shape, arg.dtype, name)\n if isinstance(arg, (\n int,\n float,\n bool,\n str,\n type(None),\n dtypes.DType,\n tensor_spec.TensorSpec,\n type_spec.TypeSpec,\n )):\n return arg\n return UnknownArgument()\n\n # We are using the flattened paths to name the TensorSpecs. We need an\n # explicit name for them downstream.\n flattened = nest.flatten_with_tuple_paths(structure)\n if arg_names:\n if len(arg_names) != len(structure):\n raise ValueError(\n \"Passed in arg_names don't match actual signature (%s).\" % arg_names)\n # Replace all top-level names with their actual arg_names. If a path before\n # was \"(2,'a',1)\", it will become \"(arg_names[2],'a',1)\".\n flattened = [\n ((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened\n ]\n\n mapped = [encode_arg(arg, path) for path, arg in flattened]\n return nest.pack_sequence_as(structure, mapped)\n\n\nclass FuncGraph(ops.Graph):\n \"\"\"Graph representing a function body.\n\n Attributes:\n name: The name of the function.\n inputs: Placeholder tensors representing the inputs to this function. The\n tensors are in this FuncGraph. This represents \"regular\" inputs as well as\n captured inputs (i.e. the values of self.captures), with the regular\n inputs coming first.\n outputs: Tensors that will be returned by this function. The tensors are in\n this FuncGraph.\n control_outputs: Operations that must be executed before the function\n represented by this graph can be said to have been executed.\n structured_input_signature: A tuple of (args, kwargs), which are both\n possibly-nested python objects that were received by this function. Note\n that these structures might contain Python `None`s.\n structured_outputs: A possibly-nested python object which will be returned\n by this function. The Tensors in this structure are the same as those of\n self.outputs. Note that this structure might contain Python `None`s.\n variables: Variables that should be watched during function execution.\n outer_graph: The graph this function is defined in. May be another FuncGraph\n or the global default Graph.\n captures: Maps external tensor -> internal tensor (i.e. input placeholder).\n The entries are in the order they were captured.\n control_captures: Set of external ops on which this graph has a control\n dependency.\n seed: The graph-level random seed.\n capture_by_value: If True, the func graph will capture Variables by value\n instead of reference.\n \"\"\"\n\n def __init__(self, name, collections=None, capture_by_value=None):\n \"\"\"Construct a new FuncGraph.\n\n The graph will inherit its graph key, collections, seed, and distribution\n strategy stack from the current context or graph.\n\n Args:\n name: the name of the function.\n collections: a dictionary of collections this FuncGraph should start\n with. 
If not specified (None), the FuncGraph will read (but not write\n to) the outer graph's collections that are not allowlisted, and both\n read and write to the outer graph's collections that are allowlisted.\n The current allowlisted collections are the global variables, the\n local variables, and the trainable variables.\n Defaults to None.\n capture_by_value: An optional boolean. If True, the func graph will\n capture Variables by value instead of reference. By default inherit\n from outer graphs, and failing that will default to False.\n \"\"\"\n super(FuncGraph, self).__init__()\n\n self.name = name\n self.inputs = []\n self.outputs = []\n self.control_outputs = []\n self.control_captures = set()\n self.structured_input_signature = None\n self.structured_outputs = None\n self._weak_variables = []\n self._watched_variables = object_identity.ObjectIdentityWeakSet()\n self.is_control_flow_graph = False\n\n outer_graph = ops.get_default_graph()\n self._weak_outer_graph = weakref.ref(outer_graph)\n while outer_graph.building_function:\n outer_graph = outer_graph.outer_graph\n # If self._weak_outer_graph is deleted, we revert to the outermost Graph\n # active when the FuncGraph was traced. This will not be a FuncGraph.\n self._fallback_outer_graph = outer_graph\n self._captures = py_collections.OrderedDict()\n # If not None, records the names of output args of this function. Used to\n # preserve the output names in the signature of a serialized+deserialized\n # function. Private at the moment mostly because it's often out of date.\n self._output_names = None\n # Maps arbitrary key -> (closure, nest of placeholders), where at function\n # call time the value of closure() will be used to feed the nest of\n # placeholders.\n self._deferred_captures = py_collections.OrderedDict()\n # Inherit capture-by-value from outer graph.\n if capture_by_value is not None:\n self.capture_by_value = capture_by_value\n elif self.outer_graph is not None and isinstance(\n self.outer_graph, FuncGraph):\n self.capture_by_value = self.outer_graph.capture_by_value\n else:\n self.capture_by_value = False\n\n self._building_function = True\n # Map from resource tensor name to last op (in program order) which uses\n # this tensor. Used to enforce that execution order matches program order\n # for resource tensors.\n self._last_op_using_resource_tensor = {}\n\n graph = self.outer_graph\n\n if context.executing_eagerly():\n self.seed = context.global_seed()\n # [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of\n # any None op_seed for random_op in the function, in which case we end up\n # using function seed, which could be unintended behavior for the op.\n self._seed_used = False\n else:\n self.seed = graph.seed\n self._seed_used = False\n # TODO(allenl): Figure out if we can remove colocation stack\n # specialization (currently used in cond_v2), here and in the cache key.\n self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access\n\n if collections is None:\n for collection_name in graph.get_all_collection_keys():\n if collection_name not in ALLOWLIST_COLLECTIONS:\n self._collections[collection_name] = graph.get_collection(\n collection_name)\n for collection_name in ALLOWLIST_COLLECTIONS:\n self._collections[collection_name] = graph.get_collection_ref(\n collection_name)\n else:\n self._collections = collections\n\n # Keep track of whether this FuncGraph is exportable to SavedModel. 
Use\n # `graph.mark_as_unsaveable(reason)` to mark this FuncGraph and any\n # dependent functions as unsaveable.\n self._saveable = True\n self._saving_errors = set()\n\n # Keep track of callbacks to run when this graph exits default scope\n self._scope_exit_callbacks = None\n\n def __str__(self):\n return \"FuncGraph(name=%s, id=%s)\" % (self.name, id(self))\n\n def watch_variable(self, v):\n \"\"\"Marks the variable v as accessed while building this graph.\"\"\"\n while self is not None and isinstance(self, FuncGraph):\n self._watched_variables.add(v)\n self = self.outer_graph\n\n def capture_call_time_value(self, closure, spec, key=None):\n \"\"\"Creates a placeholder which at call time has the value closure().\n\n Useful, for example, to respect TensorFlow context managers, which are often\n dynamically scoped.\n\n Args:\n closure: function which takes no arguments, to be evaluated at function\n call time, returning a nest of tensors compatible with `spec`.\n spec: nest of TypeSpec for the value to capture.\n key: optional. If not None, multiple calls to lazy_capture with the same\n key in the same graph will return the same placeholder, and the\n first closure will be used at function call time.\n\n Returns:\n Nest of placeholders which, at function call time, will be fed with the\n result of calling closure().\n\n Raises:\n ValueError: at function call time, if the return value of closure() is\n not compatible with `spec`.\n \"\"\"\n if key is None:\n key = object()\n if key not in self._deferred_captures:\n\n def convert_to_placeholder(s):\n if not isinstance(s, tensor_spec.DenseSpec):\n raise TypeError(\n \"Expected a nest of `TypeSpec` objects, found %s of type %s.\" %\n (s, type(s)))\n return array_ops.placeholder(dtype=s.dtype, shape=s.shape)\n\n placeholder = nest.map_structure(\n convert_to_placeholder, spec, expand_composites=True)\n\n def wrapped_closure():\n ret_nest = closure()\n nest.assert_same_structure(spec, ret_nest, expand_composites=True)\n # This uses the tensor dtype defined in `spec` when converting values\n # in `ret_nest` to tensors.\n # pylint: disable=protected-access\n y = nest.map_structure(lambda s, r: s._to_components(r), spec, ret_nest,\n expand_composites=False)\n # pylint: enable=protected-access\n return nest.flatten(y, expand_composites=True)\n\n self._deferred_captures[key] = (wrapped_closure, placeholder)\n return self._deferred_captures[key][1]\n\n def control_dependencies(self, control_inputs):\n \"\"\"Handles control dependencies.\n\n FuncGraph wraps Graph's control_dependencies logic by first filtering out\n any external tensors / operations and storing them in the graph's\n control_captures member. Any consumers of this function graph must then\n decide how to handle the control captures.\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which\n must be executed or computed before running the operations\n defined in the context. 
Can also be `None` to clear the control\n dependencies.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n\n Raises:\n TypeError: If `control_inputs` is not a list of `Operation` or\n `Tensor` objects.\n \"\"\"\n if control_inputs is None:\n return super(FuncGraph, self).control_dependencies(control_inputs)\n\n filtered_control_inputs = []\n for c in control_inputs:\n # Check for _UnreadVariable\n if (isinstance(c, ops.IndexedSlices) or\n (hasattr(c, \"_handle\") and hasattr(c, \"op\"))):\n c = c.op\n graph_element = ops._as_graph_element(c) # pylint: disable=protected-access\n if graph_element is None:\n graph_element = c\n if graph_element is not None and getattr(\n graph_element, \"graph\", None) is not self:\n self.control_captures.add(graph_element)\n else:\n filtered_control_inputs.append(graph_element)\n return super(FuncGraph, self).control_dependencies(filtered_control_inputs)\n\n def as_default(self):\n outer_cm = super(FuncGraph, self).as_default()\n\n @tf_contextlib.contextmanager\n def inner_cm():\n \"\"\"Context manager for copying distribute.Strategy scope information.\"\"\"\n # pylint: disable=protected-access\n # TODO(b/112906995, nareshmodi): distribution strategy depends on\n # inheriting this stack from the default graph even in eager mode. Maybe\n # it should be part of the eager context? This would also allow us to\n # remove a get_default_graph() call from the function cache lookup.\n graph = ops.get_default_graph()\n old_strategy_stack = self._distribution_strategy_stack\n self._distribution_strategy_stack = list(\n graph._distribution_strategy_stack)\n\n # We ignore device placements from any outer scopes while tracing the\n # function when possible, to avoid hard-coding them in the function\n # graph. \"Default\" placements come from the PartitionedCallOp's placement,\n # so that the same trace of the Python function may be placed on several\n # different devices and saved functions may be placed on new devices when\n # restored.\n # However, we need to preserve the outer device stack in the following\n # cases in non eager context:\n # 1. device stack is callable\n # 2. 
When using distribution strategy with legacy graph mode.\n old_device_stack = self._device_function_stack\n if (not context.executing_eagerly() and\n (device_stack_has_callable(graph._device_function_stack) or\n (self._distribution_strategy_stack and\n not ops.executing_eagerly_outside_functions()))):\n # Hard-code devices from device functions in the function body\n self._device_function_stack = graph._device_function_stack.copy()\n\n old_creator_stack = self._variable_creator_stack\n self._variable_creator_stack = graph._variable_creator_stack\n # Inherit the graph key, since this is used for matching variables in\n # optimizers.\n old_graph_key = self._graph_key\n self._graph_key = graph._graph_key\n # pylint: enable=protected-access\n\n old_scope_exit_callbacks = self._scope_exit_callbacks\n self._scope_exit_callbacks = []\n\n with outer_cm as g:\n try:\n yield g\n finally:\n try:\n for fn in self._scope_exit_callbacks:\n fn()\n finally:\n self._scope_exit_callbacks = old_scope_exit_callbacks\n self._distribution_strategy_stack = old_strategy_stack\n self._device_function_stack = old_device_stack\n self._variable_creator_stack = old_creator_stack\n self._graph_key = old_graph_key\n return inner_cm()\n\n @property\n def outer_graph(self):\n \"\"\"The Graph this FuncGraph is nested in.\n\n Functions may capture Tensors from graphs they are nested in (transitive).\n\n Returns:\n A Graph object. Initially set to the current default graph when the\n FuncGraph was created. If the previous `outer_graph` was deleted because\n the function that owns it was deleted, `outer_graph` is reset to the\n outermost default graph active when the FuncGraph was created. This\n FuncGraph won't have captured anything from the new `outer_graph` (and\n likely not from the previous setting, since that would have created a\n strong reference), but it is returned so that FuncGraphs always have a\n parent.\n \"\"\"\n current = self._weak_outer_graph()\n if current is None:\n return self._fallback_outer_graph\n return current\n\n @outer_graph.setter\n def outer_graph(self, new_outer_graph):\n \"\"\"Sets `outer_graph` to `new_outer_graph`.\"\"\"\n self._weak_outer_graph = weakref.ref(new_outer_graph)\n\n @property\n def output_types(self):\n return [t.dtype for t in self.outputs]\n\n @property\n def output_shapes(self):\n return [t.shape for t in self.outputs]\n\n @property\n def trainable_variables(self):\n \"\"\"A sequence of trainable variables accessed by this FuncGraph.\n\n Note that functions keep only weak references to variables. Calling the\n function after a variable it accesses has been deleted is an error.\n\n Returns:\n Sequence of trainable variables for this func graph.\n \"\"\"\n return tuple(v for v in self.variables if v.trainable)\n\n @property\n def variables(self):\n \"\"\"A sequence of variables accessed by this FuncGraph.\n\n Note that functions keep only weak references to variables. Calling the\n function after a variable it accesses has been deleted is an error.\n\n Returns:\n Sequence of variables for this func graph.\n \"\"\"\n def deref(weak_v):\n v = weak_v()\n if v is None:\n raise AssertionError(\n \"Called a function referencing variables which have been deleted. \"\n \"This likely means that function-local variables were created and \"\n \"not referenced elsewhere in the program. 
This is generally a \"\n \"mistake; consider storing variables in an object attribute on \"\n \"first call.\")\n return v\n\n return tuple(deref(v) for v in self._weak_variables)\n\n @variables.setter\n def variables(self, var_list):\n self._weak_variables = [weakref.ref(v) for v in var_list]\n\n def _capture_by_value(\n self,\n op_type,\n inputs,\n dtypes, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_device=True):\n # When capturing by value, do the read outside\n reverse_captures = dict((id(v), k) for k, v in self.captures)\n uncaptured_inputs = [reverse_captures.get(id(t), t) for t in inputs]\n with ops.init_scope():\n if context.executing_eagerly():\n attr_list = (\"dtype\", int(attrs[\"dtype\"].type))\n value, = execute.execute(\n compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,\n context.context())\n else:\n op = ops.get_default_graph()._create_op_internal( # pylint: disable=protected-access\n op_type,\n uncaptured_inputs,\n dtypes,\n input_types,\n name,\n attrs,\n op_def,\n compute_device)\n value = op.outputs[0]\n captured_value = self.capture(value)\n return captured_value.op\n\n def _create_op_internal(\n self,\n op_type,\n inputs,\n dtypes=None, # pylint: disable=redefined-outer-name\n input_types=None,\n name=None,\n attrs=None,\n op_def=None,\n compute_device=True):\n \"\"\"Like Graph.create_op, except handles external input tensors.\n\n This overload adds functionality to create_op to \"capture\" any external\n input tensors, i.e. tensors from the eager context or outer function graphs\n if this is a nested function. See `capture` for more information.\n\n Args:\n op_type: The `Operation` type to create. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n inputs: A list of `Tensor` objects that will be inputs to the `Operation`.\n dtypes: (Optional) A list of `DType` objects that will be the types of the\n tensors that the operation produces.\n input_types: (Optional.) A list of `DType`s that will be the types of\n the tensors that the operation consumes. By default, uses the base\n `DType` of each input in `inputs`. Operations that expect\n reference-typed inputs must specify `input_types` explicitly.\n name: (Optional.) A string name for the operation. If not specified, a\n name is generated based on `op_type`.\n attrs: (Optional.) A dictionary where the key is the attribute name (a\n string) and the value is the respective `attr` attribute of the\n `NodeDef` proto that will represent the operation (an `AttrValue`\n proto).\n op_def: (Optional.) The `OpDef` proto that describes the `op_type` that\n the operation will have.\n compute_device: (Optional.) If True, device functions will be executed\n to compute the device property of the Operation.\n\n Returns:\n An `Operation` object.\n \"\"\"\n if self.capture_by_value and op_type in [\"ReadVariableOp\",\n \"ResourceGather\"]:\n return self._capture_by_value(op_type, inputs, dtypes, input_types, name,\n attrs, op_def, compute_device)\n\n # This capturing logic interacts poorly with control flow contexts which\n # want to replace inputs of ops far too late in the process. This can lead\n # the context to get confused and try to create an Enter for an Enter. 
We\n # can detect this here and skip the additional Enter which can confuse loop\n # validation logic.\n if op_type == \"Enter\" and inputs[0].op.type == \"Enter\":\n if inputs[0].op.get_attr(\"frame_name\") == attrs[\"frame_name\"].s:\n return inputs[0].op\n # Calling AddValue on the control flow contexts to force creation of the\n # backward accumulators in the original graph before we create placeholders\n # to capture the inputs.\n ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access\n # Use a different list to avoid modifying the original inputs list.\n captured_inputs = []\n for inp in inputs:\n # TPU Estimator defines a control flow context with no AddValue method.\n if ctxt is not None and hasattr(ctxt, \"AddValue\"):\n inp = ctxt.AddValue(inp)\n inp = self.capture(inp)\n captured_inputs.append(inp)\n return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access\n op_type, captured_inputs, dtypes, input_types, name, attrs, op_def,\n compute_device)\n\n def capture(self, tensor, name=None, shape=None):\n \"\"\"Captures `tensor` if it's external to this graph.\n\n If `tensor` is from a different graph, returns a placeholder for it.\n `tensor` and the placeholder will appear in self.captures, and the\n placeholder will appear in self.inputs. Multiple calls to this method with\n the same `tensor` argument will return the same placeholder. If `tensor` is\n from this graph, returns `tensor`.\n\n Args:\n tensor: Tensor. May be from this FuncGraph or a different graph.\n name: Optional name if a placeholder is created.\n shape: Optional shape if a placeholder is created.\n\n Returns:\n Tensor from this FuncGraph.\n\n Raises:\n InaccessibleTensorError: if any tensors are accessed in a manner that\n bypasses the mechanisms required for the data dependencies to be correctly\n wired.\n \"\"\"\n if isinstance(tensor, ops.EagerTensor):\n if name is None:\n name = str(ops.uid())\n\n # Small EagerTensors are captured with Const ops\n if (tensor.dtype in dtypes.TF_VALUE_DTYPES and\n np.prod(tensor.shape) <= _EAGER_CONST_THRESHOLD):\n return self.capture_eager_tensor(tensor, name)\n\n # Large EagerTensors and resources are captured with Placeholder ops\n return self._capture_helper(tensor, name, shape)\n if tensor.graph is not self:\n if name is None:\n name = tensor.op.name\n inner_graph = tensor.graph\n while inner_graph is not None and isinstance(inner_graph, FuncGraph):\n if inner_graph is self:\n raise errors.InaccessibleTensorError(\n \"The tensor '%s' cannot be accessed here: it is defined\"\n \" in another function or code block. Use return values,\"\n \" explicit Python locals or TensorFlow collections to access\"\n \" it. 
Defined in: %s; accessed from: %s.\\n\"\n % (tensor, tensor.graph, self))\n inner_graph = inner_graph.outer_graph\n return self._capture_helper(tensor, name)\n return tensor\n\n def _capture_helper(self, tensor, name, shape=None):\n capture = self._captures.get(id(tensor))\n if capture is None:\n placeholder = _create_substitute_placeholder(\n tensor, name=name, dtype=tensor.dtype, shape=shape)\n # Record the composite device as an attribute to the placeholder.\n # This attribute would be propogated into the arg_attr of the FunctionDef.\n # Currently, a packed eager tensor is always placed on a CompositeDevice.\n if isinstance(tensor, ops.EagerTensor) and tensor.is_packed:\n placeholder.op._set_attr( # pylint: disable=protected-access\n \"_composite_device\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(tensor.device)))\n self.add_capture(tensor, placeholder)\n else:\n placeholder = capture[1]\n tape.record_operation(\"captured_value\", [placeholder], [tensor],\n backward_function=lambda x: [x],\n forward_function=lambda x: [x])\n return placeholder\n\n @property\n def captures(self):\n \"\"\"Order list of tuples containing external and internal captures.\"\"\"\n return self._captures.values()\n\n def add_capture(self, tensor, placeholder):\n \"\"\"Capture a specific tensor and utilize the provided placeholder.\n\n Args:\n tensor: Tensor to captures.\n placeholder: Provided placeholder for the tensor.\n \"\"\"\n self._captures[id(tensor)] = (tensor, placeholder)\n self.inputs.append(placeholder)\n\n def replace_capture(self, tensor, placeholder):\n \"\"\"Replace already existing capture.\"\"\"\n self._captures[id(tensor)] = (tensor, placeholder)\n\n def reset_captures(self, capture_list):\n \"\"\"Set the captures with the provided list of captures & placeholder.\"\"\"\n self._captures = py_collections.OrderedDict()\n for tensor, placeholder in capture_list:\n self._captures[id(tensor)] = (tensor, placeholder)\n\n def pop_capture(self, tensor):\n \"\"\"Remove the capture and return the generated placeholder.\"\"\"\n capture = self._captures.pop(id(tensor), None)\n if capture is None:\n return None\n\n return capture[1]\n\n def clear_captures(self):\n # TODO(b/115366440): Delete this method when a custom OrderedDict is added.\n # Clearing captures using clear() leaves some cycles around.\n while self._captures:\n self._captures.popitem()\n memory.dismantle_ordered_dict(self._captures)\n while self._deferred_captures:\n self._deferred_captures.popitem()\n memory.dismantle_ordered_dict(self._deferred_captures)\n\n def capture_distributed_variable(self, variable, placeholder):\n \"\"\"Add given distributed variable to captures with given placeholder.\"\"\"\n self._captures[id(variable)] = (variable, placeholder)\n tape.record_operation(\"captured_value\", [placeholder], [variable],\n backward_function=lambda x: [x],\n forward_function=lambda x: [x])\n\n def capture_eager_tensor(self, tensor, name):\n capture = self._captures.get(id(tensor))\n if capture is None:\n # We clear all control dependencies and place the Const op on the same\n # device as the source tensor. The device placement may be relaxed at\n # a later date.\n with ops.control_dependencies(None), self.device(tensor.device):\n constant_value = tensor_util.constant_value(tensor)\n if constant_value is None:\n # Some eager tensors, e.g. parallel tensors, are not convertible to a\n # single constant. 
We'll use a placeholder for this case.\n return self._capture_helper(tensor, name)\n graph_const = constant_op.constant(constant_value, dtype=tensor.dtype,\n shape=tensor.shape, name=name)\n self.add_capture(tensor, graph_const)\n else:\n graph_const = capture[1]\n tape.record_operation(\"captured_value\", [graph_const], [tensor],\n backward_function=lambda x: [x],\n forward_function=lambda x: [x])\n return graph_const\n\n def captured(self, tensor):\n \"\"\"Check if the specified tensor has been captured.\"\"\"\n return id(tensor) in self._captures\n\n @property\n def external_captures(self):\n \"\"\"External tensors captured by this function.\"\"\"\n return [c[0] for c in self._captures.values()]\n\n @property\n def internal_captures(self):\n \"\"\"Placeholders in this function corresponding captured tensors.\"\"\"\n return [c[1] for c in self._captures.values()]\n\n @property\n def deferred_external_captures(self):\n \"\"\"Ordered nest of tensors whose placeholders will be fed at call time.\"\"\"\n return [c[0] for c in self._deferred_captures.values()]\n\n @property\n def deferred_internal_captures(self):\n \"\"\"List of nest of placeholders which at call time will be fed.\"\"\"\n return [c[1] for c in self._deferred_captures.values()]\n\n @property\n def variable_captures(self):\n \"\"\"Map of python object ids of variables to variables which are captured.\"\"\"\n return {\n id(self._captures[id(v)][1]): v\n for v in self.variables\n if id(v) in self._captures\n }\n\n def mark_as_unsaveable(self, error_message):\n \"\"\"Marks this FuncGraph as unsaveable.\n\n Any attempts to export this FuncGraph will raise an error with the specified\n message.\n\n Args:\n error_message: List or string containing the error message to be raised\n when saving this FuncGraph to SavedModel.\n \"\"\"\n self._saveable = False\n if isinstance(error_message, str):\n error_message = [error_message]\n self._saving_errors.update(error_message)\n\n @property\n def saveable(self):\n \"\"\"Returns whether this FuncGraph is saveable.\"\"\"\n return self._saveable\n\n @property\n def saving_errors(self):\n \"\"\"Returns set of errors preventing this FuncGraph from being saved.\"\"\"\n return self._saving_errors\n\n def _add_scope_exit_callback(self, fn):\n \"\"\"Add a function to call when this graph exits the default scope.\"\"\"\n if not callable(fn):\n raise TypeError(\"fn is not callable: {}\".format(fn))\n if self._scope_exit_callbacks is None:\n raise RuntimeError(\n \"Attempting to add a scope exit callback, but the default graph is \"\n \"not the context scope graph. Did you forget to call \"\n \"'with graph.as_default(): ...'?\")\n self._scope_exit_callbacks.append(fn)\n\n\ndef func_graph_from_py_func(name,\n python_func,\n args,\n kwargs,\n signature=None,\n func_graph=None,\n autograph=False,\n autograph_options=None,\n add_control_dependencies=True,\n arg_names=None,\n op_return_value=None,\n collections=None,\n capture_by_value=None,\n override_flat_arg_shapes=None):\n \"\"\"Returns a `FuncGraph` generated from `python_func`.\n\n Args:\n name: an identifier for the function.\n python_func: the Python function to trace.\n args: the positional args with which the Python function should be called;\n ignored if a signature is provided.\n kwargs: the keyword args with which the Python function should be called;\n ignored if a signature is provided.\n signature: a possibly nested sequence of `TensorSpecs` specifying the shapes\n and dtypes of the arguments. 
When a signature is provided, `args` and\n `kwargs` are ignored, and `python_func` is traced with Tensors conforming\n to `signature`. If `None`, the shapes and dtypes are inferred from the\n inputs.\n func_graph: Optional. An instance of FuncGraph. If provided, we will use\n this graph else a new one is built and returned.\n autograph: whether to use autograph to compile `python_func`.\n See https://www.tensorflow.org/guide/autograph for more information.\n autograph_options: additional knobs to control when `autograph=True`.\n See https://www.tensorflow.org/guide/autograph for more information.\n add_control_dependencies: If True, automatically adds control dependencies\n to ensure program order matches execution order and stateful ops always\n execute.\n arg_names: Optional list of argument names, used to give input placeholders\n recognizable names.\n op_return_value: Optional. A Tensor. If set and `python_func` returns\n Operations, those return values will be replaced with this value. If not\n set, returning an Operation triggers an error.\n collections: a dictionary of collections this FuncGraph should start\n with. If not specified (None), the FuncGraph will read (but not write to)\n the outer graph's collections that are not allowlisted, and both\n read and write to the outer graph's collections that are allowlisted.\n The current allowlisted collections are the global variables, the\n local variables, and the trainable variables.\n Defaults to None.\n capture_by_value: An optional boolean. If True, the func graph will capture\n Variables by value instead of reference. By default inherit from outer\n graphs, and failing that will default to False.\n override_flat_arg_shapes: An optional list of instances that are either\n `None` or `TensorShape`. The length must match that of\n `nest.flatten((args, kwargs), expand_composites=True)`. 
The entries\n containing value `None` must match entries in flattened arguments\n containing non-tensors, while entries containing a `TensorShape` must\n match entries in the flattened arguments containing tensors.\n\n Returns:\n A FuncGraph.\n\n Raises:\n TypeError: If any of `python_func`'s return values is neither `None` nor a\n `Tensor`.\n ValueError: If both `signature` and `override_flat_arg_shapes` are\n passed in.\n \"\"\"\n if op_return_value is not None:\n assert isinstance(op_return_value, ops.Tensor), op_return_value\n if func_graph is None:\n func_graph = FuncGraph(name, collections=collections,\n capture_by_value=capture_by_value)\n assert isinstance(func_graph, FuncGraph)\n if add_control_dependencies:\n deps_control_manager = auto_control_deps.AutomaticControlDependencies()\n else:\n deps_control_manager = ops.NullContextmanager()\n\n with func_graph.as_default(), deps_control_manager as deps_ctx:\n current_scope = variable_scope.get_variable_scope()\n default_use_recource = current_scope.use_resource\n current_scope.set_use_resource(True)\n\n if signature is not None and override_flat_arg_shapes is not None:\n raise ValueError(\n \"Passed both signature and override_flat_arg_shapes: %s and %s.\"\n % (signature, override_flat_arg_shapes))\n\n if signature is not None:\n args = signature\n kwargs = {}\n\n # Creates and names placeholders for all arguments.\n if override_flat_arg_shapes is not None:\n flat_args = nest.flatten(args, expand_composites=True)\n arg_shapes = override_flat_arg_shapes[:len(flat_args)]\n kwarg_shapes = override_flat_arg_shapes[len(flat_args):]\n else:\n arg_shapes = None\n kwarg_shapes = None\n func_args = _get_defun_inputs_from_args(\n args, arg_names, flat_shapes=arg_shapes)\n func_kwargs = _get_defun_inputs_from_kwargs(\n kwargs, flat_shapes=kwarg_shapes)\n\n # Convert all Tensors into TensorSpecs before saving the structured inputs.\n # If storing pure concrete functions that are not called through polymorphic\n # functions, we don't have access to FunctionSpec, so we need to call the\n # TensorSpecs by their `arg_names` for later binding.\n func_graph.structured_input_signature = (\n convert_structure_to_signature(func_args, arg_names),\n convert_structure_to_signature(func_kwargs))\n\n flat_func_args = nest.flatten(func_args, expand_composites=True)\n flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)\n # Temporarily set inputs to allow graph building code to inspect\n # them. Reassigned below.\n func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs\n if isinstance(arg, ops.Tensor)]\n\n # Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.\n # Variables to help check whether mutation happens in calling the function\n # Copy the recursive list, tuple and map structure, but not base objects\n func_args_before = nest.pack_sequence_as(func_args, flat_func_args,\n expand_composites=True)\n func_kwargs_before = nest.pack_sequence_as(\n func_kwargs, flat_func_kwargs, expand_composites=True)\n\n def convert(x):\n \"\"\"Converts a function output to a Tensor.\"\"\"\n if x is None:\n return None\n if op_return_value is not None and isinstance(x, ops.Operation):\n # TODO(b/79881896): we currently can't capture external control deps, so\n # this won't work if x needs to be captured (i.e. 
if python_func returns\n # captured Operations).\n with ops.control_dependencies([x]):\n x = array_ops.identity(op_return_value)\n elif not isinstance(x, tensor_array_ops.TensorArray):\n try:\n x = ops.convert_to_tensor_or_composite(x)\n except (ValueError, TypeError):\n raise TypeError(\n \"To be compatible with tf.eager.defun, Python functions \"\n \"must return zero or more Tensors; in compilation of %s, found \"\n \"return value of type %s, which is not a Tensor.\" %\n (str(python_func), type(x)))\n if add_control_dependencies:\n x = deps_ctx.mark_as_return(x)\n return x\n\n try:\n if autograph:\n from tensorflow.python import autograph # pylint: disable=g-import-not-at-top\n _, original_func = tf_decorator.unwrap(python_func)\n\n def wrapper(*args, **kwargs):\n \"\"\"Calls a converted version of original_func.\"\"\"\n # TODO(mdan): Push this block higher in tf.function's call stack.\n try:\n return autograph.converted_call(\n original_func,\n args,\n kwargs,\n options=autograph.ConversionOptions(\n recursive=True,\n optional_features=autograph_options,\n user_requested=True,\n ))\n except Exception as e: # pylint:disable=broad-except\n if hasattr(e, \"ag_error_metadata\"):\n raise e.ag_error_metadata.to_exception(e)\n else:\n raise\n\n # Wrapping around a decorator allows checks like tf_inspect.getargspec\n # to be accurate.\n converted_func = tf_decorator.make_decorator(original_func, wrapper)\n python_func = tf_decorator.rewrap(python_func, original_func,\n converted_func)\n\n else:\n _, original_func = tf_decorator.unwrap(python_func)\n\n func_outputs = python_func(*func_args, **func_kwargs)\n\n # invariant: `func_outputs` contains only Tensors, CompositeTensors,\n # TensorArrays and `None`s.\n func_outputs = nest.map_structure(convert, func_outputs,\n expand_composites=True)\n\n check_mutation(func_args_before, func_args, original_func)\n check_mutation(func_kwargs_before, func_kwargs, original_func)\n finally:\n current_scope.set_use_resource(default_use_recource)\n\n # Variables in `func_args`, `func_kwargs` should be explicit inputs\n # to the function, not captured inputs.\n graph_variables = list(func_graph._watched_variables) # pylint: disable=protected-access\n arg_variables = object_identity.ObjectIdentitySet()\n inputs = []\n for arg in (nest.flatten(func_args, expand_composites=True) +\n nest.flatten(func_kwargs, expand_composites=True)):\n if isinstance(arg, resource_variable_ops.BaseResourceVariable):\n # Even if an argument variable was not used in the function, we've\n # already manually captured the resource Tensor when creating argument\n # placeholders.\n resource_placeholder = func_graph.pop_capture(arg.handle)\n if resource_placeholder is None:\n continue\n arg_variables.add(arg)\n inputs.append(resource_placeholder)\n elif isinstance(arg, ops.Tensor):\n inputs.append(arg)\n variables = [v for v in graph_variables if v not in arg_variables]\n func_graph.inputs = (\n inputs + func_graph.internal_captures + nest.flatten(\n func_graph.deferred_internal_captures, expand_composites=True))\n func_graph.structured_outputs = func_outputs\n # Returning a closed-over tensor does not trigger convert_to_tensor.\n func_graph.outputs.extend(\n func_graph.capture(x)\n for x in flatten(func_graph.structured_outputs)\n if x is not None)\n\n func_graph.variables = variables\n\n if add_control_dependencies:\n func_graph.control_outputs.extend(deps_control_manager.ops_which_must_run)\n func_graph.collective_manager_ids_used = (\n 
deps_control_manager.collective_manager_ids_used)\n\n return func_graph\n\n\ndef maybe_captured(tensor):\n \"\"\"If tensor is a captured value placeholder, returns the original captured value.\n\n Args:\n tensor: Tensor.\n\n Returns:\n A tensor, potentially from a different Graph/FuncGraph.\n \"\"\"\n if (not isinstance(tensor, ops.EagerTensor) and\n tensor.op.graph.building_function and tensor.op.type == \"Placeholder\"):\n for input_t, placeholder_t in tensor.op.graph.captures:\n if tensor == placeholder_t:\n return maybe_captured(input_t)\n # pylint: enable=protected-access\n return tensor\n\n\ndef device_stack_has_callable(device_stack):\n \"\"\"Checks whether a device stack contains a callable.\"\"\"\n return any(callable(spec._device_name_or_function) # pylint: disable=protected-access\n for spec in device_stack.peek_objs())\n\n\ndef check_mutation(n1, n2, func):\n \"\"\"Check if two lists of arguments are exactly the same.\"\"\"\n func_name = getattr(func, \"__name__\", func)\n\n errmsg = (\"{}() should not modify its Python input arguments.\"\n \" Check if it modifies any lists or dicts passed as\"\n \" arguments. Modifying a copy is allowed.\".format(func_name))\n try:\n # TODO(mdan): Compare more robustly so that argument names can be reported.\n nest.assert_same_structure(n1, n2, expand_composites=True)\n except ValueError:\n raise ValueError(errmsg)\n\n for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),\n nest.flatten(n2, expand_composites=True)):\n if arg1 is not arg2:\n raise ValueError(errmsg)\n\n\n# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.\ndef flatten(sequence):\n \"\"\"Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.\n\n Args:\n sequence: A nested structure of Tensors, CompositeTensors, and\n TensorArrays.\n\n Returns:\n A list of tensors.\n \"\"\"\n flat_sequence = nest.flatten(sequence, expand_composites=True)\n return [\n item.flow if isinstance(item, tensor_array_ops.TensorArray) else item\n for item in flat_sequence]\n\n\n# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.\ndef pack_sequence_as(structure, flat_sequence):\n \"\"\"Like `nest.pack_sequence_as` but also builds TensorArrays from flows.\n\n Args:\n structure: The structure to pack into. May contain Tensors,\n CompositeTensors, or TensorArrays.\n flat_sequence: An iterable containing tensors.\n\n Returns:\n A nested structure.\n\n Raises:\n ValueError: If `structure` and `flat_sequence` are not compatible.\n \"\"\"\n flat_sequence = list(flat_sequence)\n flattened_structure = nest.flatten(structure, expand_composites=True)\n if len(flattened_structure) != len(flat_sequence):\n raise ValueError(\"Mismatch in element count\")\n for i in range(len(flat_sequence)):\n if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):\n flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(\n old_ta=flattened_structure[i], flow=flat_sequence[i])\n return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)\n\n\ndef _create_substitute_placeholder(value, name=None, dtype=None, shape=None):\n \"\"\"Creates a placeholder for `value` and propagates shape info to it.\"\"\"\n # Note: setting ops.control_dependencies(None) ensures we always put\n # capturing placeholders outside of any control flow context.\n if shape is None:\n shape = value.shape\n with ops.control_dependencies(None):\n placeholder = graph_placeholder(\n dtype=dtype or value.dtype, shape=shape, name=name)\n custom_gradient.copy_handle_data(value, placeholder)\n return placeholder\n\n\ndef _get_defun_inputs_from_args(args, names, flat_shapes=None):\n \"\"\"Maps Python function positional args to graph-construction inputs.\"\"\"\n return _get_defun_inputs(\n args, names, structure=args, flat_shapes=flat_shapes)\n\n\ndef _get_composite_tensor_spec(x):\n \"\"\"Returns the TypeSpec for x if it's a composite tensor, or x otherwise.\"\"\"\n return (x._type_spec # pylint: disable=protected-access\n if isinstance(x, composite_tensor.CompositeTensor) else x)\n\n\ndef _get_defun_inputs(args, names, structure, flat_shapes=None):\n \"\"\"Maps python function args to graph-construction inputs.\n\n Args:\n args: A flat list of user-specified arguments.\n names: A list of strings with user-specified argument names, same length as\n `args`. May be `None`, in which case a generic name is used.\n structure: The original argument list or dictionary.\n flat_shapes: A flat list of values that are either `None` or\n instances of `TensorShape`. If provided, then length must match\n that of `nest.flatten(args, expand_composites=True)`; and locations where\n `args` are instances of `Tensor` must have a corresponding `TensorShape`\n in `flat_shapes`. May be `None`, in which case exact shapes are read\n directly from the args.\n\n Returns:\n Placeholders with the same structure as `structure`.\n\n Raises:\n RuntimeError: if `flat_shapes` is provided, but\n `len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.\n RuntimeError: if a shape from `flat_shapes` is not None\n for an argument that is not a `Tensor`, `TensorSpec`,\n or `ResourceVariable`.\n \"\"\"\n func_graph = ops.get_default_graph()\n function_inputs = []\n if names is None:\n names = [None] * len(args)\n if flat_shapes is None:\n shapes_iter = itertools.repeat(None)\n else:\n len_flat_args = len(nest.flatten(args, expand_composites=True))\n if len_flat_args != len(flat_shapes):\n raise RuntimeError(\n \"Length of fully flat shapes (%d) must match that of \"\n \"flatten(args) (%d). args: %s, flat_shapes: %s\"\n % (len(flat_shapes),\n len_flat_args,\n args,\n flat_shapes))\n shapes_iter = iter(flat_shapes)\n for arg_value, name in zip(args, names):\n\n # Replace any composite tensors with their TypeSpecs. This is important\n # for ensuring that shape information that's not preserved by the TypeSpec\n # (such as the number of values in a SparseTensor) gets properly masked.\n arg_value = nest.map_structure(_get_composite_tensor_spec, arg_value)\n\n flattened = nest.flatten(arg_value, expand_composites=True)\n\n for arg in flattened:\n # We have a shape entry for each arg, regardless of whether it's a real\n # Tensor or not. For non-tensor entries it should be None.\n shape = next(shapes_iter)\n if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):\n arg_is_spec = isinstance(arg, tensor_spec.TensorSpec)\n if arg_is_spec and arg.name:\n requested_name = arg.name\n else:\n requested_name = name\n placeholder_shape = shape if shape is not None else arg.shape\n try:\n placeholder = graph_placeholder(\n arg.dtype, placeholder_shape,\n name=requested_name)\n except ValueError:\n # Sometimes parameter names are not valid op names, so fall back to\n # unnamed placeholders.\n placeholder = graph_placeholder(arg.dtype, placeholder_shape)\n if not arg_is_spec:\n custom_gradient.copy_handle_data(arg, placeholder)\n if name is not None:\n # Record the requested/user-specified name in case it's different than\n # the uniquified name, for validation when exporting signatures.\n placeholder.op._set_attr( # pylint: disable=protected-access\n \"_user_specified_name\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))\n function_inputs.append(placeholder)\n elif isinstance(arg, (resource_variable_ops.BaseResourceVariable,\n resource_variable_ops.VariableSpec)):\n if isinstance(arg, resource_variable_ops.VariableSpec):\n name = arg.name or name\n with func_graph.outer_graph.as_default():\n placeholder = graph_placeholder(dtypes.resource, arg.shape,\n name=name)\n\n arg = resource_variable_ops.BaseResourceVariable(\n name=name,\n shape=arg.shape,\n dtype=arg.dtype,\n handle=placeholder,\n handle_name=name)\n # Capture arg variables to create placeholders for them. These will be\n # removed as captures after the function is traced (since otherwise we'd\n # just add it back with a new placeholder when the variable was\n # referenced).\n placeholder = func_graph.capture(arg.handle, name=name)\n placeholder.op._set_attr( # pylint: disable=protected-access\n \"_user_specified_name\",\n attr_value_pb2.AttrValue(s=compat.as_bytes(name)))\n function_inputs.append(arg)\n else:\n if shape is not None:\n raise RuntimeError(\n \"Expected provided shape override to be None for arg that isn't \"\n \"a Tensor, but saw arg: '%s', shape: '%s'. args: %s\"\n % (arg, shape, args))\n function_inputs.append(arg)\n return nest.pack_sequence_as(structure, function_inputs,\n expand_composites=True)\n\n\ndef _get_defun_inputs_from_kwargs(kwargs, flat_shapes):\n \"\"\"Maps Python function keyword args to graph-construction inputs.\"\"\"\n if kwargs:\n names, args = zip(*sorted(kwargs.items()))\n else:\n names = []\n args = []\n return _get_defun_inputs(\n args, names, structure=kwargs, flat_shapes=flat_shapes)\n\n\ndef dismantle_func_graph(func_graph):\n \"\"\"Removes reference cycles in `func_graph` FuncGraph.\n\n Helpful for making sure the garbage collector doesn't need to run when\n the FuncGraph goes out of scope, e.g. in tests using defun with\n @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).\n\n Args:\n func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable\n after this function.\n \"\"\"\n func_graph.clear_captures()\n ops.dismantle_graph(func_graph)\n\n\ndef override_func_graph_name_scope(func_graph, name_scope):\n func_graph._name_stack = name_scope # pylint: disable=protected-access\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras testing_utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python import keras\nfrom tensorflow.python import tf2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.engine import keras_tensor\nfrom tensorflow.python.platform import test\n\n\nclass KerasParameterizedTest(keras_parameterized.TestCase):\n\n def test_run_with_all_model_types(self):\n model_types = []\n models = []\n\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_with_all_model_types\n def testBody(self):\n model_types.append(testing_utils.get_model_type())\n models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))\n\n e = ExampleTest()\n e.testBody_functional()\n e.testBody_subclass()\n e.testBody_sequential()\n\n self.assertLen(model_types, 3)\n self.assertAllEqual(model_types, [\n \"functional\",\n \"subclass\",\n \"sequential\"\n ])\n\n # Validate that the models are what they should be\n self.assertTrue(models[0]._is_graph_network)\n self.assertFalse(models[1]._is_graph_network)\n self.assertNotIsInstance(models[0], keras.models.Sequential)\n self.assertNotIsInstance(models[1], keras.models.Sequential)\n self.assertIsInstance(models[2], keras.models.Sequential)\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n\n self.assertLen(model_types, 6)\n\n def test_run_with_all_model_types_and_extra_params(self):\n model_types = []\n models = []\n\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_with_all_model_types\n @parameterized.named_parameters(\n [dict(testcase_name=\"_0\", with_brackets=True),\n dict(testcase_name=\"_1\", with_brackets=False)])\n def testBody(self, with_brackets):\n with_brackets = \"with_brackets\" if with_brackets else \"without_brackets\"\n model_types.append((with_brackets, testing_utils.get_model_type()))\n models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))\n\n e = ExampleTest()\n e.testBody_0_functional()\n e.testBody_0_subclass()\n e.testBody_0_sequential()\n e.testBody_1_functional()\n e.testBody_1_subclass()\n e.testBody_1_sequential()\n\n self.assertLen(model_types, 6)\n self.assertAllEqual(model_types, [\n (\"with_brackets\", \"functional\"),\n (\"with_brackets\", \"subclass\"),\n (\"with_brackets\", \"sequential\"),\n (\"without_brackets\", \"functional\"),\n (\"without_brackets\", \"subclass\"),\n (\"without_brackets\", \"sequential\"),\n ])\n\n # Validate that the models are what they should be\n 
self.assertTrue(models[0]._is_graph_network)\n self.assertFalse(models[1]._is_graph_network)\n self.assertNotIsInstance(models[0], keras.models.Sequential)\n self.assertNotIsInstance(models[1], keras.models.Sequential)\n self.assertIsInstance(models[2], keras.models.Sequential)\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n\n self.assertLen(model_types, 12)\n\n def test_run_with_all_model_types_exclude_one(self):\n model_types = []\n models = []\n\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_with_all_model_types(exclude_models=\"sequential\")\n def testBody(self):\n model_types.append(testing_utils.get_model_type())\n models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))\n\n e = ExampleTest()\n if hasattr(e, \"testBody_functional\"):\n e.testBody_functional()\n if hasattr(e, \"testBody_subclass\"):\n e.testBody_subclass()\n if hasattr(e, \"testBody_sequential\"):\n e.testBody_sequential()\n\n self.assertLen(model_types, 2)\n self.assertAllEqual(model_types, [\n \"functional\",\n \"subclass\"\n ])\n\n # Validate that the models are what they should be\n self.assertTrue(models[0]._is_graph_network)\n self.assertFalse(models[1]._is_graph_network)\n self.assertNotIsInstance(models[0], keras.models.Sequential)\n self.assertNotIsInstance(models[1], keras.models.Sequential)\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n\n self.assertLen(model_types, 4)\n\n def test_run_with_all_model_types_exclude_multiple(self):\n model_types = []\n models = []\n\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_with_all_model_types(\n exclude_models=[\"sequential\", \"functional\"])\n def testBody(self):\n model_types.append(testing_utils.get_model_type())\n models.append(testing_utils.get_small_mlp(1, 4, input_dim=3))\n\n e = ExampleTest()\n if hasattr(e, \"testBody_functional\"):\n e.testBody_functional()\n if hasattr(e, \"testBody_subclass\"):\n e.testBody_subclass()\n if hasattr(e, \"testBody_sequential\"):\n e.testBody_sequential()\n\n self.assertLen(model_types, 1)\n self.assertAllEqual(model_types, [\n \"subclass\"\n ])\n\n # Validate that the models are what they should be\n self.assertFalse(models[0]._is_graph_network)\n self.assertNotIsInstance(models[0], keras.models.Sequential)\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n\n self.assertLen(model_types, 2)\n\n def test_run_all_keras_modes(self):\n l = []\n\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_all_keras_modes(skip_keras_tensors=True)\n def testBody(self):\n mode = \"eager\" if context.executing_eagerly() else \"graph\"\n should_run_eagerly = testing_utils.should_run_eagerly()\n l.append((mode, should_run_eagerly))\n\n e = ExampleTest()\n if not tf2.enabled():\n e.testBody_v1_session()\n e.testBody_v2_eager()\n e.testBody_v2_function()\n\n if not tf2.enabled():\n self.assertLen(l, 3)\n self.assertAllEqual(l, [\n (\"graph\", False),\n (\"eager\", True),\n (\"eager\", False),\n ])\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n self.assertLen(l, 6)\n else:\n self.assertLen(l, 2)\n self.assertAllEqual(l, [\n (\"eager\", True),\n (\"eager\", False),\n ])\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n self.assertLen(l, 4)\n\n def 
test_run_all_keras_modes_include_keras_tensors(self):\n l = []\n\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_all_keras_modes()\n def testBody(self):\n mode = \"eager\" if context.executing_eagerly() else \"graph\"\n should_run_eagerly = testing_utils.should_run_eagerly()\n l.append((mode, should_run_eagerly,\n keras_tensor.keras_tensors_enabled()))\n\n e = ExampleTest()\n if not tf2.enabled():\n e.testBody_v1_session()\n e.testBody_v2_eager()\n e.testBody_v2_function()\n e.testBody_v2_function_use_keras_tensors()\n\n if not tf2.enabled():\n self.assertLen(l, 4)\n self.assertAllEqual(l, [\n (\"graph\", False, False),\n (\"eager\", True, keras_tensor._KERAS_TENSORS_ENABLED),\n (\"eager\", False, keras_tensor._KERAS_TENSORS_ENABLED),\n (\"eager\", False, True),\n ])\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n self.assertLen(l, 8)\n else:\n self.assertLen(l, 3)\n self.assertAllEqual(l, [\n (\"eager\", True, keras_tensor._KERAS_TENSORS_ENABLED),\n (\"eager\", False, keras_tensor._KERAS_TENSORS_ENABLED),\n (\"eager\", False, True),\n ])\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n self.assertLen(l, 6)\n\n def test_run_all_keras_modes_extra_params(self):\n l = []\n\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_all_keras_modes(skip_keras_tensors=True)\n @parameterized.named_parameters(\n [dict(testcase_name=\"_0\", with_brackets=True),\n dict(testcase_name=\"_1\", with_brackets=False)])\n def testBody(self, with_brackets):\n mode = \"eager\" if context.executing_eagerly() else \"graph\"\n with_brackets = \"with_brackets\" if with_brackets else \"without_brackets\"\n should_run_eagerly = testing_utils.should_run_eagerly()\n l.append((with_brackets, mode, should_run_eagerly))\n\n e = ExampleTest()\n if not tf2.enabled():\n e.testBody_0_v1_session()\n e.testBody_1_v1_session()\n\n e.testBody_0_v2_eager()\n e.testBody_0_v2_function()\n e.testBody_1_v2_eager()\n e.testBody_1_v2_function()\n\n expected_combinations = {\n (\"with_brackets\", \"eager\", True),\n (\"with_brackets\", \"eager\", False),\n (\"without_brackets\", \"eager\", True),\n (\"without_brackets\", \"eager\", False),\n }\n\n if not tf2.enabled():\n expected_combinations = expected_combinations.union({\n (\"with_brackets\", \"graph\", False),\n (\"without_brackets\", \"graph\", False),\n })\n\n self.assertLen(l, len(expected_combinations))\n self.assertEqual(set(l), expected_combinations)\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n\n self.assertLen(l, len(expected_combinations) * 2)\n\n def test_run_all_keras_modes_always_skip_v1(self):\n l = []\n\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True,\n skip_keras_tensors=True)\n def testBody(self):\n mode = \"eager\" if context.executing_eagerly() else \"graph\"\n should_run_eagerly = testing_utils.should_run_eagerly()\n l.append((mode, should_run_eagerly))\n\n e = ExampleTest()\n if hasattr(e, \"testBody_v1_session\"):\n e.testBody_v1_session()\n if hasattr(e, \"testBody_v2_eager\"):\n e.testBody_v2_eager()\n if hasattr(e, \"testBody_v2_function\"):\n e.testBody_v2_function()\n\n self.assertLen(l, 2)\n self.assertEqual(\n set(l), {\n (\"eager\", True),\n (\"eager\", False),\n })\n\n def test_run_all_keras_modes_with_all_model_types(self):\n l 
= []\n\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes(skip_keras_tensors=True)\n def testBody(self):\n mode = \"eager\" if context.executing_eagerly() else \"graph\"\n should_run_eagerly = testing_utils.should_run_eagerly()\n l.append((mode, should_run_eagerly, testing_utils.get_model_type()))\n\n e = ExampleTest()\n e.testBody_v2_eager_functional()\n e.testBody_v2_function_functional()\n e.testBody_v2_eager_sequential()\n e.testBody_v2_function_sequential()\n e.testBody_v2_eager_subclass()\n e.testBody_v2_function_subclass()\n\n if not tf2.enabled():\n e.testBody_v1_session_functional()\n e.testBody_v1_session_sequential()\n e.testBody_v1_session_subclass()\n\n expected_combinations = {\n (\"eager\", True, \"functional\"),\n (\"eager\", False, \"functional\"),\n (\"eager\", True, \"sequential\"),\n (\"eager\", False, \"sequential\"),\n (\"eager\", True, \"subclass\"),\n (\"eager\", False, \"subclass\"),\n }\n\n if not tf2.enabled():\n expected_combinations = expected_combinations.union({\n (\"graph\", False, \"functional\"),\n (\"graph\", False, \"sequential\"),\n (\"graph\", False, \"subclass\"),\n })\n\n self.assertLen(l, len(expected_combinations))\n self.assertEqual(set(l), expected_combinations)\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n\n self.assertLen(l, len(expected_combinations) * 2)\n\n def test_run_all_model_types_with_all_keras_modes(self):\n l = []\n\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_all_keras_modes(skip_keras_tensors=True)\n @keras_parameterized.run_with_all_model_types\n def testBody(self):\n mode = \"eager\" if context.executing_eagerly() else \"graph\"\n should_run_eagerly = testing_utils.should_run_eagerly()\n l.append((mode, should_run_eagerly, testing_utils.get_model_type()))\n\n e = ExampleTest()\n e.testBody_functional_v2_eager()\n e.testBody_functional_v2_function()\n e.testBody_sequential_v2_eager()\n e.testBody_sequential_v2_function()\n e.testBody_subclass_v2_eager()\n e.testBody_subclass_v2_function()\n\n if not tf2.enabled():\n e.testBody_functional_v1_session()\n e.testBody_sequential_v1_session()\n e.testBody_subclass_v1_session()\n\n expected_combinations = {\n (\"eager\", True, \"functional\"),\n (\"eager\", False, \"functional\"),\n (\"eager\", True, \"sequential\"),\n (\"eager\", False, \"sequential\"),\n (\"eager\", True, \"subclass\"),\n (\"eager\", False, \"subclass\"),\n }\n\n if not tf2.enabled():\n expected_combinations = expected_combinations.union({\n (\"graph\", False, \"functional\"),\n (\"graph\", False, \"sequential\"),\n (\"graph\", False, \"subclass\"),\n })\n\n self.assertLen(l, len(expected_combinations))\n self.assertEqual(set(l), expected_combinations)\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n\n self.assertLen(l, len(expected_combinations) * 2)\n\n def test_run_all_keras_modes_with_all_model_types_annotate_class(self):\n l = []\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes(skip_keras_tensors=True)\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @parameterized.named_parameters(dict(testcase_name=\"_arg\",\n arg=True))\n def testBody(self, arg):\n mode = \"eager\" if context.executing_eagerly() else \"graph\"\n should_run_eagerly = testing_utils.should_run_eagerly()\n 
l.append((mode, should_run_eagerly, testing_utils.get_model_type()))\n\n e = ExampleTest()\n e.testBody_arg_v2_eager_functional()\n e.testBody_arg_v2_function_functional()\n e.testBody_arg_v2_eager_sequential()\n e.testBody_arg_v2_function_sequential()\n e.testBody_arg_v2_eager_subclass()\n e.testBody_arg_v2_function_subclass()\n\n if not tf2.enabled():\n e.testBody_arg_v1_session_functional()\n e.testBody_arg_v1_session_sequential()\n e.testBody_arg_v1_session_subclass()\n\n expected_combinations = {\n (\"eager\", True, \"functional\"),\n (\"eager\", False, \"functional\"),\n (\"eager\", True, \"sequential\"),\n (\"eager\", False, \"sequential\"),\n (\"eager\", True, \"subclass\"),\n (\"eager\", False, \"subclass\"),\n }\n\n if not tf2.enabled():\n expected_combinations = expected_combinations.union({\n (\"graph\", False, \"functional\"),\n (\"graph\", False, \"sequential\"),\n (\"graph\", False, \"subclass\"),\n })\n\n self.assertLen(l, len(expected_combinations))\n self.assertEqual(set(l), expected_combinations)\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n\n self.assertLen(l, len(expected_combinations) * 2)\n\n def test_run_all_keras_modes_with_all_model_types_annotate_class_2(self):\n l = []\n\n @keras_parameterized.run_with_all_model_types\n class ExampleTest(keras_parameterized.TestCase):\n\n def runTest(self):\n pass\n\n @keras_parameterized.run_all_keras_modes(skip_keras_tensors=True)\n @parameterized.named_parameters(dict(testcase_name=\"_arg\",\n arg=True))\n def testBody(self, arg):\n mode = \"eager\" if context.executing_eagerly() else \"graph\"\n should_run_eagerly = testing_utils.should_run_eagerly()\n l.append((mode, should_run_eagerly, testing_utils.get_model_type()))\n\n e = ExampleTest()\n e.testBody_arg_v2_eager_functional()\n e.testBody_arg_v2_function_functional()\n e.testBody_arg_v2_eager_sequential()\n e.testBody_arg_v2_function_sequential()\n e.testBody_arg_v2_eager_subclass()\n e.testBody_arg_v2_function_subclass()\n\n if not tf2.enabled():\n e.testBody_arg_v1_session_functional()\n e.testBody_arg_v1_session_sequential()\n e.testBody_arg_v1_session_subclass()\n\n expected_combinations = {\n (\"eager\", True, \"functional\"),\n (\"eager\", False, \"functional\"),\n (\"eager\", True, \"sequential\"),\n (\"eager\", False, \"sequential\"),\n (\"eager\", True, \"subclass\"),\n (\"eager\", False, \"subclass\"),\n }\n\n if not tf2.enabled():\n expected_combinations = expected_combinations.union({\n (\"graph\", False, \"functional\"),\n (\"graph\", False, \"sequential\"),\n (\"graph\", False, \"subclass\"),\n })\n\n self.assertLen(l, len(expected_combinations))\n self.assertEqual(set(l), expected_combinations)\n\n ts = unittest.makeSuite(ExampleTest)\n res = unittest.TestResult()\n ts.run(res)\n\n self.assertLen(l, len(expected_combinations) * 2)\n\n @keras_parameterized.run_all_keras_modes(skip_keras_tensors=True)\n @parameterized.named_parameters(dict(testcase_name=\"argument\",\n arg=True))\n def test_run_all_keras_modes_extra_params_2(self, arg):\n self.assertEqual(arg, True)\n\n @keras_parameterized.run_with_all_model_types\n @parameterized.named_parameters(dict(testcase_name=\"argument\",\n arg=True))\n def test_run_with_all_model_types_extra_params_2(self, arg):\n self.assertEqual(arg, True)\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmark for Keras hashing preprocessing layer.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport random\nimport string\nimport time\n\nfrom absl import flags\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.compat import v2_compat\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras.layers.preprocessing import hashing\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.platform import benchmark\nfrom tensorflow.python.platform import test\n\nFLAGS = flags.FLAGS\n\nv2_compat.enable_v2_behavior()\n\n\n# word_gen creates random sequences of ASCII letters (both lowercase and upper).\n# The number of unique strings is ~2,700.\ndef word_gen():\n for _ in itertools.count(1):\n yield \"\".join(random.choice(string.ascii_letters) for i in range(2))\n\n\nclass BenchmarkLayer(benchmark.TensorFlowBenchmark):\n \"\"\"Benchmark the layer forward pass.\"\"\"\n\n def run_dataset_implementation(self, batch_size):\n num_repeats = 5\n starts = []\n ends = []\n for _ in range(num_repeats):\n ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string,\n tensor_shape.TensorShape([]))\n ds = ds.shuffle(batch_size * 100)\n ds = ds.batch(batch_size)\n num_batches = 5\n ds = ds.take(num_batches)\n ds = ds.prefetch(num_batches)\n starts.append(time.time())\n # Benchmarked code begins here.\n for i in ds:\n _ = string_ops.string_to_hash_bucket(i, num_buckets=2)\n # Benchmarked code ends here.\n ends.append(time.time())\n\n avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches\n return avg_time\n\n def bm_layer_implementation(self, batch_size):\n input_1 = keras.Input(shape=(None,), dtype=dtypes.string, name=\"word\")\n layer = hashing.Hashing(num_bins=2)\n _ = layer(input_1)\n\n num_repeats = 5\n starts = []\n ends = []\n for _ in range(num_repeats):\n ds = dataset_ops.Dataset.from_generator(word_gen, dtypes.string,\n tensor_shape.TensorShape([]))\n ds = ds.shuffle(batch_size * 100)\n ds = ds.batch(batch_size)\n num_batches = 5\n ds = ds.take(num_batches)\n ds = ds.prefetch(num_batches)\n starts.append(time.time())\n # Benchmarked code begins here.\n for i in ds:\n _ = layer(i)\n # Benchmarked code ends here.\n ends.append(time.time())\n\n avg_time = np.mean(np.array(ends) - np.array(starts)) / num_batches\n name = \"hashing|batch_%s\" % batch_size\n baseline = self.run_dataset_implementation(batch_size)\n extras = {\n \"dataset implementation baseline\": baseline,\n \"delta seconds\": (baseline - avg_time),\n \"delta percent\": ((baseline - avg_time) / baseline) * 100\n }\n self.report_benchmark(\n iters=num_repeats, 
wall_time=avg_time, extras=extras, name=name)\n\n def benchmark_vocab_size_by_batch(self):\n for batch in [32, 64, 256]:\n self.bm_layer_implementation(batch_size=batch)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.eager.tape.record_operation",
"tensorflow.python.framework.ops._as_graph_element",
"tensorflow.python.util.tf_decorator.rewrap",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.util.tf_decorator.make_decorator",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.util.object_identity.ObjectIdentityWeakSet",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.eager.graph_only_ops.graph_placeholder",
"tensorflow.python.framework.auto_control_deps.AutomaticControlDependencies",
"tensorflow.python.util.nest.flatten_with_tuple_paths",
"tensorflow.python.eager.context.global_seed",
"tensorflow.python.ops.resource_variable_ops.BaseResourceVariable",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.framework.ops.NullContextmanager",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.framework.ops.dismantle_graph",
"tensorflow.python.util.tf_decorator.unwrap",
"tensorflow.python.framework.ops.convert_to_tensor_or_composite",
"tensorflow.python.ops.custom_gradient.copy_handle_data",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.ops.resource_variable_ops.VariableSpec",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.ops.tensor_array_ops.build_ta_with_new_flow",
"tensorflow.python.framework.errors.InaccessibleTensorError",
"tensorflow.python.util.nest.assert_same_structure",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.framework.ops.uid",
"tensorflow.python.util.object_identity.ObjectIdentitySet",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.framework.ops.get_default_graph",
"numpy.prod",
"tensorflow.python.util.memory.dismantle_ordered_dict",
"tensorflow.python.autograph.ConversionOptions",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.keras.testing_utils.should_run_eagerly",
"tensorflow.python.keras.keras_parameterized.run_all_keras_modes",
"tensorflow.python.keras.testing_utils.get_small_mlp",
"tensorflow.python.keras.keras_parameterized.run_with_all_model_types",
"tensorflow.python.tf2.enabled",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.testing_utils.get_model_type",
"tensorflow.python.keras.engine.keras_tensor.keras_tensors_enabled",
"tensorflow.python.eager.context.executing_eagerly"
],
[
"tensorflow.python.compat.v2_compat.enable_v2_behavior",
"tensorflow.python.keras.layers.preprocessing.hashing.Hashing",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.keras.Input",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.string_ops.string_to_hash_bucket",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.3",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.3",
"2.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
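Note: the record above ends with a benchmark that times hashing.Hashing against string_ops.string_to_hash_bucket (both appear in its apis list). A minimal sketch of those two calls through the public TensorFlow 2.x surface; the experimental preprocessing namespace below is an assumption that matches roughly the 2.3-2.5 versions listed (later releases expose the layer as tf.keras.layers.Hashing), and the input strings are illustrative, not the benchmark's word_gen output:

import tensorflow as tf

words = tf.constant(["ab", "Cd", "zZ"])  # illustrative stand-in for word_gen batches

# Raw-op path, the baseline the benchmark times via string_to_hash_bucket.
buckets_op = tf.strings.to_hash_bucket(words, num_buckets=2)

# Preprocessing-layer path, the layer the benchmark times via hashing.Hashing.
layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=2)
buckets_layer = layer(words)

# The two paths use different hash functions, so per-string buckets need not match.
print(buckets_op.numpy(), buckets_layer.numpy())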
dendisuhubdy/MinkowskiEngine | [
"a1cdcba68ef925bfefed2fe161f62e1ec78573b9"
] | [
"MinkowskiEngine/MinkowskiFunctional.py"
] | [
"import torch.nn.functional as F\n\nfrom SparseTensor import SparseTensor\n\n\ndef relu(input):\n output = F.relu(input.F)\n return SparseTensor(\n output, coords_key=input.coords_key, coords_manager=input.coords_man)\n"
] | [
[
"torch.nn.functional.relu"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
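Note: the only API recorded for MinkowskiEngine/MinkowskiFunctional.py is torch.nn.functional.relu; the file's relu() wrapper applies it to a SparseTensor's feature matrix (input.F) and rewraps the result. A minimal dense sketch of the underlying call, with an assumed toy feature matrix and no MinkowskiEngine dependency:

import torch
import torch.nn.functional as F

feats = torch.tensor([[-1.0, 2.0],
                      [3.0, -4.0]])  # assumed stand-in for SparseTensor.F
print(F.relu(feats))  # negative entries clamped to zero: [[0., 2.], [3., 0.]]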
QuantumHardware/qiskit-experiments | [
"c09cf35bb922419354955abe8d536a97a9ea286b",
"c09cf35bb922419354955abe8d536a97a9ea286b"
] | [
"test/calibration/experiments/test_drag.py",
"qiskit_experiments/curve_analysis/visualization/fit_result_plotters.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Test drag calibration experiment.\"\"\"\n\nfrom test.base import QiskitExperimentsTestCase\nimport unittest\nimport numpy as np\n\nfrom qiskit.circuit import Parameter\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.pulse import DriveChannel, Drag\nimport qiskit.pulse as pulse\nfrom qiskit.qobj.utils import MeasLevel\nfrom qiskit import transpile\n\nfrom qiskit_experiments.exceptions import CalibrationError\nfrom qiskit_experiments.library import RoughDrag, RoughDragCal\nfrom qiskit_experiments.test.mock_iq_backend import DragBackend\nfrom qiskit_experiments.calibration_management.basis_gate_library import FixedFrequencyTransmon\nfrom qiskit_experiments.calibration_management import Calibrations\n\n\nclass TestDragEndToEnd(QiskitExperimentsTestCase):\n \"\"\"Test the drag experiment.\"\"\"\n\n def setUp(self):\n \"\"\"Setup some schedules.\"\"\"\n super().setUp()\n\n beta = Parameter(\"β\")\n\n with pulse.build(name=\"xp\") as xp:\n pulse.play(Drag(duration=160, amp=0.208519, sigma=40, beta=beta), DriveChannel(0))\n\n self.x_plus = xp\n self.test_tol = 0.05\n\n def test_reps(self):\n \"\"\"Test that setting reps raises and error if reps is not of length three.\"\"\"\n\n drag = RoughDrag(0, self.x_plus)\n\n with self.assertRaises(CalibrationError):\n drag.set_experiment_options(reps=[1, 2, 3, 4])\n\n def test_end_to_end(self):\n \"\"\"Test the drag experiment end to end.\"\"\"\n\n backend = DragBackend(gate_name=\"Drag(xp)\")\n\n drag = RoughDrag(1, self.x_plus)\n\n expdata = drag.run(backend)\n self.assertExperimentDone(expdata)\n result = expdata.analysis_results(1)\n\n self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)\n self.assertEqual(result.quality, \"good\")\n\n # Small leakage will make the curves very flat, in this case one should\n # rather increase beta.\n backend = DragBackend(error=0.0051, gate_name=\"Drag(xp)\")\n\n drag = RoughDrag(0, self.x_plus)\n drag.analysis.set_options(p0={\"beta\": 1.2})\n exp_data = drag.run(backend)\n self.assertExperimentDone(exp_data)\n result = exp_data.analysis_results(1)\n\n self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)\n self.assertEqual(result.quality, \"good\")\n\n # Large leakage will make the curves oscillate quickly.\n backend = DragBackend(error=0.05, gate_name=\"Drag(xp)\")\n\n drag = RoughDrag(1, self.x_plus, betas=np.linspace(-4, 4, 31))\n drag.set_run_options(shots=200)\n drag.analysis.set_options(p0={\"beta\": 1.8, \"freq0\": 0.08, \"freq1\": 0.16, \"freq2\": 0.32})\n exp_data = drag.run(backend)\n self.assertExperimentDone(exp_data)\n result = exp_data.analysis_results(1)\n\n meas_level = exp_data.metadata[\"job_metadata\"][-1][\"run_options\"][\"meas_level\"]\n\n self.assertEqual(meas_level, MeasLevel.CLASSIFIED)\n self.assertTrue(abs(result.value.n - backend.ideal_beta) < self.test_tol)\n self.assertEqual(result.quality, \"good\")\n\n\nclass TestDragCircuits(QiskitExperimentsTestCase):\n \"\"\"Test the circuits of the drag calibration.\"\"\"\n\n def setUp(self):\n \"\"\"Setup some 
schedules.\"\"\"\n super().setUp()\n\n beta = Parameter(\"β\")\n\n with pulse.build(name=\"xp\") as xp:\n pulse.play(Drag(duration=160, amp=0.208519, sigma=40, beta=beta), DriveChannel(0))\n\n self.x_plus = xp\n\n def test_default_circuits(self):\n \"\"\"Test the default circuit.\"\"\"\n\n backend = DragBackend(error=0.005, gate_name=\"Drag(xp)\")\n\n drag = RoughDrag(0, self.x_plus)\n drag.set_experiment_options(reps=[2, 4, 8])\n drag.backend = DragBackend(gate_name=\"Drag(xp)\")\n circuits = drag.circuits()\n\n for idx, expected in enumerate([4, 8, 16]):\n ops = transpile(circuits[idx * 51], backend).count_ops()\n self.assertEqual(ops[\"Drag(xp)\"], expected)\n\n def test_raise_multiple_parameter(self):\n \"\"\"Check that the experiment raises with unassigned parameters.\"\"\"\n\n beta = Parameter(\"β\")\n amp = Parameter(\"amp\")\n\n with pulse.build(name=\"xp\") as xp:\n pulse.play(Drag(duration=160, amp=amp, sigma=40, beta=beta), DriveChannel(0))\n\n with self.assertRaises(QiskitError):\n RoughDrag(1, xp, betas=np.linspace(-3, 3, 21))\n\n\nclass TestRoughDragCalUpdate(QiskitExperimentsTestCase):\n \"\"\"Test that a Drag calibration experiment properly updates the calibrations.\"\"\"\n\n def setUp(self):\n \"\"\"Setup the tests\"\"\"\n super().setUp()\n\n library = FixedFrequencyTransmon()\n\n self.backend = DragBackend(gate_name=\"Drag(x)\")\n self.cals = Calibrations.from_backend(self.backend, library)\n self.test_tol = 0.05\n\n def test_update(self):\n \"\"\"Test that running RoughDragCal updates the calibrations.\"\"\"\n\n qubit = 0\n prev_beta = self.cals.get_parameter_value(\"β\", (0,), \"x\")\n self.assertEqual(prev_beta, 0)\n\n expdata = RoughDragCal(qubit, self.cals, backend=self.backend).run()\n self.assertExperimentDone(expdata)\n\n new_beta = self.cals.get_parameter_value(\"β\", (0,), \"x\")\n self.assertTrue(abs(new_beta - self.backend.ideal_beta) < self.test_tol)\n self.assertTrue(abs(new_beta) > self.test_tol)\n\n def test_dragcal_experiment_config(self):\n \"\"\"Test RoughDragCal config can round trip\"\"\"\n exp = RoughDragCal(0, self.cals, backend=self.backend)\n loaded_exp = RoughDragCal.from_config(exp.config())\n self.assertNotEqual(exp, loaded_exp)\n self.assertTrue(self.json_equiv(exp, loaded_exp))\n\n @unittest.skip(\"Calibration experiments are not yet JSON serializable\")\n def test_dragcal_roundtrip_serializable(self):\n \"\"\"Test round trip JSON serialization\"\"\"\n exp = RoughDragCal(0, self.cals)\n self.assertRoundTripSerializable(exp, self.json_equiv)\n\n def test_drag_experiment_config(self):\n \"\"\"Test RoughDrag config can roundtrip\"\"\"\n with pulse.build(name=\"xp\") as sched:\n pulse.play(pulse.Drag(160, 0.5, 40, Parameter(\"β\")), pulse.DriveChannel(0))\n exp = RoughDrag(0, backend=self.backend, schedule=sched)\n loaded_exp = RoughDrag.from_config(exp.config())\n self.assertNotEqual(exp, loaded_exp)\n self.assertTrue(self.json_equiv(exp, loaded_exp))\n\n @unittest.skip(\"Schedules are not yet JSON serializable\")\n def test_drag_roundtrip_serializable(self):\n \"\"\"Test round trip JSON serialization\"\"\"\n with pulse.build(name=\"xp\") as sched:\n pulse.play(pulse.Drag(160, 0.5, 40, Parameter(\"β\")), pulse.DriveChannel(0))\n exp = RoughDrag(0, backend=self.backend, schedule=sched)\n self.assertRoundTripSerializable(exp, self.json_equiv)\n",
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nA collection of functions that draw formatted curve analysis results.\n\nFor example, this visualization contains not only fit curves and raw data points,\nbut also some extra fitting information, such as fit values of some interesting parameters\nand goodness of the fitting represented by chi-squared. These extra information can be\nalso visualized as a fit report.\n\nNote that plotter is a class that only has a class method to draw the image.\nThis is just like a function, but allows serialization via Enum.\n\"\"\"\n\nfrom collections import defaultdict\nfrom typing import List, Dict, Optional\n\nimport uncertainties\nimport numpy as np\nfrom matplotlib.ticker import FuncFormatter\nfrom qiskit.utils import detach_prefix\n\nfrom qiskit_experiments.curve_analysis.curve_data import SeriesDef, FitData, CurveData\nfrom qiskit_experiments.framework import AnalysisResultData\nfrom qiskit_experiments.framework.matplotlib import get_non_gui_ax\nfrom .curves import plot_scatter, plot_errorbar, plot_curve_fit\nfrom .style import PlotterStyle\n\n\nclass MplDrawSingleCanvas:\n \"\"\"A plotter to draw a single canvas figure for fit result.\"\"\"\n\n @classmethod\n def draw(\n cls,\n series_defs: List[SeriesDef],\n raw_samples: List[CurveData],\n fit_samples: List[CurveData],\n tick_labels: Dict[str, str],\n fit_data: FitData,\n result_entries: List[AnalysisResultData],\n style: Optional[PlotterStyle] = None,\n axis: Optional[\"matplotlib.axes.Axes\"] = None,\n ) -> \"pyplot.Figure\":\n \"\"\"Create a fit result of all curves in the single canvas.\n\n Args:\n series_defs: List of definition for each curve.\n raw_samples: List of raw sample data for each curve.\n fit_samples: List of formatted sample data for each curve.\n tick_labels: Dictionary of axis label information. Axis units and label for x and y\n value should be explained.\n fit_data: fit data generated by the analysis.\n result_entries: List of analysis result data entries.\n style: Optional. A configuration object to modify the appearance of the figure.\n axis: Optional. 
A matplotlib Axis object.\n\n Returns:\n A matplotlib figure of the curve fit result.\n \"\"\"\n if axis is None:\n axis = get_non_gui_ax()\n\n # update image size to experiment default\n figure = axis.get_figure()\n figure.set_size_inches(*style.figsize)\n else:\n figure = axis.get_figure()\n\n # draw all curves on the same canvas\n for series_def, raw_samp, fit_samp in zip(series_defs, raw_samples, fit_samples):\n draw_single_curve_mpl(\n axis=axis,\n series_def=series_def,\n raw_sample=raw_samp,\n fit_sample=fit_samp,\n fit_data=fit_data,\n style=style,\n )\n\n # add legend\n if len(series_defs) > 1:\n axis.legend(loc=style.legend_loc)\n\n # get axis scaling factor\n for this_axis in (\"x\", \"y\"):\n sub_axis = getattr(axis, this_axis + \"axis\")\n unit = tick_labels[this_axis + \"val_unit\"]\n label = tick_labels[this_axis + \"label\"]\n if unit:\n maxv = np.max(np.abs(sub_axis.get_data_interval()))\n scaled_maxv, prefix = detach_prefix(maxv, decimal=3)\n prefactor = scaled_maxv / maxv\n # pylint: disable=cell-var-from-loop\n sub_axis.set_major_formatter(FuncFormatter(lambda x, p: f\"{x * prefactor: .3g}\"))\n sub_axis.set_label_text(f\"{label} [{prefix}{unit}]\", fontsize=style.axis_label_size)\n else:\n sub_axis.set_label_text(label, fontsize=style.axis_label_size)\n axis.ticklabel_format(axis=this_axis, style=\"sci\", scilimits=(-3, 3))\n\n if tick_labels[\"xlim\"]:\n axis.set_xlim(tick_labels[\"xlim\"])\n\n if tick_labels[\"ylim\"]:\n axis.set_ylim(tick_labels[\"ylim\"])\n\n # write analysis report\n if fit_data:\n report_str = write_fit_report(result_entries)\n report_str += r\"Fit $\\chi^2$ = \" + f\"{fit_data.reduced_chisq: .4g}\"\n\n report_handler = axis.text(\n *style.fit_report_rpos,\n report_str,\n ha=\"center\",\n va=\"top\",\n size=style.fit_report_text_size,\n transform=axis.transAxes,\n )\n\n bbox_props = dict(boxstyle=\"square, pad=0.3\", fc=\"white\", ec=\"black\", lw=1, alpha=0.8)\n report_handler.set_bbox(bbox_props)\n\n axis.tick_params(labelsize=style.tick_label_size)\n axis.grid(True)\n\n return figure\n\n\nclass MplDrawMultiCanvasVstack:\n \"\"\"A plotter to draw a vertically stacked multi canvas figure for fit result.\"\"\"\n\n @classmethod\n def draw(\n cls,\n series_defs: List[SeriesDef],\n raw_samples: List[CurveData],\n fit_samples: List[CurveData],\n tick_labels: Dict[str, str],\n fit_data: FitData,\n result_entries: List[AnalysisResultData],\n style: Optional[PlotterStyle] = None,\n axis: Optional[\"matplotlib.axes.Axes\"] = None,\n ) -> \"pyplot.Figure\":\n \"\"\"Create a fit result of all curves in the single canvas.\n\n Args:\n series_defs: List of definition for each curve.\n raw_samples: List of raw sample data for each curve.\n fit_samples: List of formatted sample data for each curve.\n tick_labels: Dictionary of axis label information. Axis units and label for x and y\n value should be explained.\n fit_data: fit data generated by the analysis.\n result_entries: List of analysis result data entries.\n style: Optional. A configuration object to modify the appearance of the figure.\n axis: Optional. A matplotlib Axis object.\n\n Returns:\n A matplotlib figure of the curve fit result.\n \"\"\"\n if axis is None:\n axis = get_non_gui_ax()\n\n # update image size to experiment default\n figure = axis.get_figure()\n figure.set_size_inches(*style.figsize)\n else:\n figure = axis.get_figure()\n\n # get canvas number\n n_subplots = max(series_def.canvas for series_def in series_defs) + 1\n\n # use inset axis. 
this allows us to draw multiple canvases on a given single axis object\n inset_ax_h = (1 - (0.05 * (n_subplots - 1))) / n_subplots\n inset_axes = [\n axis.inset_axes(\n [0, 1 - (inset_ax_h + 0.05) * n_axis - inset_ax_h, 1, inset_ax_h],\n transform=axis.transAxes,\n zorder=1,\n )\n for n_axis in range(n_subplots)\n ]\n\n # show x label only in the bottom canvas\n for inset_axis in inset_axes[:-1]:\n inset_axis.set_xticklabels([])\n inset_axes[-1].get_shared_x_axes().join(*inset_axes)\n\n # remove original axis frames\n axis.spines.right.set_visible(False)\n axis.spines.left.set_visible(False)\n axis.spines.top.set_visible(False)\n axis.spines.bottom.set_visible(False)\n axis.set_xticks([])\n axis.set_yticks([])\n\n # collect data source per canvas\n plot_map = defaultdict(list)\n for curve_ind, series_def in enumerate(series_defs):\n plot_map[series_def.canvas].append(curve_ind)\n\n y_labels = tick_labels[\"ylabel\"].split(\",\")\n if len(y_labels) == 1:\n y_labels = y_labels * n_subplots\n\n for ax_ind, curve_inds in plot_map.items():\n inset_axis = inset_axes[ax_ind]\n\n for curve_ind in curve_inds:\n draw_single_curve_mpl(\n axis=inset_axis,\n series_def=series_defs[curve_ind],\n raw_sample=raw_samples[curve_ind],\n fit_sample=fit_samples[curve_ind],\n fit_data=fit_data,\n style=style,\n )\n\n # add legend to each inset axis\n if len(curve_inds) > 1:\n inset_axis.legend(loc=style.legend_loc)\n\n # format y axis tick value of each inset axis\n yaxis = getattr(inset_axis, \"yaxis\")\n unit = tick_labels[\"yval_unit\"]\n label = y_labels[ax_ind]\n if unit:\n maxv = np.max(np.abs(yaxis.get_data_interval()))\n scaled_maxv, prefix = detach_prefix(maxv, decimal=3)\n prefactor = scaled_maxv / maxv\n # pylint: disable=cell-var-from-loop\n yaxis.set_major_formatter(FuncFormatter(lambda x, p: f\"{x * prefactor: .3g}\"))\n yaxis.set_label_text(f\"{label} [{prefix}{unit}]\", fontsize=style.axis_label_size)\n else:\n inset_axis.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(-3, 3))\n yaxis.set_label_text(label, fontsize=style.axis_label_size)\n\n if tick_labels[\"ylim\"]:\n inset_axis.set_ylim(tick_labels[\"ylim\"])\n\n # format x axis\n xaxis = getattr(inset_axes[-1], \"xaxis\")\n unit = tick_labels[\"xval_unit\"]\n label = tick_labels[\"xlabel\"]\n if unit:\n maxv = np.max(np.abs(xaxis.get_data_interval()))\n scaled_maxv, prefix = detach_prefix(maxv, decimal=3)\n prefactor = scaled_maxv / maxv\n # pylint: disable=cell-var-from-loop\n xaxis.set_major_formatter(FuncFormatter(lambda x, p: f\"{x * prefactor: .3g}\"))\n xaxis.set_label_text(f\"{label} [{prefix}{unit}]\", fontsize=style.axis_label_size)\n else:\n axis.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(-3, 3))\n xaxis.set_label_text(label, fontsize=style.axis_label_size)\n\n if tick_labels[\"xlim\"]:\n inset_axes[-1].set_xlim(tick_labels[\"xlim\"])\n\n # write analysis report\n if fit_data:\n report_str = write_fit_report(result_entries)\n report_str += r\"Fit $\\chi^2$ = \" + f\"{fit_data.reduced_chisq: .4g}\"\n\n report_handler = axis.text(\n *style.fit_report_rpos,\n report_str,\n ha=\"center\",\n va=\"top\",\n size=style.fit_report_text_size,\n transform=axis.transAxes,\n )\n\n bbox_props = dict(boxstyle=\"square, pad=0.3\", fc=\"white\", ec=\"black\", lw=1, alpha=0.8)\n report_handler.set_bbox(bbox_props)\n\n axis.tick_params(labelsize=style.tick_label_size)\n axis.grid(True)\n\n return figure\n\n\ndef draw_single_curve_mpl(\n axis: \"matplotlib.axes.Axes\",\n series_def: SeriesDef,\n raw_sample: CurveData,\n fit_sample: 
CurveData,\n fit_data: FitData,\n style: PlotterStyle,\n):\n \"\"\"A function that draws a single curve on the given plotter canvas.\n\n Args:\n axis: Drawer canvas.\n series_def: Definition of the curve to draw.\n raw_sample: Raw sample data.\n fit_sample: Formatted sample data.\n fit_data: Fitting parameter collection.\n style: Style sheet for plotting.\n \"\"\"\n\n # plot raw data if data is formatted\n if not np.array_equal(raw_sample.y, fit_sample.y):\n plot_scatter(xdata=raw_sample.x, ydata=raw_sample.y, ax=axis, zorder=0)\n\n # plot formatted data\n if np.all(np.isnan(fit_sample.y_err)):\n sigma = None\n else:\n sigma = np.nan_to_num(fit_sample.y_err)\n\n plot_errorbar(\n xdata=fit_sample.x,\n ydata=fit_sample.y,\n sigma=sigma,\n ax=axis,\n label=series_def.name,\n marker=series_def.plot_symbol,\n color=series_def.plot_color,\n zorder=1,\n linestyle=\"\",\n )\n\n # plot fit curve\n if fit_data:\n plot_curve_fit(\n func=series_def.fit_func,\n result=fit_data,\n ax=axis,\n color=series_def.plot_color,\n zorder=2,\n fit_uncertainty=style.plot_sigma,\n )\n\n\ndef write_fit_report(result_entries: List[AnalysisResultData]) -> str:\n \"\"\"A function that generates fit reports documentation from list of data.\n\n Args:\n result_entries: List of data entries.\n\n Returns:\n Documentation of fit reports.\n \"\"\"\n analysis_description = \"\"\n\n def format_val(float_val: float) -> str:\n if np.abs(float_val) < 1e-3 or np.abs(float_val) > 1e3:\n return f\"{float_val: .4e}\"\n return f\"{float_val: .4g}\"\n\n for res in result_entries:\n if isinstance(res.value, uncertainties.UFloat):\n fitval = res.value\n unit = res.extra.get(\"unit\", None)\n if unit:\n # unit is defined. do detaching prefix, i.e. 1000 Hz -> 1 kHz\n val, val_prefix = detach_prefix(fitval.nominal_value, decimal=3)\n val_unit = val_prefix + unit\n value_repr = f\"{val: .3g}\"\n\n # write error bar if it is finite value\n if fitval.std_dev is not None and np.isfinite(fitval.std_dev):\n # with stderr\n err, err_prefix = detach_prefix(fitval.std_dev, decimal=3)\n err_unit = err_prefix + unit\n if val_unit == err_unit:\n # same value scaling, same prefix\n value_repr += f\" \\u00B1 {err: .2f} {val_unit}\"\n else:\n # different value scaling, different prefix\n value_repr += f\" {val_unit} \\u00B1 {err: .2f} {err_unit}\"\n else:\n # without stderr, just append unit\n value_repr += f\" {val_unit}\"\n else:\n # unit is not defined. raw value formatting is performed.\n value_repr = format_val(fitval.nominal_value)\n if np.isfinite(fitval.std_dev):\n # with stderr\n value_repr += f\" \\u00B1 {format_val(fitval.std_dev)}\"\n\n analysis_description += f\"{res.name} = {value_repr}\\n\"\n\n return analysis_description\n"
] | [
[
"numpy.linspace"
],
[
"numpy.abs",
"numpy.array_equal",
"numpy.isfinite",
"numpy.isnan",
"numpy.nan_to_num",
"matplotlib.ticker.FuncFormatter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
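Note: among the APIs recorded for fit_result_plotters.py, matplotlib.ticker.FuncFormatter drives the axis-tick rescaling pattern (tick values multiplied by a prefactor derived from detach_prefix). A minimal matplotlib/numpy-only sketch of that pattern; the prefactor here is an assumed stand-in for the one the plotter computes from the data interval:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

fig, ax = plt.subplots()
x = np.linspace(0.0, 5e-6, 50)   # x data in seconds
ax.plot(x, np.sin(x / 1e-6))
prefactor = 1e6                  # seconds -> microseconds (assumed scaling)
ax.xaxis.set_major_formatter(FuncFormatter(lambda v, pos: f"{v * prefactor: .3g}"))
ax.set_xlabel("Time [us]")
fig.savefig("ticks.png")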
thyneb19/lux | [
"07a282d6a5f60c05942d866fa6f33636c3428abc"
] | [
"tests/test_type.py"
] | [
"# Copyright 2019-2020 The Lux Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .context import lux\nimport pytest\nimport random\nimport pandas as pd\nimport warnings\n\n\n# Suite of test that checks if data_type inferred correctly by Lux\ndef test_check_cars():\n lux.config.set_SQL_connection(\"\")\n df = pd.read_csv(\"lux/data/car.csv\")\n df.maintain_metadata()\n assert df.data_type[\"Name\"] == \"nominal\"\n assert df.data_type[\"MilesPerGal\"] == \"quantitative\"\n assert df.data_type[\"Cylinders\"] == \"nominal\"\n assert df.data_type[\"Displacement\"] == \"quantitative\"\n assert df.data_type[\"Horsepower\"] == \"quantitative\"\n assert df.data_type[\"Weight\"] == \"quantitative\"\n assert df.data_type[\"Acceleration\"] == \"quantitative\"\n assert df.data_type[\"Year\"] == \"temporal\"\n assert df.data_type[\"Origin\"] == \"nominal\"\n\n\ndef test_check_int_id():\n df = pd.read_csv(\n \"https://github.com/lux-org/lux-datasets/blob/master/data/instacart_sample.csv?raw=true\"\n )\n df._repr_html_()\n inverted_data_type = lux.config.executor.invert_data_type(df.data_type)\n assert len(inverted_data_type[\"id\"]) == 3\n assert (\n \"<code>order_id</code>, <code>product_id</code>, <code>user_id</code> is not visualized since it resembles an ID field.\"\n in df._message.to_html()\n )\n\n\ndef test_check_str_id():\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/churn.csv?raw=true\")\n df._repr_html_()\n assert (\n \"<code>customerID</code> is not visualized since it resembles an ID field.</li>\"\n in df._message.to_html()\n )\n\n\ndef test_check_hpi():\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/hpi.csv?raw=true\")\n df.maintain_metadata()\n\n assert df.data_type == {\n \"HPIRank\": \"quantitative\",\n \"Country\": \"geographical\",\n \"SubRegion\": \"nominal\",\n \"AverageLifeExpectancy\": \"quantitative\",\n \"AverageWellBeing\": \"quantitative\",\n \"HappyLifeYears\": \"quantitative\",\n \"Footprint\": \"quantitative\",\n \"InequalityOfOutcomes\": \"quantitative\",\n \"InequalityAdjustedLifeExpectancy\": \"quantitative\",\n \"InequalityAdjustedWellbeing\": \"quantitative\",\n \"HappyPlanetIndex\": \"quantitative\",\n \"GDPPerCapita\": \"quantitative\",\n \"Population\": \"quantitative\",\n }\n\n\ndef test_check_airbnb():\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/airbnb_nyc.csv?raw=true\")\n df.maintain_metadata()\n assert df.data_type == {\n \"id\": \"id\",\n \"name\": \"nominal\",\n \"host_id\": \"id\",\n \"host_name\": \"nominal\",\n \"neighbourhood_group\": \"nominal\",\n \"neighbourhood\": \"nominal\",\n \"latitude\": \"quantitative\",\n \"longitude\": \"quantitative\",\n \"room_type\": \"nominal\",\n \"price\": \"quantitative\",\n \"minimum_nights\": \"quantitative\",\n \"number_of_reviews\": \"quantitative\",\n \"last_review\": \"temporal\",\n \"reviews_per_month\": \"quantitative\",\n \"calculated_host_listings_count\": \"quantitative\",\n \"availability_365\": 
\"quantitative\",\n }\n\n\ndef test_check_airports():\n df = pd.read_csv(\n \"https://raw.githubusercontent.com/altair-viz/vega_datasets/master/vega_datasets/_data/airports.csv\"\n )\n df.maintain_metadata()\n assert df.data_type == {\n \"iata\": \"id\",\n \"name\": \"nominal\",\n \"city\": \"nominal\",\n \"state\": \"geographical\",\n \"country\": \"geographical\",\n \"latitude\": \"quantitative\",\n \"longitude\": \"quantitative\",\n }\n\n\ndef test_check_datetime():\n df = pd.DataFrame(\n {\n \"a\": [\"2020-01-01\"],\n \"b\": [\"20-01-01\"],\n \"c\": [\"20-jan-01\"],\n \"d\": [\"20-january-01\"],\n \"e\": [\"2020 January 01\"],\n \"f\": [\"2020 January 01 00:00:00 pm PT\"],\n \"g\": [\"2020 January 01 13:00:00\"],\n \"h\": [\"2020 January 01 23:59:59 GTC-6\"],\n }\n )\n df.maintain_metadata()\n assert df.data_type == {\n \"a\": \"temporal\",\n \"b\": \"temporal\",\n \"c\": \"temporal\",\n \"d\": \"temporal\",\n \"e\": \"temporal\",\n \"f\": \"temporal\",\n \"g\": \"temporal\",\n \"h\": \"temporal\",\n }\n\n\ndef test_check_datetime_numeric_values():\n car_df = pd.read_csv(\"lux/data/car.csv\")\n car_df = car_df.rename(columns={\"Year\": \"blah\"})\n car_df.maintain_metadata()\n assert car_df.data_type[\"blah\"] == \"temporal\"\n\n spotify_df = pd.read_csv(\n \"https://raw.githubusercontent.com/lux-org/lux-datasets/master/data/spotify.csv\"\n )\n spotify_df = spotify_df.rename(columns={\"year\": \"blah\"})\n spotify_df.maintain_metadata()\n assert spotify_df.data_type[\"blah\"] == \"temporal\"\n assert spotify_df.data_type[\"release_date\"] == \"temporal\"\n\n\ndef test_check_stock():\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/stocks.csv?raw=true\")\n df.maintain_metadata()\n assert df.data_type == {\n \"symbol\": \"nominal\",\n \"monthdate\": \"temporal\",\n \"price\": \"quantitative\",\n }, \"Stock dataset type detection error\"\n\n\ndef test_check_college():\n df = pd.read_csv(\"lux/data/college.csv\")\n df.maintain_metadata()\n assert df.data_type == {\n \"Name\": \"nominal\",\n \"PredominantDegree\": \"nominal\",\n \"HighestDegree\": \"nominal\",\n \"FundingModel\": \"nominal\",\n \"Region\": \"nominal\",\n \"Geography\": \"nominal\",\n \"AdmissionRate\": \"quantitative\",\n \"ACTMedian\": \"quantitative\",\n \"SATAverage\": \"quantitative\",\n \"AverageCost\": \"quantitative\",\n \"Expenditure\": \"quantitative\",\n \"AverageFacultySalary\": \"quantitative\",\n \"MedianDebt\": \"quantitative\",\n \"AverageAgeofEntry\": \"quantitative\",\n \"MedianFamilyIncome\": \"quantitative\",\n \"MedianEarnings\": \"quantitative\",\n }\n\n\ndef test_float_categorical():\n values = [\n {\"A\": 6.0, \"B\": 1.0, \"C\": 1.0, \"D\": 3.0, \"E\": 2.0, \"F\": 5.0},\n {\"A\": 5.0, \"B\": 2.0, \"C\": 2.0, \"D\": 2.0, \"E\": 2.0, \"F\": 3.0},\n {\"A\": 3.0, \"B\": 6.0, \"C\": 3.0, \"D\": 3.0, \"E\": 2.0, \"F\": 5.0},\n {\"A\": 6.0, \"B\": 3.0, \"C\": 3.0, \"D\": 2.0, \"E\": 2.0, \"F\": 2.0},\n {\"A\": 7.0, \"B\": 4.0, \"C\": 2.0, \"D\": 2.0, \"E\": 2.0, \"F\": 4.0},\n {\"A\": 5.0, \"B\": 3.0, \"C\": 6.0, \"D\": 3.0, \"E\": 3.0, \"F\": 4.0},\n {\"A\": 3.0, \"B\": 4.0, \"C\": 3.0, \"D\": 6.0, \"E\": 5.0, \"F\": 5.0},\n {\"A\": 3.0, \"B\": 3.0, \"C\": 2.0, \"D\": 2.0, \"E\": 4.0, \"F\": 5.0},\n {\"A\": 3.0, \"B\": 2.0, \"C\": 2.0, \"D\": 2.0, \"E\": 2.0, \"F\": 4.0},\n {\"A\": 1.0, \"B\": 2.0, \"C\": 2.0, \"D\": 2.0, \"E\": 2.0, \"F\": 6.0},\n {\"A\": 3.0, \"B\": 3.0, \"C\": 2.0, \"D\": 3.0, \"E\": 3.0, \"F\": 5.0},\n {\"A\": 7.0, \"B\": 1.0, \"C\": 1.0, \"D\": 2.0, \"E\": 
2.0, \"F\": 3.0},\n {\"A\": 6.0, \"B\": 2.0, \"C\": 2.0, \"D\": 2.0, \"E\": 2.0, \"F\": 3.0},\n {\"A\": 2.0, \"B\": 3.0, \"C\": 2.0, \"D\": 3.0, \"E\": 3.0, \"F\": 4.0},\n {\"A\": 6.0, \"B\": 2.0, \"C\": 3.0, \"D\": 3.0, \"E\": 3.0, \"F\": 5.0},\n ]\n df = pd.DataFrame(values)\n df.maintain_metadata()\n inverted_data_type = lux.config.executor.invert_data_type(df.data_type)\n assert inverted_data_type[\"nominal\"] == [\n \"A\",\n \"B\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n ], \"Float column should be detected as categorical\"\n for x in list(df.dtypes):\n assert x == \"float64\", \"Source dataframe preserved as float dtype\"\n\n\ndef test_set_data_type():\n df = pd.read_csv(\n \"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true\"\n )\n with pytest.warns(UserWarning) as w:\n df._repr_html_()\n assert \"starter template that you can use\" in str(w[-1].message)\n assert \"df.set_data_type\" in str(w[-1].message)\n\n df.set_data_type({\"Month\": \"nominal\", \"Year\": \"nominal\"})\n assert df.data_type[\"Month\"] == \"nominal\"\n assert df.data_type[\"Year\"] == \"nominal\"\n with warnings.catch_warnings() as w:\n warnings.simplefilter(\"always\")\n df._repr_html_()\n assert not w\n\n\ndef test_set_data_type_invalid():\n df = pd.read_csv(\n \"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true\"\n )\n with pytest.raises(ValueError):\n df.set_data_type({\"Month\": \"nomnal\", \"Year\": \"nomnal\"})\n\n\ndef test_set_wrong_data_type():\n df = pd.read_csv(\n \"https://github.com/lux-org/lux-datasets/blob/master/data/real_estate_tutorial.csv?raw=true\"\n )\n df.set_data_type({\"Year\": \"quantitative\"})\n assert df.data_type[\"Year\"] == \"quantitative\"\n\n\ndef test_id_with_label():\n df = pd.read_csv(\n \"https://github.com/lux-org/lux-datasets/blob/master/data/state_timeseries.csv?raw=true\"\n )\n df.maintain_metadata()\n assert df.data_type == {\"Date\": \"temporal\", \"State\": \"geographical\", \"Value\": \"quantitative\"}\n\n\ndef test_ID_random():\n \"\"\"Tests whether a ID column not satisfying other properties of an ID gets recognized.\"\"\"\n values = [\n {\"ID\": random.randint(0, 1000), \"A\": 6.0, \"B\": 1.0, \"C\": 1.0, \"D\": 3.0, \"E\": 2.0, \"F\": 5.0}\n for x in range(1000)\n ]\n df = pd.DataFrame(values)\n df.maintain_metadata()\n assert df.data_type == {\n \"ID\": \"quantitative\",\n \"A\": \"nominal\",\n \"B\": \"nominal\",\n \"C\": \"nominal\",\n \"D\": \"nominal\",\n \"E\": \"nominal\",\n \"F\": \"nominal\",\n }\n\n\ndef test_ID():\n \"\"\"Tests different ways of writing id\"\"\"\n values = [{\"ID\": x, \"A\": 6.0, \"B\": 1.0, \"C\": 1.0, \"D\": 3.0, \"E\": 2.0, \"F\": 5.0} for x in range(1000)]\n df = pd.DataFrame(values)\n df.maintain_metadata()\n assert df.data_type == {\n \"ID\": \"id\",\n \"A\": \"nominal\",\n \"B\": \"nominal\",\n \"C\": \"nominal\",\n \"D\": \"nominal\",\n \"E\": \"nominal\",\n \"F\": \"nominal\",\n }\n\n\ndef test_id_aug_test():\n \"\"\"Tests in a different dataset\n Reference: https://www.kaggle.com/arashnic/hr-analytics-job-change-of-data-scientists\n \"\"\"\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/aug_test.csv?raw=true\")\n df.maintain_metadata()\n assert df.data_type == {\n \"enrollee_id\": \"id\",\n \"city\": \"nominal\",\n \"city_development_index\": \"quantitative\",\n \"gender\": \"nominal\",\n \"relevent_experience\": \"nominal\",\n \"enrolled_university\": \"nominal\",\n \"education_level\": \"nominal\",\n 
\"major_discipline\": \"nominal\",\n \"experience\": \"nominal\",\n \"company_size\": \"nominal\",\n \"company_type\": \"nominal\",\n \"last_new_job\": \"nominal\",\n \"training_hours\": \"quantitative\",\n }\n\n\ndef test_id_music_data():\n \"\"\"Tests in a different dataset if a column not named as an ID is recognized as an identification.\n Reference: https://www.kaggle.com/yamaerenay/spotify-dataset-19212020-160k-tracks\n \"\"\"\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/spotify.csv?raw=true\")\n df[\"unique_num\"] = df[\"id\"]\n df.drop(columns=[\"id\"])\n df.maintain_metadata()\n assert df.data_type == {\n \"valence\": \"quantitative\",\n \"year\": \"temporal\",\n \"acousticness\": \"quantitative\",\n \"artists\": \"nominal\",\n \"danceability\": \"quantitative\",\n \"duration_ms\": \"quantitative\",\n \"energy\": \"quantitative\",\n \"explicit\": \"nominal\",\n \"unique_num\": \"id\",\n \"instrumentalness\": \"quantitative\",\n \"key\": \"nominal\",\n \"liveness\": \"quantitative\",\n \"loudness\": \"quantitative\",\n \"mode\": \"nominal\",\n \"name\": \"nominal\",\n \"popularity\": \"quantitative\",\n \"release_date\": \"temporal\",\n \"speechiness\": \"quantitative\",\n \"tempo\": \"quantitative\",\n \"id\": \"id\",\n }\n\n\ndef test_id_absenteeism_data():\n \"\"\" Tests whether an id named column is not recognized because even though it is named an id, it is not with its nature. \"\"\"\n df = pd.read_csv(\"https://github.com/lux-org/lux-datasets/blob/master/data/absenteeism.csv?raw=true\")\n df.maintain_metadata()\n assert df.data_type == {\n \"ID\": \"quantitative\",\n \"Reason for absence\": \"quantitative\",\n \"Month of absence\": \"nominal\",\n \"Day of the week\": \"nominal\",\n \"Seasons\": \"nominal\",\n \"Transportation expense\": \"quantitative\",\n \"Distance from Residence to Work\": \"quantitative\",\n \"Service time\": \"nominal\",\n \"Age\": \"quantitative\",\n \"Work load Average/day \": \"quantitative\",\n \"Hit target\": \"nominal\",\n \"Disciplinary failure\": \"nominal\",\n \"Education\": \"nominal\",\n \"Son\": \"nominal\",\n \"Social drinker\": \"nominal\",\n \"Social smoker\": \"nominal\",\n \"Pet\": \"nominal\",\n \"Weight\": \"quantitative\",\n \"Height\": \"nominal\",\n \"Body mass index\": \"nominal\",\n \"Absenteeism time in hours\": \"nominal\",\n }\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
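Note: the lux record above exercises pandas type inference through just two entry points, pandas.read_csv and pandas.DataFrame. A minimal sketch of both; the inline values mimic the list-of-dicts style used by test_float_categorical, and the path in the comment is one the tests actually read:

import pandas as pd

# Inline construction from a list of dicts, as in test_float_categorical:
df = pd.DataFrame([{"A": 6.0, "B": 1.0}, {"A": 5.0, "B": 2.0}])
print(df.dtypes)  # both columns inferred as float64

# read_csv accepts local paths or URLs, e.g. as the tests do:
# cars = pd.read_csv("lux/data/car.csv")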
leipzig/gatk-sv | [
"bf3704bd1d705339577530e267cd4d1b2f77a17f",
"96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a",
"96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a",
"96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a"
] | [
"src/sv-pipeline/pre_SVCalling_and_QC/raw_vcf_qc/calc_num_svs_pick_outlier.py",
"src/svtk/svtk/cli/pesr_test.py",
"src/sv-pipeline/scripts/downstream_analysis_and_filtering/filter_cleanup_and_QUAL_recalibration.PCRMinus_only.py",
"src/svtk/svtk/cxsv/rescan_single_enders.py"
] | [
"#!/usr/bin/env python\n\nimport sys\nfrom typing import Sequence, Set\nimport argparse\nimport numpy\nimport pandas\n\n\n_zero_svs_are_outliers = True\n_outlier_std_threshold = 5.0\n_column_order = [\"CHROM\", \"SVTYPE\", \"Mean\", \"Median\", \"STD\",\n \"Outlier_Sample\", \"Outlier_Number\", \"Outlier_Cate\"]\n\n\ndef read_statfile(statfile: str) -> pandas.DataFrame:\n \"\"\"\n Special function needed to read in stats data table because\n a) pandas doesn't understand that the '#' means header\n b) there are multiple stats files concatenated together, resulting in headers being randomly mixed in\n Args:\n statfile: str\n File name with concatenated tab-separated tables of variant stats\n Returns:\n stats_data: pandas.DataFrame\n Table of variant stats\n \"\"\"\n with open(statfile, 'r') as f_in:\n # get column header from first line, stripping '#'\n columns = f_in.readline().lstrip('#').split()\n # read rest of tsv file, using these columns as header and ignoring any future lines starting with '#'\n return pandas.read_csv(statfile, sep='\\t', comment='#', names=columns)\n\n\ndef pick_outliers_by_group(\n chrom: str,\n sv_type: str,\n check_stats: pandas.DataFrame,\n all_samples: Set[str],\n zero_svs_are_outliers: bool = _zero_svs_are_outliers,\n outlier_std_threshold: float = _outlier_std_threshold\n) -> pandas.DataFrame:\n \"\"\"\n For given combination of contig and SV type, find samples that have outlier number of SVs. Return table of outliers\n along with statistics about SV count.\n Args:\n chrom: str\n Contig for checking SV counts\n sv_type: str\n SV type for checking SV counts\n check_stats: pandas.DataFrame\n Table with SV counts on this contig with this sv_type\n all_samples: Set[str]\n Set of all sample IDs in cohort\n zero_svs_are_outliers: bool\n Whether to treat samples with no counts as automatic outliers, or explicitly code as zero counts\n outlier_std_threshold: float\n Threshold for outlier status as multiple of standard deviation of SV counts\n Returns:\n outliers: pandas.DataFrame\n Table of outliers\n \"\"\"\n # find samples that are missing: they have 0 SVs of this type on this contig\n missing_samples = pandas.DataFrame(\n tuple(\n {\"CHROM\": chrom, \"SVTYPE\": sv_type, \"SAMPLE\": sample_id, \"NUM\": 0}\n for sample_id in all_samples.difference(check_stats[\"SAMPLE\"])\n )\n )\n\n if zero_svs_are_outliers:\n # THIS IS THE ORIGINAL PIPELINE BEHAVIOR\n # compute basic stats about observed nonzero SV counts\n count_mean = check_stats[\"NUM\"].mean()\n count_median = check_stats[\"NUM\"].median()\n count_std = check_stats[\"NUM\"].std()\n # Amongst samples that have SVs, find counts deviating by more than set multiple of std from the median\n is_outlier = numpy.abs(\n check_stats[\"NUM\"] - count_median) > outlier_std_threshold * count_std\n # Treat missing samples as outliers.\n outliers = pandas.concat(\n (missing_samples, check_stats.loc[is_outlier]), axis=0)\n else:\n # THIS FINDS FEWER, MORE MEANINGFUL OUTLIERS\n # Which samples are missing / included but have zero counts is unpredictable.\n # 1) concatenate all samples together\n check_stats = pandas.concat((check_stats, missing_samples), axis=0)\n # 2) compute stats from non-zero SV counts\n nonzero = check_stats[\"NUM\"] > 0\n count_mean = check_stats.loc[nonzero, \"NUM\"].mean()\n count_median = check_stats.loc[nonzero, \"NUM\"].median()\n count_std = check_stats.loc[nonzero, \"NUM\"].std()\n # 3) check outliers by usual means from those stats\n # Set threshold to be set multiple of greater of: std of 
counts, sqrt(median of counts)\n # (i.e. greater of std or expected Poisson std)\n # Find counts those deviating by more than threshold from the median (including zeros)\n is_outlier = (\n numpy.abs(check_stats[\"NUM\"] - count_median) >\n outlier_std_threshold * numpy.maximum(count_std, numpy.sqrt(count_median))\n )\n outliers = check_stats.loc[is_outlier].copy()\n\n if outliers.empty:\n return pandas.DataFrame([], columns=_column_order)\n # augment outlier table with some statistics\n outliers[\"Mean\"] = count_mean\n outliers[\"Median\"] = count_median\n outliers[\"STD\"] = count_std\n outliers[\"Outlier_Cate\"] = numpy.where(\n outliers[\"NUM\"] > count_median, \"high\", \"low\")\n # rename and re-order columns\n return outliers.rename({\"NUM\": \"Outlier_Number\", \"SAMPLE\": \"Outlier_Sample\"}, axis=1).reindex(_column_order, axis=1)\n\n\ndef pick_outliers(\n stats_data: pandas.DataFrame,\n zero_svs_are_outliers: bool = _zero_svs_are_outliers,\n outlier_std_threshold: float = _outlier_std_threshold\n) -> pandas.DataFrame:\n \"\"\"\n Find samples that have outlier number of SVs when broken down by contig and SV type. Return table of outliers\n along with statistics about SV count.\n Args:\n stats_data: pandas.DataFrame\n Table with SV counts\n zero_svs_are_outliers: bool\n Whether to treat samples with no counts as automatic outliers, or explicitly code as zero counts\n outlier_std_threshold: float\n Threshold for outlier status as multiple of standard deviation of SV counts\n Returns:\n outliers: pandas.DataFrame\n Table of outliers\n \"\"\"\n # get set of all samples in stats data\n all_samples = set(stats_data[\"SAMPLE\"])\n\n # loop over unique combinations of contig and sv type\n # find outliers from each unique combination\n # and concatenate those outliers into one table\n outliers = pandas.concat(\n tuple(\n pick_outliers_by_group(\n chrom=chrom, sv_type=sv_type, check_stats=check_stats, all_samples=all_samples,\n zero_svs_are_outliers=zero_svs_are_outliers, outlier_std_threshold=outlier_std_threshold\n )\n for (chrom, sv_type), check_stats in stats_data.groupby(\n [\"CHROM\", \"SVTYPE\"], sort=False, as_index=False, group_keys=False\n )\n ),\n axis=0\n )\n return outliers\n\n\ndef write_outliers_file(\n outliers: pandas.DataFrame,\n outname: str,\n outlier_type: str\n):\n \"\"\"\n Write outliers of the appropriate type (\"low\" or \"high\") to TSV file.\n Args:\n outliers: pandas.DataFrame\n Table of outlier data\n outname: str\n Base name of outlier TSV file. Final file name will have \".low\" or \".high\" appended to it.\n outlier_type: str\n \"low\" or \"high\".\n \"\"\"\n # write outliers to tsv. Add \"#\" in front of header\n with open(outname + \".\" + outlier_type, 'w') as f_out:\n f_out.write(\"#\") # add '#' in front of header\n outlier_wanted = outliers[\"Outlier_Cate\"] == outlier_type\n outliers.loc[outlier_wanted].to_csv(f_out, sep='\\t', index=False)\n\n\ndef calc_num_svs_pick_outlier(\n statfile: str,\n outname: str,\n zero_svs_are_outliers: bool = _zero_svs_are_outliers,\n outlier_std_threshold: float = _outlier_std_threshold\n):\n \"\"\"\n Find samples that have outlier number of SVs when broken down by contig and SV type.\n Write two tables of outliers, along with statistics about SV count: one for those with above-median counts (\"high\")\n and one for those at median or below (\"low\").\n Args:\n statfile: str\n TSV file with table with SV counts\n outname: str\n Base name for saving outlier files. 
Low file will have \".low\" appended to the name, and high file will have\n \".high\"\n zero_svs_are_outliers: bool\n Whether to treat samples with no counts as automatic outliers, or explicitly code as zero counts\n outlier_std_threshold: float\n Threshold for outlier status as multiple of standard deviation of SV counts\n \"\"\"\n stats_data = read_statfile(statfile)\n outliers = pick_outliers(stats_data, zero_svs_are_outliers=zero_svs_are_outliers,\n outlier_std_threshold=outlier_std_threshold)\n write_outliers_file(outliers, outname, \"low\")\n write_outliers_file(outliers, outname, \"high\")\n\n\ndef _parse_arguments(argv: Sequence[str]) -> argparse.Namespace:\n # noinspection PyTypeChecker\n parser = argparse.ArgumentParser(\n description=\"Find outliers in SV counts broken down by contig and SV type\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\"statfile\", type=str,\n help=\"name of stats concatinated from all samples\")\n parser.add_argument(\"outname\", type=str, help=\"name of output file\")\n parser.add_argument(\"-z\", \"--zero-counts-are-not-outliers\", action=\"store_true\",\n help=\"don't make zero SV counts an automatic outlier, check deviation from median as usual\")\n parser.add_argument(\"-t\", \"--outlier-std-threshold\", type=float, default=_outlier_std_threshold,\n help=\"threshold multiple of std of counts for outliers\")\n return parser.parse_args(argv[1:])\n\n\nif __name__ == \"__main__\":\n args = _parse_arguments(sys.argv)\n calc_num_svs_pick_outlier(statfile=args.statfile, outname=args.outname,\n zero_svs_are_outliers=not args.zero_counts_are_not_outliers,\n outlier_std_threshold=args.outlier_std_threshold)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright © 2017 Matthew Stone <[email protected]>\n# Distributed under terms of the MIT license.\n\n\"\"\"\nCalculate enrichment of clipped reads or discordant pairs at SV breakpoints.\n\"\"\"\n\nimport argparse\nimport sys\nimport pysam\nimport pandas as pd\nfrom svtk.pesr import SRTestRunner, PETestRunner, PETest, SRTest\n\n\ndef sr_test(argv):\n parser = argparse.ArgumentParser(\n description=\"Calculate enrichment of clipped reads at SV breakpoints.\",\n prog='svtk sr-test',\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('vcf',\n help='VCF of variant calls. Standardized to include '\n 'CHR2, END, SVTYPE, STRANDS in INFO.')\n parser.add_argument('countfile', help='Tabix indexed file of split counts.'\n ' Columns: chrom,pos,clip,count,sample')\n parser.add_argument('fout',\n help='Output table of most significant start/end'\n 'positions.')\n parser.add_argument('-w', '--window', type=int, default=100,\n help='Window around variant start/end to consider for '\n 'split read support. [100]')\n parser.add_argument('--common', default=False,\n action='store_true', help='Ignore background for common AF')\n parser.add_argument('-b', '--background', type=int, default=160,\n help='Number of background samples to choose for '\n 'comparison in t-test. [160]')\n parser.add_argument('-s', '--samples', type=argparse.FileType('r'),\n default=None,\n help='Whitelist of samples to restrict testing to.')\n parser.add_argument('--index', default=None,\n help='Tabix index of discordant pair file. Required if '\n 'discordant pair file is hosted remotely.')\n # TODO: add normalization\n parser.add_argument('--medianfile', default=None,\n help='Median coverage statistics for each library '\n '(optional). If provided, each sample\\'s split '\n 'counts will be normalized accordingly. 
'\n 'Same format as RdTest, one column per sample.')\n parser.add_argument('--log', action='store_true', default=False,\n help='Print progress log to stderr.')\n\n # Print help if no arguments specified\n if len(argv) == 0:\n parser.print_help()\n sys.exit(1)\n args = parser.parse_args(argv)\n\n vcf = pysam.VariantFile(args.vcf)\n\n if args.index is not None:\n countfile = pysam.TabixFile(args.countfile, index=args.index,\n parser=pysam.asTuple())\n else:\n if args.countfile.startswith('http'):\n raise Exception('Must provide tabix index with remote URL')\n countfile = pysam.TabixFile(args.countfile, parser=pysam.asTuple())\n\n if args.fout in '- stdout'.split():\n fout = sys.stdout\n else:\n fout = open(args.fout, 'w')\n\n header = 'name coord pos log_pval called_median bg_median bg_frac'.split()\n fout.write('\\t'.join(header) + '\\n')\n\n if args.samples is not None:\n whitelist = [s.strip() for s in args.samples.readlines()]\n else:\n whitelist = None\n\n if args.medianfile is not None:\n medians = pd.read_table(args.medianfile)\n medians = pd.melt(medians, var_name='sample', value_name='median_cov')\n else:\n medians = None\n\n runner = SRTestRunner(vcf, countfile, fout, args.background, args.common,\n args.window, whitelist, medians=medians, log=args.log)\n runner.run()\n\n\ndef pe_test(argv):\n parser = argparse.ArgumentParser(\n description=\"Calculate enrichment of discordant pairs at SV breakpoints.\",\n prog='svtk pe-test',\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('vcf', help='Variants.')\n parser.add_argument('disc', help='Table of discordant pair coordinates.')\n parser.add_argument('fout', type=argparse.FileType('w'),\n help='Output table of PE counts.')\n parser.add_argument('-o', '--window-out', type=int, default=500,\n help='Window outside breakpoint to query for '\n 'discordant pairs. [500]')\n parser.add_argument('-i', '--window-in', type=int, default=50,\n help='Window inside breakpoint to query for '\n 'discordant pairs. [50]')\n parser.add_argument('-b', '--background', type=int, default=160,\n help='Number of background samples to sample for PE '\n 'evidence. [160]')\n parser.add_argument('--common', default=False,\n action='store_true', help='Ignore background for common AF')\n parser.add_argument('-s', '--samples', type=argparse.FileType('r'),\n default=None,\n help='Whitelist of samples to restrict testing to.')\n parser.add_argument('--index', default=None,\n help='Tabix index of discordant pair file. Required if '\n 'discordant pair file is hosted remotely.')\n parser.add_argument('--medianfile', default=None,\n help='Median coverage statistics for each library '\n '(optional). If provided, each sample\\'s split '\n 'counts will be normalized accordingly. 
'\n 'Same format as RdTest, one column per sample.')\n parser.add_argument('--log', action='store_true', default=False,\n help='Print progress log to stderr.')\n\n if len(argv) == 0:\n parser.print_help()\n sys.exit(1)\n args = parser.parse_args(argv)\n\n if args.vcf in '- stdin'.split():\n vcf = pysam.VariantFile(sys.stdin)\n else:\n vcf = pysam.VariantFile(args.vcf)\n\n if args.fout in '- stdout'.split():\n fout = sys.stdout\n else:\n fout = args.fout\n\n header = 'name log_pval called_median bg_median bg_frac'.split()\n args.fout.write('\\t'.join(header) + '\\n')\n\n if args.samples is not None:\n whitelist = [s.strip() for s in args.samples.readlines()]\n else:\n whitelist = None\n\n if args.index is not None:\n discfile = pysam.TabixFile(args.disc, index=args.index)\n else:\n if args.disc.startswith('http'):\n raise Exception('Must provide tabix index with remote URL')\n discfile = pysam.TabixFile(args.disc)\n\n if args.medianfile is not None:\n medians = pd.read_table(args.medianfile)\n medians = pd.melt(medians, var_name='sample', value_name='median_cov')\n else:\n medians = None\n\n runner = PETestRunner(vcf, discfile, fout, args.background, args.common,\n args.window_in, args.window_out, whitelist, medians=medians, log=args.log)\n\n runner.run()\n\n\ndef count_pe(argv):\n parser = argparse.ArgumentParser(\n description=\"Count discordant pairs supporting a SV breakpoints.\",\n prog='svtk count-pe',\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('vcf', help='Variants.')\n parser.add_argument('disc', help='Table of discordant pair coordinates.')\n parser.add_argument('fout', type=argparse.FileType('w'),\n help='Output table of PE counts.')\n parser.add_argument('-o', '--window-out', type=int, default=500,\n help='Window outside breakpoint to query for '\n 'discordant pairs. [500]')\n parser.add_argument('-i', '--window-in', type=int, default=50,\n help='Window inside breakpoint to query for '\n 'discordant pairs. [50]')\n parser.add_argument('--common', default=False,\n action='store_true', help='Ignore background for common AF')\n parser.add_argument('-s', '--samples', type=argparse.FileType('r'),\n default=None,\n help='Whitelist of samples to restrict testing to.')\n parser.add_argument('--index', default=None,\n help='Tabix index of discordant pair file. Required if '\n 'discordant pair file is hosted remotely.')\n parser.add_argument('--medianfile', default=None,\n help='Median coverage statistics for each library '\n '(optional). If provided, each sample\\'s split '\n 'counts will be normalized accordingly. 
'\n 'Same format as RdTest, one column per sample.')\n\n if len(argv) == 0:\n parser.print_help()\n sys.exit(1)\n args = parser.parse_args(argv)\n\n if args.vcf in '- stdin'.split():\n vcf = pysam.VariantFile(sys.stdin)\n else:\n vcf = pysam.VariantFile(args.vcf)\n\n if args.fout in '- stdout'.split():\n fout = sys.stdout\n else:\n fout = args.fout\n\n header = 'name sample count'.split()\n args.fout.write('\\t'.join(header) + '\\n')\n\n if args.samples is not None:\n whitelist = [s.strip() for s in args.samples.readlines()]\n else:\n whitelist = [s for s in vcf.header.samples]\n\n if args.index is not None:\n discfile = pysam.TabixFile(args.disc, index=args.index)\n else:\n if args.disc.startswith('http'):\n raise Exception('Must provide tabix index with remote URL')\n discfile = pysam.TabixFile(args.disc)\n\n if args.medianfile is not None:\n medians = pd.read_table(args.medianfile)\n medians = pd.melt(medians, var_name='sample', value_name='median_cov')\n else:\n medians = None\n\n petest = PETest(discfile, args.common, args.window_in,\n args.window_out, medians=medians)\n\n for record in vcf:\n counts = petest.load_counts(record, args.window_in, args.window_out)\n counts = petest.normalize_counts(counts)\n counts = counts.set_index('sample')\n counts = counts.reindex(whitelist).fillna(0).astype(int)\n counts = counts.reset_index()\n counts['name'] = record.id\n cols = 'name sample count'.split()\n\n for row in counts[cols].as_matrix():\n fout.write('\\t'.join([str(x) for x in row]) + '\\n')\n # counts[cols].to_csv(fout, header=False, index=False, sep='\\t', na_rep='NA')\n\n\ndef count_sr(argv):\n parser = argparse.ArgumentParser(\n description=\"Count clipped reads at SV breakpoints. Unwindowed.\",\n prog='svtk count-sr',\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('vcf',\n help='VCF of variant calls. Standardized to include '\n 'CHR2, END, SVTYPE, STRANDS in INFO.')\n parser.add_argument('countfile', help='Tabix indexed file of split counts.'\n ' Columns: chrom,pos,clip,count,sample')\n parser.add_argument('fout',\n help='Output table of split read counts.')\n parser.add_argument('--common', default=False,\n action='store_true', help='Ignore background for common AF')\n parser.add_argument('-s', '--samples', type=argparse.FileType('r'),\n default=None,\n help='Whitelist of samples to restrict testing to.')\n parser.add_argument('--index', default=None,\n help='Tabix index of discordant pair file. Required if '\n 'discordant pair file is hosted remotely.')\n # TODO: add normalization\n parser.add_argument('--medianfile', default=None,\n help='Median coverage statistics for each library '\n '(optional). If provided, each sample\\'s split '\n 'counts will be normalized accordingly. 
'\n 'Same format as RdTest, one column per sample.')\n # Print help if no arguments specified\n if len(argv) == 0:\n parser.print_help()\n sys.exit(1)\n args = parser.parse_args(argv)\n\n vcf = pysam.VariantFile(args.vcf)\n\n if args.index is not None:\n countfile = pysam.TabixFile(args.countfile, index=args.index,\n parser=pysam.asTuple())\n else:\n if args.countfile.startswith('http'):\n raise Exception('Must provide tabix index with remote URL')\n countfile = pysam.TabixFile(args.countfile, parser=pysam.asTuple())\n\n if args.fout in '- stdout'.split():\n fout = sys.stdout\n else:\n fout = open(args.fout, 'w')\n\n header = 'name coord sample count'.split()\n fout.write('\\t'.join(header) + '\\n')\n\n if args.samples is not None:\n whitelist = [s.strip() for s in args.samples.readlines()]\n else:\n whitelist = [s for s in vcf.header.samples]\n\n if args.medianfile is not None:\n medians = pd.read_table(args.medianfile)\n medians = pd.melt(medians, var_name='sample', value_name='median_cov')\n else:\n medians = None\n srtest = SRTest(countfile, args.common, window=0, medians=medians)\n\n for record in vcf:\n for coord in 'start end'.split():\n if coord == 'start':\n pos, strand, chrom = record.pos, record.info['STRANDS'][0], record.chrom\n else:\n # TODO: With a properly formatted VCF, should be using END2 instead of END here\n pos, strand, chrom = record.stop, record.info['STRANDS'][1], record.info['CHR2']\n\n counts = srtest.load_counts(chrom, pos, strand)\n counts = srtest.normalize_counts(counts)\n counts = counts['sample count'.split()]\n counts = counts.set_index('sample')\n counts = counts.reindex(whitelist).fillna(0).astype(int)\n counts = counts.reset_index()\n counts['name'] = record.id\n counts['coord'] = coord\n\n for row in counts[header].values:\n fout.write('\\t'.join([str(x) for x in row]) + '\\n')\n # counts[header].to_csv(fout, header=False, index=False, sep='\\t', na_rep='NA')\n",
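The four subcommands in the file above share one normalization pattern: when --medianfile is given, a wide RdTest-style coverage table (one column per sample) is read with pd.read_table and melted into a long sample/median_cov table before being handed to the test runners. A minimal sketch of that reshaping on an invented two-sample table (the column names match the calls above; the values are placeholders):

import io
import pandas as pd

# Hypothetical RdTest-style median coverage table: one column per sample.
wide = pd.read_table(io.StringIO("sampleA\tsampleB\n30.0\t28.5\n"))

# Same reshaping as in sr_test/pe_test/count_pe/count_sr above.
medians = pd.melt(wide, var_name='sample', value_name='median_cov')
print(medians)
#     sample  median_cov
# 0  sampleA        30.0
# 1  sampleB        28.5

Note that count_sr iterates rows via counts[header].values while count_pe still uses DataFrame.as_matrix, which was removed in pandas 1.0; on the pandas versions listed for this record (1.1 through 2.0), .values or .to_numpy() is the working spelling.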
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright © 2018 Ryan Collins <[email protected]>\n# Distributed under terms of the MIT license.\n\n\"\"\"\nApply final FILTER cleanup and QUAL score recalibration\n\"\"\"\n\n\nimport argparse\nimport sys\nimport pysam\nimport csv\nfrom numpy import median\nfrom svtk.utils import is_biallelic\n\n\n# Define global variables\nfilts_for_info = 'PESR_GT_OVERDISPERSION HIGH_SR_BACKGROUND BOTHSIDES_SUPPORT VARIABLE_ACROSS_BATCHES'.split(\n ' ')\nfilts_to_remove = 'HIGH_PCRPLUS_NOCALL_RATE HIGH_PCRMINUS_NOCALL_RATE'.split(\n ' ')\nfilts_to_remove = filts_to_remove + filts_for_info\nNULL_GTs = [(None, None), (None, )]\nREF_GTs = [(0, 0), (0, ), (None, 2)]\nNULL_and_REF_GTs = NULL_GTs + REF_GTs\nHET_GTs = [(0, 1), (None, 1), (None, 3)]\n\n\ndef import_callrates(table_in):\n \"\"\"\n Import table of variant callrates\n \"\"\"\n\n callrates = {}\n\n with open(table_in) as tsvfile:\n reader = csv.reader(tsvfile, delimiter='\\t')\n for vid, callrate in reader:\n if vid not in callrates.keys():\n callrates[vid] = float(callrate)\n\n return callrates\n\n\n# def get_call_rate(record, samples):\n# \"\"\"\n# Get fraction of samples with non-null genotypes\n# \"\"\"\n# total_s = [s for s in record.samples if s in samples]\n# total = len(total_s)\n# nocall_s = [s for s in total_s if record.samples[s]['GT'] in NULL_GTs]\n# nocall = len(nocall_s)\n# callrate = 1 - ( nocall / total )\n# return callrate\n\n\ndef recal_qual_score(record):\n \"\"\"\n Recalibrate quality score for a single variant\n \"\"\"\n quals = []\n for s in [s for s in record.samples]:\n GT = record.samples[s]['GT']\n if GT in NULL_and_REF_GTs:\n continue\n elif GT in HET_GTs:\n quals.append(record.samples[s]['GQ'])\n else:\n quals.append(999)\n\n if len(quals) > 0:\n return int(median(quals))\n\n\ndef cleanup_vcf(vcf, fout, callrates, min_callrate_global=0.85,\n min_callrate_smallDels=0.95):\n\n # minus_samples = [s for s in vcf.header.samples if s not in plus_samples]\n # male_minus_samples = [s for s in minus_samples if s not in male_samples]\n\n for record in vcf:\n # Move several filters from FILTER to INFO\n for filt in filts_for_info:\n if filt in record.filter:\n record.info[filt] = True\n\n # Move HIGH_SR_BACKGROUND\n\n # Remove all HIGH_NOCALL_RATE and HIGH_SR_BACKGROUND tags from FILTER column\n newfilts = [\n filt for filt in record.filter if filt not in filts_to_remove]\n record.filter.clear()\n for filt in newfilts:\n record.filter.add(filt)\n if len(record.filter) == 0:\n record.filter.add('PASS')\n\n # #Mark sites with low PCR+ call rate\n # plus_callrate = get_call_rate(record, plus_samples)\n # if plus_callrate < min_callrate:\n # if 'LOW_PCRPLUS_CALL_RATE' not in record.info.keys():\n # record.info.keys().append('LOW_PCRPLUS_CALL_RATE')\n # record.info['LOW_PCRPLUS_CALL_RATE'] = True\n\n # Mark sites with low PCR- call rate\n if record.id in callrates.keys():\n callrate = callrates[record.id]\n # Mark small (300bp-1kb) deletions with stricter 5% null gt rate,\n # and mark all other variants at specified null gt rate\n if record.info['SVTYPE'] == 'DEL' \\\n and record.info['SVLEN'] < 1000 \\\n and record.info['SVLEN'] > 300:\n if callrate < min_callrate_smallDels:\n record.filter.add('LOW_CALL_RATE')\n else:\n if callrate < min_callrate_global:\n record.filter.add('LOW_CALL_RATE')\n\n # Recalibrate QUAL score for biallelic variants\n if is_biallelic(record):\n newQUAL = recal_qual_score(record)\n if newQUAL is not None:\n record.qual = newQUAL\n\n # Only check for 
non-empty GTs for biallelic variants\n if is_biallelic(record):\n for s in record.samples:\n if record.samples[s]['GT'] not in NULL_and_REF_GTs:\n fout.write(record)\n break\n else:\n fout.write(record)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('vcf', help='Input vcf (supports \"stdin\").')\n # parser.add_argument('PCRPLUS_samples', help='List of PCRPLUS sample IDs.')\n # parser.add_argument('male_samples', help='List of male sample IDs.')\n parser.add_argument('fout', help='Output file (supports \"stdout\").')\n parser.add_argument('--callrate-table', help='TSV of variant IDs and ' +\n 'their corresponding callrates.', required=True)\n parser.add_argument('--min-callrate-global', type=float, help='Minimum fraction ' +\n 'of samples required to have non-missing genotypes for ' +\n 'all variants.', default=0.85)\n parser.add_argument('--min-callrate-smallDels', type=float, help='Minimum fraction ' +\n 'of samples required to have non-missing genotypes for ' +\n 'DEL variants between 300bp-1kb.', default=0.95)\n\n args = parser.parse_args()\n\n # Open connection to input VCF\n if args.vcf in '- stdin'.split():\n vcf = pysam.VariantFile(sys.stdin)\n else:\n vcf = pysam.VariantFile(args.vcf)\n\n # Add new FILTER lines to VCF header\n NEW_FILTERS = ['##FILTER=<ID=LOW_CALL_RATE,Description=\"Site does not meet ' +\n 'minimum requirements for fraction of PCR- samples with non-null ' +\n 'genotypes. Flags sites more prone to false discoveries.\">']\n header = vcf.header\n for filt in NEW_FILTERS:\n header.add_line(filt)\n\n # Remove unused FILTER lines from VCF header\n for filt in filts_to_remove:\n if filt in header.filters:\n header.filters.remove_header(filt)\n\n # Add new INFO lines to VCF header\n NEW_INFOS = ['##INFO=<ID=PESR_GT_OVERDISPERSION,Number=0,Type=Flag,Description=' +\n '\"PESR genotyping data is overdispersed. Flags sites where genotypes' +\n ' are likely noisier.\">',\n '##INFO=<ID=HIGH_SR_BACKGROUND,Number=0,Type=Flag,Description=' +\n '\"Suspicious accumulation of split reads in predicted non-carrier ' +\n 'samples. Flags sites more prone to false discoveries and where ' +\n 'breakpoint precision is reduced.\">',\n '##INFO=<ID=BOTHSIDES_SUPPORT,Number=0,Type=Flag,Description=' +\n '\"Variant has read-level support for both sides of breakpoint. ' +\n 'Indicates higher-confidence variants.\">',\n '##INFO=<ID=VARIABLE_ACROSS_BATCHES,Number=0,Type=Flag,Description=' +\n '\"Site appears at variable frequencies across batches. Accuracy ' +\n 'of allele frequency estimates for these sites may be reduced.\">']\n for info in NEW_INFOS:\n header.add_line(info)\n\n # #Read list of PCR+ samples\n # f_plus_samples = open(args.PCRPLUS_samples, 'r')\n # plus_samples = f_plus_samples.read().splitlines()\n # f_plus_samples.close()\n\n # #Read list of male samples\n # f_male_samples = open(args.male_samples, 'r')\n # male_samples = f_male_samples.read().splitlines()\n # f_male_samples.close()\n\n # Read callrates\n callrates = import_callrates(args.callrate_table)\n\n # Open connection to output VCF\n if args.fout in '- stdout'.split():\n fout = pysam.VariantFile(sys.stdout, 'w', header=vcf.header)\n else:\n fout = pysam.VariantFile(args.fout, 'w', header=vcf.header)\n\n # Cleanup VCF\n cleanup_vcf(vcf, fout, callrates,\n min_callrate_global=args.min_callrate_global,\n min_callrate_smallDels=args.min_callrate_smallDels,)\n\n fout.close()\n\n\nif __name__ == '__main__':\n main()\n",
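recal_qual_score above reduces to a simple rule: ignore null and hom-ref genotypes, take GQ for het genotypes, substitute 999 for anything else (hom-alt), and return the integer median. A dependency-light sketch of the same rule on plain (GT, GQ) pairs, with invented genotype data standing in for a pysam record:

from numpy import median

NULL_and_REF_GTs = [(None, None), (None,), (0, 0), (0,), (None, 2)]
HET_GTs = [(0, 1), (None, 1), (None, 3)]

def recal_qual(gt_gq_pairs):
    # gt_gq_pairs: iterable of (GT tuple, GQ) for one record's samples.
    quals = []
    for gt, gq in gt_gq_pairs:
        if gt in NULL_and_REF_GTs:
            continue                                 # no-calls and hom-ref are skipped
        quals.append(gq if gt in HET_GTs else 999)   # hom-alt pinned at 999
    if quals:
        return int(median(quals))

# Invented genotypes: one hom-ref (skipped), two hets, one hom-alt.
print(recal_qual([((0, 0), 99), ((0, 1), 40), ((0, 1), 60), ((1, 1), 99)]))
# -> 60, the median of [40, 60, 999]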
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright © 2018 Matthew Stone <[email protected]>\n# Distributed under terms of the MIT license.\n\n\"\"\"\n\n\"\"\"\n\nimport argparse\nfrom collections import defaultdict\nimport pysam\nfrom svtk.genomeslink import GenomeSLINK, GSNode\nimport svtk.utils as svu\nfrom statistics import median\nimport numpy as np\nimport datetime\n\n\nclass DiscPair(GSNode):\n def __init__(self, chrA, posA, strandA, chrB, posB, strandB, sample):\n self.strandA = strandA\n self.strandB = strandB\n self.sample = sample\n super().__init__(chrA, posA, chrB, posB)\n\n @property\n def is_inversion(self):\n return (self.chrA == self.chrB) and (self.strandA == self.strandB)\n\n def __str__(self):\n e = '{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\n'\n return e.format(self.chrA, self.posA, self.strandA,\n self.chrB, self.posB, self.strandB, self.sample)\n\n\ndef match_cluster(record, cluster, dist=300):\n \"\"\"\n Determine whether DiscPair cluster matches a VCF record of interest.\n\n Checks if pairs exist which match the record's strandedness, then checks\n whether the min/max coord of these pairs is within a specified distance\n of the record's coordinates.\n\n Arguments\n ---------\n record : pysam.VariantRecord\n cluster : list of DiscPair\n dist : int, optional\n\n Returns\n -------\n match : bool\n \"\"\"\n r_start, r_end = record.pos, record.stop\n\n if record.info['STRANDS'] == '++':\n # Choose max start/end of ++ pairs\n c_start = max((p.posA for p in cluster if (\n p.strandA == '+')), default=None)\n if c_start is None:\n # If no ++ pairs present, return False\n return False\n c_end = max(p.posB for p in cluster if (p.strandB == '+'))\n elif record.info['STRANDS'] == '--':\n # Choose min start/end of -- pairs\n c_start = min((p.posA for p in cluster if (\n p.strandA == '-')), default=None)\n if c_start is None:\n # If no -- pairs present, return False\n return False\n c_end = min(p.posB for p in cluster if (p.strandB == '-'))\n else:\n strands = record.info['STRANDS']\n raise Exception('Invalid inversion orientation: {0}'.format(strands))\n\n # Test if cluster start/end are sufficiently close to record start/end\n return abs(r_start - c_start) < dist and abs(r_end - c_end) < dist\n\n\ndef rescan_single_ender(record, pe, min_support=4, window=1000, dist=300,\n min_frac_samples=0.5, pe_blacklist=None, max_samples=40,\n quiet=False, min_span=50):\n \"\"\"\n Test if a putative single-ender inversion has support from other strand.\n\n Selects discordant pairs in the neighborhood of the original record, then\n clusters them together. If enough samples have sufficient paired-end\n evidence supporting the opposite strand, we have found support for the\n other end of the record.\n\n Arguments\n ---------\n record : pysam.VariantRecord\n pe : pysam.TabixFile\n Scraped discordant pair metadata\n min_support : int, optional\n Number of pairs required to count a sample as supported\n window : int, optional\n Window around record start to search for pairs\n dist : int, optional\n Clustering distance for fetched pairs\n min_frac_samples : float, optional\n Fraction of called samples required to have opposite strand support in\n order to call the record as having both strands present. If 0, only one\n sample will be required.\n pe_blacklist : pysam.TabixFile, optional\n Blacklisted genomic regions. 
Anomalous pairs in these regions will be\n removed prior to clustering.\n quiet : boolean, optional\n Do not print status updates\n min_span : int, optional\n Minimum distance spanned between discordant read mapping positions in\n newly identified candidate breakpoints\n\n\n Returns\n -------\n opposite : pysam.VariantRecord\n Record corresponding to the pairs found supporting the other strand.\n None if no such pairs found\n \"\"\"\n\n # Print statement that single ender rescan has been attempted\n if not quiet:\n now = datetime.datetime.now()\n print('svtk resolve @ ' + now.strftime(\"%H:%M:%S\") + ': ' +\n 'single-ender rescan procedure started for ' +\n record.id)\n\n # Select pairs nearby record\n search_start = max(0, record.pos - window)\n search_end = max(search_start + 1, record.pos + window)\n pairs = pe.fetch(\n '{0}:{1}-{2}'.format(record.chrom, search_start, search_end))\n pairs = [DiscPair(*p.split()) for p in pairs]\n\n # To protect against wasting time on particularly messy loci not captured\n # in the blacklist, automatically fail site if total number of discordant\n # pairs is > samples * min_support\n all_samples = record.samples.keys()\n print(len(pairs), flush=True)\n print(len(all_samples) * min_support, flush=True)\n if len(pairs) > len(all_samples) * min_support:\n return record, None\n\n # Subset to only inversion pairs\n pairs = [p for p in pairs if p.is_inversion]\n\n # If median number of pairs per sample not called in the original record\n # > min_support, fail record. Otherwise, keep going.\n called = svu.get_called_samples(record)\n if len(called) < len(all_samples):\n # Count number of pairs per sample for all samples\n sample_support_precluster = defaultdict(int)\n for pair in pairs:\n sample_support_precluster[pair.sample] += 1\n # compute median pairs per uncalled sample\n median_pairs_not_called = median(\n sample_support_precluster.get(s, 0) for s in all_samples if s not in called\n )\n if median_pairs_not_called > min_support:\n return record, None\n # Randomly subset pairs from all samples to max_samples\n if len(called) > max_samples:\n np.random.seed(2) # arbitrary fixed seed for reproducibility\n called = np.random.choice(called, max_samples, replace=False).tolist()\n\n # Restrict to inversions in samples called in VCF record\n pairs = [p for p in pairs if p.sample in called and p.is_inversion]\n\n # Cluster pairs\n slink = GenomeSLINK(pairs, dist, blacklist=pe_blacklist)\n # choose the largest cluster\n cluster = max(\n (c for c in slink.cluster() if match_cluster(record, c, window)),\n key=len, default=None\n )\n if cluster is None:\n # if no clusters, fail site\n return record, None\n\n # Select clustered pairs which support the opposite strand as the record\n missing_strand = '+' if record.info['STRANDS'] == '--' else '-'\n supporting_pairs = [p for p in cluster if p.strandA == missing_strand]\n\n # Check span of supporting pairs from best cluster\n minA = round(np.percentile([p.posA for p in cluster], 10))\n maxA = round(np.percentile([p.posA for p in cluster], 90))\n spanA = maxA - minA\n\n minB = round(np.percentile([p.posB for p in cluster], 10))\n maxB = round(np.percentile([p.posB for p in cluster], 90))\n spanB = maxB - minB\n\n if min(spanA, spanB) < min_span:\n return record, None\n\n # Count number of supporting pairs in each called sample\n sample_support = defaultdict(int)\n for pair in supporting_pairs:\n sample_support[pair.sample] += 1\n\n # If enough samples were found to have support, make new variant record\n 
n_supported_samples = sum(sample_support[s] > min_support for s in called)\n if n_supported_samples >= min_frac_samples * len(called):\n opp_strand = make_new_record(supporting_pairs, record)\n\n same_strand_pairs = [p for p in cluster if p.strandA != missing_strand]\n same_strand = make_new_record(same_strand_pairs, record, True)\n same_strand.id = record.id\n\n # Print statement that single ender rescan has been successful\n if not quiet:\n now = datetime.datetime.now()\n print('svtk resolve @ ' + now.strftime(\"%H:%M:%S\") + ': ' +\n 'single-ender rescan successful for ' +\n record.id)\n\n return same_strand, opp_strand\n else:\n return record, None\n\n\ndef make_new_record(pairs, old_record, retain_algs=False):\n record = old_record.copy()\n\n record.id = record.id + '_OPPSTRAND'\n\n # Take third quartile of + read positions for +/+ breakpoints\n # Take first quartile of - read positions for -/- breakpoints\n if pairs[0].strandA == '+':\n record.pos = round(np.percentile([p.posA for p in pairs], 90), 0)\n record.stop = round(np.percentile([p.posB for p in pairs], 90), 0)\n record.info['STRANDS'] = '++'\n else:\n record.pos = round(np.percentile([p.posA for p in pairs], 10), 0)\n record.stop = round(np.percentile([p.posB for p in pairs], 10), 0)\n record.info['STRANDS'] = '--'\n\n record.info['SVLEN'] = record.stop - record.pos\n if retain_algs:\n old_algs = list(record.info['ALGORITHMS'])\n old_algs.append('rescan')\n record.info['ALGORITHMS'] = tuple(old_algs)\n\n return record\n\n\ndef rescan_single_enders(vcf, pe, min_support=4, window=500, pe_blacklist=None):\n for record in vcf:\n rescan_single_ender(record, pe, min_support, window,\n pe_blacklist=pe_blacklist)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('vcf', help='Single enders')\n parser.add_argument('pairs', help='Scraped discordant pair file.')\n parser.add_argument('--min-rescan-pe-support', type=int, default=4,\n help='Minumum discordant pairs required during '\n 'single-ender rescan ')\n parser.add_argument('--window', type=int, default=500, help='Window around '\n 'single ender coordinates to search for pairs')\n parser.add_argument('-x', '--pe-blacklist', metavar='BED.GZ',\n default=None, help='Tabix indexed bed of blacklisted '\n 'regions. Any anomalous pair falling inside one '\n 'of these regions is excluded from PE rescanning.')\n\n args = parser.parse_args()\n\n vcf = pysam.VariantFile(args.vcf)\n pe = pysam.TabixFile(args.pairs)\n blacklist = pysam.TabixFile(args.pe_blacklist)\n\n rescan_single_enders(vcf, pe, args.min_rescan_pe_support, args.window,\n pe_blacklist=blacklist)\n\n\nif __name__ == '__main__':\n main()\n"
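The acceptance test in match_cluster above can be made concrete with toy coordinates: for a '++' record it takes the maximum posA/posB over '+' pairs in the cluster and requires both to land within dist of the record's start and end. A self-contained sketch using plain stand-ins (the real code operates on pysam.VariantRecord and DiscPair objects; the class and coordinates below are invented for illustration):

class Pair:
    def __init__(self, posA, strandA, posB, strandB):
        self.posA, self.strandA = posA, strandA
        self.posB, self.strandB = posB, strandB

def matches_plus_plus(r_start, r_end, cluster, dist=300):
    # '++' branch of match_cluster: compare the max start/end of '+' pairs
    # against the record coordinates.
    starts = [p.posA for p in cluster if p.strandA == '+']
    if not starts:
        return False                       # no '++' support at all
    c_start = max(starts)
    c_end = max(p.posB for p in cluster if p.strandB == '+')
    return abs(r_start - c_start) < dist and abs(r_end - c_end) < dist

cluster = [Pair(10050, '+', 20100, '+'), Pair(10020, '+', 20040, '+')]
print(matches_plus_plus(10000, 20000, cluster))  # True: both ends within 300 bp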
] | [
[
"pandas.concat",
"pandas.read_csv",
"numpy.abs",
"numpy.sqrt",
"pandas.DataFrame",
"numpy.where"
],
[
"pandas.read_table",
"pandas.melt"
],
[
"numpy.median"
],
[
"numpy.percentile",
"numpy.random.seed",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
savelov/nowcast | [
"9c1168b1ba642f15bc4ffb000bdbca6db27c29b1",
"9c1168b1ba642f15bc4ffb000bdbca6db27c29b1"
] | [
"pysteps/io/exporters.py",
"pysteps_custom_utils/probability_nowcasting.py"
] | [
"\"\"\"\npysteps.io.exporter\n===================\n\nMethods for exporting forecasts of 2d precipitation fields into various file\nformats.\n\nEach exporter method in this module has its own initialization function that\nimplements the following interface::\n\n initialize_forecast_exporter_xxx(filename, startdate, timestep,\n num_timesteps, shape, num_ens_members,\n metadata, incremental=None)\n\nwhere xxx is the name (or abbreviation) of the file format.\n\nThis function creates the file and writes the metadata. The datasets are written\nby calling :py:func:`pysteps.io.exporters.export_forecast_dataset`, and\nthe file is closed by calling :py:func:`pysteps.io.exporters.close_forecast_file`.\n\nThe arguments in the above are defined as follows:\n\n.. tabularcolumns:: |p{2cm}|p{2cm}|L|\n\n+---------------+-------------------+-----------------------------------------+\n| Argument | Type/values | Description |\n+===============+===================+=========================================+\n| filename | str | name of the output file |\n+---------------+-------------------+-----------------------------------------+\n| startdate | datetime.datetime | start date of the forecast |\n+---------------+-------------------+-----------------------------------------+\n| timestep | int | time step of the forecast (minutes) |\n+---------------+-------------------+-----------------------------------------+\n| n_timesteps | int | number of time steps in the forecast |\n| | | this argument is ignored if |\n| | | incremental is set to 'timestep'. |\n+---------------+-------------------+-----------------------------------------+\n| shape | tuple | two-element tuple defining the shape |\n| | | (height,width) of the forecast grids |\n+---------------+-------------------+-----------------------------------------+\n| n_ens_members | int | number of ensemble members in the |\n| | | forecast. This argument is ignored if |\n| | | incremental is set to 'member' |\n+---------------+-------------------+-----------------------------------------+\n| metadata | dict | metadata dictionary containing the |\n| | | projection,x1,x2,y1,y2 and unit |\n| | | attributes described in the |\n| | | documentation of pysteps.io.importers |\n+---------------+-------------------+-----------------------------------------+\n| incremental | {None, 'timestep',| Allow incremental writing of datasets |\n| | 'member'} | into the netCDF file |\n| | | the available options are: |\n| | | 'timestep' = write a forecast or a |\n| | | forecast ensemble for a given |\n| | | time step |\n| | | 'member' = write a forecast sequence |\n| | | for a given ensemble member |\n+---------------+-------------------+-----------------------------------------+\n\nThe return value is a dictionary containing an exporter object. This can be\nused with :py:func:`pysteps.io.exporters.export_forecast_dataset` to write \ndatasets into the given file format.\n\nAvailable Exporters\n-------------------\n\n.. autosummary::\n :toctree: ../generated/\n\n initialize_forecast_exporter_kineros\n initialize_forecast_exporter_netcdf\n\nGeneric functions\n-----------------\n\n.. 
autosummary::\n :toctree: ../generated/\n\n export_forecast_dataset\n close_forecast_file\n\"\"\"\n\nfrom datetime import datetime\nimport numpy as np\nimport os\nfrom pysteps.exceptions import MissingOptionalDependency\n\ntry:\n import netCDF4\n netcdf4_imported = True\nexcept ImportError:\n netcdf4_imported = False\ntry:\n import pyproj\n pyproj_imported = True\nexcept ImportError:\n pyproj_imported = False\n\n# TODO(exporters): This is a draft version of the kineros exporter.\n# Revise the variable names and\n# the structure of the file if necessary.\n\ndef initialize_forecast_exporter_kineros(filename, startdate, timestep,\n n_timesteps, shape, n_ens_members,\n metadata, incremental=None):\n \"\"\"Initialize a KINEROS2 Rainfall .pre file as specified\n in https://www.tucson.ars.ag.gov/kineros/.\n\n Grid points are treated as individual rain gauges and a separate file is\n produced for each ensemble member.\n \n Parameters\n ----------\n filename : str\n Name of the output file.\n \n startdate : datetime.datetime\n Start date of the forecast as datetime object.\n \n timestep : int\n Time step of the forecast (minutes).\n \n n_timesteps : int\n Number of time steps in the forecast this argument is ignored if \n incremental is set to 'timestep'.\n \n shape : tuple of int\n Two-element tuple defining the shape (height,width) of the forecast \n grids.\n \n n_ens_members : int\n Number of ensemble members in the forecast. This argument is ignored if\n incremental is set to 'member'.\n \n metadata: dict\n Metadata dictionary containing the projection,x1,x2,y1,y2 and unit \n attributes described in the documentation of \n :py:mod:`pysteps.io.importers`.\n \n incremental : {None}, optional\n Currently not implemented for this method.\n\n Returns\n -------\n exporter : dict\n The return value is a dictionary containing an exporter object. This c\n an be used with :py:func:`pysteps.io.exporters.export_forecast_dataset` \n to write datasets into the given file format.\n \n \"\"\"\n\n if incremental is not None:\n raise ValueError(\"unknown option %s: incremental writing is not supported\" % incremental)\n\n exporter = {}\n\n basefn, extfn = os.path.splitext(filename)\n if extfn == \"\":\n extfn = \".pre\"\n\n # one file for each member\n n_ens_members = np.min((99, n_ens_members))\n fns = []\n for i in range(n_ens_members):\n fn = \"%s_N%02d%s\" % (basefn, i, extfn)\n with open(fn, \"w\") as fd:\n # write header\n fd.writelines(\"! pysteps-generated nowcast.\\n\")\n fd.writelines(\"! created the %s.\\n\" % datetime.now().strftime(\"%c\"))\n # TODO(exporters): Add pySTEPS version here\n fd.writelines(\"! Member = %02d.\\n\" % i)\n fd.writelines(\"! 
Startdate = %s.\\n\" % startdate.strftime(\"%c\"))\n fns.append(fn)\n fd.close()\n\n h, w = shape\n\n if metadata[\"unit\"] == \"mm/h\":\n var_name = \"Intensity\"\n var_long_name = \"Intensity in mm/hr\"\n var_unit = \"mm/hr\"\n elif metadata[\"unit\"] == \"mm\":\n var_name = \"Depth\"\n var_long_name = \"Accumulated depth in mm\"\n var_unit = \"mm\"\n else:\n raise ValueError(\"unsupported unit %s\" % metadata[\"unit\"])\n\n xr = np.linspace(metadata[\"x1\"], metadata[\"x2\"], w+1)[:-1]\n xr += 0.5 * (xr[1] - xr[0])\n yr = np.linspace(metadata[\"y1\"], metadata[\"y2\"], h+1)[:-1]\n yr += 0.5 * (yr[1] - yr[0])\n X, Y = np.meshgrid(xr, yr)\n XY_coords = np.stack([X, Y])\n\n exporter[\"method\"] = \"kineros\"\n exporter[\"ncfile\"] = fns\n exporter[\"XY_coords\"] = XY_coords\n exporter[\"var_name\"] = var_name\n exporter[\"var_long_name\"] = var_long_name\n exporter[\"var_unit\"] = var_unit\n exporter[\"startdate\"] = startdate\n exporter[\"timestep\"] = timestep\n exporter[\"metadata\"] = metadata\n exporter[\"incremental\"] = incremental\n exporter[\"num_timesteps\"] = n_timesteps\n exporter[\"num_ens_members\"] = n_ens_members\n exporter[\"shape\"] = shape\n\n return exporter\n\n\n# TODO(exporters): This is a draft version of the netcdf exporter.\n# Revise the variable names and\n# the structure of the file if necessary.\n\ndef initialize_forecast_exporter_netcdf(filename, startdate, timestep,\n n_timesteps, shape, n_ens_members,\n metadata, product='precip_intensity',\n incremental=None):\n \"\"\"Initialize a netCDF forecast exporter.\n \n Parameters\n ----------\n filename : str\n Name of the output file.\n \n startdate : datetime.datetime\n Start date of the forecast as datetime object.\n \n timestep : int\n Time step of the forecast (minutes).\n \n n_timesteps : int\n Number of time steps in the forecast this argument is ignored if \n incremental is set to 'timestep'.\n \n shape : tuple of int\n Two-element tuple defining the shape (height,width) of the forecast \n grids.\n \n n_ens_members : int\n Number of ensemble members in the forecast. This argument is ignored if\n incremental is set to 'member'.\n \n metadata: dict\n Metadata dictionary containing the projection,x1,x2,y1,y2 and unit \n attributes described in the documentation of \n :py:mod:`pysteps.io.importers`.\n\n product: str\n product name can be 'precip_intensity' for intensity export,\n 'precip_probability' for probability export.\n\n incremental : {None,'timestep','member'}, optional\n Allow incremental writing of datasets into the netCDF file.\\n\n The available options are: 'timestep' = write a forecast or a forecast \n ensemble for a given time step; 'member' = write a forecast sequence \n for a given ensemble member. If set to None, incremental writing is \n disabled.\n\n Returns\n -------\n exporter : dict\n The return value is a dictionary containing an exporter object. 
This c\n an be used with :py:func:`pysteps.io.exporters.export_forecast_dataset` \n to write datasets into the given file format.\n \n \"\"\"\n if not netcdf4_imported:\n raise MissingOptionalDependency(\n \"netCDF4 package is required for netcdf \"\n \"exporters but it is not installed\")\n\n if not pyproj_imported:\n raise MissingOptionalDependency(\n \"pyproj package is required for netcdf \"\n \"exporters but it is not installed\")\n\n if incremental not in [None, \"timestep\", \"member\"]:\n raise ValueError(\"unknown option %s: incremental must be 'timestep' or 'member'\" % incremental)\n\n if incremental == \"timestep\":\n n_timesteps = None\n elif incremental == \"member\":\n n_ens_members = None\n elif incremental is not None:\n raise ValueError(\"unknown argument value incremental='%s': must be 'timestep' or 'member'\" % str(incremental))\n\n exporter = {}\n\n filename = os.path.realpath(filename)\n if not os.path.exists(os.path.dirname(filename)):\n os.mkdir(os.path.dirname(filename))\n ncf = netCDF4.Dataset(filename, 'w', format=\"NETCDF4\")\n\n ncf.Conventions = \"CF-1.7\"\n ncf.title = \"pysteps-generated nowcast\"\n ncf.institution = \"the pySTEPS community (https://pysteps.github.io)\"\n ncf.source = \"pysteps\" # TODO(exporters): Add pySTEPS version here\n ncf.history = \"\"\n ncf.references = \"\"\n ncf.comment = \"\"\n\n h, w = shape\n\n # if product != 'precip_probability':\n # ncf.createDimension(\"ens_number\", size=n_ens_members)\n ncf.createDimension(\"time\", size=n_timesteps)\n ncf.createDimension(\"y\", size=h)\n ncf.createDimension(\"x\", size=w)\n\n # necessary settings for probability nowcasting\n ncf.datetime = str(startdate)\n if product == 'precip_probability':\n #TODO: Add this metadata unit percent in the source\n metadata[\"unit\"] = \"percent\"\n\n if metadata[\"unit\"] == \"mm/h\":\n var_name = \"precip_intensity\"\n var_standard_name = None\n var_long_name = \"instantaneous precipitation rate\"\n var_unit = \"mm h-1\"\n elif metadata[\"unit\"] == \"percent\":\n var_name = \"precip_probability\"\n var_standard_name = None\n var_long_name = \"probablistic precipitation\"\n var_unit = \"percent\"\n elif metadata[\"unit\"] == \"mm\":\n var_name = \"precip_accum\"\n var_standard_name = None\n var_long_name = \"accumulated precipitation\"\n var_unit = \"mm\"\n elif metadata[\"unit\"] == \"dBZ\":\n var_name = \"reflectivity\"\n var_long_name = \"equivalent reflectivity factor\"\n var_standard_name = \"equivalent_reflectivity_factor\"\n var_unit = \"dBZ\"\n else:\n raise ValueError(\"unknown unit %s\" % metadata[\"unit\"])\n\n xr = np.linspace(metadata[\"x1\"], metadata[\"x2\"], w+1)[:-1]\n xr += 0.5 * (xr[1] - xr[0])\n yr = np.linspace(metadata[\"y1\"], metadata[\"y2\"], h+1)[:-1]\n yr += 0.5 * (yr[1] - yr[0])\n\n var_xc = ncf.createVariable(\"xc\", np.float32, dimensions=(\"x\",))\n var_xc[:] = xr\n var_xc.axis = 'X'\n var_xc.standard_name = \"projection_x_coordinate\"\n var_xc.long_name = \"x-coordinate in Cartesian system\"\n # TODO(exporters): Don't hard-code the unit.\n var_xc.units = 'm'\n\n var_yc = ncf.createVariable(\"yc\", np.float32, dimensions=(\"y\",))\n var_yc[:] = yr\n var_yc.axis = 'Y'\n var_yc.standard_name = \"projection_y_coordinate\"\n var_yc.long_name = \"y-coordinate in Cartesian system\"\n # TODO(exporters): Don't hard-code the unit.\n var_yc.units = 'm'\n\n X, Y = np.meshgrid(xr, yr)\n pr = pyproj.Proj(metadata[\"projection\"])\n lon,lat = pr(X.flatten(), Y.flatten(), inverse=True)\n\n lon, lat = pr(X.flatten(), Y.flatten(), 
inverse=True)\n new_long, new_lat = np.zeros((h, w), dtype=np.float), np.zeros((h, w), dtype=np.float)\n idx = 0\n for row in range(h):\n for col in range(w):\n new_long[row][col] = lon[idx]\n idx += 1\n idx = 0\n for row in range(h):\n for col in range(w):\n new_lat[row][col] = lat[idx]\n idx += 1\n\n var_lon = ncf.createVariable(\"lon\", np.float32, dimensions=(\"y\", \"x\"))\n var_lon[:] = new_long\n var_lon.standard_name = \"longitude\"\n var_lon.long_name = \"longitude coordinate\"\n # TODO(exporters): Don't hard-code the unit.\n var_lon.units = \"degrees_east\"\n\n var_lat = ncf.createVariable(\"lat\", np.float, dimensions=(\"y\", \"x\"))\n var_lat[:] = new_lat\n var_lat.standard_name = \"latitude\"\n var_lat.long_name = \"latitude coordinate\"\n # TODO(exporters): Don't hard-code the unit.\n var_lat.units = \"degrees_north\"\n\n ncf.projection = metadata[\"projection\"]\n\n grid_mapping_var_name, grid_mapping_name, grid_mapping_params = \\\n _convert_proj4_to_grid_mapping(metadata[\"projection\"])\n # skip writing the grid mapping if a matching name was not found\n if grid_mapping_var_name is not None:\n var_gm = ncf.createVariable(grid_mapping_var_name, np.int,\n dimensions=())\n var_gm.grid_mapping_name = grid_mapping_name\n for i in grid_mapping_params.items():\n var_gm.setncattr(i[0], i[1])\n\n # if product != 'precip_probability':\n # var_ens_num = ncf.createVariable(\"ens_number\", np.int,\n # dimensions=(\"ens_number\",))\n # if incremental != \"member\":\n # var_ens_num[:] = list(range(1, n_ens_members+1))\n # var_ens_num.long_name = \"ensemble member\"\n # var_ens_num.units = \"\"\n\n var_time = ncf.createVariable(\"time\", np.int, dimensions=(\"time\",))\n if incremental != \"timestep\":\n if product == 'precip_probability':\n var_time[:] = [i*timestep for i in range(1, n_timesteps+1)]\n else:\n var_time[:] = [i*timestep*60 for i in range(1, n_timesteps+1)]\n\n var_time.long_name = \"forecast time\"\n startdate_str = datetime.strftime(startdate, \"%Y-%m-%d %H:%M:%S\")\n var_time.units = \"minutes since %s\" % startdate_str if product == 'precip_probability' \\\n else \"seconds since %s\" % startdate_str\n\n dimensions = (\"time\", \"y\", \"x\")\n\n var_F = ncf.createVariable(var_name, np.float32,\n dimensions=dimensions,\n zlib=True, complevel=9)\n\n if var_standard_name is not None:\n var_F.standard_name = var_standard_name\n var_F.long_name = var_long_name\n var_F.coordinates = \"y x\"\n var_F.units = var_unit\n\n exporter[\"method\"] = \"netcdf\"\n exporter[\"ncfile\"] = ncf\n exporter[\"var_F\"] = var_F\n # if product != 'precip_probability':\n # exporter[\"var_ens_num\"] = var_ens_num\n exporter[\"var_time\"] = var_time\n exporter[\"var_name\"] = var_name\n exporter[\"startdate\"] = startdate\n exporter[\"timestep\"] = timestep\n exporter[\"metadata\"] = metadata\n exporter[\"incremental\"] = incremental\n exporter[\"num_timesteps\"] = n_timesteps\n exporter[\"num_ens_members\"] = n_ens_members\n exporter[\"shape\"] = shape\n\n return exporter\n\n\ndef export_forecast_dataset(F, exporter, mask=None):\n \"\"\"Write a forecast array into a file.\n\n The written dataset has dimensions\n (num_ens_members,num_timesteps,shape[0],shape[1]), where shape refers to\n the shape of the two-dimensional forecast grids. 
If the exporter was\n initialized with incremental!=None, the array is appended to the existing\n dataset either along the ensemble member or time axis.\n\n Parameters\n ----------\n exporter : dict\n An exporter object created with any initialization method implemented\n in :py:mod:`pysteps.io.exporters`.\n F : array_like\n The array to write. The required shape depends on the choice of the\n 'incremental' parameter the exporter was initialized with:\n\n :TODO: Update this table incorporating 'precip_probability'\n +-----------------+---------------------------------------------------+\n | incremental | required shape |\n +=================+===================================================+\n | None | (num_ens_members,num_timesteps,shape[0],shape[1]) |\n +-----------------+---------------------------------------------------+\n | 'timestep' | (num_ens_members,shape[0],shape[1]) |\n +-----------------+---------------------------------------------------+\n | 'member' | (num_timesteps,shape[0],shape[1]) |\n +-----------------+---------------------------------------------------+\n\n \"\"\"\n if exporter[\"method\"] == \"netcdf\" and not netcdf4_imported:\n raise MissingOptionalDependency(\n \"netCDF4 package is required for netcdf \"\n \"exporters but it is not installed\")\n\n if exporter[\"incremental\"] is None:\n shp = (exporter[\"num_timesteps\"], exporter[\"shape\"][0], exporter[\"shape\"][1])\n if F.shape != shp:\n raise ValueError(\"F has invalid shape: %s != %s\" % (str(F.shape),str(shp)))\n elif exporter[\"incremental\"] == \"timestep\":\n shp = (exporter[\"num_ens_members\"], exporter[\"shape\"][0],\n exporter[\"shape\"][1])\n if F.shape != shp:\n raise ValueError(\"F has invalid shape: %s != %s\" % (str(F.shape),str(shp)))\n elif exporter[\"incremental\"] == \"member\":\n shp = (exporter[\"num_timesteps\"], exporter[\"shape\"][0],\n exporter[\"shape\"][1])\n if F.shape != shp:\n raise ValueError(\"F has invalid shape: %s != %s\" % (str(F.shape),str(shp)))\n\n if exporter[\"method\"] == \"netcdf\":\n _export_netcdf(F, exporter, mask)\n elif exporter[\"method\"] == \"kineros\":\n _export_kineros(F, exporter)\n else:\n raise ValueError(\"unknown exporter method %s\" % exporter[\"method\"])\n\n\ndef close_forecast_file(exporter):\n \"\"\"Close the file associated with a forecast exporter.\n\n Finish writing forecasts and close the file associated with a forecast\n exporter.\n\n Parameters\n ----------\n exporter : dict\n An exporter object created with any initialization method implemented\n in :py:mod:`pysteps.io.exporters`.\n\n \"\"\"\n if exporter[\"method\"] == \"kineros\":\n pass # no need to close the file\n else:\n exporter[\"ncfile\"].close()\n\n\ndef _export_kineros(F, exporter):\n\n num_timesteps = exporter[\"num_timesteps\"]\n num_ens_members = exporter[\"num_ens_members\"]\n startdate = exporter[\"startdate\"]\n timestep = exporter[\"timestep\"]\n xgrid = exporter[\"XY_coords\"][0, :, :].flatten()\n ygrid = exporter[\"XY_coords\"][1, :, :].flatten()\n\n timemin = [(t + 1)*timestep for t in range(num_timesteps)]\n\n for n in range(num_ens_members):\n fn = exporter[\"ncfile\"][n]\n F_ = F[n, :, :, :].reshape((num_timesteps, -1))\n if exporter[\"var_name\"] == \"Depth\":\n F_ = np.cumsum(F_, axis=0)\n with open(fn, \"a\") as fd:\n for m in range(F_.shape[1]):\n fd.writelines(\"BEGIN RG%03d\\n\" % (m + 1))\n fd.writelines(\" X = %.2f, Y = %.2f\\n\" % (xgrid[m], ygrid[m]))\n fd.writelines(\" N = %i\\n\" % num_timesteps)\n fd.writelines(\" TIME %s\\n\" % 
exporter[\"var_name\"].upper())\n fd.writelines(\"! (min) (%s)\\n\" % exporter[\"var_unit\"])\n for t in range(num_timesteps):\n line_new = \"{:6.1f} {:11.2f}\\n\".format(timemin[t], F_[t, m])\n fd.writelines(line_new)\n fd.writelines(\"END\\n\\n\")\n\n\ndef _export_netcdf(F, exporter, mask=None):\n var_F = exporter[\"var_F\"]\n\n if exporter[\"incremental\"] is None:\n var_F[:] = F[:,::-1,:]\n elif exporter[\"incremental\"] == \"timestep\":\n var_F[:, var_F.shape[1], :, :] = F\n var_time = exporter[\"var_time\"]\n var_time[len(var_time)-1] = len(var_time) * exporter[\"timestep\"] * 60\n else:\n var_F[var_F.shape[0], :, :, :] = F\n var_ens_num = exporter[\"var_time\"]\n var_ens_num[len(var_ens_num)-1] = len(var_ens_num)\n\n\n# TODO(exporters): Write methods for converting Proj.4 projection definitions\n# into CF grid mapping attributes. Currently this has been implemented for\n# the stereographic projection.\n# The conversions implemented here are take from:\n# https://github.com/cf-convention/cf-convention.github.io/blob/master/wkt-proj-4.md\n\ndef _convert_proj4_to_grid_mapping(proj4str):\n tokens = proj4str.split('+')\n\n d = {}\n for t in tokens[1:]:\n t = t.split('=')\n if len(t) > 1:\n d[t[0]] = t[1].strip()\n\n params = {}\n # TODO(exporters): implement more projection types here\n if d[\"proj\"] == \"stere\":\n grid_mapping_var_name = \"polar_stereographic\"\n grid_mapping_name = \"polar_stereographic\"\n v = d[\"lon_0\"] if d[\"lon_0\"][-1] not in [\"E\", \"W\"] else d[\"lon_0\"][:-1]\n params[\"straight_vertical_longitude_from_pole\"] = float(v)\n v = d[\"lat_0\"] if d[\"lat_0\"][-1] not in [\"N\", \"S\"] else d[\"lat_0\"][:-1]\n params[\"latitude_of_projection_origin\"] = float(v)\n if \"lat_ts\" in list(d.keys()):\n params[\"standard_parallel\"] = float(d[\"lat_ts\"])\n elif \"k_0\" in list(d.keys()):\n params[\"scale_factor_at_projection_origin\"] = float(d[\"k_0\"])\n params[\"false_easting\"] = float(d[\"x_0\"])\n params[\"false_northing\"] = float(d[\"y_0\"])\n elif d[\"proj\"] == \"sterea\":\n grid_mapping_var_name = \"oblique_stereographic\"\n grid_mapping_name = \"oblique_stereographic\"\n v = d[\"lon_0\"] if d[\"lon_0\"][-1] not in [\"E\", \"W\"] else d[\"lon_0\"][:-1]\n params[\"longitude_of_projection_origin\"] = float(v)\n v = d[\"lat_0\"] if d[\"lat_0\"][-1] not in [\"N\", \"S\"] else d[\"lat_0\"][:-1]\n params[\"latitude_of_projection_origin\"] = float(v)\n if \"lat_ts\" in list(d.keys()):\n params[\"standard_parallel\"] = float(d[\"lat_ts\"])\n elif \"k_0\" in list(d.keys()):\n params[\"scale_factor_at_projection_origin\"] = float(d[\"k_0\"])\n params[\"false_easting\"] = float(d[\"x_0\"])\n params[\"false_northing\"] = float(d[\"y_0\"])\n elif d[\"proj\"] == \"aea\": # Albers Conical Equal Area\n grid_mapping_var_name = \"proj\"\n grid_mapping_name = \"albers_conical_equal_area\"\n params[\"false_easting\"] = float(d[\"x_0\"]) if \"x_0\" in d else float(0)\n params[\"false_northing\"] = float(d[\"y_0\"]) if \"y_0\" in d else float(0)\n v = d[\"lon_0\"] if \"lon_0\" in d else float(0)\n params[\"longitude_of_central_meridian\"] = float(v)\n v = d[\"lat_0\"] if \"lat_0\" in d else float(0)\n params[\"latitude_of_projection_origin\"] = float(v)\n v1 = d[\"lat_1\"] if \"lat_1\" in d else float(0)\n v2 = d[\"lat_2\"] if \"lat_2\" in d else float(0)\n params[\"standard_parallel\"] = (float(v1), float(v2))\n else:\n print('unknown projection', d[\"proj\"])\n return None, None, None\n\n return grid_mapping_var_name, grid_mapping_name, params\n",
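The module docstring above prescribes the lifecycle initialize, then export_forecast_dataset, then close_forecast_file. A hedged end-to-end sketch for this fork's netCDF exporter (the product keyword exists only in this modified version; the filename, grid extent, and projection string are invented, and running it requires netCDF4, pyproj, and a NumPy old enough to still provide the np.float/np.int aliases this module uses):

from datetime import datetime
import numpy as np
from pysteps.io.exporters import (initialize_forecast_exporter_netcdf,
                                  export_forecast_dataset,
                                  close_forecast_file)

n_timesteps, shape = 3, (64, 64)
metadata = {  # invented importer-style grid/projection attributes
    "projection": "+proj=stere +lon_0=25E +lat_0=90N +lat_ts=60 "
                  "+x_0=0 +y_0=0 +ellps=WGS84",
    "x1": 0.0, "x2": 64000.0, "y1": 0.0, "y2": 64000.0,
    "unit": "mm/h",
}

exporter = initialize_forecast_exporter_netcdf(
    "nowcast.nc", datetime(2020, 1, 1, 12, 0), timestep=5,
    n_timesteps=n_timesteps, shape=shape, n_ens_members=1,
    metadata=metadata, product='precip_intensity', incremental=None)

# With incremental=None this version expects F of shape
# (num_timesteps, height, width); there is no ensemble dimension.
F = np.random.exponential(1.0, size=(n_timesteps,) + shape)
export_forecast_dataset(F, exporter)
close_forecast_file(exporter)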
"import numpy as np\nimport pysteps as stp\n\n\ndef nowcast_probability(n_time_step, shape, R_fct):\n prob = np.zeros((n_time_step, shape[0], shape[1]))\n for i in range(n_time_step):\n prob[i, :, :] = stp.postprocessing.ensemblestats.excprob(R_fct[:, i, :, :], 0.1, ignore_nan=True)\n\n return prob\n"
] | [
[
"numpy.linspace",
"numpy.min",
"numpy.cumsum",
"numpy.stack",
"numpy.meshgrid",
"numpy.zeros"
],
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
StewSchrieff/riddlerHoopGame | [
"3d63f494aa803c7571ace83f87a40ce5d6b0dfc1"
] | [
"venv/Lib/site-packages/matplotlib/backends/backend_qt5.py"
] | [
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\nimport six\n\nimport functools\nimport os\nimport re\nimport signal\nimport sys\nfrom six import unichr\nimport traceback\n\nimport matplotlib\n\nfrom matplotlib._pylab_helpers import Gcf\nfrom matplotlib.backend_bases import (\n _Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,\n TimerBase, cursors, ToolContainerBase, StatusbarBase)\nimport matplotlib.backends.qt_editor.figureoptions as figureoptions\nfrom matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool\nfrom matplotlib.figure import Figure\nfrom matplotlib.backend_managers import ToolManager\nfrom matplotlib import backend_tools\n\nfrom .qt_compat import (\n QtCore, QtGui, QtWidgets, _getSaveFileName, is_pyqt5, __version__, QT_API)\n\nbackend_version = __version__\n\n# SPECIAL_KEYS are keys that do *not* return their unicode name\n# instead they have manually specified names\nSPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',\n QtCore.Qt.Key_Shift: 'shift',\n QtCore.Qt.Key_Alt: 'alt',\n QtCore.Qt.Key_Meta: 'super',\n QtCore.Qt.Key_Return: 'enter',\n QtCore.Qt.Key_Left: 'left',\n QtCore.Qt.Key_Up: 'up',\n QtCore.Qt.Key_Right: 'right',\n QtCore.Qt.Key_Down: 'down',\n QtCore.Qt.Key_Escape: 'escape',\n QtCore.Qt.Key_F1: 'f1',\n QtCore.Qt.Key_F2: 'f2',\n QtCore.Qt.Key_F3: 'f3',\n QtCore.Qt.Key_F4: 'f4',\n QtCore.Qt.Key_F5: 'f5',\n QtCore.Qt.Key_F6: 'f6',\n QtCore.Qt.Key_F7: 'f7',\n QtCore.Qt.Key_F8: 'f8',\n QtCore.Qt.Key_F9: 'f9',\n QtCore.Qt.Key_F10: 'f10',\n QtCore.Qt.Key_F11: 'f11',\n QtCore.Qt.Key_F12: 'f12',\n QtCore.Qt.Key_Home: 'home',\n QtCore.Qt.Key_End: 'end',\n QtCore.Qt.Key_PageUp: 'pageup',\n QtCore.Qt.Key_PageDown: 'pagedown',\n QtCore.Qt.Key_Tab: 'tab',\n QtCore.Qt.Key_Backspace: 'backspace',\n QtCore.Qt.Key_Enter: 'enter',\n QtCore.Qt.Key_Insert: 'insert',\n QtCore.Qt.Key_Delete: 'delete',\n QtCore.Qt.Key_Pause: 'pause',\n QtCore.Qt.Key_SysReq: 'sysreq',\n QtCore.Qt.Key_Clear: 'clear', }\n\n# define which modifier keys are collected on keyboard events.\n# elements are (mpl names, Modifier Flag, Qt Key) tuples\nSUPER = 0\nALT = 1\nCTRL = 2\nSHIFT = 3\nMODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),\n ('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),\n ('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),\n ('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),\n ]\n\nif sys.platform == 'darwin':\n # in OSX, the control and super (aka cmd/apple) keys are switched, so\n # switch them back.\n SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'cmd', # cmd/apple key\n QtCore.Qt.Key_Meta: 'control',\n })\n MODIFIER_KEYS[0] = ('cmd', QtCore.Qt.ControlModifier,\n QtCore.Qt.Key_Control)\n MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,\n QtCore.Qt.Key_Meta)\n\n\ncursord = {\n cursors.MOVE: QtCore.Qt.SizeAllCursor,\n cursors.HAND: QtCore.Qt.PointingHandCursor,\n cursors.POINTER: QtCore.Qt.ArrowCursor,\n cursors.SELECT_REGION: QtCore.Qt.CrossCursor,\n cursors.WAIT: QtCore.Qt.WaitCursor,\n }\n\n\n# make place holder\nqApp = None\n\n\ndef _create_qApp():\n \"\"\"\n Only one qApp can exist at a time, so check before creating one.\n \"\"\"\n global qApp\n\n if qApp is None:\n app = QtWidgets.QApplication.instance()\n if app is None:\n # check for DISPLAY env variable on X11 build of Qt\n if is_pyqt5():\n try:\n from PyQt5 import QtX11Extras\n is_x11_build = True\n except ImportError:\n is_x11_build = False\n else:\n is_x11_build = hasattr(QtGui, \"QX11Info\")\n if is_x11_build:\n display = 
os.environ.get('DISPLAY')\n if display is None or not re.search(r':\\d', display):\n raise RuntimeError('Invalid DISPLAY variable')\n\n qApp = QtWidgets.QApplication([b\"matplotlib\"])\n qApp.lastWindowClosed.connect(qApp.quit)\n else:\n qApp = app\n\n if is_pyqt5():\n try:\n qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)\n qApp.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)\n except AttributeError:\n pass\n\n\ndef _allow_super_init(__init__):\n \"\"\"\n Decorator for ``__init__`` to allow ``super().__init__`` on PyQt4/PySide2.\n \"\"\"\n\n if QT_API == \"PyQt5\":\n\n return __init__\n\n else:\n # To work around lack of cooperative inheritance in PyQt4, PySide,\n # and PySide2, when calling FigureCanvasQT.__init__, we temporarily\n # patch QWidget.__init__ by a cooperative version, that first calls\n # QWidget.__init__ with no additional arguments, and then finds the\n # next class in the MRO with an __init__ that does support cooperative\n # inheritance (i.e., not defined by the PyQt4, PySide, PySide2, sip\n # or Shiboken packages), and manually call its `__init__`, once again\n # passing the additional arguments.\n\n qwidget_init = QtWidgets.QWidget.__init__\n\n def cooperative_qwidget_init(self, *args, **kwargs):\n qwidget_init(self)\n mro = type(self).__mro__\n next_coop_init = next(\n cls for cls in mro[mro.index(QtWidgets.QWidget) + 1:]\n if cls.__module__.split(\".\")[0] not in [\n \"PyQt4\", \"sip\", \"PySide\", \"PySide2\", \"Shiboken\"])\n next_coop_init.__init__(self, *args, **kwargs)\n\n @functools.wraps(__init__)\n def wrapper(self, **kwargs):\n try:\n QtWidgets.QWidget.__init__ = cooperative_qwidget_init\n __init__(self, **kwargs)\n finally:\n # Restore __init__\n QtWidgets.QWidget.__init__ = qwidget_init\n\n return wrapper\n\n\nclass TimerQT(TimerBase):\n '''\n Subclass of :class:`backend_bases.TimerBase` that uses Qt timer events.\n\n Attributes\n ----------\n interval : int\n The time between timer events in milliseconds. Default is 1000 ms.\n single_shot : bool\n Boolean flag indicating whether this timer should\n operate as single shot (run once and then stop). Defaults to False.\n callbacks : list\n Stores list of (func, args) tuples that will be called upon timer\n events. 
This list can be manipulated directly, or the functions\n `add_callback` and `remove_callback` can be used.\n\n '''\n\n def __init__(self, *args, **kwargs):\n TimerBase.__init__(self, *args, **kwargs)\n\n # Create a new timer and connect the timeout() signal to the\n # _on_timer method.\n self._timer = QtCore.QTimer()\n self._timer.timeout.connect(self._on_timer)\n self._timer_set_interval()\n\n def _timer_set_single_shot(self):\n self._timer.setSingleShot(self._single)\n\n def _timer_set_interval(self):\n self._timer.setInterval(self._interval)\n\n def _timer_start(self):\n self._timer.start()\n\n def _timer_stop(self):\n self._timer.stop()\n\n\nclass FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):\n\n # map Qt button codes to MouseEvent's ones:\n buttond = {QtCore.Qt.LeftButton: 1,\n QtCore.Qt.MidButton: 2,\n QtCore.Qt.RightButton: 3,\n # QtCore.Qt.XButton1: None,\n # QtCore.Qt.XButton2: None,\n }\n\n @_allow_super_init\n def __init__(self, figure):\n _create_qApp()\n super(FigureCanvasQT, self).__init__(figure=figure)\n\n self.figure = figure\n # We don't want to scale up the figure DPI more than once.\n # Note, we don't handle a signal for changing DPI yet.\n figure._original_dpi = figure.dpi\n self._update_figure_dpi()\n # In cases with mixed resolution displays, we need to be careful if the\n # dpi_ratio changes - in this case we need to resize the canvas\n # accordingly. We could watch for screenChanged events from Qt, but\n # the issue is that we can't guarantee this will be emitted *before*\n # the first paintEvent for the canvas, so instead we keep track of the\n # dpi_ratio value here and in paintEvent we resize the canvas if\n # needed.\n self._dpi_ratio_prev = None\n\n self._draw_pending = False\n self._is_drawing = False\n self._draw_rect_callback = lambda painter: None\n\n self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)\n self.setMouseTracking(True)\n self.resize(*self.get_width_height())\n # Key auto-repeat enabled by default\n self._keyautorepeat = True\n\n palette = QtGui.QPalette(QtCore.Qt.white)\n self.setPalette(palette)\n\n def _update_figure_dpi(self):\n dpi = self._dpi_ratio * self.figure._original_dpi\n self.figure._set_dpi(dpi, forward=False)\n\n @property\n def _dpi_ratio(self):\n # Not available on Qt4 or some older Qt5.\n try:\n # self.devicePixelRatio() returns 0 in rare cases\n return self.devicePixelRatio() or 1\n except AttributeError:\n return 1\n\n def _update_dpi(self):\n # As described in __init__ above, we need to be careful in cases with\n # mixed resolution displays if dpi_ratio is changing between painting\n # events.\n # Return whether we triggered a resizeEvent (and thus a paintEvent)\n # from within this function.\n if self._dpi_ratio != self._dpi_ratio_prev:\n # We need to update the figure DPI.\n self._update_figure_dpi()\n self._dpi_ratio_prev = self._dpi_ratio\n # The easiest way to resize the canvas is to emit a resizeEvent\n # since we implement all the logic for resizing the canvas for\n # that event.\n event = QtGui.QResizeEvent(self.size(), self.size())\n self.resizeEvent(event)\n # resizeEvent triggers a paintEvent itself, so we exit this one\n # (after making sure that the event is immediately handled).\n return True\n return False\n\n def get_width_height(self):\n w, h = FigureCanvasBase.get_width_height(self)\n return int(w / self._dpi_ratio), int(h / self._dpi_ratio)\n\n def enterEvent(self, event):\n FigureCanvasBase.enter_notify_event(self, guiEvent=event)\n\n def leaveEvent(self, event):\n 
QtWidgets.QApplication.restoreOverrideCursor()\n FigureCanvasBase.leave_notify_event(self, guiEvent=event)\n\n def mouseEventCoords(self, pos):\n \"\"\"Calculate mouse coordinates in physical pixels\n\n Qt5 use logical pixels, but the figure is scaled to physical\n pixels for rendering. Transform to physical pixels so that\n all of the down-stream transforms work as expected.\n\n Also, the origin is different and needs to be corrected.\n\n \"\"\"\n dpi_ratio = self._dpi_ratio\n x = pos.x()\n # flip y so y=0 is bottom of canvas\n y = self.figure.bbox.height / dpi_ratio - pos.y()\n return x * dpi_ratio, y * dpi_ratio\n\n def mousePressEvent(self, event):\n x, y = self.mouseEventCoords(event.pos())\n button = self.buttond.get(event.button())\n if button is not None:\n FigureCanvasBase.button_press_event(self, x, y, button,\n guiEvent=event)\n\n def mouseDoubleClickEvent(self, event):\n x, y = self.mouseEventCoords(event.pos())\n button = self.buttond.get(event.button())\n if button is not None:\n FigureCanvasBase.button_press_event(self, x, y,\n button, dblclick=True,\n guiEvent=event)\n\n def mouseMoveEvent(self, event):\n x, y = self.mouseEventCoords(event)\n FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)\n\n def mouseReleaseEvent(self, event):\n x, y = self.mouseEventCoords(event)\n button = self.buttond.get(event.button())\n if button is not None:\n FigureCanvasBase.button_release_event(self, x, y, button,\n guiEvent=event)\n\n if is_pyqt5():\n def wheelEvent(self, event):\n x, y = self.mouseEventCoords(event)\n # from QWheelEvent::delta doc\n if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:\n steps = event.angleDelta().y() / 120\n else:\n steps = event.pixelDelta().y()\n if steps:\n FigureCanvasBase.scroll_event(\n self, x, y, steps, guiEvent=event)\n else:\n def wheelEvent(self, event):\n x = event.x()\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y()\n # from QWheelEvent::delta doc\n steps = event.delta() / 120\n if event.orientation() == QtCore.Qt.Vertical:\n FigureCanvasBase.scroll_event(\n self, x, y, steps, guiEvent=event)\n\n def keyPressEvent(self, event):\n key = self._get_key(event)\n if key is not None:\n FigureCanvasBase.key_press_event(self, key, guiEvent=event)\n\n def keyReleaseEvent(self, event):\n key = self._get_key(event)\n if key is not None:\n FigureCanvasBase.key_release_event(self, key, guiEvent=event)\n\n @property\n def keyAutoRepeat(self):\n \"\"\"\n If True, enable auto-repeat for key events.\n \"\"\"\n return self._keyautorepeat\n\n @keyAutoRepeat.setter\n def keyAutoRepeat(self, val):\n self._keyautorepeat = bool(val)\n\n def resizeEvent(self, event):\n # _dpi_ratio_prev will be set the first time the canvas is painted, and\n # the rendered buffer is useless before anyways.\n if self._dpi_ratio_prev is None:\n return\n w = event.size().width() * self._dpi_ratio\n h = event.size().height() * self._dpi_ratio\n dpival = self.figure.dpi\n winch = w / dpival\n hinch = h / dpival\n self.figure.set_size_inches(winch, hinch, forward=False)\n # pass back into Qt to let it finish\n QtWidgets.QWidget.resizeEvent(self, event)\n # emit our resize events\n FigureCanvasBase.resize_event(self)\n\n def sizeHint(self):\n w, h = self.get_width_height()\n return QtCore.QSize(w, h)\n\n def minumumSizeHint(self):\n return QtCore.QSize(10, 10)\n\n def _get_key(self, event):\n if not self._keyautorepeat and event.isAutoRepeat():\n return None\n\n event_key = event.key()\n event_mods = int(event.modifiers()) # actually a 
bitmask\n\n # get names of the pressed modifier keys\n # bit twiddling to pick out modifier keys from event_mods bitmask,\n # if event_key is a MODIFIER, it should not be duplicated in mods\n mods = [name for name, mod_key, qt_key in MODIFIER_KEYS\n if event_key != qt_key and (event_mods & mod_key) == mod_key]\n try:\n # for certain keys (enter, left, backspace, etc) use a word for the\n # key, rather than unicode\n key = SPECIAL_KEYS[event_key]\n except KeyError:\n # unicode defines code points up to 0x0010ffff\n # QT will use Key_Codes larger than that for keyboard keys that are\n # are not unicode characters (like multimedia keys)\n # skip these\n # if you really want them, you should add them to SPECIAL_KEYS\n MAX_UNICODE = 0x10ffff\n if event_key > MAX_UNICODE:\n return None\n\n key = unichr(event_key)\n # qt delivers capitalized letters. fix capitalization\n # note that capslock is ignored\n if 'shift' in mods:\n mods.remove('shift')\n else:\n key = key.lower()\n\n mods.reverse()\n return '+'.join(mods + [key])\n\n def new_timer(self, *args, **kwargs):\n \"\"\"\n Creates a new backend-specific subclass of\n :class:`backend_bases.Timer`. This is useful for getting\n periodic events through the backend's native event\n loop. Implemented only for backends with GUIs.\n\n Other Parameters\n ----------------\n interval : scalar\n Timer interval in milliseconds\n\n callbacks : list\n Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``\n will be executed by the timer every *interval*.\n\n \"\"\"\n return TimerQT(*args, **kwargs)\n\n def flush_events(self):\n qApp.processEvents()\n\n def start_event_loop(self, timeout=0):\n if hasattr(self, \"_event_loop\") and self._event_loop.isRunning():\n raise RuntimeError(\"Event loop already running\")\n self._event_loop = event_loop = QtCore.QEventLoop()\n if timeout:\n timer = QtCore.QTimer.singleShot(timeout * 1000, event_loop.quit)\n event_loop.exec_()\n\n def stop_event_loop(self, event=None):\n if hasattr(self, \"_event_loop\"):\n self._event_loop.quit()\n\n def draw(self):\n \"\"\"Render the figure, and queue a request for a Qt draw.\n \"\"\"\n # The renderer draw is done here; delaying causes problems with code\n # that uses the result of the draw() to update plot elements.\n if self._is_drawing:\n return\n self._is_drawing = True\n try:\n super(FigureCanvasQT, self).draw()\n finally:\n self._is_drawing = False\n self.update()\n\n def draw_idle(self):\n \"\"\"Queue redraw of the Agg buffer and request Qt paintEvent.\n \"\"\"\n # The Agg draw needs to be handled by the same thread matplotlib\n # modifies the scene graph from. Post Agg draw request to the\n # current event loop in order to ensure thread affinity and to\n # accumulate multiple draw requests from event handling.\n # TODO: queued signal connection might be safer than singleShot\n if not (self._draw_pending or self._is_drawing):\n self._draw_pending = True\n QtCore.QTimer.singleShot(0, self._draw_idle)\n\n def _draw_idle(self):\n if self.height() < 0 or self.width() < 0:\n self._draw_pending = False\n if not self._draw_pending:\n return\n try:\n self.draw()\n except Exception:\n # Uncaught exceptions are fatal for PyQt5, so catch them instead.\n traceback.print_exc()\n finally:\n self._draw_pending = False\n\n def drawRectangle(self, rect):\n # Draw the zoom rectangle to the QPainter. 
_draw_rect_callback needs\n # to be called at the end of paintEvent.\n if rect is not None:\n def _draw_rect_callback(painter):\n pen = QtGui.QPen(QtCore.Qt.black, 1 / self._dpi_ratio,\n QtCore.Qt.DotLine)\n painter.setPen(pen)\n painter.drawRect(*(pt / self._dpi_ratio for pt in rect))\n else:\n def _draw_rect_callback(painter):\n return\n self._draw_rect_callback = _draw_rect_callback\n self.update()\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n closing = QtCore.Signal()\n\n def closeEvent(self, event):\n self.closing.emit()\n QtWidgets.QMainWindow.closeEvent(self, event)\n\n\nclass FigureManagerQT(FigureManagerBase):\n \"\"\"\n Attributes\n ----------\n canvas : `FigureCanvas`\n The FigureCanvas instance\n num : int or str\n The Figure number\n toolbar : qt.QToolBar\n The qt.QToolBar\n window : qt.QMainWindow\n The qt.QMainWindow\n\n \"\"\"\n\n def __init__(self, canvas, num):\n FigureManagerBase.__init__(self, canvas, num)\n self.canvas = canvas\n self.window = MainWindow()\n self.window.closing.connect(canvas.close_event)\n self.window.closing.connect(self._widgetclosed)\n\n self.window.setWindowTitle(\"Figure %d\" % num)\n image = os.path.join(matplotlib.rcParams['datapath'],\n 'images', 'matplotlib.svg')\n self.window.setWindowIcon(QtGui.QIcon(image))\n\n # Give the keyboard focus to the figure instead of the\n # manager; StrongFocus accepts both tab and click to focus and\n # will enable the canvas to process event w/o clicking.\n # ClickFocus only takes the focus is the window has been\n # clicked\n # on. http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or\n # http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum\n self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)\n self.canvas.setFocus()\n\n self.window._destroying = False\n\n self.toolmanager = self._get_toolmanager()\n self.toolbar = self._get_toolbar(self.canvas, self.window)\n self.statusbar = None\n\n if self.toolmanager:\n backend_tools.add_tools_to_manager(self.toolmanager)\n if self.toolbar:\n backend_tools.add_tools_to_container(self.toolbar)\n self.statusbar = StatusbarQt(self.window, self.toolmanager)\n\n if self.toolbar is not None:\n self.window.addToolBar(self.toolbar)\n if not self.toolmanager:\n # add text label to status bar\n statusbar_label = QtWidgets.QLabel()\n self.window.statusBar().addWidget(statusbar_label)\n self.toolbar.message.connect(statusbar_label.setText)\n tbs_height = self.toolbar.sizeHint().height()\n else:\n tbs_height = 0\n\n # resize the main window so it will display the canvas with the\n # requested size:\n cs = canvas.sizeHint()\n sbs = self.window.statusBar().sizeHint()\n self._status_and_tool_height = tbs_height + sbs.height()\n height = cs.height() + self._status_and_tool_height\n self.window.resize(cs.width(), height)\n\n self.window.setCentralWidget(self.canvas)\n\n if matplotlib.is_interactive():\n self.window.show()\n self.canvas.draw_idle()\n\n def notify_axes_change(fig):\n # This will be called whenever the current axes is changed\n if self.toolbar is not None:\n self.toolbar.update()\n self.canvas.figure.add_axobserver(notify_axes_change)\n self.window.raise_()\n\n def full_screen_toggle(self):\n if self.window.isFullScreen():\n self.window.showNormal()\n else:\n self.window.showFullScreen()\n\n def _widgetclosed(self):\n if self.window._destroying:\n return\n self.window._destroying = True\n try:\n Gcf.destroy(self.num)\n except AttributeError:\n pass\n # It seems that when the python session is killed,\n # Gcf can get destroyed before the Gcf.destroy\n # line is run, 
leading to a useless AttributeError.\n\n def _get_toolbar(self, canvas, parent):\n # must be inited after the window, drawingArea and figure\n # attrs are set\n if matplotlib.rcParams['toolbar'] == 'toolbar2':\n toolbar = NavigationToolbar2QT(canvas, parent, False)\n elif matplotlib.rcParams['toolbar'] == 'toolmanager':\n toolbar = ToolbarQt(self.toolmanager, self.window)\n else:\n toolbar = None\n return toolbar\n\n def _get_toolmanager(self):\n if matplotlib.rcParams['toolbar'] == 'toolmanager':\n toolmanager = ToolManager(self.canvas.figure)\n else:\n toolmanager = None\n return toolmanager\n\n def resize(self, width, height):\n 'set the canvas size in pixels'\n self.window.resize(width, height + self._status_and_tool_height)\n\n def show(self):\n self.window.show()\n self.window.activateWindow()\n self.window.raise_()\n\n def destroy(self, *args):\n # check for qApp first, as PySide deletes it in its atexit handler\n if QtWidgets.QApplication.instance() is None:\n return\n if self.window._destroying:\n return\n self.window._destroying = True\n if self.toolbar:\n self.toolbar.destroy()\n self.window.close()\n\n def get_window_title(self):\n return six.text_type(self.window.windowTitle())\n\n def set_window_title(self, title):\n self.window.setWindowTitle(title)\n\n\nclass NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):\n message = QtCore.Signal(str)\n\n def __init__(self, canvas, parent, coordinates=True):\n \"\"\" coordinates: should we show the coordinates on the right? \"\"\"\n self.canvas = canvas\n self.parent = parent\n self.coordinates = coordinates\n self._actions = {}\n \"\"\"A mapping of toolitem method names to their QActions\"\"\"\n\n QtWidgets.QToolBar.__init__(self, parent)\n NavigationToolbar2.__init__(self, canvas)\n\n def _icon(self, name):\n if is_pyqt5():\n name = name.replace('.png', '_large.png')\n pm = QtGui.QPixmap(os.path.join(self.basedir, name))\n if hasattr(pm, 'setDevicePixelRatio'):\n pm.setDevicePixelRatio(self.canvas._dpi_ratio)\n return QtGui.QIcon(pm)\n\n def _init_toolbar(self):\n self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')\n\n for text, tooltip_text, image_file, callback in self.toolitems:\n if text is None:\n self.addSeparator()\n else:\n a = self.addAction(self._icon(image_file + '.png'),\n text, getattr(self, callback))\n self._actions[callback] = a\n if callback in ['zoom', 'pan']:\n a.setCheckable(True)\n if tooltip_text is not None:\n a.setToolTip(tooltip_text)\n if text == 'Subplots':\n a = self.addAction(self._icon(\"qt4_editor_options.png\"),\n 'Customize', self.edit_parameters)\n a.setToolTip('Edit axis, curve and image parameters')\n\n self.buttons = {}\n\n # Add the x,y location widget at the right side of the toolbar\n # The stretch factor is 1 which means any resizing of the toolbar\n # will resize this label instead of the buttons.\n if self.coordinates:\n self.locLabel = QtWidgets.QLabel(\"\", self)\n self.locLabel.setAlignment(\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)\n self.locLabel.setSizePolicy(\n QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,\n QtWidgets.QSizePolicy.Ignored))\n labelAction = self.addWidget(self.locLabel)\n labelAction.setVisible(True)\n\n # reference holder for subplots_adjust window\n self.adj_window = None\n\n # Esthetic adjustments - we need to set these explicitly in PyQt5\n # otherwise the layout looks different - but we don't want to set it if\n # not using HiDPI icons otherwise they look worse than before.\n if is_pyqt5():\n 
self.setIconSize(QtCore.QSize(24, 24))\n self.layout().setSpacing(12)\n\n if is_pyqt5():\n # For some reason, self.setMinimumHeight doesn't seem to carry over to\n # the actual sizeHint, so override it instead in order to make the\n # aesthetic adjustments noted above.\n def sizeHint(self):\n size = super(NavigationToolbar2QT, self).sizeHint()\n size.setHeight(max(48, size.height()))\n return size\n\n def edit_parameters(self):\n allaxes = self.canvas.figure.get_axes()\n if not allaxes:\n QtWidgets.QMessageBox.warning(\n self.parent, \"Error\", \"There are no axes to edit.\")\n return\n elif len(allaxes) == 1:\n axes, = allaxes\n else:\n titles = []\n for axes in allaxes:\n name = (axes.get_title() or\n \" - \".join(filter(None, [axes.get_xlabel(),\n axes.get_ylabel()])) or\n \"<anonymous {} (id: {:#x})>\".format(\n type(axes).__name__, id(axes)))\n titles.append(name)\n item, ok = QtWidgets.QInputDialog.getItem(\n self.parent, 'Customize', 'Select axes:', titles, 0, False)\n if ok:\n axes = allaxes[titles.index(six.text_type(item))]\n else:\n return\n\n figureoptions.figure_edit(axes, self)\n\n def _update_buttons_checked(self):\n # sync button checkstates to match active mode\n self._actions['pan'].setChecked(self._active == 'PAN')\n self._actions['zoom'].setChecked(self._active == 'ZOOM')\n\n def pan(self, *args):\n super(NavigationToolbar2QT, self).pan(*args)\n self._update_buttons_checked()\n\n def zoom(self, *args):\n super(NavigationToolbar2QT, self).zoom(*args)\n self._update_buttons_checked()\n\n def set_message(self, s):\n self.message.emit(s)\n if self.coordinates:\n self.locLabel.setText(s)\n\n def set_cursor(self, cursor):\n self.canvas.setCursor(cursord[cursor])\n\n def draw_rubberband(self, event, x0, y0, x1, y1):\n height = self.canvas.figure.bbox.height\n y1 = height - y1\n y0 = height - y0\n rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]\n self.canvas.drawRectangle(rect)\n\n def remove_rubberband(self):\n self.canvas.drawRectangle(None)\n\n def configure_subplots(self):\n image = os.path.join(matplotlib.rcParams['datapath'],\n 'images', 'matplotlib.png')\n dia = SubplotToolQt(self.canvas.figure, self.parent)\n dia.setWindowIcon(QtGui.QIcon(image))\n dia.exec_()\n\n def save_figure(self, *args):\n filetypes = self.canvas.get_supported_filetypes_grouped()\n sorted_filetypes = sorted(six.iteritems(filetypes))\n default_filetype = self.canvas.get_default_filetype()\n\n startpath = os.path.expanduser(\n matplotlib.rcParams['savefig.directory'])\n start = os.path.join(startpath, self.canvas.get_default_filename())\n filters = []\n selectedFilter = None\n for name, exts in sorted_filetypes:\n exts_list = \" \".join(['*.%s' % ext for ext in exts])\n filter = '%s (%s)' % (name, exts_list)\n if default_filetype in exts:\n selectedFilter = filter\n filters.append(filter)\n filters = ';;'.join(filters)\n\n fname, filter = _getSaveFileName(self.parent,\n \"Choose a filename to save to\",\n start, filters, selectedFilter)\n if fname:\n # Save dir for next time, unless empty str (i.e., use cwd).\n if startpath != \"\":\n matplotlib.rcParams['savefig.directory'] = (\n os.path.dirname(six.text_type(fname)))\n try:\n self.canvas.figure.savefig(six.text_type(fname))\n except Exception as e:\n QtWidgets.QMessageBox.critical(\n self, \"Error saving file\", six.text_type(e),\n QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)\n\n\nclass SubplotToolQt(UiSubplotTool):\n def __init__(self, targetfig, parent):\n UiSubplotTool.__init__(self, None)\n\n self._figure = targetfig\n\n 
for lower, higher in [(\"bottom\", \"top\"), (\"left\", \"right\")]:\n self._widgets[lower].valueChanged.connect(\n lambda val: self._widgets[higher].setMinimum(val + .001))\n self._widgets[higher].valueChanged.connect(\n lambda val: self._widgets[lower].setMaximum(val - .001))\n\n self._attrs = [\"top\", \"bottom\", \"left\", \"right\", \"hspace\", \"wspace\"]\n self._defaults = {attr: vars(self._figure.subplotpars)[attr]\n for attr in self._attrs}\n\n # Set values after setting the range callbacks, but before setting up\n # the redraw callbacks.\n self._reset()\n\n for attr in self._attrs:\n self._widgets[attr].valueChanged.connect(self._on_value_changed)\n for action, method in [(\"Export values\", self._export_values),\n (\"Tight layout\", self._tight_layout),\n (\"Reset\", self._reset),\n (\"Close\", self.close)]:\n self._widgets[action].clicked.connect(method)\n\n def _export_values(self):\n # Explicitly round to 3 decimals (which is also the spinbox precision)\n # to avoid numbers of the form 0.100...001.\n dialog = QtWidgets.QDialog()\n layout = QtWidgets.QVBoxLayout()\n dialog.setLayout(layout)\n text = QtWidgets.QPlainTextEdit()\n text.setReadOnly(True)\n layout.addWidget(text)\n text.setPlainText(\n \",\\n\".join(\"{}={:.3}\".format(attr, self._widgets[attr].value())\n for attr in self._attrs))\n # Adjust the height of the text widget to fit the whole text, plus\n # some padding.\n size = text.maximumSize()\n size.setHeight(\n QtGui.QFontMetrics(text.document().defaultFont())\n .size(0, text.toPlainText()).height() + 20)\n text.setMaximumSize(size)\n dialog.exec_()\n\n def _on_value_changed(self):\n self._figure.subplots_adjust(**{attr: self._widgets[attr].value()\n for attr in self._attrs})\n self._figure.canvas.draw_idle()\n\n def _tight_layout(self):\n self._figure.tight_layout()\n for attr in self._attrs:\n widget = self._widgets[attr]\n widget.blockSignals(True)\n widget.setValue(vars(self._figure.subplotpars)[attr])\n widget.blockSignals(False)\n self._figure.canvas.draw_idle()\n\n def _reset(self):\n for attr, value in self._defaults.items():\n self._widgets[attr].setValue(value)\n\n\nclass ToolbarQt(ToolContainerBase, QtWidgets.QToolBar):\n def __init__(self, toolmanager, parent):\n ToolContainerBase.__init__(self, toolmanager)\n QtWidgets.QToolBar.__init__(self, parent)\n self._toolitems = {}\n self._groups = {}\n self._last = None\n\n @property\n def _icon_extension(self):\n if is_pyqt5():\n return '_large.png'\n return '.png'\n\n def add_toolitem(\n self, name, group, position, image_file, description, toggle):\n\n button = QtWidgets.QToolButton(self)\n button.setIcon(self._icon(image_file))\n button.setText(name)\n if description:\n button.setToolTip(description)\n\n def handler():\n self.trigger_tool(name)\n if toggle:\n button.setCheckable(True)\n button.toggled.connect(handler)\n else:\n button.clicked.connect(handler)\n\n self._last = button\n self._toolitems.setdefault(name, [])\n self._add_to_group(group, name, button, position)\n self._toolitems[name].append((button, handler))\n\n def _add_to_group(self, group, name, button, position):\n gr = self._groups.get(group, [])\n if not gr:\n sep = self.addSeparator()\n gr.append(sep)\n before = gr[position]\n widget = self.insertWidget(before, button)\n gr.insert(position, widget)\n self._groups[group] = gr\n\n def _icon(self, name):\n pm = QtGui.QPixmap(name)\n if hasattr(pm, 'setDevicePixelRatio'):\n pm.setDevicePixelRatio(self.toolmanager.canvas._dpi_ratio)\n return QtGui.QIcon(pm)\n\n def toggle_toolitem(self, 
name, toggled):\n if name not in self._toolitems:\n return\n for button, handler in self._toolitems[name]:\n button.toggled.disconnect(handler)\n button.setChecked(toggled)\n button.toggled.connect(handler)\n\n def remove_toolitem(self, name):\n for button, handler in self._toolitems[name]:\n button.setParent(None)\n del self._toolitems[name]\n\n\nclass StatusbarQt(StatusbarBase, QtWidgets.QLabel):\n def __init__(self, window, *args, **kwargs):\n StatusbarBase.__init__(self, *args, **kwargs)\n QtWidgets.QLabel.__init__(self)\n window.statusBar().addWidget(self)\n\n def set_message(self, s):\n self.setText(s)\n\n\nclass ConfigureSubplotsQt(backend_tools.ConfigureSubplotsBase):\n def trigger(self, *args):\n image = os.path.join(matplotlib.rcParams['datapath'],\n 'images', 'matplotlib.png')\n parent = self.canvas.manager.window\n dia = SubplotToolQt(self.figure, parent)\n dia.setWindowIcon(QtGui.QIcon(image))\n dia.exec_()\n\n\nclass SaveFigureQt(backend_tools.SaveFigureBase):\n def trigger(self, *args):\n filetypes = self.canvas.get_supported_filetypes_grouped()\n sorted_filetypes = sorted(six.iteritems(filetypes))\n default_filetype = self.canvas.get_default_filetype()\n\n startpath = os.path.expanduser(\n matplotlib.rcParams['savefig.directory'])\n start = os.path.join(startpath, self.canvas.get_default_filename())\n filters = []\n selectedFilter = None\n for name, exts in sorted_filetypes:\n exts_list = \" \".join(['*.%s' % ext for ext in exts])\n filter = '%s (%s)' % (name, exts_list)\n if default_filetype in exts:\n selectedFilter = filter\n filters.append(filter)\n filters = ';;'.join(filters)\n\n parent = self.canvas.manager.window\n fname, filter = _getSaveFileName(parent,\n \"Choose a filename to save to\",\n start, filters, selectedFilter)\n if fname:\n # Save dir for next time, unless empty str (i.e., use cwd).\n if startpath != \"\":\n matplotlib.rcParams['savefig.directory'] = (\n os.path.dirname(six.text_type(fname)))\n try:\n self.canvas.figure.savefig(six.text_type(fname))\n except Exception as e:\n QtWidgets.QMessageBox.critical(\n self, \"Error saving file\", six.text_type(e),\n QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)\n\n\nclass SetCursorQt(backend_tools.SetCursorBase):\n def set_cursor(self, cursor):\n self.canvas.setCursor(cursord[cursor])\n\n\nclass RubberbandQt(backend_tools.RubberbandBase):\n def draw_rubberband(self, x0, y0, x1, y1):\n height = self.canvas.figure.bbox.height\n y1 = height - y1\n y0 = height - y0\n rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]\n self.canvas.drawRectangle(rect)\n\n def remove_rubberband(self):\n self.canvas.drawRectangle(None)\n\n\nbackend_tools.ToolSaveFigure = SaveFigureQt\nbackend_tools.ToolConfigureSubplots = ConfigureSubplotsQt\nbackend_tools.ToolSetCursor = SetCursorQt\nbackend_tools.ToolRubberband = RubberbandQt\n\n\ndef error_msg_qt(msg, parent=None):\n if not isinstance(msg, six.string_types):\n msg = ','.join(map(str, msg))\n\n QtWidgets.QMessageBox.warning(None, \"Matplotlib\",\n msg, QtGui.QMessageBox.Ok)\n\n\ndef exception_handler(type, value, tb):\n \"\"\"Handle uncaught exceptions\n It does not catch SystemExit\n \"\"\"\n msg = ''\n # get the filename attribute if available (for IOError)\n if hasattr(value, 'filename') and value.filename is not None:\n msg = value.filename + ': '\n if hasattr(value, 'strerror') and value.strerror is not None:\n msg += value.strerror\n else:\n msg += six.text_type(value)\n\n if len(msg):\n error_msg_qt(msg)\n\n\n@_Backend.export\nclass _BackendQT5(_Backend):\n 
FigureCanvas = FigureCanvasQT\n FigureManager = FigureManagerQT\n\n @staticmethod\n def trigger_manager_draw(manager):\n manager.canvas.draw_idle()\n\n @staticmethod\n def mainloop():\n # allow KeyboardInterrupt exceptions to close the plot window.\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n qApp.exec_()\n"
] | [
[
"matplotlib.backend_bases.FigureCanvasBase.get_width_height",
"matplotlib.backend_bases.FigureCanvasBase.enter_notify_event",
"matplotlib.backend_bases.NavigationToolbar2.__init__",
"matplotlib.backend_bases.ToolContainerBase.__init__",
"matplotlib.backend_bases.FigureCanvasBase.scroll_event",
"matplotlib.backends.qt_editor.formsubplottool.UiSubplotTool.__init__",
"matplotlib.backend_tools.add_tools_to_manager",
"matplotlib.backends.qt_editor.figureoptions.figure_edit",
"matplotlib._pylab_helpers.Gcf.destroy",
"matplotlib.backend_bases.FigureCanvasBase.button_release_event",
"matplotlib.backend_bases.TimerBase.__init__",
"matplotlib.is_interactive",
"matplotlib.backend_managers.ToolManager",
"matplotlib.backend_bases.FigureCanvasBase.key_press_event",
"matplotlib.backend_bases.FigureCanvasBase.leave_notify_event",
"matplotlib.backend_tools.add_tools_to_container",
"matplotlib.backend_bases.FigureCanvasBase.resize_event",
"matplotlib.backend_bases.FigureCanvasBase.motion_notify_event",
"matplotlib.backend_bases.FigureManagerBase.__init__",
"matplotlib.backend_bases.FigureCanvasBase.key_release_event",
"matplotlib.backend_bases.StatusbarBase.__init__",
"matplotlib.backend_bases.FigureCanvasBase.button_press_event"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
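A minimal usage sketch for the Qt5 backend classes listed in the row above (FigureCanvasQT, FigureManagerQT, NavigationToolbar2QT): embedding a matplotlib canvas in a Qt window. This is a hedged illustration, assuming PyQt5 and the public Agg-based subclasses FigureCanvasQTAgg / NavigationToolbar2QT from matplotlib.backends.backend_qt5agg; the window layout and plot data are illustrative, not taken from the row.

import sys
from PyQt5 import QtWidgets
from matplotlib.backends.backend_qt5agg import (
    FigureCanvasQTAgg as FigureCanvas,
    NavigationToolbar2QT as NavigationToolbar,
)
from matplotlib.figure import Figure

app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()

# Build a figure and wrap it in the Qt canvas provided by the backend above;
# FigureCanvasQTAgg is a QWidget, so it can be used as the central widget.
figure = Figure(figsize=(4, 3))
ax = figure.add_subplot(111)
ax.plot([0, 1, 2, 3], [0, 1, 4, 9])

canvas = FigureCanvas(figure)
window.addToolBar(NavigationToolbar(canvas, window))
window.setCentralWidget(canvas)
window.show()
sys.exit(app.exec_())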
LueyEscargot/pyGuiTest | [
"c072fe29a7c94dc60ec54344a5d4a91253d25f3f"
] | [
"pyside/lesson_08_main.py"
] | [
"\nimport sys\nimport argparse\nimport pandas as pd\n\nfrom PySide2.QtCore import QDateTime, QTimeZone\nfrom PySide2.QtWidgets import QApplication\nfrom lesson_08_main_window import MainWindow\nfrom lesson_08_mainWidget import Widget\n\n\ndef transform_date(utc, timezone=None):\n utc_fmt = \"yyyy-MM-ddTHH:mm:ss.zzzZ\"\n new_date = QDateTime().fromString(utc, utc_fmt)\n if timezone:\n new_date.setTimeZone(timezone)\n return new_date\n\n\ndef read_data(fname):\n # Read the CSV content\n df = pd.read_csv(fname)\n\n # Remove wrong magnitudes\n df = df.drop(df[df.mag < 0].index)\n magnitudes = df[\"mag\"]\n\n # My local timezone\n timezone = QTimeZone(b\"Aisa/ShangHai\")\n\n # Get timestamp transformed to our timezone\n times = df[\"time\"].apply(lambda x: transform_date(x, timezone))\n\n return times, magnitudes\n\n\nif __name__ == \"__main__\":\n options = argparse.ArgumentParser()\n options.add_argument(\"-f\", \"--file\", type=str, required=True)\n args = options.parse_args()\n data = read_data(args.file)\n\n # Qt Application\n app = QApplication(sys.argv)\n\n widget = Widget(data)\n window = MainWindow(widget)\n window.show()\n\n sys.exit(app.exec_())"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
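The row above converts UTC timestamp strings to a local time zone via QDateTime/QTimeZone; the zone id must be a valid IANA name ("Asia/Shanghai" rather than the misspelled "Aisa/ShangHai", which constructs an invalid QTimeZone). A standalone sketch of that conversion, assuming PySide2 is installed; the sample timestamp is illustrative and the structure mirrors the row's transform_date helper.

from PySide2.QtCore import QDateTime, QTimeZone, Qt

utc_fmt = "yyyy-MM-ddTHH:mm:ss.zzzZ"
stamp = QDateTime.fromString("2014-01-02T03:04:05.000Z", utc_fmt)

# An invalid IANA id yields QTimeZone.isValid() == False and the
# conversion silently does nothing useful, so validate it first.
zone = QTimeZone(b"Asia/Shanghai")
assert zone.isValid()

stamp.setTimeZone(zone)
print(stamp.toString(Qt.ISODate))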
wjsi/mars | [
"a69fb19edfe748d4393b90ff2c4941a76c084596",
"a69fb19edfe748d4393b90ff2c4941a76c084596",
"a69fb19edfe748d4393b90ff2c4941a76c084596",
"a69fb19edfe748d4393b90ff2c4941a76c084596",
"a69fb19edfe748d4393b90ff2c4941a76c084596",
"a69fb19edfe748d4393b90ff2c4941a76c084596",
"a69fb19edfe748d4393b90ff2c4941a76c084596",
"a69fb19edfe748d4393b90ff2c4941a76c084596",
"a69fb19edfe748d4393b90ff2c4941a76c084596"
] | [
"mars/tensor/fft/ifftn.py",
"mars/tensor/fft/irfft.py",
"mars/remote/tests/test_remote_function.py",
"mars/learn/neighbors/base.py",
"mars/services/mutable/tests/test_mutable.py",
"mars/tensor/images/imread.py",
"mars/tensor/reduction/all.py",
"mars/tensor/base/result_type.py",
"mars/dataframe/window/expanding/tests/test_expanding_execution.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ..datasource import tensor as astensor\nfrom .core import TensorComplexFFTNMixin, validate_fftn, TensorStandardFFTN\n\n\nclass TensorIFFTN(TensorStandardFFTN, TensorComplexFFTNMixin):\n _op_type_ = OperandDef.IFFTN\n\n def __init__(self, shape=None, axes=None, norm=None, **kw):\n super().__init__(_shape=shape, _axes=axes, _norm=norm, **kw)\n\n\ndef ifftn(a, s=None, axes=None, norm=None):\n \"\"\"\n Compute the N-dimensional inverse discrete Fourier Transform.\n\n This function computes the inverse of the N-dimensional discrete\n Fourier Transform over any number of axes in an M-dimensional tensor by\n means of the Fast Fourier Transform (FFT). In other words,\n ``ifftn(fftn(a)) == a`` to within numerical accuracy.\n For a description of the definitions and conventions used, see `mt.fft`.\n\n The input, analogously to `ifft`, should be ordered in the same way as is\n returned by `fftn`, i.e. it should have the term for zero frequency\n in all axes in the low-order corner, the positive frequency terms in the\n first half of all axes, the term for the Nyquist frequency in the middle\n of all axes and the negative frequency terms in the second half of all\n axes, in order of decreasingly negative frequency.\n\n Parameters\n ----------\n a : array_like\n Input tensor, can be complex.\n s : sequence of ints, optional\n Shape (length of each transformed axis) of the output\n (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).\n This corresponds to ``n`` for ``ifft(x, n)``.\n Along any axis, if the given shape is smaller than that of the input,\n the input is cropped. If it is larger, the input is padded with zeros.\n if `s` is not given, the shape of the input along the axes specified\n by `axes` is used. See notes for issue on `ifft` zero padding.\n axes : sequence of ints, optional\n Axes over which to compute the IFFT. If not given, the last ``len(s)``\n axes are used, or all axes if `s` is also not specified.\n Repeated indices in `axes` means that the inverse transform over that\n axis is performed multiple times.\n norm : {None, \"ortho\"}, optional\n Normalization mode (see `mt.fft`). 
Default is None.\n\n Returns\n -------\n out : complex Tensor\n The truncated or zero-padded input, transformed along the axes\n indicated by `axes`, or by a combination of `s` or `a`,\n as explained in the parameters section above.\n\n Raises\n ------\n ValueError\n If `s` and `axes` have different length.\n IndexError\n If an element of `axes` is larger than than the number of axes of `a`.\n\n See Also\n --------\n mt.fft : Overall view of discrete Fourier transforms, with definitions\n and conventions used.\n fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.\n ifft : The one-dimensional inverse FFT.\n ifft2 : The two-dimensional inverse FFT.\n ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning\n of tensor.\n\n Notes\n -----\n See `mt.fft` for definitions and conventions used.\n\n Zero-padding, analogously with `ifft`, is performed by appending zeros to\n the input along the specified dimension. Although this is the common\n approach, it might lead to surprising results. If another form of zero\n padding is desired, it must be performed before `ifftn` is called.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> a = mt.eye(4)\n >>> mt.fft.ifftn(mt.fft.fftn(a, axes=(0,)), axes=(1,)).execute()\n array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],\n [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],\n [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],\n [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])\n\n\n Create and plot an image with band-limited frequency content:\n\n >>> import matplotlib.pyplot as plt\n >>> n = mt.zeros((200,200), dtype=complex)\n >>> n[60:80, 20:40] = mt.exp(1j*mt.random.uniform(0, 2*mt.pi, (20, 20)))\n >>> im = mt.fft.ifftn(n).real\n >>> plt.imshow(im.execute())\n <matplotlib.image.AxesImage object at 0x...>\n >>> plt.show()\n\n \"\"\"\n a = astensor(a)\n axes = validate_fftn(a, s=s, axes=axes, norm=norm)\n op = TensorIFFTN(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.complex_))\n return op(a)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ..datasource import tensor as astensor\nfrom .core import TensorFFTMixin, validate_fft, TensorRealFFT\n\n\nclass TensorIRFFT(TensorRealFFT, TensorFFTMixin):\n _op_type_ = OperandDef.IRFFT\n\n def __init__(self, n=None, axis=-1, norm=None, **kw):\n super().__init__(_n=n, _axis=axis, _norm=norm, **kw)\n\n @classmethod\n def _get_shape(cls, op, shape):\n new_shape = list(shape)\n if op.n is not None:\n new_shape[op.axis] = op.n\n else:\n new_shape[op.axis] = 2 * (new_shape[op.axis] - 1)\n return tuple(new_shape)\n\n\ndef irfft(a, n=None, axis=-1, norm=None):\n \"\"\"\n Compute the inverse of the n-point DFT for real input.\n\n This function computes the inverse of the one-dimensional *n*-point\n discrete Fourier Transform of real input computed by `rfft`.\n In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical\n accuracy. (See Notes below for why ``len(a)`` is necessary here.)\n\n The input is expected to be in the form returned by `rfft`, i.e. the\n real zero-frequency term followed by the complex positive frequency terms\n in order of increasing frequency. Since the discrete Fourier Transform of\n real input is Hermitian-symmetric, the negative frequency terms are taken\n to be the complex conjugates of the corresponding positive frequency terms.\n\n Parameters\n ----------\n a : array_like\n The input tensor.\n n : int, optional\n Length of the transformed axis of the output.\n For `n` output points, ``n//2+1`` input points are necessary. If the\n input is longer than this, it is cropped. If it is shorter than this,\n it is padded with zeros. If `n` is not given, it is determined from\n the length of the input along the axis specified by `axis`.\n axis : int, optional\n Axis over which to compute the inverse FFT. If not given, the last\n axis is used.\n norm : {None, \"ortho\"}, optional\n Normalization mode (see `mt.fft`). Default is None.\n\n Returns\n -------\n out : Tensor\n The truncated or zero-padded input, transformed along the axis\n indicated by `axis`, or the last one if `axis` is not specified.\n The length of the transformed axis is `n`, or, if `n` is not given,\n ``2*(m-1)`` where ``m`` is the length of the transformed axis of the\n input. 
To get an odd number of output points, `n` must be specified.\n\n Raises\n ------\n IndexError\n If `axis` is larger than the last axis of `a`.\n\n See Also\n --------\n mt.fft : For definition of the DFT and conventions used.\n rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.\n fft : The one-dimensional FFT.\n irfft2 : The inverse of the two-dimensional FFT of real input.\n irfftn : The inverse of the *n*-dimensional FFT of real input.\n\n Notes\n -----\n Returns the real valued `n`-point inverse discrete Fourier transform\n of `a`, where `a` contains the non-negative frequency terms of a\n Hermitian-symmetric sequence. `n` is the length of the result, not the\n input.\n\n If you specify an `n` such that `a` must be zero-padded or truncated, the\n extra/removed values will be added/removed at high frequencies. One can\n thus resample a series to `m` points via Fourier interpolation by:\n ``a_resamp = irfft(rfft(a), m)``.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.fft.ifft([1, -1j, -1, 1j]).execute()\n array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])\n >>> mt.fft.irfft([1, -1j, -1]).execute()\n array([ 0., 1., 0., 0.])\n\n Notice how the last term in the input to the ordinary `ifft` is the\n complex conjugate of the second term, and the output has zero imaginary\n part everywhere. When calling `irfft`, the negative frequencies are not\n specified, and the output array is purely real.\n\n \"\"\"\n a = astensor(a)\n validate_fft(a, axis=axis, norm=norm)\n op = TensorIRFFT(n=n, axis=axis, norm=norm, dtype=np.dtype(np.float_))\n return op(a)\n",
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom ... import dataframe as md\nfrom ... import tensor as mt\nfrom ...core import tile\nfrom ...deploy.oscar.session import get_default_session\nfrom ...learn.utils import shuffle\nfrom ...lib.mmh3 import hash as mmh3_hash\nfrom .. import spawn, ExecutableTuple\n\n\ndef test_params():\n def f(x):\n return x + 1\n\n r = spawn(f, args=(1,))\n c = tile(r).chunks[0]\n assert isinstance(c.params, dict)\n c.params = c.get_params_from_data(2)\n assert isinstance(c.params, dict)\n\n params = c.params\n params.pop(\"index\", None)\n r.params = params\n r.refresh_params()\n\n\ndef test_remote_function(setup):\n session = setup\n\n def f1(x):\n return x + 1\n\n def f2(x, y, z=None):\n return x * y * (z[0] + z[1])\n\n rs = np.random.RandomState(0)\n raw1 = rs.rand(10, 10)\n raw2 = rs.rand(10, 10)\n\n r1 = spawn(f1, raw1)\n r2 = spawn(f1, raw2)\n r3 = spawn(f2, (r1, r2), {\"z\": [r1, r2]})\n\n result = r3.execute().fetch()\n expected = (raw1 + 1) * (raw2 + 1) * (raw1 + 1 + raw2 + 1)\n np.testing.assert_almost_equal(result, expected)\n\n with pytest.raises(TypeError):\n spawn(f2, (r1, r2), kwargs=())\n\n session_id = session.session_id\n\n def f():\n assert get_default_session().session_id == session_id\n return mt.ones((2, 3)).sum().to_numpy()\n\n assert spawn(f).execute().fetch() == 6\n\n\ndef test_multi_output(setup):\n sentences = [\"word1 word2\", \"word2 word3\", \"word3 word2 word1\"]\n\n def mapper(s):\n word_to_count = defaultdict(lambda: 0)\n for word in s.split():\n word_to_count[word] += 1\n\n downsides = [defaultdict(lambda: 0), defaultdict(lambda: 0)]\n for word, count in word_to_count.items():\n downsides[mmh3_hash(word) % 2][word] += count\n\n return downsides\n\n def reducer(word_to_count_list):\n d = defaultdict(lambda: 0)\n for word_to_count in word_to_count_list:\n for word, count in word_to_count.items():\n d[word] += count\n\n return dict(d)\n\n outs = [], []\n for sentence in sentences:\n out1, out2 = spawn(mapper, sentence, n_output=2)\n outs[0].append(out1)\n outs[1].append(out2)\n\n rs = []\n for out in outs:\n r = spawn(reducer, out)\n rs.append(r)\n\n result = dict()\n for wc in ExecutableTuple(rs).to_object():\n result.update(wc)\n\n assert result == {\"word1\": 2, \"word2\": 3, \"word3\": 2}\n\n\ndef test_chained_remote(setup):\n def f(x):\n return x + 1\n\n def g(x):\n return x * 2\n\n s = spawn(g, spawn(f, 2))\n\n result = s.execute().fetch()\n assert result == 6\n\n\ndef test_input_tileable(setup):\n def f(t, x):\n return (t * x).sum().to_numpy()\n\n rs = np.random.RandomState(0)\n raw = rs.rand(5, 4)\n\n t1 = mt.tensor(raw, chunk_size=3)\n t2 = t1.sum(axis=0)\n s = spawn(f, args=(t2, 3))\n\n result = s.execute().fetch()\n expected = (raw.sum(axis=0) * 3).sum()\n assert pytest.approx(result) == expected\n\n df1 = md.DataFrame(raw, chunk_size=3)\n df1.execute()\n df2 = 
shuffle(df1)\n df2.execute()\n\n def f2(input_df):\n bonus = input_df.iloc[:, 0].fetch().sum()\n return input_df.sum().to_pandas() + bonus\n\n for df in [df1, df2]:\n s = spawn(f2, args=(df,))\n\n result = s.execute().fetch()\n expected = pd.DataFrame(raw).sum() + raw[:, 0].sum()\n pd.testing.assert_series_equal(result, expected)\n\n\ndef test_unknown_shape_inputs(setup):\n def f(t, x):\n assert all(not np.isnan(s) for s in t.shape)\n return (t * x).sum().to_numpy(extra_config={\"check_nsplits\": False})\n\n rs = np.random.RandomState(0)\n raw = rs.rand(5, 4)\n\n t1 = mt.tensor(raw, chunk_size=3)\n t2 = t1[t1 > 0]\n s = spawn(f, args=(t2, 3))\n\n result = s.execute().fetch()\n expected = (raw[raw > 0] * 3).sum()\n assert pytest.approx(result) == expected\n\n\ndef test_none_outputs(setup):\n def f(*_args):\n pass\n\n r1 = spawn(f, args=(0,))\n r2 = spawn(f, args=(r1, 1))\n r3 = spawn(f, args=(r1, 2))\n r4 = spawn(f, args=(r2, r3))\n\n assert r4.execute().fetch() is None\n",
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nimport numpy as np\nfrom sklearn.base import BaseEstimator, MultiOutputMixin\n\nfrom ... import tensor as mt\nfrom ...tensor.reshape.reshape import _reshape as reshape_unchecked\nfrom ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS\nfrom ..metrics import pairwise_distances_topk\nfrom ..utils import check_array\nfrom ..utils.validation import check_is_fitted\nfrom ._ball_tree import create_ball_tree, ball_tree_query, SklearnBallTree\nfrom ._kd_tree import create_kd_tree, kd_tree_query, SklearnKDTree\nfrom ._faiss import build_faiss_index, faiss_query, METRIC_TO_FAISS_METRIC_TYPE\nfrom ._proxima import build_proxima_index, proxima_query, METRIC_TO_PROXIMA_METRIC_TYPE\nfrom ._kneighbors_graph import KNeighborsGraph\n\n\nVALID_METRICS = dict(\n ball_tree=SklearnBallTree.valid_metrics,\n kd_tree=SklearnKDTree.valid_metrics,\n # The following list comes from the\n # sklearn.metrics.pairwise doc string\n brute=(\n list(PAIRWISE_DISTANCE_FUNCTIONS.keys())\n + [\n \"braycurtis\",\n \"canberra\",\n \"chebyshev\",\n \"correlation\",\n \"cosine\",\n \"dice\",\n \"hamming\",\n \"jaccard\",\n \"kulsinski\",\n \"mahalanobis\",\n \"matching\",\n \"minkowski\",\n \"rogerstanimoto\",\n \"russellrao\",\n \"seuclidean\",\n \"sokalmichener\",\n \"sokalsneath\",\n \"sqeuclidean\",\n \"yule\",\n \"wminkowski\",\n ]\n ),\n faiss=list(METRIC_TO_FAISS_METRIC_TYPE),\n proxima=list(METRIC_TO_PROXIMA_METRIC_TYPE),\n)\n\n\nVALID_METRICS_SPARSE = dict(\n ball_tree=[], kd_tree=[], brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys() - {\"haversine\"})\n)\n\n\nclass NeighborsBase(BaseEstimator, MultiOutputMixin, metaclass=ABCMeta):\n \"\"\"Base class for nearest neighbors estimators.\"\"\"\n\n @abstractmethod\n def __init__(\n self,\n n_neighbors=None,\n radius=None,\n algorithm=\"auto\",\n leaf_size=30,\n metric=\"minkowski\",\n p=2,\n metric_params=None,\n n_jobs=None,\n ):\n\n self.n_neighbors = n_neighbors\n self.radius = radius\n self.algorithm = algorithm\n self.leaf_size = leaf_size\n self.metric = metric\n self.metric_params = metric_params\n self.p = p\n self.n_jobs = n_jobs\n self._check_algorithm_metric()\n\n def _check_algorithm_metric(self):\n if self.algorithm not in [\n \"auto\",\n \"brute\",\n \"kd_tree\",\n \"ball_tree\",\n \"faiss\",\n \"proxima\",\n ]:\n raise ValueError(f\"unrecognized algorithm: '{self.algorithm}'\")\n\n if self.algorithm == \"auto\":\n if self.metric == \"precomputed\":\n alg_check = \"brute\"\n elif callable(self.metric) or self.metric in VALID_METRICS[\"ball_tree\"]:\n alg_check = \"ball_tree\"\n else:\n alg_check = \"brute\"\n else:\n alg_check = self.algorithm\n\n if callable(self.metric):\n if self.algorithm == \"kd_tree\":\n # callable metric is only valid for brute force and ball_tree\n raise ValueError(\n \"kd_tree algorithm does not support callable metric '%s'\"\n % self.metric\n )\n elif self.metric not in 
VALID_METRICS[alg_check]:\n raise ValueError(\n \"Metric '%s' not valid. Use \"\n \"sorted(sklearn.neighbors.VALID_METRICS['%s']) \"\n \"to get valid options. \"\n \"Metric can also be a callable function.\" % (self.metric, alg_check)\n )\n\n if self.metric_params is not None and \"p\" in self.metric_params:\n warnings.warn(\n \"Parameter p is found in metric_params. \"\n \"The corresponding parameter from __init__ \"\n \"is ignored.\",\n SyntaxWarning,\n stacklevel=3,\n )\n effective_p = self.metric_params[\"p\"]\n else:\n effective_p = self.p\n\n if self.metric in [\"wminkowski\", \"minkowski\"] and effective_p < 1:\n raise ValueError(\"p must be greater than one for minkowski metric\")\n\n def _fit(self, X, session=None, run_kwargs=None):\n self._check_algorithm_metric()\n if self.metric_params is None:\n self.effective_metric_params_ = {}\n else:\n self.effective_metric_params_ = self.metric_params.copy()\n\n effective_p = self.effective_metric_params_.get(\"p\", self.p)\n if self.metric in [\"wminkowski\", \"minkowski\"]:\n self.effective_metric_params_[\"p\"] = effective_p\n\n self.effective_metric_ = self.metric\n # For minkowski distance, use more efficient methods where available\n if self.metric == \"minkowski\":\n p = self.effective_metric_params_.pop(\"p\", 2)\n if p < 1: # pragma: no cover\n raise ValueError(\"p must be greater than one \" \"for minkowski metric\")\n elif p == 1:\n self.effective_metric_ = \"manhattan\"\n elif p == 2:\n self.effective_metric_ = \"euclidean\"\n elif p == np.inf:\n self.effective_metric_ = \"chebyshev\"\n else:\n self.effective_metric_params_[\"p\"] = p\n\n if isinstance(X, NeighborsBase):\n self._fit_X = X._fit_X\n self._tree = X._tree\n self._fit_method = X._fit_method\n return self\n\n elif isinstance(X, SklearnBallTree):\n self._fit_X = mt.tensor(X.data)\n self._tree = X\n self._fit_method = \"ball_tree\"\n return self\n\n elif isinstance(X, SklearnKDTree):\n self._fit_X = mt.tensor(X.data)\n self._tree = X\n self._fit_method = \"kd_tree\"\n return self\n\n X = check_array(X, accept_sparse=True)\n\n if np.isnan(X.size):\n # if X has unknown shape, execute it first\n X.execute(session=session, **(run_kwargs or dict()))\n\n if X.issparse():\n if self.algorithm not in (\"auto\", \"brute\"):\n warnings.warn(\"cannot use tree with sparse input: \" \"using brute force\")\n if self.effective_metric_ not in VALID_METRICS_SPARSE[\n \"brute\"\n ] and not callable(self.effective_metric_):\n raise ValueError(\n \"Metric '%s' not valid for sparse input. \"\n \"Use sorted(sklearn.neighbors.\"\n \"VALID_METRICS_SPARSE['brute']) \"\n \"to get valid options. 
\"\n \"Metric can also be a callable function.\" % (self.effective_metric_)\n )\n self._fit_X = X.copy()\n self._tree = None\n self._fit_method = \"brute\"\n return self\n\n self._fit_method = self.algorithm\n self._fit_X = X\n\n if self._fit_method == \"auto\":\n # A tree approach is better for small number of neighbors,\n # and KDTree is generally faster when available\n if (\n self.n_neighbors is None or self.n_neighbors < self._fit_X.shape[0] // 2\n ) and self.metric != \"precomputed\":\n if self.effective_metric_ in VALID_METRICS[\"kd_tree\"]:\n self._fit_method = \"kd_tree\"\n elif (\n callable(self.effective_metric_)\n or self.effective_metric_ in VALID_METRICS[\"ball_tree\"]\n ):\n self._fit_method = \"ball_tree\"\n else:\n self._fit_method = \"brute\"\n else:\n self._fit_method = \"brute\"\n\n if self._fit_method == \"ball_tree\":\n self._tree = tree = create_ball_tree(\n X,\n self.leaf_size,\n metric=self.effective_metric_,\n **self.effective_metric_params_,\n )\n tree.execute(session=session, **(run_kwargs or dict()))\n elif self._fit_method == \"kd_tree\":\n self._tree = tree = create_kd_tree(\n X,\n self.leaf_size,\n metric=self.effective_metric_,\n **self.effective_metric_params_,\n )\n tree.execute(session=session, **(run_kwargs or dict()))\n elif self._fit_method == \"brute\":\n self._tree = None\n elif self._fit_method == \"faiss\":\n faiss_index = build_faiss_index(\n X, metric=self.effective_metric_, **self.effective_metric_params_\n )\n faiss_index.execute(session=session, **(run_kwargs or dict()))\n self._faiss_index = faiss_index\n elif self._fit_method == \"proxima\": # pragma: no cover\n proxima_metric = METRIC_TO_PROXIMA_METRIC_TYPE[self.effective_metric_]\n proxima_index = build_proxima_index(\n X,\n distance_metric=proxima_metric,\n topk=self.n_neighbors,\n session=session,\n run_kwargs=run_kwargs,\n **self.effective_metric_params_,\n )\n self._proxima_index = proxima_index\n else: # pragma: no cover\n raise ValueError(\"algorithm = '%s' not recognized\" % self.algorithm)\n\n if self.n_neighbors is not None:\n if self.n_neighbors <= 0:\n raise ValueError(f\"Expected n_neighbors > 0. Got {self.n_neighbors}\")\n else:\n if not np.issubdtype(type(self.n_neighbors), np.integer):\n raise TypeError(\n f\"n_neighbors does not take {type(self.n_neighbors)} value, \"\n \"enter integer value\"\n )\n\n return self\n\n\nclass KNeighborsMixin:\n \"\"\"Mixin for k-neighbors searches\"\"\"\n\n def kneighbors(\n self,\n X=None,\n n_neighbors=None,\n return_distance=True,\n session=None,\n run_kwargs=None,\n **kw,\n ):\n \"\"\"Finds the K-neighbors of a point.\n Returns indices of and distances to the neighbors of each point.\n\n Parameters\n ----------\n X : array-like, shape (n_query, n_features), \\\n or (n_query, n_indexed) if metric == 'precomputed'\n The query point or points.\n If not provided, neighbors of each indexed point are returned.\n In this case, the query point is not considered its own neighbor.\n\n n_neighbors : int\n Number of neighbors to get (default is the value\n passed to the constructor).\n\n return_distance : boolean, optional. 
Defaults to True.\n If False, distances will not be returned\n\n Returns\n -------\n dist : Tensor\n Array representing the lengths to points, only present if\n return_distance=True\n\n ind : Tensor\n Indices of the nearest points in the population matrix.\n\n Examples\n --------\n In the following example, we construct a NeighborsClassifier\n class from a tensor representing our data set and ask who's\n the closest point to [1,1,1]\n\n >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]\n >>> from mars.learn.neighbors import NearestNeighbors\n >>> neigh = NearestNeighbors(n_neighbors=1)\n >>> neigh.fit(samples) # doctest: +ELLIPSIS\n NearestNeighbors(algorithm='auto', leaf_size=30, ...)\n >>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS\n (array([[0.5]]), array([[2]]))\n\n As you can see, it returns [[0.5]], and [[2]], which means that the\n element is at distance 0.5 and is the third element of samples\n (indexes start at 0). You can also query for multiple points:\n\n >>> X = [[0., 1., 0.], [1., 0., 1.]]\n >>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS\n array([[1],\n [2]]...)\n\n \"\"\"\n check_is_fitted(self, [\"_fit_method\", \"_fit_X\"], all_or_any=any)\n\n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n elif n_neighbors <= 0:\n raise ValueError(f\"Expected n_neighbors > 0. Got {n_neighbors}\")\n else:\n if not np.issubdtype(type(n_neighbors), np.integer):\n raise TypeError(\n f\"n_neighbors does not take {type(n_neighbors)} value, \"\n \"enter integer value\"\n )\n\n if X is not None:\n query_is_train = False\n X = check_array(X, accept_sparse=True)\n else:\n query_is_train = True\n X = self._fit_X\n # Include an extra neighbor to account for the sample itself being\n # returned, which is removed later\n n_neighbors += 1\n\n if X.key == self._fit_X.key and X is not self._fit_X:\n X = self._fit_X\n if np.isnan(X.size):\n # has unknown size, execute first\n X.execute(session=session, **(run_kwargs or dict()))\n\n train_size = self._fit_X.shape[0]\n if n_neighbors > train_size:\n raise ValueError(\n \"Expected n_neighbors <= n_samples, \"\n f\"but n_samples = {train_size}, n_neighbors = {n_neighbors}\"\n )\n n_samples, _ = X.shape\n sample_range = mt.arange(n_samples)[:, None]\n\n if self._fit_method == \"brute\":\n # for efficiency, use squared euclidean distances\n kwds = (\n {\"squared\": True}\n if self.effective_metric_ == \"euclidean\"\n else self.effective_metric_params_\n )\n\n neigh_dist, neigh_ind = pairwise_distances_topk(\n X, self._fit_X, k=n_neighbors, metric=self.effective_metric_, **kwds\n )\n if return_distance:\n if self.effective_metric_ == \"euclidean\":\n result = mt.sqrt(neigh_dist), neigh_ind\n else:\n result = neigh_dist, neigh_ind\n else:\n result = neigh_ind\n elif self._fit_method in [\"ball_tree\", \"kd_tree\"]:\n if X.issparse():\n raise ValueError(\n f\"{self._fit_method} does not work with sparse matrices. \"\n \"Densify the data, or set algorithm='brute'\"\n )\n\n query = (\n ball_tree_query if self._fit_method == \"ball_tree\" else kd_tree_query\n )\n result = query(self._tree, X, n_neighbors, return_distance)\n elif self._fit_method == \"faiss\":\n if X.issparse():\n raise ValueError(\n f\"{self._fit_method} does not work with sparse matrices. 
\"\n \"Densify the data, or set algorithm='brute'\"\n )\n result = faiss_query(\n self._faiss_index, X, n_neighbors, return_distance, **kw\n )\n elif self._fit_method == \"proxima\": # pragma: no cover\n if X.issparse():\n raise ValueError(\n f\"{self._fit_method} does not work with sparse matrices. \"\n \"Densify the data, or set algorithm='brute'\"\n )\n ind, dis = proxima_query(\n X, n_neighbors, index=self._proxima_index, run=False, **kw\n )\n if not return_distance:\n result = ind\n else:\n result = (dis, ind)\n else: # pragma: no cover\n raise ValueError(\"internal: _fit_method not recognized\")\n\n if not query_is_train:\n if isinstance(result, (tuple, list)):\n result = mt.ExecutableTuple(result)\n result.execute(session=session, **(run_kwargs or dict()))\n return result\n else:\n # If the query data is the same as the indexed data, we would like\n # to ignore the first nearest neighbor of every sample, i.e\n # the sample itself.\n if return_distance:\n dist, neigh_ind = result\n else:\n neigh_ind = result\n\n sample_mask = neigh_ind != sample_range\n\n # Corner case: When the number of duplicates are more\n # than the number of neighbors, the first NN will not\n # be the sample, but a duplicate.\n # In that case mask the first duplicate.\n dup_gr_nbrs = mt.all(sample_mask, axis=1)\n sample_mask[:, 0] = mt.where(dup_gr_nbrs, False, sample_mask[:, 0])\n\n neigh_ind = reshape_unchecked(\n neigh_ind[sample_mask], (n_samples, n_neighbors - 1)\n )\n\n if return_distance:\n dist = reshape_unchecked(\n dist[sample_mask], (n_samples, n_neighbors - 1)\n )\n ret = mt.ExecutableTuple([dist, neigh_ind])\n ret.execute(session=session, **(run_kwargs or dict()))\n return ret\n neigh_ind.execute(session=session, **(run_kwargs or dict()))\n return neigh_ind\n\n def kneighbors_graph(\n self,\n X=None,\n n_neighbors=None,\n mode=\"connectivity\",\n session=None,\n run_kwargs=None,\n ):\n \"\"\"Computes the (weighted) graph of k-Neighbors for points in X\n\n Parameters\n ----------\n X : array-like, shape (n_query, n_features), \\\n or (n_query, n_indexed) if metric == 'precomputed'\n The query point or points.\n If not provided, neighbors of each indexed point are returned.\n In this case, the query point is not considered its own neighbor.\n\n n_neighbors : int\n Number of neighbors for each sample.\n (default is value passed to the constructor).\n\n mode : {'connectivity', 'distance'}, optional\n Type of returned matrix: 'connectivity' will return the\n connectivity matrix with ones and zeros, in 'distance' the\n edges are Euclidean distance between points.\n\n Returns\n -------\n A : SparseTensor, shape = [n_samples, n_samples_fit]\n n_samples_fit is the number of samples in the fitted data\n A[i, j] is assigned the weight of edge that connects i to j.\n\n Examples\n --------\n >>> X = [[0], [3], [1]]\n >>> from mars.learn.neighbors import NearestNeighbors\n >>> neigh = NearestNeighbors(n_neighbors=2)\n >>> neigh.fit(X) # doctest: +ELLIPSIS\n NearestNeighbors(algorithm='auto', leaf_size=30, ...)\n >>> A = neigh.kneighbors_graph(X)\n >>> A.fetch().toarray()\n array([[1., 0., 1.],\n [0., 1., 1.],\n [1., 0., 1.]])\n\n See also\n --------\n NearestNeighbors.radius_neighbors_graph\n \"\"\"\n check_is_fitted(self, [\"_fit_method\", \"_fit_X\"], all_or_any=any)\n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n\n # kneighbors does the None handling.\n if X is not None:\n X = check_array(X, accept_sparse=True)\n n_samples1 = X.shape[0]\n else:\n n_samples1 = self._fit_X.shape[0]\n\n n_samples2 = 
self._fit_X.shape[0]\n\n if mode == \"connectivity\":\n A_data = None\n A_ind = self.kneighbors(X, n_neighbors, return_distance=False)\n\n elif mode == \"distance\":\n A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)\n\n else:\n raise ValueError(\n 'Unsupported mode, must be one of \"connectivity\" '\n f'or \"distance\" but got {mode} instead'\n )\n\n op = KNeighborsGraph(\n a_data=A_data, a_ind=A_ind, n_neighbors=n_neighbors, sparse=True\n )\n graph = op(A_data, A_ind, shape=(n_samples1, n_samples2))\n graph.execute(session=session, **(run_kwargs or dict()))\n return graph\n\n\nclass UnsupervisedMixin:\n def fit(self, X, y=None, session=None, run_kwargs=None):\n \"\"\"Fit the model using X as training data\n\n Parameters\n ----------\n X : {array-like, tensor, BallTree, KDTree}\n Training data. If tensor, shape [n_samples, n_features],\n or [n_samples, n_samples] if metric='precomputed'.\n \"\"\"\n return self._fit(X, session=session, run_kwargs=run_kwargs)\n",
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport uuid\nimport sys\n\nimport pytest\nimport numpy as np\n\nfrom ....deploy.oscar.local import new_cluster\nfrom ....deploy.oscar.session import AsyncSession, SyncSession\nfrom ..core import MutableTensor\nfrom ..utils import normalize_timestamp\n\n\n_is_windows = sys.platform.lower().startswith(\"win\")\n\n\[email protected]\nasync def create_cluster():\n client = await new_cluster(n_worker=2, n_cpu=2, web=True)\n async with client:\n yield client\n\n\[email protected](_is_windows, reason=\"FIXME\")\[email protected](\n \"session_type\",\n [\"async_session\", \"async_web_session\", \"sync_session\", \"sync_web_session\"],\n)\[email protected]\nasync def test_mutable_tensor(create_cluster, session_type):\n is_web = \"web\" in session_type\n is_async = \"async\" in session_type\n\n if is_web:\n session_id = str(uuid.uuid4())\n session = await AsyncSession.init(create_cluster.web_address, session_id)\n else:\n session = create_cluster.session\n if not is_async:\n session = SyncSession.from_isolated_session(session)\n\n tensor_useless: MutableTensor = session.create_mutable_tensor( # noqa: F841\n shape=(10, 30, 50), dtype=np.int64, default_value=100, chunk_size=(20, 20, 20)\n )\n if is_async:\n tensor_useless = await tensor_useless\n\n tensor: MutableTensor = session.create_mutable_tensor(\n shape=(10, 30, 50),\n dtype=np.int64,\n name=\"mytensor\",\n default_value=100,\n chunk_size=(20, 20, 20),\n )\n if is_async:\n tensor = await tensor\n\n assert tensor.shape == (10, 30, 50)\n assert tensor.dtype == np.int64\n assert tensor.name == \"mytensor\"\n assert tensor.default_value == 100\n\n assert tensor_useless.name != tensor.name\n\n # non exists\n with pytest.raises(ValueError):\n tensor1 = session.get_mutable_tensor(\"notensor\")\n if is_async:\n tensor1 = await tensor1\n\n # create with duplicate name\n with pytest.raises(ValueError):\n tensor2 = session.create_mutable_tensor(\n shape=(10, 30, 50),\n dtype=np.int64,\n name=\"mytensor\",\n default_value=100,\n chunk_size=(20, 20, 20),\n )\n if is_async:\n tensor2 = await tensor2\n\n tensor3: MutableTensor = session.get_mutable_tensor(\"mytensor\")\n if is_async:\n tensor3 = await tensor3\n assert tensor3.shape == (10, 30, 50)\n assert tensor3.dtype == np.int64\n assert tensor3.name == \"mytensor\"\n assert tensor3.default_value == 100\n\n # test using read/write\n\n expected = np.full((10, 30, 50), fill_value=100)\n xs = await tensor3.read(slice(None, None, None))\n np.testing.assert_array_equal(expected, xs)\n\n await tensor.write(slice(None, None, None), 1)\n expected[:] = 1\n xs = await tensor3.read(slice(None, None, None))\n np.testing.assert_array_equal(expected, xs)\n\n await tensor.write((9, 2, 3), 2)\n expected[9, 2, 3] = 2\n xs = await tensor3.read((9, 2, 3))\n assert expected[9, 2, 3] == xs\n\n await tensor.write((slice(2, 9, 3), slice(5, 15, None), slice(8, 50, 9)), 4)\n expected[2:9:3, 5:15, 8:50:9] = 4\n xs = 
await tensor3.read(slice(None, None, None))\n np.testing.assert_array_equal(expected, xs)\n\n # test using __getitem__/__setitem__\n\n # reset\n tensor[:] = 100\n\n expected = np.full((10, 30, 50), fill_value=100)\n xs = tensor3[:]\n np.testing.assert_array_equal(expected, xs)\n\n tensor[:] = 1\n expected[:] = 1\n xs = tensor3[:]\n np.testing.assert_array_equal(expected, xs)\n\n tensor[9, 2, 3] = 2\n expected[9, 2, 3] = 2\n xs = tensor3[9, 2, 3]\n assert expected[9, 2, 3] == xs\n\n tensor[2:19:3, 5:15, 8:50:9] = 4\n expected[2:19:3, 5:15, 8:50:9] = 4\n xs = tensor3[:]\n np.testing.assert_array_equal(expected, xs)\n\n # seal\n\n if is_async:\n sealed = await tensor.seal()\n info = await session.execute(sealed)\n await info\n value = await session.fetch(sealed)\n else:\n sealed = await tensor.seal()\n session.execute(sealed)\n value = session.fetch(sealed)\n np.testing.assert_array_equal(expected, value)\n\n # non exists after sealed\n with pytest.raises(ValueError):\n await tensor.seal()\n with pytest.raises(ValueError):\n await tensor3.seal()\n\n # TODO: real fancy index not supported yet, as `TensorConcatenate` involved\n #\n # await tensor.write(([11, 2, 3, 50], [14, 5, 6, 50], [17, 8, 9, 50]), 3)\n # expected[[11, 2, 3, 50], [14, 5, 6, 50], [17, 8, 9, 50]] = 3\n # xs = await tensor1[:]\n # np.testing.assert_array_equal(expected, xs)\n\n\[email protected](_is_windows, reason=\"FIXME\")\[email protected](\n \"session_type\",\n [\"async_session\", \"async_web_session\", \"sync_session\", \"sync_web_session\"],\n)\[email protected]\nasync def test_mutable_tensor_timestamp(create_cluster, session_type):\n is_web = \"web\" in session_type\n is_async = \"async\" in session_type\n\n if is_web:\n session_id = str(uuid.uuid4())\n session = await AsyncSession.init(create_cluster.web_address, session_id)\n else:\n session = create_cluster.session\n if not is_async:\n session = SyncSession.from_isolated_session(session)\n\n tensor: MutableTensor = session.create_mutable_tensor(\n shape=(2, 4), dtype=np.int64, default_value=0, chunk_size=(1, 3)\n )\n if is_async:\n tensor = await tensor\n\n assert tensor.shape == (2, 4)\n assert tensor.dtype == np.int64\n assert tensor.default_value == 0\n\n t0 = normalize_timestamp()\n await asyncio.sleep(5)\n t1 = normalize_timestamp()\n\n # write with earlier timestamp\n await tensor.write((slice(0, 2, 1), slice(0, 2, 1)), 1, timestamp=t1)\n\n # read staled value\n actual = await tensor.read(slice(None, None, None), t0)\n expected = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])\n np.testing.assert_array_equal(expected, actual)\n\n # read current value\n actual = await tensor.read(slice(None, None, None), t1)\n expected = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])\n np.testing.assert_array_equal(expected, actual)\n\n # read new value\n t2 = normalize_timestamp()\n actual = await tensor.read(slice(None, None, None), t2)\n expected = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])\n np.testing.assert_array_equal(expected, actual)\n\n # read latest value\n actual = await tensor.read(slice(None, None, None))\n expected = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])\n np.testing.assert_array_equal(expected, actual)\n\n # seal on staled value\n if is_async:\n sealed = await tensor.seal(timestamp=t0)\n info = await session.execute(sealed)\n await info\n actual = await session.fetch(sealed)\n else:\n sealed = await tensor.seal(timestamp=t0)\n session.execute(sealed)\n actual = session.fetch(sealed)\n expected = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])\n np.testing.assert_array_equal(expected, 
actual)\n\n    # no longer exists after sealed\n    with pytest.raises(ValueError):\n        await tensor.seal()\n",
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ...serialization.serializables import AnyField\nfrom ...config import options\nfrom ...lib.filesystem import open_file, glob, file_size\nfrom ...utils import ceildiv, ModulePlaceholder\nfrom ..operands import TensorOperandMixin, TensorOperand\n\ntry:\n from PIL import Image\nexcept ImportError:\n Image = ModulePlaceholder(\"PIL\")\n\n\ndef _read_image(fpath):\n return np.asarray(Image.open(fpath))\n\n\nclass TensorImread(TensorOperand, TensorOperandMixin):\n _op_type_ = OperandDef.IMREAD\n\n _filepath = AnyField(\"filepath\")\n\n def __init__(self, filepath=None, **kwargs):\n super().__init__(_filepath=filepath, **kwargs)\n\n @property\n def filepath(self):\n return self._filepath\n\n @classmethod\n def tile(cls, op):\n out_shape = op.outputs[0].shape\n paths = (\n op.filepath if isinstance(op.filepath, (tuple, list)) else glob(op.filepath)\n )\n chunk_size = op.outputs[0].extra_params.raw_chunk_size\n n_chunks = ceildiv(len(paths), chunk_size)\n if len(paths) > 1:\n chunks = []\n splits = []\n for i in range(n_chunks):\n chunk_op = op.copy().reset_key()\n chunk_op._filepath = paths[i * chunk_size : (i + 1) * chunk_size]\n file_nums = len(chunk_op._filepath)\n shape = (file_nums,) + out_shape[1:]\n chunk = chunk_op.new_chunk(\n None, shape=shape, index=(i,) + (0,) * (len(out_shape) - 1)\n )\n chunks.append(chunk)\n splits.append(file_nums)\n nsplits = (tuple(splits),) + tuple((s,) for s in out_shape[1:])\n else:\n chunk_op = op.copy().reset_key()\n chunks = [\n chunk_op.new_chunk(None, shape=out_shape, index=(0,) * len(out_shape))\n ]\n nsplits = tuple((s,) for s in out_shape)\n new_op = op.copy()\n return new_op.new_tensors(None, shape=out_shape, chunks=chunks, nsplits=nsplits)\n\n @classmethod\n def execute(cls, ctx, op):\n if isinstance(op.filepath, list):\n arrays = np.empty(op.outputs[0].shape)\n for i, path in enumerate(op.filepath):\n with open_file(path, \"rb\") as f:\n arrays[i] = _read_image(f)\n ctx[op.outputs[0].key] = np.array(arrays)\n else:\n with open_file(op.filepath, \"rb\") as f:\n ctx[op.outputs[0].key] = np.array(_read_image(f))\n\n def __call__(self, shape, chunk_size):\n return self.new_tensor(None, shape, raw_chunk_size=chunk_size)\n\n\ndef imread(path, chunk_size=None):\n paths = path if isinstance(path, (tuple, list)) else glob(path)\n with open_file(paths[0], \"rb\") as f:\n sample_data = _read_image(f)\n img_shape = sample_data.shape\n img_size = file_size(paths[0])\n if len(paths) > 1:\n shape = (len(paths),) + img_shape\n else:\n shape = img_shape\n if chunk_size is None:\n chunk_size = int(options.chunk_store_limit / img_size)\n op = TensorImread(filepath=path, chunk_size=chunk_size, dtype=sample_data.dtype)\n return op(shape=shape, chunk_size=chunk_size)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... import opcodes as OperandDef\nfrom ..datasource import tensor as astensor\nfrom .core import TensorReduction, TensorReductionMixin\n\n\nclass TensorAll(TensorReduction, TensorReductionMixin):\n _op_type_ = OperandDef.ALL\n _func_name = \"all\"\n\n def __init__(self, axis=None, keepdims=None, combine_size=None, stage=None, **kw):\n stage = self._rewrite_stage(stage)\n super().__init__(\n _axis=axis,\n _keepdims=keepdims,\n _combine_size=combine_size,\n stage=stage,\n **kw\n )\n\n\ndef all(a, axis=None, out=None, keepdims=None, combine_size=None):\n \"\"\"\n Test whether all array elements along a given axis evaluate to True.\n\n Parameters\n ----------\n a : array_like\n Input tensor or object that can be converted to a tensor.\n axis : None or int or tuple of ints, optional\n Axis or axes along which a logical AND reduction is performed.\n The default (`axis` = `None`) is to perform a logical AND over all\n the dimensions of the input array. `axis` may be negative, in\n which case it counts from the last to the first axis.\n\n If this is a tuple of ints, a reduction is performed on multiple\n axes, instead of a single axis or all the axes as before.\n out : Tensor, optional\n Alternate output tensor in which to place the result.\n It must have the same shape as the expected output and its\n type is preserved (e.g., if ``dtype(out)`` is float, the result\n will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section\n \"Output arguments\") for more details.\n\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input tensor.\n\n If the default value is passed, then `keepdims` will not be\n passed through to the `all` method of sub-classes of\n `ndarray`, however any non-default value will be. 
If the\n sub-classes `sum` method does not implement `keepdims` any\n exceptions will be raised.\n combine_size: int, optional\n The number of chunks to combine.\n\n Returns\n -------\n all : Tensor, bool\n A new boolean or tensor is returned unless `out` is specified,\n in which case a reference to `out` is returned.\n\n See Also\n --------\n Tensor.all : equivalent method\n\n any : Test whether any element along a given axis evaluates to True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to `True` because these are not equal to zero.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.all([[True,False],[True,True]]).execute()\n False\n\n >>> mt.all([[True,False],[True,True]], axis=0).execute()\n array([ True, False])\n\n >>> mt.all([-1, 4, 5]).execute()\n True\n\n >>> mt.all([1.0, mt.nan]).execute()\n True\n\n \"\"\"\n a = astensor(a)\n if a.dtype == np.object_:\n dtype = a.dtype\n else:\n dtype = np.dtype(bool)\n op = TensorAll(axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size)\n return op(a, out=out)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\n\ndef result_type(*tensors_and_dtypes):\n \"\"\"\n Returns the type that results from applying the NumPy\n type promotion rules to the arguments.\n\n Type promotion in Mars works similarly to the rules in languages\n like C++, with some slight differences. When both scalars and\n arrays are used, the array's type takes precedence and the actual value\n of the scalar is taken into account.\n\n For example, calculating 3*a, where a is an array of 32-bit floats,\n intuitively should result in a 32-bit float output. If the 3 is a\n 32-bit integer, the NumPy rules indicate it can't convert losslessly\n into a 32-bit float, so a 64-bit float should be the result type.\n By examining the value of the constant, '3', we see that it fits in\n an 8-bit integer, which can be cast losslessly into the 32-bit float.\n\n Parameters\n ----------\n tensors_and_dtypes : list of tensors and dtypes\n The operands of some operation whose result type is needed.\n\n Returns\n -------\n out : dtype\n The result type.\n\n See also\n --------\n dtype, promote_types, min_scalar_type, can_cast\n\n Notes\n -----\n The specific algorithm used is as follows.\n\n Categories are determined by first checking which of boolean,\n integer (int/uint), or floating point (float/complex) the maximum\n kind of all the arrays and the scalars are.\n\n If there are only scalars or the maximum category of the scalars\n is higher than the maximum category of the arrays,\n the data types are combined with :func:`promote_types`\n to produce the return value.\n\n Otherwise, `min_scalar_type` is called on each array, and\n the resulting data types are all combined with :func:`promote_types`\n to produce the return value.\n\n The set of int values is not a subset of the uint values for types\n with the same number of bits, something not reflected in\n :func:`min_scalar_type`, but handled as a special case in `result_type`.\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.result_type(3, mt.arange(7, dtype='i1'))\n dtype('int8')\n\n >>> mt.result_type('i4', 'c8')\n dtype('complex128')\n\n >>> mt.result_type(3.0, -2)\n dtype('float64')\n \"\"\"\n from ..core import Tensor\n\n arrays_and_dtypes = [\n a.dtype if isinstance(a, Tensor) else a for a in tensors_and_dtypes\n ]\n return np.result_type(*arrays_and_dtypes)\n",
"# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..... import dataframe as md\n\n\ndef test_dataframe_expanding_agg(setup):\n raw = pd.DataFrame(\n {\n \"a\": np.random.randint(100, size=(10,)),\n \"b\": np.random.rand(10),\n \"c\": np.random.randint(100, size=(10,)),\n \"d\": [\"c\" * i for i in np.random.randint(4, size=10)],\n }\n )\n raw.b[:3] = np.nan\n raw.b[5:7] = np.nan\n\n df = md.DataFrame(raw, chunk_size=(10, 3))\n\n r = df.expanding().agg([\"sum\"])\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding().agg([\"sum\"]))\n\n df = md.DataFrame(raw, chunk_size=(3, 2))\n\n aggs = [\"sum\", \"count\", \"min\", \"max\", \"mean\", \"var\", \"std\"]\n\n for fun_name in aggs:\n r = df.expanding().agg(fun_name)\n pd.testing.assert_frame_equal(\n r.execute().fetch(), raw.expanding().agg(fun_name)\n )\n\n r = df.expanding().agg([\"sum\"])\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding().agg([\"sum\"]))\n\n r = df.expanding().agg(aggs)\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding().agg(aggs))\n\n agg_dict = {\"c\": \"sum\"}\n r = df.expanding().agg(agg_dict)\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding().agg(agg_dict))\n\n agg_dict = OrderedDict([(\"a\", [\"sum\", \"var\"]), (\"b\", \"var\")])\n r = df.expanding().agg(agg_dict)\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding().agg(agg_dict))\n\n r = df.expanding(0).agg(aggs)\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding(0).agg(aggs))\n\n r = df.expanding(2).agg(aggs)\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding(2).agg(aggs))\n\n agg_dict = OrderedDict([(\"a\", [\"min\", \"max\"]), (\"b\", \"max\"), (\"c\", \"sum\")])\n r = df.expanding(2).agg(agg_dict)\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding(2).agg(agg_dict))\n\n\ndef test_series_expanding_agg(setup):\n raw = pd.Series(np.random.rand(10), name=\"a\")\n raw[:3] = np.nan\n raw[5:7] = np.nan\n\n series = md.Series(raw, chunk_size=10)\n\n r = series.expanding().agg([\"sum\"])\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding().agg([\"sum\"]))\n\n r = series.expanding().agg(\"sum\")\n pd.testing.assert_series_equal(r.execute().fetch(), raw.expanding().agg(\"sum\"))\n\n series = md.Series(raw, chunk_size=3)\n\n aggs = [\"sum\", \"count\", \"min\", \"max\", \"mean\", \"var\", \"std\"]\n\n for fun_name in aggs:\n r = series.expanding().agg(fun_name)\n pd.testing.assert_series_equal(\n r.execute().fetch(), raw.expanding().agg(fun_name)\n )\n\n r = series.expanding().agg([\"sum\"])\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding().agg([\"sum\"]))\n\n r = series.expanding().agg(aggs)\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding().agg(aggs))\n\n r = series.expanding(2).agg(aggs)\n pd.testing.assert_frame_equal(r.execute().fetch(), 
raw.expanding(2).agg(aggs))\n\n r = series.expanding(0).agg(aggs)\n pd.testing.assert_frame_equal(r.execute().fetch(), raw.expanding(0).agg(aggs))\n"
] | [
[
"numpy.dtype"
],
[
"numpy.dtype"
],
[
"pandas.testing.assert_series_equal",
"numpy.isnan",
"pandas.DataFrame",
"numpy.testing.assert_almost_equal",
"numpy.random.RandomState"
],
[
"numpy.isnan"
],
[
"numpy.testing.assert_array_equal",
"numpy.array",
"numpy.full"
],
[
"numpy.array",
"numpy.empty"
],
[
"numpy.dtype"
],
[
"numpy.result_type"
],
[
"numpy.random.rand",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
leelige/mindspore | [
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc",
"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"
] | [
"research/cv/Neighbor2Neighbor/src/dataset.py",
"official/recommend/ncf/src/dataset.py",
"official/cv/ssd/src/box_utils.py",
"research/cv/Pix2Pix/postprocess.py",
"official/cv/crnn_seq2seq_ocr/src/dataset.py",
"research/cv/mobilenetV3_small_x1_0/src/monitor.py",
"official/cv/unet3d/eval.py",
"official/cv/tinydarknet/src/lr_scheduler/warmup_step_lr.py",
"official/nlp/pangu_alpha/src/pangu_alpha.py",
"official/gnn/gat/preprocess.py",
"official/recommend/deepfm/src/preprocess_data.py",
"research/hpc/molecular_dynamics/postprocess.py",
"research/cv/simple_baselines/train.py",
"official/cv/east/src/dataset.py",
"research/cv/eppmvsnet/validate.py",
"official/nlp/textcnn/postprocess.py",
"research/cv/ssd_ghostnet/src/dataset.py",
"official/cv/openpose/src/dataset.py",
"official/cv/sphereface/src/datasets/classification.py",
"official/cv/resnet/src/resnet_gpu_benchmark.py",
"research/nlp/senta/infer/sdk/main.py",
"official/recommend/deep_and_cross/src/datasets.py",
"research/cv/glore_res200/export.py",
"official/nlp/ernie/postprocess.py",
"research/cv/AVA_hpa/src/resnet.py",
"official/cv/fastscnn/cal_mIoU.py",
"research/cv/FaceAttribute/infer/sdk/python_faceattribute/main.py",
"official/cv/openpose/eval.py",
"research/cv/FaceQualityAssessment/eval.py",
"research/hpc/pinns/src/NavierStokes/export_ns.py",
"official/cv/resnet/infer/ResNet18/sdk/prec/classification_task_metric.py",
"official/nlp/pangu_alpha/serving_increment/pangu_distributed/pangu/servable_config.py",
"research/nlp/dscnn/export.py",
"research/cv/ProtoNet/src/EvalCallBack.py",
"research/cv/IPT/eval.py",
"official/cv/warpctc/postprocess.py",
"research/cv/ssd_inceptionv2/src/lr_schedule.py",
"research/cv/single_path_nas/export.py",
"research/cv/FaceDetection/src/data_to_mindrecord_eval.py",
"research/cv/vgg19/src/utils/sampler.py",
"research/hpc/ocean_model/src/GOMO.py",
"official/cv/centerface/dependency/centernet/src/lib/datasets/dataset/coco_hp.py",
"official/cv/srcnn/src/metric.py"
] | [
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n'''dataloader'''\nimport os\nimport glob\nimport numpy as np\nimport PIL.Image as Image\nimport mindspore.dataset as ds\nimport mindspore.dataset.vision.c_transforms as CV\n\nclass DataLoader_Imagenet_val:\n '''DataLoader_Imagenet_val'''\n def __init__(self, data_dir, patch=256, noise_style=\"gauss25\", batch_size=4):\n super(DataLoader_Imagenet_val, self).__init__()\n self.data_dir = data_dir\n self.patch = patch\n self.train_fns = glob.glob(os.path.join(self.data_dir, \"*\"))\n self.train_fns.sort()\n print('fetch {} samples for training'.format(len(self.train_fns)))\n self.noise_generator = AugmentNoise(noise_style)\n self.batch_size = batch_size\n self.test = 1\n def __getitem__(self, index):\n # fetch image\n fn = self.train_fns[index]\n im = Image.open(fn)\n im = np.array(im, dtype=np.float32)\n # random crop\n H = im.shape[0]\n W = im.shape[1]\n if H - self.patch > 0:\n xx = np.random.randint(0, H - self.patch)\n im = im[xx:xx + self.patch, :, :]\n if W - self.patch > 0:\n yy = np.random.randint(0, W - self.patch)\n im = im[:, yy:yy + self.patch, :]\n im /= 255.0 #clean image\n noisy = self.noise_generator.add_noise(im)\n\n return im, noisy\n\n def __len__(self):\n return len(self.train_fns)\n\nclass AugmentNoise():\n '''AugmentNoise'''\n def __init__(self, style):\n if style.startswith('gauss'):\n self.params = [\n float(p) / 255.0 for p in style.replace('gauss', '').split('_')\n ]\n if len(self.params) == 1:\n self.style = \"gauss_fix\"\n elif len(self.params) == 2:\n self.style = \"gauss_range\"\n elif style.startswith('poisson'):\n self.params = [\n float(p) for p in style.replace('poisson', '').split('_')\n ]\n if len(self.params) == 1:\n self.style = \"poisson_fix\"\n elif len(self.params) == 2:\n self.style = \"poisson_range\"\n\n def add_noise(self, x):\n '''add_noise'''\n shape = x.shape\n if self.style == \"gauss_fix\":\n std = self.params[0]\n return np.array(x + np.random.normal(size=shape) * std,\n dtype=np.float32)\n if self.style == \"gauss_range\":\n min_std, max_std = self.params\n std = np.random.uniform(low=min_std, high=max_std, size=(1, 1, 1))\n return np.array(x + np.random.normal(size=shape) * std,\n dtype=np.float32)\n if self.style == \"poisson_fix\":\n lam = self.params[0]\n return np.array(np.random.poisson(lam * x) / lam, dtype=np.float32)\n assert self.style == \"poisson_range\"\n min_lam, max_lam = self.params\n lam = np.random.uniform(low=min_lam, high=max_lam, size=(1, 1, 1))\n return np.array(np.random.poisson(lam * x) / lam, dtype=np.float32)\n\n\ndef create_Dataset(data_dir, patch, noise_style, batch_size, device_num, rank, shuffle):\n\n dataset = DataLoader_Imagenet_val(data_dir, patch, noise_style, batch_size)\n hwc_to_chw = CV.HWC2CHW()\n data_set = ds.GeneratorDataset(dataset, column_names=[\"image\", \"noisy\"], \\\n num_parallel_workers=8, shuffle=shuffle, 
num_shards=device_num, shard_id=rank)\n data_set = data_set.map(input_columns=[\"image\"], operations=hwc_to_chw, num_parallel_workers=8)\n data_set = data_set.map(input_columns=[\"noisy\"], operations=hwc_to_chw, num_parallel_workers=8)\n data_set = data_set.batch(batch_size, drop_remainder=True)\n return data_set, data_set.get_dataset_size()\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Dataset loading, creation and processing\"\"\"\nimport logging\nimport math\nimport os\nimport time\nimport timeit\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nfrom mindspore.dataset import GeneratorDataset, Sampler\n\nimport src.constants as rconst\nimport src.movielens as movielens\nimport src.stat_utils as stat_utils\n\nDATASET_TO_NUM_USERS_AND_ITEMS = {\n \"ml-1m\": (6040, 3706),\n \"ml-20m\": (138493, 26744)\n}\n\n_EXPECTED_CACHE_KEYS = (\n rconst.TRAIN_USER_KEY, rconst.TRAIN_ITEM_KEY, rconst.EVAL_USER_KEY,\n rconst.EVAL_ITEM_KEY, rconst.USER_MAP, rconst.ITEM_MAP)\n\n\ndef load_data(data_dir, dataset):\n \"\"\"\n Load data in .csv format and output structured data.\n\n This function reads in the raw CSV of positive items, and performs three\n preprocessing transformations:\n\n 1) Filter out all users who have not rated at least a certain number\n of items. (Typically 20 items)\n\n 2) Zero index the users and items such that the largest user_id is\n `num_users - 1` and the largest item_id is `num_items - 1`\n\n 3) Sort the dataframe by user_id, with timestamp as a secondary sort key.\n This allows the dataframe to be sliced by user in-place, and for the last\n item to be selected simply by calling the `-1` index of a user's slice.\n\n While all of these transformations are performed by Pandas (and are therefore\n single-threaded), they only take ~2 minutes, and the overhead to apply a\n MapReduce pattern to parallel process the dataset adds significant complexity\n for no computational gain. For a larger dataset parallelizing this\n preprocessing could yield speedups. 
(Also, this preprocessing step is only\n performed once for an entire run.\n \"\"\"\n logging.info(\"Beginning loading data...\")\n\n raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)\n cache_path = os.path.join(data_dir, dataset, rconst.RAW_CACHE_FILE)\n\n valid_cache = os.path.exists(cache_path)\n if valid_cache:\n with open(cache_path, 'rb') as f:\n cached_data = pickle.load(f)\n\n for key in _EXPECTED_CACHE_KEYS:\n if key not in cached_data:\n valid_cache = False\n\n if not valid_cache:\n logging.info(\"Removing stale raw data cache file.\")\n os.remove(cache_path)\n\n if valid_cache:\n data = cached_data\n else:\n # process data and save to .csv\n with open(raw_rating_path) as f:\n df = pd.read_csv(f)\n\n # Get the info of users who have more than 20 ratings on items\n grouped = df.groupby(movielens.USER_COLUMN)\n df = grouped.filter(lambda x: len(x) >= rconst.MIN_NUM_RATINGS)\n\n original_users = df[movielens.USER_COLUMN].unique()\n original_items = df[movielens.ITEM_COLUMN].unique()\n\n # Map the ids of user and item to 0 based index for following processing\n logging.info(\"Generating user_map and item_map...\")\n user_map = {user: index for index, user in enumerate(original_users)}\n item_map = {item: index for index, item in enumerate(original_items)}\n\n df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(\n lambda user: user_map[user])\n df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(\n lambda item: item_map[item])\n\n num_users = len(original_users)\n num_items = len(original_items)\n\n assert num_users <= np.iinfo(rconst.USER_DTYPE).max\n assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max\n assert df[movielens.USER_COLUMN].max() == num_users - 1\n assert df[movielens.ITEM_COLUMN].max() == num_items - 1\n\n # This sort is used to shard the dataframe by user, and later to select\n # the last item for a user to be used in validation.\n logging.info(\"Sorting by user, timestamp...\")\n\n # This sort is equivalent to\n # df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],\n # inplace=True)\n # except that the order of items with the same user and timestamp are\n # sometimes different. 
For some reason, this sort results in a better\n # hit-rate during evaluation, matching the performance of the MLPerf\n # reference implementation.\n df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)\n df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],\n inplace=True, kind=\"mergesort\")\n\n # The dataframe does not reconstruct indices in the sort or filter steps.\n df = df.reset_index()\n\n grouped = df.groupby(movielens.USER_COLUMN, group_keys=False)\n eval_df, train_df = grouped.tail(1), grouped.apply(lambda x: x.iloc[:-1])\n\n data = {\n rconst.TRAIN_USER_KEY:\n train_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),\n rconst.TRAIN_ITEM_KEY:\n train_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),\n rconst.EVAL_USER_KEY:\n eval_df[movielens.USER_COLUMN].values.astype(rconst.USER_DTYPE),\n rconst.EVAL_ITEM_KEY:\n eval_df[movielens.ITEM_COLUMN].values.astype(rconst.ITEM_DTYPE),\n rconst.USER_MAP: user_map,\n rconst.ITEM_MAP: item_map,\n \"create_time\": time.time(),\n }\n\n logging.info(\"Writing raw data cache.\")\n with open(cache_path, \"wb\") as f:\n pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n num_users, num_items = DATASET_TO_NUM_USERS_AND_ITEMS[dataset]\n if num_users != len(data[rconst.USER_MAP]):\n raise ValueError(\"Expected to find {} users, but found {}\".format(\n num_users, len(data[rconst.USER_MAP])))\n if num_items != len(data[rconst.ITEM_MAP]):\n raise ValueError(\"Expected to find {} items, but found {}\".format(\n num_items, len(data[rconst.ITEM_MAP])))\n\n return data, num_users, num_items\n\n\ndef construct_lookup_variables(train_pos_users, train_pos_items, num_users):\n \"\"\"Lookup variables\"\"\"\n index_bounds = None\n sorted_train_pos_items = None\n\n def index_segment(user):\n lower, upper = index_bounds[user:user + 2]\n items = sorted_train_pos_items[lower:upper]\n\n negatives_since_last_positive = np.concatenate(\n [items[0][np.newaxis], items[1:] - items[:-1] - 1])\n\n return np.cumsum(negatives_since_last_positive)\n\n start_time = timeit.default_timer()\n inner_bounds = np.argwhere(train_pos_users[1:] -\n train_pos_users[:-1])[:, 0] + 1\n (upper_bound,) = train_pos_users.shape\n index_bounds = np.array([0] + inner_bounds.tolist() + [upper_bound])\n\n # Later logic will assume that the users are in sequential ascending order.\n assert np.array_equal(train_pos_users[index_bounds[:-1]], np.arange(num_users))\n\n sorted_train_pos_items = train_pos_items.copy()\n\n for i in range(num_users):\n lower, upper = index_bounds[i:i + 2]\n sorted_train_pos_items[lower:upper].sort()\n\n total_negatives = np.concatenate([\n index_segment(i) for i in range(num_users)])\n\n logging.info(\"Negative total vector built. 
Time: {:.1f} seconds\".format(\n timeit.default_timer() - start_time))\n\n return total_negatives, index_bounds, sorted_train_pos_items\n\n\nclass NCFDataset:\n \"\"\"\n A dataset for NCF network.\n \"\"\"\n\n def __init__(self,\n pos_users,\n pos_items,\n num_users,\n num_items,\n batch_size,\n total_negatives,\n index_bounds,\n sorted_train_pos_items,\n num_neg,\n is_training=True):\n self._pos_users = pos_users\n self._pos_items = pos_items\n self._num_users = num_users\n self._num_items = num_items\n\n self._batch_size = batch_size\n\n self._total_negatives = total_negatives\n self._index_bounds = index_bounds\n self._sorted_train_pos_items = sorted_train_pos_items\n\n self._is_training = is_training\n\n if self._is_training:\n self._train_pos_count = self._pos_users.shape[0]\n else:\n self._eval_users_per_batch = int(\n batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))\n\n _pos_count = pos_users.shape[0]\n _num_samples = (1 + num_neg) * _pos_count\n self.dataset_len = math.ceil(_num_samples / batch_size)\n\n def lookup_negative_items(self, negative_users):\n \"\"\"Lookup negative items\"\"\"\n output = np.zeros(shape=negative_users.shape, dtype=rconst.ITEM_DTYPE) - 1\n\n left_index = self._index_bounds[negative_users]\n right_index = self._index_bounds[negative_users + 1] - 1\n\n num_positives = right_index - left_index + 1\n num_negatives = self._num_items - num_positives\n neg_item_choice = stat_utils.very_slightly_biased_randint(num_negatives)\n\n # Shortcuts:\n # For points where the negative is greater than or equal to the tally before\n # the last positive point there is no need to bisect. Instead the item id\n # corresponding to the negative item choice is simply:\n # last_postive_index + 1 + (neg_choice - last_negative_tally)\n # Similarly, if the selection is less than the tally at the first positive\n # then the item_id is simply the selection.\n #\n # Because MovieLens organizes popular movies into low integers (which is\n # preserved through the preprocessing), the first shortcut is very\n # efficient, allowing ~60% of samples to bypass the bisection. 
For the same\n # reason, the second shortcut is rarely triggered (<0.02%) and is therefore\n # not worth implementing.\n use_shortcut = neg_item_choice >= self._total_negatives[right_index]\n output[use_shortcut] = (\n self._sorted_train_pos_items[right_index] + 1 +\n (neg_item_choice - self._total_negatives[right_index])\n )[use_shortcut]\n\n if np.all(use_shortcut):\n # The bisection code is ill-posed when there are no elements.\n return output\n\n not_use_shortcut = np.logical_not(use_shortcut)\n left_index = left_index[not_use_shortcut]\n right_index = right_index[not_use_shortcut]\n neg_item_choice = neg_item_choice[not_use_shortcut]\n\n num_loops = np.max(\n np.ceil(np.log2(num_positives[not_use_shortcut])).astype(np.int32))\n\n for _ in range(num_loops):\n mid_index = (left_index + right_index) // 2\n right_criteria = self._total_negatives[mid_index] > neg_item_choice\n left_criteria = np.logical_not(right_criteria)\n\n right_index[right_criteria] = mid_index[right_criteria]\n left_index[left_criteria] = mid_index[left_criteria]\n\n # Expected state after bisection pass:\n # The right index is the smallest index whose tally is greater than the\n # negative item choice index.\n\n assert np.all((right_index - left_index) <= 1)\n\n output[not_use_shortcut] = (\n self._sorted_train_pos_items[right_index] - (self._total_negatives[right_index] - neg_item_choice)\n )\n\n assert np.all(output >= 0)\n\n return output\n\n def _get_train_item(self, index):\n \"\"\"Get train item\"\"\"\n (mask_start_index,) = index.shape\n index_mod = np.mod(index, self._train_pos_count)\n\n # get batch of users\n users = self._pos_users[index_mod]\n\n # get batch of items\n negative_indices = np.greater_equal(index, self._train_pos_count)\n negative_users = users[negative_indices]\n negative_items = self.lookup_negative_items(negative_users=negative_users)\n items = self._pos_items[index_mod]\n items[negative_indices] = negative_items\n\n # get batch of labels\n labels = np.logical_not(negative_indices)\n\n # pad last partial batch\n pad_length = self._batch_size - index.shape[0]\n if pad_length:\n user_pad = np.arange(pad_length, dtype=users.dtype) % self._num_users\n item_pad = np.arange(pad_length, dtype=items.dtype) % self._num_items\n label_pad = np.zeros(shape=(pad_length,), dtype=labels.dtype)\n users = np.concatenate([users, user_pad])\n items = np.concatenate([items, item_pad])\n labels = np.concatenate([labels, label_pad])\n\n users = np.reshape(users, (self._batch_size, 1)) # (_batch_size, 1), int32\n items = np.reshape(items, (self._batch_size, 1)) # (_batch_size, 1), int32\n mask_start_index = np.array(mask_start_index, dtype=np.int32) # (_batch_size, 1), int32\n valid_pt_mask = np.expand_dims(\n np.less(np.arange(self._batch_size), mask_start_index), -1).astype(np.float32) # (_batch_size, 1), bool\n labels = np.reshape(labels, (self._batch_size, 1)).astype(np.int32) # (_batch_size, 1), bool\n\n return users, items, labels, valid_pt_mask\n\n @staticmethod\n def _assemble_eval_batch(users, positive_items, negative_items,\n users_per_batch):\n \"\"\"Construct duplicate_mask and structure data accordingly.\n\n The positive items should be last so that they lose ties. However, they\n should not be masked out if the true eval positive happens to be\n selected as a negative. So instead, the positive is placed in the first\n position, and then switched with the last element after the duplicate\n mask has been computed.\n\n Args:\n users: An array of users in a batch. 
(should be identical along axis 1)\n positive_items: An array (batch_size x 1) of positive item indices.\n negative_items: An array of negative item indices.\n users_per_batch: How many users should be in the batch. This is passed\n as an argument so that ncf_test.py can use this method.\n\n Returns:\n User, item, and duplicate_mask arrays.\n \"\"\"\n items = np.concatenate([positive_items, negative_items], axis=1)\n\n # We pad the users and items here so that the duplicate mask calculation\n # will include padding. The metric function relies on all padded elements\n # except the positive being marked as duplicate to mask out padded points.\n if users.shape[0] < users_per_batch:\n pad_rows = users_per_batch - users.shape[0]\n padding = np.zeros(shape=(pad_rows, users.shape[1]), dtype=np.int32)\n users = np.concatenate([users, padding.astype(users.dtype)], axis=0)\n items = np.concatenate([items, padding.astype(items.dtype)], axis=0)\n\n duplicate_mask = stat_utils.mask_duplicates(items, axis=1).astype(np.float32)\n\n items[:, (0, -1)] = items[:, (-1, 0)]\n duplicate_mask[:, (0, -1)] = duplicate_mask[:, (-1, 0)]\n\n assert users.shape == items.shape == duplicate_mask.shape\n return users, items, duplicate_mask\n\n def _get_eval_item(self, index):\n \"\"\"Get eval item\"\"\"\n low_index, high_index = index\n users = np.repeat(self._pos_users[low_index:high_index, np.newaxis],\n 1 + rconst.NUM_EVAL_NEGATIVES, axis=1)\n positive_items = self._pos_items[low_index:high_index, np.newaxis]\n negative_items = (self.lookup_negative_items(negative_users=users[:, :-1])\n .reshape(-1, rconst.NUM_EVAL_NEGATIVES))\n\n users, items, duplicate_mask = self._assemble_eval_batch(\n users, positive_items, negative_items, self._eval_users_per_batch)\n\n users = np.reshape(users.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), int32\n items = np.reshape(items.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), int32\n duplicate_mask = np.reshape(duplicate_mask.flatten(), (self._batch_size, 1)) # (self._batch_size, 1), bool\n\n return users, items, duplicate_mask\n\n def __getitem__(self, index):\n \"\"\"\n Get a batch of samples.\n \"\"\"\n if self._is_training:\n return self._get_train_item(index)\n\n return self._get_eval_item(index)\n\n def __len__(self):\n \"\"\"\n Return length of the dataset, i.e., the number of batches for an epoch\n \"\"\"\n return self.dataset_len\n\n\nclass RandomSampler(Sampler):\n \"\"\"\n A random sampler for dataset.\n \"\"\"\n\n def __init__(self, pos_count, num_train_negatives, batch_size):\n self.pos_count = pos_count\n self._num_samples = (1 + num_train_negatives) * self.pos_count\n self._batch_size = batch_size\n self._num_batches = math.ceil(self._num_samples / self._batch_size)\n super().__init__(self._num_batches)\n\n def __iter__(self):\n \"\"\"\n Return indices of all batches within an epoch.\n \"\"\"\n indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))\n\n batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._num_batches)]\n\n # padding last batch indices if necessary\n if len(batch_indices) > 2 and len(batch_indices[-2]) != len(batch_indices[-1]):\n pad_nums = len(batch_indices[-2]) - len(batch_indices[-1])\n pad_indices = np.random.randint(0, self._num_samples, pad_nums)\n batch_indices[-1] = np.hstack((batch_indices[-1], pad_indices))\n\n return iter(batch_indices)\n\n\nclass DistributedSamplerOfTrain:\n \"\"\"\n A distributed sampler for dataset.\n \"\"\"\n\n def __init__(self, 
pos_count, num_train_negatives, batch_size, rank_id, rank_size):\n \"\"\"\n Distributed sampler of training dataset.\n \"\"\"\n self._num_samples = (1 + num_train_negatives) * pos_count\n self._rank_id = rank_id\n self._rank_size = rank_size\n self._batch_size = batch_size\n\n self._batchs_per_rank = int(math.ceil(self._num_samples / self._batch_size / rank_size))\n self._samples_per_rank = int(math.ceil(self._batchs_per_rank * self._batch_size))\n self._total_num_samples = self._samples_per_rank * self._rank_size\n\n def __iter__(self):\n \"\"\"\n Returns the data after each sampling.\n \"\"\"\n indices = stat_utils.permutation((self._num_samples, stat_utils.random_int32()))\n indices = indices.tolist()\n indices.extend(indices[:self._total_num_samples - len(indices)])\n indices = indices[self._rank_id:self._total_num_samples:self._rank_size]\n batch_indices = [indices[x * self._batch_size:(x + 1) * self._batch_size] for x in range(self._batchs_per_rank)]\n\n return iter(np.array(batch_indices))\n\n def __len__(self):\n \"\"\"\n Returns the length after each sampling.\n \"\"\"\n return self._batchs_per_rank\n\n\nclass SequenceSampler(Sampler):\n \"\"\"\n A sequence sampler for dataset.\n \"\"\"\n\n def __init__(self, eval_batch_size, num_users):\n self._eval_users_per_batch = int(\n eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))\n self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)\n self._eval_batches_per_epoch = self.count_batches(\n self._eval_elements_in_epoch, eval_batch_size)\n super().__init__(self._eval_batches_per_epoch)\n\n def __iter__(self):\n indices = [(x * self._eval_users_per_batch, (x + 1) * self._eval_users_per_batch)\n for x in range(self._eval_batches_per_epoch)]\n\n # padding last batch indices if necessary\n if len(indices) > 2 and len(indices[-2]) != len(indices[-1]):\n pad_nums = len(indices[-2]) - len(indices[-1])\n pad_indices = np.random.randint(0, self._eval_elements_in_epoch, pad_nums)\n indices[-1] = np.hstack((indices[-1], pad_indices))\n\n return iter(indices)\n\n @staticmethod\n def count_batches(example_count, batch_size, batches_per_step=1):\n \"\"\"Determine the number of batches, rounding up to fill all devices.\"\"\"\n x = (example_count + batch_size - 1) // batch_size\n return (x + batches_per_step - 1) // batches_per_step * batches_per_step\n\n\nclass DistributedSamplerOfEval:\n \"\"\"\n A distributed sampler for eval dataset.\n \"\"\"\n\n def __init__(self, eval_batch_size, num_users, rank_id, rank_size):\n self._eval_users_per_batch = int(\n eval_batch_size // (1 + rconst.NUM_EVAL_NEGATIVES))\n self._eval_elements_in_epoch = num_users * (1 + rconst.NUM_EVAL_NEGATIVES)\n self._eval_batches_per_epoch = self.count_batches(\n self._eval_elements_in_epoch, eval_batch_size)\n\n self._rank_id = rank_id\n self._rank_size = rank_size\n self._eval_batch_size = eval_batch_size\n\n self._batchs_per_rank = int(math.ceil(self._eval_batches_per_epoch / rank_size))\n\n def __iter__(self):\n indices = [(x * self._eval_users_per_batch, (x + self._rank_id + 1) * self._eval_users_per_batch)\n for x in range(self._batchs_per_rank)]\n\n return iter(np.array(indices))\n\n @staticmethod\n def count_batches(example_count, batch_size, batches_per_step=1):\n \"\"\"Determine the number of batches, rounding up to fill all devices.\"\"\"\n x = (example_count + batch_size - 1) // batch_size\n return (x + batches_per_step - 1) // batches_per_step * batches_per_step\n\n def __len__(self):\n return self._batchs_per_rank\n\n\ndef 
parse_eval_batch_size(eval_batch_size):\n \"\"\"\n Parse eval batch size.\n \"\"\"\n if eval_batch_size % (1 + rconst.NUM_EVAL_NEGATIVES):\n raise ValueError(\"Eval batch size {} is not divisible by {}\".format(\n eval_batch_size, 1 + rconst.NUM_EVAL_NEGATIVES))\n return eval_batch_size\n\n\ndef create_dataset(test_train=True, data_dir='./dataset/', dataset='ml-1m', train_epochs=14, batch_size=256,\n eval_batch_size=160000, num_neg=4, rank_id=None, rank_size=None):\n \"\"\"\n Create NCF dataset.\n \"\"\"\n data, num_users, num_items = load_data(data_dir, dataset)\n\n train_pos_users = data[rconst.TRAIN_USER_KEY]\n train_pos_items = data[rconst.TRAIN_ITEM_KEY]\n eval_pos_users = data[rconst.EVAL_USER_KEY]\n eval_pos_items = data[rconst.EVAL_ITEM_KEY]\n\n total_negatives, index_bounds, sorted_train_pos_items = \\\n construct_lookup_variables(train_pos_users, train_pos_items, num_users)\n\n if test_train:\n print(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives, index_bounds,\n sorted_train_pos_items)\n dataset = NCFDataset(train_pos_users, train_pos_items, num_users, num_items, batch_size, total_negatives,\n index_bounds, sorted_train_pos_items, num_neg)\n sampler = RandomSampler(train_pos_users.shape[0], num_neg, batch_size)\n if rank_id is not None and rank_size is not None:\n sampler = DistributedSamplerOfTrain(train_pos_users.shape[0], num_neg, batch_size, rank_id, rank_size)\n\n ds = GeneratorDataset(dataset,\n column_names=[movielens.USER_COLUMN,\n movielens.ITEM_COLUMN,\n \"labels\",\n rconst.VALID_POINT_MASK],\n sampler=sampler)\n\n else:\n eval_batch_size = parse_eval_batch_size(eval_batch_size=eval_batch_size)\n dataset = NCFDataset(eval_pos_users, eval_pos_items, num_users, num_items,\n eval_batch_size, total_negatives, index_bounds,\n sorted_train_pos_items, num_neg, is_training=False)\n sampler = SequenceSampler(eval_batch_size, num_users)\n\n ds = GeneratorDataset(dataset,\n column_names=[movielens.USER_COLUMN,\n movielens.ITEM_COLUMN,\n rconst.DUPLICATE_MASK],\n sampler=sampler)\n\n repeat_count = train_epochs if test_train else train_epochs + 1\n ds = ds.repeat(repeat_count)\n\n return ds, num_users, num_items\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Bbox utils\"\"\"\n\nimport math\nimport itertools as it\nimport numpy as np\nfrom src.model_utils.config import config\nfrom .anchor_generator import GridAnchorGenerator\n\n\nclass GeneratDefaultBoxes():\n \"\"\"\n Generate Default boxes for SSD, follows the order of (W, H, archor_sizes).\n `self.default_boxes` has a shape of [archor_sizes, H, W, 4], the last dimension is [y, x, h, w].\n `self.default_boxes_tlbr` has a shape as `self.default_boxes`, the last dimension is [y1, x1, y2, x2].\n \"\"\"\n def __init__(self):\n fk = config.img_shape[0] / np.array(config.steps)\n scale_rate = (config.max_scale - config.min_scale) / (len(config.num_default) - 1)\n scales = [config.min_scale + scale_rate * i for i in range(len(config.num_default))] + [1.0]\n self.default_boxes = []\n for idex, feature_size in enumerate(config.feature_size):\n sk1 = scales[idex]\n sk2 = scales[idex + 1]\n sk3 = math.sqrt(sk1 * sk2)\n if idex == 0 and not config.aspect_ratios[idex]:\n w, h = sk1 * math.sqrt(2), sk1 / math.sqrt(2)\n all_sizes = [(0.1, 0.1), (w, h), (h, w)]\n else:\n all_sizes = [(sk1, sk1)]\n for aspect_ratio in config.aspect_ratios[idex]:\n w, h = sk1 * math.sqrt(aspect_ratio), sk1 / math.sqrt(aspect_ratio)\n all_sizes.append((w, h))\n all_sizes.append((h, w))\n all_sizes.append((sk3, sk3))\n\n assert len(all_sizes) == config.num_default[idex]\n\n for i, j in it.product(range(feature_size), repeat=2):\n for w, h in all_sizes:\n cx, cy = (j + 0.5) / fk[idex], (i + 0.5) / fk[idex]\n self.default_boxes.append([cy, cx, h, w])\n\n def to_tlbr(cy, cx, h, w):\n return cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2\n\n # For IoU calculation\n self.default_boxes_tlbr = np.array(tuple(to_tlbr(*i) for i in self.default_boxes), dtype='float32')\n self.default_boxes = np.array(self.default_boxes, dtype='float32')\n\nif hasattr(config, 'use_anchor_generator') and config.use_anchor_generator:\n generator = GridAnchorGenerator(config.img_shape, 4, 2, [1.0, 2.0, 0.5])\n default_boxes, default_boxes_tlbr = generator.generate_multi_levels(config.steps)\nelse:\n default_boxes_tlbr = GeneratDefaultBoxes().default_boxes_tlbr\n default_boxes = GeneratDefaultBoxes().default_boxes\ny1, x1, y2, x2 = np.split(default_boxes_tlbr[:, :4], 4, axis=-1)\nvol_anchors = (x2 - x1) * (y2 - y1)\nmatching_threshold = config.match_threshold\n\n\ndef ssd_bboxes_encode(boxes):\n \"\"\"\n Labels anchors with ground truth inputs.\n\n Args:\n boxex: ground truth with shape [N, 5], for each row, it stores [y, x, h, w, cls].\n\n Returns:\n gt_loc: location ground truth with shape [num_anchors, 4].\n gt_label: class ground truth with shape [num_anchors, 1].\n num_matched_boxes: number of positives in an image.\n \"\"\"\n\n def jaccard_with_anchors(bbox):\n \"\"\"Compute jaccard score a box and the anchors.\"\"\"\n # Intersection bbox and volume.\n ymin = 
np.maximum(y1, bbox[0])\n xmin = np.maximum(x1, bbox[1])\n ymax = np.minimum(y2, bbox[2])\n xmax = np.minimum(x2, bbox[3])\n w = np.maximum(xmax - xmin, 0.)\n h = np.maximum(ymax - ymin, 0.)\n\n # Volumes.\n inter_vol = h * w\n union_vol = vol_anchors + (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) - inter_vol\n jaccard = inter_vol / union_vol\n return np.squeeze(jaccard)\n\n pre_scores = np.zeros((config.num_ssd_boxes), dtype=np.float32)\n t_boxes = np.zeros((config.num_ssd_boxes, 4), dtype=np.float32)\n t_label = np.zeros((config.num_ssd_boxes), dtype=np.int64)\n for bbox in boxes:\n label = int(bbox[4])\n scores = jaccard_with_anchors(bbox)\n idx = np.argmax(scores)\n scores[idx] = 2.0\n mask = (scores > matching_threshold)\n mask = mask & (scores > pre_scores)\n pre_scores = np.maximum(pre_scores, scores * mask)\n t_label = mask * label + (1 - mask) * t_label\n for i in range(4):\n t_boxes[:, i] = mask * bbox[i] + (1 - mask) * t_boxes[:, i]\n\n index = np.nonzero(t_label)\n\n # Transform to tlbr.\n bboxes = np.zeros((config.num_ssd_boxes, 4), dtype=np.float32)\n bboxes[:, [0, 1]] = (t_boxes[:, [0, 1]] + t_boxes[:, [2, 3]]) / 2\n bboxes[:, [2, 3]] = t_boxes[:, [2, 3]] - t_boxes[:, [0, 1]]\n\n # Encode features.\n bboxes_t = bboxes[index]\n default_boxes_t = default_boxes[index]\n bboxes_t[:, :2] = (bboxes_t[:, :2] - default_boxes_t[:, :2]) / (default_boxes_t[:, 2:] * config.prior_scaling[0])\n tmp = np.maximum(bboxes_t[:, 2:4] / default_boxes_t[:, 2:4], 0.000001)\n bboxes_t[:, 2:4] = np.log(tmp) / config.prior_scaling[1]\n bboxes[index] = bboxes_t\n\n num_match = np.array([len(np.nonzero(t_label)[0])], dtype=np.int32)\n return bboxes, t_label.astype(np.int32), num_match\n\n\ndef ssd_bboxes_decode(boxes):\n \"\"\"Decode predict boxes to [y, x, h, w]\"\"\"\n boxes_t = boxes.copy()\n default_boxes_t = default_boxes.copy()\n boxes_t[:, :2] = boxes_t[:, :2] * config.prior_scaling[0] * default_boxes_t[:, 2:] + default_boxes_t[:, :2]\n boxes_t[:, 2:4] = np.exp(boxes_t[:, 2:4] * config.prior_scaling[1]) * default_boxes_t[:, 2:4]\n\n bboxes = np.zeros((len(boxes_t), 4), dtype=np.float32)\n\n bboxes[:, [0, 1]] = boxes_t[:, [0, 1]] - boxes_t[:, [2, 3]] / 2\n bboxes[:, [2, 3]] = boxes_t[:, [0, 1]] + boxes_t[:, [2, 3]] / 2\n\n return np.clip(bboxes, 0, 1)\n\n\ndef intersect(box_a, box_b):\n \"\"\"Compute the intersect of two sets of boxes.\"\"\"\n max_yx = np.minimum(box_a[:, 2:4], box_b[2:4])\n min_yx = np.maximum(box_a[:, :2], box_b[:2])\n inter = np.clip((max_yx - min_yx), a_min=0, a_max=np.inf)\n return inter[:, 0] * inter[:, 1]\n\n\ndef jaccard_numpy(box_a, box_b):\n \"\"\"Compute the jaccard overlap of two sets of boxes.\"\"\"\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, 2] - box_a[:, 0]) *\n (box_a[:, 3] - box_a[:, 1]))\n area_b = ((box_b[2] - box_b[0]) *\n (box_b[3] - box_b[1]))\n union = area_a + area_b - inter\n return inter / union\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n postprocess\n\"\"\"\nimport os\nimport numpy as np\nfrom PIL import Image\nfrom src.utils.config import get_args\nfrom mindspore import Tensor\n\ndef save_image(img, img_path):\n \"\"\"Save a numpy image to the disk\n\n Parameters:\n img (numpy array / Tensor): image to save.\n image_path (str): the path of the image.\n \"\"\"\n if isinstance(img, Tensor):\n img = img.asnumpy()\n elif not isinstance(img, np.ndarray):\n raise ValueError(\"img should be Tensor or numpy array, but get {}\".format(type(img)))\n img = decode_image(img)\n\n img_pil = Image.fromarray(img)\n img_pil.save(img_path + \".jpg\")\n\ndef decode_image(img):\n \"\"\"Decode a [1, C, H, W] Tensor to image numpy array.\"\"\"\n mean = 0.5 * 255\n std = 0.5 * 255\n\n return (img * std + mean).astype(np.uint8).transpose((1, 2, 0))\n\nif __name__ == '__main__':\n args = get_args()\n\n result_dir = \"./result_Files\"\n object_imageSize = 256\n rst_path = result_dir\n\n for i in range(len(os.listdir(rst_path))):\n file_name = os.path.join(rst_path, \"Pix2Pix_data_bs\" + str(args.batch_size) + '_' + str(i) + '_0.bin')\n output = np.fromfile(file_name, np.float32).reshape(3, object_imageSize, object_imageSize)\n print(output.shape)\n save_image(output, './310_infer_img' + str(i + 1))\n print(\"=======image\", i + 1, \"saved success=======\")\n print(\"Generate images success!\")\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"FSNS dataset\"\"\"\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nimport mindspore.dataset as de\nimport mindspore.dataset.vision.c_transforms as C\nimport mindspore.dataset.vision.py_transforms as P\nimport mindspore.dataset.transforms.c_transforms as ops\nimport mindspore.common.dtype as mstype\n\nfrom src.model_utils.config import config\n\n\nclass AugmentationOps():\n def __init__(self, min_area_ratio=0.8, aspect_ratio_range=(0.8, 1.2), brightness=32./255.,\n contrast=0.5, saturation=0.5, hue=0.2, img_tile_shape=(150, 150)):\n self.min_area_ratio = min_area_ratio\n self.aspect_ratio_range = aspect_ratio_range\n self.img_tile_shape = img_tile_shape\n self.random_image_distortion_ops = P.RandomColorAdjust(brightness=brightness,\n contrast=contrast,\n saturation=saturation,\n hue=hue)\n\n def __call__(self, img):\n img_h = self.img_tile_shape[0]\n img_w = self.img_tile_shape[1]\n img_new = np.zeros([128, 512, 3], dtype=np.float32)\n\n for i in range(4):\n img_tile = img[:, (i*150):((i+1)*150), :]\n # Random crop cut from the street sign image, resized to the same size.\n # Assures that the crop covers at least 0.8 area of the input image.\n # Aspect ratio of cropped image is within [0.8,1.2] range.\n h = img_h + 1\n w = img_w + 1\n\n while (w >= img_w or h >= img_h):\n aspect_ratio = np.random.uniform(self.aspect_ratio_range[0],\n self.aspect_ratio_range[1])\n h_low = np.ceil(np.sqrt(self.min_area_ratio * img_h * img_w / aspect_ratio))\n h_high = np.floor(np.sqrt(img_h * img_w / aspect_ratio))\n h = np.random.randint(h_low, h_high)\n w = int(h * aspect_ratio)\n\n y = np.random.randint(img_w - w)\n x = np.random.randint(img_h - h)\n img_tile = img_tile[x:(x+h), y:(y+w), :]\n # Randomly chooses one of the 4 interpolation resize methods.\n interpolation = np.random.choice([cv2.INTER_LINEAR,\n cv2.INTER_CUBIC,\n cv2.INTER_AREA,\n cv2.INTER_NEAREST])\n img_tile = cv2.resize(img_tile, (128, 128), interpolation=interpolation)\n # Random color distortion ops.\n img_tile_pil = Image.fromarray(img_tile)\n img_tile_pil = self.random_image_distortion_ops(img_tile_pil)\n img_tile = np.array(img_tile_pil)\n img_new[:, (i*128):((i+1)*128), :] = img_tile\n\n img_new = 2 * (img_new / 255.) - 1\n return img_new\n\n\nclass ImageResizeWithRescale():\n def __init__(self, standard_img_height, standard_img_width, channel_size=3):\n self.standard_img_height = standard_img_height\n self.standard_img_width = standard_img_width\n self.channel_size = channel_size\n\n def __call__(self, img):\n img = cv2.resize(img, (self.standard_img_width, self.standard_img_height))\n img = 2 * (img / 255.) 
- 1\n return img\n\n\ndef random_teacher_force(images, source_ids, target_ids):\n teacher_force = np.random.random() < config.teacher_force_ratio\n teacher_force_array = np.array([teacher_force], dtype=bool)\n return images, source_ids, target_ids, teacher_force_array\n\n\ndef create_ocr_train_dataset(mindrecord_file, batch_size=32, rank_size=1, rank_id=0,\n is_training=True, num_parallel_workers=4, use_multiprocessing=True):\n ds = de.MindDataset(mindrecord_file,\n columns_list=[\"image\", \"decoder_input\", \"decoder_target\"],\n num_shards=rank_size,\n shard_id=rank_id,\n num_parallel_workers=num_parallel_workers,\n shuffle=is_training)\n aug_ops = AugmentationOps()\n transforms = [C.Decode(),\n aug_ops,\n C.HWC2CHW()]\n ds = ds.map(operations=transforms, input_columns=[\"image\"], python_multiprocessing=use_multiprocessing,\n num_parallel_workers=num_parallel_workers)\n ds = ds.map(operations=ops.PadEnd([config.max_length], 0), input_columns=[\"decoder_target\"])\n ds = ds.map(operations=random_teacher_force, input_columns=[\"image\", \"decoder_input\", \"decoder_target\"],\n output_columns=[\"image\", \"decoder_input\", \"decoder_target\", \"teacher_force\"],\n column_order=[\"image\", \"decoder_input\", \"decoder_target\", \"teacher_force\"])\n type_cast_op_bool = ops.TypeCast(mstype.bool_)\n ds = ds.map(operations=type_cast_op_bool, input_columns=\"teacher_force\")\n print(\"Train dataset size= %s\" % (int(ds.get_dataset_size())))\n ds = ds.batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef create_ocr_val_dataset(mindrecord_file, batch_size=32, rank_size=1, rank_id=0,\n num_parallel_workers=4, use_multiprocessing=True):\n ds = de.MindDataset(mindrecord_file,\n columns_list=[\"image\", \"annotation\", \"decoder_input\", \"decoder_target\"],\n num_shards=rank_size,\n shard_id=rank_id,\n num_parallel_workers=num_parallel_workers,\n shuffle=False)\n resize_rescale_op = ImageResizeWithRescale(standard_img_height=128, standard_img_width=512)\n transforms = [C.Decode(),\n resize_rescale_op,\n C.HWC2CHW()]\n ds = ds.map(operations=transforms, input_columns=[\"image\"], python_multiprocessing=use_multiprocessing,\n num_parallel_workers=num_parallel_workers)\n ds = ds.map(operations=ops.PadEnd([config.max_length], 0), input_columns=[\"decoder_target\"],\n python_multiprocessing=use_multiprocessing, num_parallel_workers=8)\n ds = ds.map(operations=ops.PadEnd([config.max_length], 0), input_columns=[\"decoder_input\"],\n python_multiprocessing=use_multiprocessing, num_parallel_workers=8)\n ds = ds.batch(batch_size, drop_remainder=True)\n print(\"Val dataset size= %s\" % (str(int(ds.get_dataset_size())*batch_size)))\n return ds\n",
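The crop-size sampling inside `AugmentationOps.__call__` bounds the sampled height so that the crop keeps at least `min_area_ratio` of the tile area at the drawn aspect ratio, retrying until the crop fits inside the tile. A standalone numpy sketch of just that computation:

import numpy as np

img_h = img_w = 150
min_area_ratio = 0.8
h, w = img_h + 1, img_w + 1
while w >= img_w or h >= img_h:  # retry until the crop fits, as in the original
    aspect_ratio = np.random.uniform(0.8, 1.2)
    h_low = np.ceil(np.sqrt(min_area_ratio * img_h * img_w / aspect_ratio))
    h_high = np.floor(np.sqrt(img_h * img_w / aspect_ratio))
    h = np.random.randint(h_low, h_high)
    w = int(h * aspect_ratio)
assert h * w >= min_area_ratio * img_h * img_w * 0.95  # small slack for integer rounding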
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Monitor loss and time\"\"\"\nimport time\nimport numpy as np\nfrom mindspore import Tensor\nfrom mindspore.train.callback import Callback\nclass Monitor(Callback):\n \"\"\"\n Monitor loss and time.\n\n Args:\n lr_init (numpy array): train lr\n\n Returns:\n None\n\n Examples:\n >>> Monitor(100,lr_init=Tensor([0.05]*100).asnumpy())\n \"\"\"\n\n def __init__(self, lr_init=None):\n super(Monitor, self).__init__()\n self.lr_init = lr_init\n self.lr_init_len = len(lr_init)\n\n def epoch_begin(self, run_context):\n self.losses = []\n self.epoch_time = time.time()\n\n def epoch_end(self, run_context):\n cb_params = run_context.original_args()\n\n epoch_mseconds = (time.time() - self.epoch_time) * 1000\n per_step_mseconds = epoch_mseconds / cb_params.batch_num\n print(\"epoch time: {:5.3f}, per step time: {:5.3f}, avg loss: {:5.3f}\".format(epoch_mseconds,\n per_step_mseconds,\n np.mean(self.losses)))\n\n def step_begin(self, run_context):\n self.step_time = time.time()\n\n def step_end(self, run_context):\n \"\"\"step end\"\"\"\n cb_params = run_context.original_args()\n step_mseconds = (time.time() - self.step_time) * 1000\n step_loss = cb_params.net_outputs\n\n if isinstance(step_loss, (tuple, list)) and isinstance(step_loss[0], Tensor):\n step_loss = step_loss[0]\n if isinstance(step_loss, Tensor):\n step_loss = np.mean(step_loss.asnumpy())\n\n self.losses.append(step_loss)\n cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num\n\n print(\"epoch: [{:3d}/{:3d}], step:[{:5d}/{:5d}], loss:[{:5.3f}/{:5.3f}], time:[{:5.3f}], lr:[{:5.3f}]\".format(\n cb_params.cur_epoch_num -\n 1, cb_params.epoch_num, cur_step_in_epoch, cb_params.batch_num, step_loss,\n np.mean(self.losses), step_mseconds, self.lr_init[cb_params.cur_step_num - 1]))\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport os\nimport numpy as np\nfrom mindspore import dtype as mstype\nfrom mindspore import Model, context, Tensor\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\nfrom src.dataset import create_dataset\nfrom src.unet3d_model import UNet3d, UNet3d_\nfrom src.utils import create_sliding_window, CalculateDice\nfrom src.model_utils.config import config\nfrom src.model_utils.moxing_adapter import moxing_wrapper\n\ndevice_id = int(os.getenv('DEVICE_ID'))\ncontext.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, save_graphs=False, device_id=device_id)\n\n@moxing_wrapper()\ndef test_net(data_path, ckpt_path):\n data_dir = data_path + \"/image/\"\n seg_dir = data_path + \"/seg/\"\n eval_dataset = create_dataset(data_path=data_dir, seg_path=seg_dir, is_training=False)\n eval_data_size = eval_dataset.get_dataset_size()\n print(\"train dataset length is:\", eval_data_size)\n\n if config.device_target == 'Ascend':\n network = UNet3d()\n else:\n network = UNet3d_()\n network.set_train(False)\n param_dict = load_checkpoint(ckpt_path)\n load_param_into_net(network, param_dict)\n model = Model(network)\n index = 0\n total_dice = 0\n for batch in eval_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):\n image = batch[\"image\"]\n seg = batch[\"seg\"]\n print(\"current image shape is {}\".format(image.shape), flush=True)\n sliding_window_list, slice_list = create_sliding_window(image, config.roi_size, config.overlap)\n image_size = (config.batch_size, config.num_classes) + image.shape[2:]\n output_image = np.zeros(image_size, np.float32)\n count_map = np.zeros(image_size, np.float32)\n importance_map = np.ones(config.roi_size, np.float32)\n for window, slice_ in zip(sliding_window_list, slice_list):\n window_image = Tensor(window, mstype.float32)\n pred_probs = model.predict(window_image)\n output_image[slice_] += pred_probs.asnumpy()\n count_map[slice_] += importance_map\n output_image = output_image / count_map\n dice, _ = CalculateDice(output_image, seg)\n print(\"The {} batch dice is {}\".format(index, dice), flush=True)\n total_dice += dice\n index = index + 1\n avg_dice = total_dice / eval_data_size\n print(\"**********************End Eval***************************************\")\n print(\"eval average dice is {}\".format(avg_dice))\n\nif __name__ == '__main__':\n test_net(data_path=config.data_path,\n ckpt_path=config.checkpoint_file_path)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"lr\"\"\"\n\nfrom collections import Counter\nimport numpy as np\n\nfrom .linear_warmup import linear_warmup_lr\n\n\ndef warmup_step_lr(lr, lr_epochs, steps_per_epoch, warmup_epochs, max_epoch, gamma=0.1):\n \"\"\"warmup step lr\"\"\"\n base_lr = lr\n warmup_init_lr = 0\n total_steps = int(max_epoch * steps_per_epoch)\n warmup_steps = int(warmup_epochs * steps_per_epoch)\n milestones = lr_epochs\n milestones_steps = []\n for milestone in milestones:\n milestones_step = milestone * steps_per_epoch\n milestones_steps.append(milestones_step)\n\n lr_each_step = []\n lr = base_lr\n milestones_steps_counter = Counter(milestones_steps)\n for i in range(total_steps):\n if i < warmup_steps:\n lr = linear_warmup_lr(i + 1, warmup_steps, base_lr, warmup_init_lr)\n else:\n lr = lr * gamma ** milestones_steps_counter[i]\n lr_each_step.append(lr)\n\n return np.array(lr_each_step).astype(np.float32)\n\n\ndef multi_step_lr(lr, milestones, steps_per_epoch, max_epoch, gamma=0.1):\n \"\"\"lr\"\"\"\n return warmup_step_lr(lr, milestones, steps_per_epoch, 0, max_epoch, gamma=gamma)\n\n\ndef step_lr(lr, epoch_size, steps_per_epoch, max_epoch, gamma=0.1):\n \"\"\"lr\"\"\"\n lr_epochs = []\n for i in range(1, max_epoch):\n if i % epoch_size == 0:\n lr_epochs.append(i)\n return multi_step_lr(lr, lr_epochs, steps_per_epoch, max_epoch, gamma=gamma)\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"PanguAlpha model\"\"\"\nimport os\nimport copy\nimport numpy as np\nimport mindspore.nn as nn\nimport mindspore.common.dtype as mstype\nfrom mindspore.common.initializer import initializer\nfrom mindspore import Tensor, Parameter\nfrom mindspore.ops import operations as P\nfrom mindspore.ops import functional as F\nfrom mindspore.nn import Cell\nfrom mindspore.parallel.nn.transformer import VocabEmbedding, TransformerEncoder, TransformerEncoderLayer, \\\n AttentionMask\nfrom mindspore.parallel.nn import MoEConfig\nfrom mindspore.parallel.nn.layers import _LayerNorm\n\n\nclass EmbeddingLayer(nn.Cell):\n r\"\"\"Embedding layer of the PanGUAlpha Model\"\"\"\n def __init__(self, config):\n super(EmbeddingLayer, self).__init__()\n # Only for the pipeline mode, the embedding needs to be row sliced.\n self.word_embedding = VocabEmbedding(vocab_size=config.vocab_size,\n embedding_size=config.hidden_size,\n param_init=initializer(\"normal\", [config.vocab_size, config.hidden_size],\n dtype=config.param_init_type),\n parallel_config=config.parallel_config.embedding_dp_mp_config)\n copied_parallel_config = copy.deepcopy(config.parallel_config)\n copied_parallel_config.vocab_emb_dp = True\n self.position_embedding = VocabEmbedding(vocab_size=config.seq_length,\n embedding_size=config.hidden_size,\n param_init=initializer(\"normal\",\n [config.seq_length, config.hidden_size],\n dtype=config.param_init_type),\n parallel_config=copied_parallel_config.embedding_dp_mp_config)\n self.add = P.Add().shard(\n ((config.parallel_config.data_parallel, 1, 1), (config.parallel_config.data_parallel, 1, 1)))\n self.dropout = nn.Dropout(1 - config.dropout_rate)\n self.dropout.dropout.shard(((config.parallel_config.data_parallel, 1, 1),))\n self.is_first_iteration = True\n self.use_past = config.use_past\n self.batch_size = config.batch_size\n\n def construct(self, input_ids, input_position, init_reset, batch_valid_length):\n word_embedding, word_table = self.word_embedding(input_ids)\n if self.use_past and not self.is_first_iteration:\n _, seq_length = F.shape(input_ids)\n input_position = batch_valid_length.view(self.batch_size, seq_length)\n position_embedding, _ = self.position_embedding(input_position)\n embed = self.add(word_embedding, position_embedding)\n embed = self.dropout(embed)\n return embed, word_table\n\n def get_word_embedding_weight(self):\n return self.word_embedding.embedding_table\n\n\nclass QueryLayer(TransformerEncoderLayer):\n r\"\"\"Query Layer at the final layer.\"\"\"\n def __init__(self, batch_size,\n hidden_size,\n ffn_hidden_size,\n num_heads,\n seq_length,\n attention_dropout_rate=0.1,\n hidden_dropout_rate=0.1,\n post_layernorm_residual=False,\n param_init_type=mstype.float32,\n hidden_act='gelu',\n use_past=False,\n parallel_config=None,\n softmax_compute_type=mstype.float32):\n super(QueryLayer, 
self).__init__(batch_size=batch_size,\n hidden_size=hidden_size,\n ffn_hidden_size=ffn_hidden_size,\n num_heads=num_heads,\n seq_length=seq_length,\n attention_dropout_rate=attention_dropout_rate,\n hidden_dropout_rate=hidden_dropout_rate,\n post_layernorm_residual=post_layernorm_residual,\n param_init_type=param_init_type,\n hidden_act=hidden_act,\n use_past=use_past,\n parallel_config=parallel_config.dp_mp_config,\n softmax_compute_type=softmax_compute_type)\n\n def construct(self, x, query_vector, input_mask, init_reset=True, batch_valid_length=None):\n r\"\"\"\n The forward process of the block.\n \"\"\"\n # [bs * seq_length, embedding_size]\n input_x = self.layernorm1(x)\n input_x = F.cast(input_x, self.dtype)\n\n # indicate whether reset saved states\n key_reset = None\n value_reset = None\n\n if self.use_past:\n # reset states, init_reset True for reuse and False for reset\n key_reset = self.assign(self.key_past, self.mul(self.key_past, F.cast(init_reset, self.dtype)))\n value_reset = self.assign(self.value_past, self.mul(self.value_past, F.cast(init_reset, self.dtype)))\n # add dependency for desired execution order\n input_x = F.depend(input_x, key_reset)\n input_x = F.depend(input_x, value_reset)\n\n attention, layer_present = self.attention(query_vector, input_x, input_x, input_mask,\n self.key_past, self.value_past, batch_valid_length)\n # For post-layernorm the inputs for residual path are output of self-attention and output of layernorm\n if self.post_layernorm_residual:\n x = self.add(input_x, attention)\n # For pre-layernorm the inputs for residual path are output of self-attention and input of this layer\n else:\n x = self.add(x, attention)\n\n output_x = self.layernorm2(x)\n output_x = F.cast(output_x, self.dtype)\n mlp_logit = self.output(output_x)\n\n value_update = None\n key_update = None\n if self.use_past:\n # current key and value\n key_present, value_present = layer_present\n # update key and value calculated this step\n key_update = self.assign(self.key_past, key_present)\n value_update = self.assign(self.value_past, value_present)\n # add dependency for desired execution order\n key_update = F.depend(key_update, key_reset)\n value_update = F.depend(value_update, value_reset)\n\n # add dependency for desired execution order\n mlp_logit = F.depend(mlp_logit, value_update)\n mlp_logit = F.depend(mlp_logit, key_update)\n\n if self.post_layernorm_residual:\n output = self.add(output_x, mlp_logit)\n else:\n output = self.add(x, mlp_logit)\n return output, layer_present\n\n\nclass PanGuHead(Cell):\n \"\"\"\n Head to get the logits of each token in the vocab\n Args:\n config(): the config of network\n Inputs:\n state: the output of the backbone\n embedding_table: the embedding table of the vocabulary\n Returns:\n logits: Tensor, the logits of the corresponding inputs\n \"\"\"\n\n def __init__(self,\n hidden_size,\n compute_type=mstype.float16,\n parallel_config=None):\n super(PanGuHead, self).__init__()\n if parallel_config.vocab_emb_dp:\n self.matmul = P.MatMul(transpose_b=True).shard(((parallel_config.data_parallel, 1), (1, 1)))\n else:\n self.matmul = P.MatMul(transpose_b=True).shard(((parallel_config.data_parallel, 1), (\n parallel_config.model_parallel, 1)))\n self.hidden_size = hidden_size\n self.dtype = compute_type\n self.cast = P.Cast()\n\n def construct(self, state, embed):\n state = P.Reshape()(state, (-1, self.hidden_size))\n # output logits over vocabulary [bs*seq_length, vocab_size]\n logits = self.matmul(self.cast(state, self.dtype), self.cast(embed, 
self.dtype))\n return logits\n\n\ndef set_parallel_configure_for_layer(network, layer_id, offset, parallel_config, layers):\n r\"\"\"\n Default setting for the pipeline is: `(layer_id + offset) // (layers / pipeline_stage)`.\n\n\n Args:\n network(Cell) - Represents the transformer block\n layer_id(int) - Means the layer index for the current module, counts from zero.\n offset(int) - Means the layer_index needs a offset, if there are other modules in the net.\n layers(int) - The total layers used for the model.\n \"\"\"\n # Used for the pipeline's stages setting\n # As the final layer is not included here, so we need to manually add here.\n # original: if set two stages, layers on two stages will be [15, 16+1]\n # with 1 added, the layers on two stages will be [16, 15 +1]\n pp_dis = max(int((layers + 1)/ parallel_config.pipeline_stage), 1)\n # the pipeline stage must be in [0, parallel_config.pipeline_stage - 1]\n pp_id = min((layer_id + offset) // pp_dis, parallel_config.pipeline_stage - 1)\n network.pipeline_stage = pp_id\n print(f\"pipeline stage id is {pp_id}\", flush=True)\n\n # Used for optimizer's fusion tag\n dis = max(int((layers + 1) / parallel_config.gradient_aggregation_group), 1)\n if parallel_config.pipeline_stage > 1:\n # we give the fusion in pipeline mode a fixed value, otherwise the performance may become worse.\n network.set_comm_fusion(2)\n else:\n network.set_comm_fusion(int((layer_id + offset) / dis) + 1)\n # Used for enabling recomputation of the block\n if parallel_config.recompute:\n network.recompute()\n\n\nclass PanguAlpha_Model(Cell):\n r\"\"\"The base backbone of the PanGuAlpha model\"\"\"\n def __init__(self, config):\n super(PanguAlpha_Model, self).__init__()\n self.is_pipeline = config.parallel_config.pipeline_stage > 1\n self.embedding = EmbeddingLayer(config)\n self.config = config\n self.layernorm = _LayerNorm((config.hidden_size,)).to_float(mstype.float32)\n if config.parallel_config.pipeline_stage > 1:\n self.layernorm.set_comm_fusion(2)\n else:\n self.layernorm.set_comm_fusion(config.parallel_config.gradient_aggregation_group)\n self.layernorm.shard(((config.parallel_config.data_parallel, 1),))\n self.layernorm.pipeline_stage = config.parallel_config.pipeline_stage - 1\n # Configure the shard configure of the Embedding layer\n self.embedding.pipeline_stage = 0\n self.num_layers = config.num_layers\n if config.use_moe:\n moe_config = MoEConfig(expert_num=config.parallel_config.data_parallel * config.per_dp_dim_expert_num)\n else:\n moe_config = MoEConfig(expert_num=1)\n # The shard setting of Transformer is set within the class StackedTransformer\n self.blocks = TransformerEncoder(num_layers=config.num_layers - 1,\n batch_size=config.batch_size,\n hidden_size=config.hidden_size,\n ffn_hidden_size=config.ffn_hidden_size,\n num_heads=config.num_heads,\n seq_length=config.seq_length,\n attention_dropout_rate=config.dropout_rate,\n hidden_dropout_rate=config.dropout_rate,\n lambda_func=set_parallel_configure_for_layer,\n param_init_type=config.param_init_type,\n use_past=config.use_past,\n parallel_config=config.parallel_config,\n moe_config=moe_config,\n softmax_compute_type=config.softmax_compute_type).blocks\n copied_parallel_config = copy.deepcopy(config.parallel_config)\n copied_parallel_config.vocab_emb_dp = True\n self.top_query_embedding = VocabEmbedding(vocab_size=config.seq_length,\n embedding_size=config.hidden_size,\n param_init=initializer(\"normal\",\n [config.seq_length, config.hidden_size],\n dtype=config.param_init_type),\n 
parallel_config=copied_parallel_config.embedding_dp_mp_config)\n self.top_query_embedding.pipeline_stage = config.parallel_config.pipeline_stage - 1\n if config.parallel_config.pipeline_stage > 1:\n self.top_query_embedding.set_comm_fusion(2)\n else:\n self.top_query_embedding.set_comm_fusion(config.parallel_config.gradient_aggregation_group)\n\n self.top_query_layer = QueryLayer(batch_size=config.batch_size,\n hidden_size=config.hidden_size,\n ffn_hidden_size=config.ffn_hidden_size,\n num_heads=config.num_heads,\n seq_length=config.seq_length,\n attention_dropout_rate=config.dropout_rate,\n hidden_dropout_rate=config.dropout_rate,\n hidden_act=config.hidden_act,\n param_init_type=config.param_init_type,\n use_past=config.use_past,\n parallel_config=config.parallel_config)\n if config.parallel_config.recompute:\n self.top_query_layer.recompute()\n self.top_query_layer.set_comm_fusion(config.parallel_config.gradient_aggregation_group)\n self.top_query_layer.pipeline_stage = config.parallel_config.pipeline_stage - 1\n\n self.dtype = mstype.float16\n\n if config.load_ckpt_path:\n self.load_embedding_from_ckpt(config.load_ckpt_path)\n self.run_type = config.run_type\n\n def construct(self, input_ids,\n input_position,\n encoder_masks,\n init_reset=True,\n batch_valid_length=None):\n r\"\"\"forward pass of the model\"\"\"\n embed, word_table = self.embedding(input_ids, input_position, init_reset, batch_valid_length)\n hidden_state = P.Cast()(embed, self.dtype)\n # the input of the incremental prediction is 3d\n if self.run_type != 'predict':\n hidden_state = self.reshape_to_2d(hidden_state)\n if self.blocks is not None:\n for i in range(self.num_layers - 1):\n hidden_state, _ = self.blocks[i](hidden_state, encoder_masks, init_reset, batch_valid_length)\n if self.is_pipeline:\n top_query_hidden_states, _ = self.top_query_embedding(input_position)\n top_query_hidden_states = self.reshape_to_2d(top_query_hidden_states)\n encoder_output, _ = self.top_query_layer(hidden_state, top_query_hidden_states,\n encoder_masks, init_reset, batch_valid_length)\n encoder_output = self.layernorm(encoder_output)\n else:\n hidden_state = self.reshape_to_2d(hidden_state)\n encoder_output = self.layernorm(hidden_state)\n encoder_output = P.Cast()(encoder_output, self.dtype)\n top_query_hidden_states, _ = self.top_query_embedding(input_position)\n top_query_hidden_states = self.reshape_to_2d(top_query_hidden_states)\n encoder_output, _ = self.top_query_layer(encoder_output, top_query_hidden_states,\n encoder_masks, init_reset, batch_valid_length)\n\n return encoder_output, word_table\n\n def reshape_to_2d(self, x):\n r\"\"\"reshape nd tensor to 2d, if n <= 2, keep original shape.\"\"\"\n shape = F.shape(x)\n if len(shape) <= 2:\n return x\n x = F.reshape(x, (-1, shape[-1]))\n return x\n\n def load_embedding_from_ckpt(self, load_ckpt_path):\n r\"\"\"load the weights from the checkpoint\"\"\"\n def load_param(path):\n if os.path.exists(path):\n p_table = np.load(path)\n table_param = Tensor(p_table, mstype.float32)\n else:\n raise ValueError(f\"{path} file not exits, \"\n f\"please check whether embedding file exit.\")\n return table_param\n\n # three embedding needed to be loaded\n # Loading the embedding table from the ckpt path:\n position_embedding_path = os.path.join(load_ckpt_path, 'position_embedding.npy')\n word_embedding_path = os.path.join(load_ckpt_path, 'word_embedding.npy')\n top_query_embedding_path = os.path.join(load_ckpt_path, 'top_query_embedding.npy')\n self.embedding.word_embedding.embedding_table = 
Parameter(initializer(load_param(word_embedding_path),\n [self.config.vocab_size,\n self.config.hidden_size]),\n name='word_embedding_table', parallel_optimizer=False)\n self.embedding.position_embedding.embedding_table = Parameter(initializer(load_param(position_embedding_path),\n [self.config.seq_length,\n self.config.hidden_size]),\n name='position_embedding_table',\n parallel_optimizer=False)\n self.top_query_embedding.embedding_table = Parameter(initializer(load_param(top_query_embedding_path),\n [self.config.seq_length,\n self.config.hidden_size]),\n name='query_embedding_table', parallel_optimizer=False)\n\n\nclass PanguAlphaModel(nn.Cell):\n \"\"\"\n The PanguAlpha network consisting of two parts the backbone and the head\n Args:\n config(PanguAlphaConfig): the config of network\n Inputs:\n input_ids: the tokenized inputs\n input_mask: the mask indicating whether each position is a valid input\n past: the previous feature map\n Returns:\n logits: Tensor: the logits of the corresponding inputs with shape (batch_size, seq_length, vocab_size)\n \"\"\"\n\n def __init__(self, config):\n super(PanguAlphaModel, self).__init__()\n # Network head to get logits over vocabulary\n copied_parallel_config = copy.deepcopy(config.parallel_config)\n if copied_parallel_config.pipeline_stage > 1:\n copied_parallel_config.vocab_emb_dp = False\n self.head = PanGuHead(hidden_size=config.hidden_size,\n parallel_config=copied_parallel_config)\n self.head.pipeline_stage = config.parallel_config.pipeline_stage - 1\n self.backbone = PanguAlpha_Model(config)\n self.backbone.embedding.word_embedding.embedding_table.add_pipeline_stage(self.head.pipeline_stage)\n\n def construct(self, input_ids, input_position, attention_mask,\n init_reset=True, batch_valid_length=None):\n output_states, word_table = self.backbone(input_ids, input_position, attention_mask,\n init_reset, batch_valid_length)\n logits = self.head(output_states, word_table)\n return logits\n\n\nclass PanGUAlphaWithLoss(Cell):\n \"\"\"\n PanguAlpha training loss for generation.\n Args:\n config(PanGUConfig)\n Inputs:\n input_ids: the tokenized inputs\n past: the previous feature map\n Returns:\n output: Tensor, the loss of the network\n \"\"\"\n\n def __init__(self, config, network, loss):\n super(PanGUAlphaWithLoss, self).__init__(auto_prefix=False)\n self.batch_size = config.batch_size\n self.seq_length = config.seq_length\n dp = config.parallel_config.data_parallel\n self.network = network\n self.eod_token = config.eod_token\n self.loss = loss\n\n self.slice = P.StridedSlice().shard(((dp, 1),))\n self.not_equal = P.NotEqual().shard(((dp, 1), ()))\n self.batch_size = config.batch_size\n self.len = config.seq_length\n self.slice2 = P.StridedSlice().shard(((dp, 1, 1),))\n self.micro_batch_step = 1\n if config.parallel_config.pipeline_stage > 1:\n self.micro_batch_step = config.parallel_config.micro_batch_num\n\n def construct(self, input_ids, input_position=None, attention_mask=None):\n r\"\"\"Forward process of the pangu alpha model\"\"\"\n tokens = self.slice(input_ids, (0, 0), (self.batch_size, -1), (1, 1))\n input_position = self.slice(input_position, (0, 0), (self.batch_size, self.len), (1, 1))\n decoder_attention_masks = self.slice2(attention_mask, (0, 0, 0), (self.batch_size, self.len, self.len),\n (1, 1, 1))\n input_mask = F.cast(self.not_equal(tokens, self.eod_token),\n mstype.float32)\n\n logits = self.network(tokens,\n input_position,\n decoder_attention_masks)\n # Get label corresponding to input tokens\n labels = self.slice(input_ids, 
(0, 1), (self.batch_size, self.len + 1),\n (1, 1))\n labels = P.Reshape()(labels, (-1,))\n input_mask = P.Reshape()(input_mask, (-1,))\n output = self.loss(logits, labels, input_mask)\n return output\n\n\nclass EvalNet(nn.Cell):\n \"\"\"\n PanguAlpha evaluation net\n Args:\n backbone: backbone network of PanguAlpha\n generate: enable generate mode\n Inputs:\n input_ids: the tokenized inputs\n current_index: the index of the current token\n init_reset: whether to reset the saved states\n Returns:\n outputs: Tensor, corresponding output for different tasks\n \"\"\"\n\n def __init__(self, backbone, generate=False, pad_token=6, seq_length=1024):\n super(EvalNet, self).__init__(auto_prefix=False)\n self.backbone = backbone\n self.pad_token = pad_token\n self.argmax = P.Argmax()\n self.generate = generate\n self.topk = P.TopK(sorted=True).shard(((1, 1),))\n self.gather = P.GatherV2().shard(((1, 1), (1,)))\n self.log_softmax = P.LogSoftmax().shard(((1, 1, 1),))\n self.get_attention_mask = AttentionMask(seq_length)\n self.expand = P.ExpandDims().shard(((1, 1, 1),))\n # used for incremental prediction\n self.all_ones_attention_mask = Tensor(np.ones((1, 1, seq_length)), mstype.float32)\n\n def construct(self, input_ids, current_index, init_reset=True, batch_valid_length=None):\n \"\"\"evaluation net\"\"\"\n input_mask = F.cast(F.not_equal(input_ids, self.pad_token), mstype.float32)\n bs, seq_length = F.shape(input_ids)\n input_position = F.tuple_to_array(F.make_range(seq_length))\n input_position = P.Tile()(input_position, (bs, 1))\n if self.is_first_iteration is False:\n attention_mask = P.Tile()(self.all_ones_attention_mask, (bs, 1, 1))\n else:\n attention_mask = self.get_attention_mask(input_mask)\n logits = self.backbone(input_ids, input_position, attention_mask,\n init_reset, batch_valid_length)\n index = current_index.view(1,)\n logits = self.gather(logits, index, 0)\n logits = logits.view(bs, 1, -1)\n log_probs = self.log_softmax(logits)\n return log_probs\n",
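The loss wrapper above trains with the usual next-token objective: the network sees the first `seq_length` ids and the labels are the same ids shifted left by one. A numpy sketch of what the two `StridedSlice` calls compute:

import numpy as np

seq_length = 4
input_ids = np.array([[11, 12, 13, 14, 15]])   # seq_length + 1 ids per sample
tokens = input_ids[:, :seq_length]             # slice(input_ids, (0, 0), (bs, -1))
labels = input_ids[:, 1:seq_length + 1]        # slice(input_ids, (0, 1), (bs, len + 1))
assert (labels[:, :-1] == tokens[:, 1:]).all()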
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"preprocess\"\"\"\nimport argparse\nimport os\n\nimport numpy as np\nfrom src.dataset import load_and_process\n\n\ndef generate_bin():\n \"\"\"Generate bin files.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, default='./data/cora/cora_mr', help='Data dir')\n parser.add_argument('--train_nodes_num', type=int, default=140, help='Nodes numbers for training')\n parser.add_argument('--eval_nodes_num', type=int, default=500, help='Nodes numbers for evaluation')\n parser.add_argument('--test_nodes_num', type=int, default=1000, help='Nodes numbers for test')\n parser.add_argument('--result_path', type=str, default='./preprocess_Result/', help='Result path')\n args = parser.parse_args()\n\n feature, biases, _, _, _, _, y_test, test_mask = load_and_process(args.data_dir,\n args.train_nodes_num,\n args.eval_nodes_num,\n args.test_nodes_num)\n feature_path = os.path.join(args.result_path, '00_data')\n biases_path = os.path.join(args.result_path, '01_data')\n y_test_path = os.path.join(args.result_path, 'y_test.npy')\n test_mask_path = os.path.join(args.result_path, 'test_mask.npy')\n\n os.makedirs(feature_path)\n os.makedirs(biases_path)\n\n feature.tofile(os.path.join(feature_path, 'feature.bin'))\n biases.tofile(os.path.join(biases_path, 'biases.bin'))\n np.save(y_test_path, y_test)\n np.save(test_mask_path, test_mask)\n\nif __name__ == \"__main__\":\n generate_bin()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Download raw data and preprocessed data.\"\"\"\nimport os\nimport pickle\nimport collections\nimport numpy as np\nfrom mindspore.mindrecord import FileWriter\nfrom .model_utils.config import config\n\nclass StatsDict():\n \"\"\"preprocessed data\"\"\"\n\n def __init__(self, field_size, dense_dim, slot_dim, skip_id_convert):\n self.field_size = field_size\n self.dense_dim = dense_dim\n self.slot_dim = slot_dim\n self.skip_id_convert = bool(skip_id_convert)\n\n self.val_cols = [\"val_{}\".format(i + 1) for i in range(self.dense_dim)]\n self.cat_cols = [\"cat_{}\".format(i + 1) for i in range(self.slot_dim)]\n\n self.val_min_dict = {col: 0 for col in self.val_cols}\n self.val_max_dict = {col: 0 for col in self.val_cols}\n\n self.cat_count_dict = {col: collections.defaultdict(int) for col in self.cat_cols}\n\n self.oov_prefix = \"OOV\"\n\n self.cat2id_dict = {}\n self.cat2id_dict.update({col: i for i, col in enumerate(self.val_cols)})\n self.cat2id_dict.update(\n {self.oov_prefix + col: i + len(self.val_cols) for i, col in enumerate(self.cat_cols)})\n\n def stats_vals(self, val_list):\n \"\"\"Handling weights column\"\"\"\n assert len(val_list) == len(self.val_cols)\n\n def map_max_min(i, val):\n key = self.val_cols[i]\n if val != \"\":\n if float(val) > self.val_max_dict[key]:\n self.val_max_dict[key] = float(val)\n if float(val) < self.val_min_dict[key]:\n self.val_min_dict[key] = float(val)\n\n for i, val in enumerate(val_list):\n map_max_min(i, val)\n\n def stats_cats(self, cat_list):\n \"\"\"Handling cats column\"\"\"\n\n assert len(cat_list) == len(self.cat_cols)\n\n def map_cat_count(i, cat):\n key = self.cat_cols[i]\n self.cat_count_dict[key][cat] += 1\n\n for i, cat in enumerate(cat_list):\n map_cat_count(i, cat)\n\n def save_dict(self, dict_path, prefix=\"\"):\n with open(os.path.join(dict_path, \"{}val_max_dict.pkl\".format(prefix)), \"wb\") as file_wrt:\n pickle.dump(self.val_max_dict, file_wrt)\n with open(os.path.join(dict_path, \"{}val_min_dict.pkl\".format(prefix)), \"wb\") as file_wrt:\n pickle.dump(self.val_min_dict, file_wrt)\n with open(os.path.join(dict_path, \"{}cat_count_dict.pkl\".format(prefix)), \"wb\") as file_wrt:\n pickle.dump(self.cat_count_dict, file_wrt)\n\n def load_dict(self, dict_path, prefix=\"\"):\n with open(os.path.join(dict_path, \"{}val_max_dict.pkl\".format(prefix)), \"rb\") as file_wrt:\n self.val_max_dict = pickle.load(file_wrt)\n with open(os.path.join(dict_path, \"{}val_min_dict.pkl\".format(prefix)), \"rb\") as file_wrt:\n self.val_min_dict = pickle.load(file_wrt)\n with open(os.path.join(dict_path, \"{}cat_count_dict.pkl\".format(prefix)), \"rb\") as file_wrt:\n self.cat_count_dict = pickle.load(file_wrt)\n print(\"val_max_dict.items()[:50]:{}\".format(list(self.val_max_dict.items())))\n 
print(\"val_min_dict.items()[:50]:{}\".format(list(self.val_min_dict.items())))\n\n def get_cat2id(self, threshold=100):\n for key, cat_count_d in self.cat_count_dict.items():\n new_cat_count_d = dict(filter(lambda x: x[1] > threshold, cat_count_d.items()))\n for cat_str, _ in new_cat_count_d.items():\n self.cat2id_dict[key + \"_\" + cat_str] = len(self.cat2id_dict)\n print(\"cat2id_dict.size:{}\".format(len(self.cat2id_dict)))\n print(\"cat2id.dict.items()[:50]:{}\".format(list(self.cat2id_dict.items())[:50]))\n\n def map_cat2id(self, values, cats):\n \"\"\"Cat to id\"\"\"\n\n def minmax_scale_value(i, val):\n max_v = float(self.val_max_dict[\"val_{}\".format(i + 1)])\n return float(val) * 1.0 / max_v\n\n id_list = []\n weight_list = []\n for i, val in enumerate(values):\n if val == \"\":\n id_list.append(i)\n weight_list.append(0)\n else:\n key = \"val_{}\".format(i + 1)\n id_list.append(self.cat2id_dict[key])\n weight_list.append(minmax_scale_value(i, float(val)))\n\n for i, cat_str in enumerate(cats):\n key = \"cat_{}\".format(i + 1) + \"_\" + cat_str\n if key in self.cat2id_dict:\n if self.skip_id_convert is True:\n # For the synthetic data, if the generated id is between [0, max_vcoab], but the num examples is l\n # ess than vocab_size/ slot_nums the id will still be converted to [0, real_vocab], where real_vocab\n # the actually the vocab size, rather than the max_vocab. So a simple way to alleviate this\n # problem is skip the id convert, regarding the synthetic data id as the final id.\n id_list.append(cat_str)\n else:\n id_list.append(self.cat2id_dict[key])\n else:\n id_list.append(self.cat2id_dict[self.oov_prefix + \"cat_{}\".format(i + 1)])\n weight_list.append(1.0)\n return id_list, weight_list\n\n\ndef mkdir_path(file_path):\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n\n\ndef statsdata(file_path, dict_output_path, recommendation_dataset_stats_dict, dense_dim=13, slot_dim=26):\n \"\"\"Preprocess data and save data\"\"\"\n with open(file_path, encoding=\"utf-8\") as file_in:\n errorline_list = []\n count = 0\n for line in file_in:\n count += 1\n line = line.strip(\"\\n\")\n items = line.split(\"\\t\")\n if len(items) != (dense_dim + slot_dim + 1):\n errorline_list.append(count)\n print(\"Found line length: {}, suppose to be {}, the line is {}\".format(len(items),\n dense_dim + slot_dim + 1, line))\n continue\n if count % 1000000 == 0:\n print(\"Have handled {}w lines.\".format(count // 10000))\n values = items[1: dense_dim + 1]\n cats = items[dense_dim + 1:]\n\n assert len(values) == dense_dim, \"values.size: {}\".format(len(values))\n assert len(cats) == slot_dim, \"cats.size: {}\".format(len(cats))\n recommendation_dataset_stats_dict.stats_vals(values)\n recommendation_dataset_stats_dict.stats_cats(cats)\n recommendation_dataset_stats_dict.save_dict(dict_output_path)\n\n\ndef random_split_trans2mindrecord(input_file_path, output_file_path, recommendation_dataset_stats_dict,\n part_rows=2000000, line_per_sample=1000, train_line_count=None,\n test_size=0.1, seed=2020, dense_dim=13, slot_dim=26):\n \"\"\"Random split data and save mindrecord\"\"\"\n if train_line_count is None:\n raise ValueError(\"Please provide training file line count\")\n test_size = int(train_line_count * test_size)\n all_indices = [i for i in range(train_line_count)]\n np.random.seed(seed)\n np.random.shuffle(all_indices)\n print(\"all_indices.size:{}\".format(len(all_indices)))\n test_indices_set = set(all_indices[:test_size])\n 
print(\"test_indices_set.size:{}\".format(len(test_indices_set)))\n print(\"-----------------------\" * 10 + \"\\n\" * 2)\n\n train_data_list = []\n test_data_list = []\n ids_list = []\n wts_list = []\n label_list = []\n\n writer_train = FileWriter(os.path.join(output_file_path, \"train_input_part.mindrecord\"), 21)\n writer_test = FileWriter(os.path.join(output_file_path, \"test_input_part.mindrecord\"), 3)\n\n schema = {\"label\": {\"type\": \"float32\", \"shape\": [-1]}, \"feat_vals\": {\"type\": \"float32\", \"shape\": [-1]},\n \"feat_ids\": {\"type\": \"int32\", \"shape\": [-1]}}\n writer_train.add_schema(schema, \"CRITEO_TRAIN\")\n writer_test.add_schema(schema, \"CRITEO_TEST\")\n\n with open(input_file_path, encoding=\"utf-8\") as file_in:\n items_error_size_lineCount = []\n count = 0\n train_part_number = 0\n test_part_number = 0\n for i, line in enumerate(file_in):\n count += 1\n if count % 1000000 == 0:\n print(\"Have handle {}w lines.\".format(count // 10000))\n line = line.strip(\"\\n\")\n items = line.split(\"\\t\")\n if len(items) != (1 + dense_dim + slot_dim):\n items_error_size_lineCount.append(i)\n continue\n label = float(items[0])\n values = items[1:1 + dense_dim]\n cats = items[1 + dense_dim:]\n\n assert len(values) == dense_dim, \"values.size: {}\".format(len(values))\n assert len(cats) == slot_dim, \"cats.size: {}\".format(len(cats))\n\n ids, wts = recommendation_dataset_stats_dict.map_cat2id(values, cats)\n\n ids_list.extend(ids)\n wts_list.extend(wts)\n label_list.append(label)\n\n if count % line_per_sample == 0:\n if i not in test_indices_set:\n train_data_list.append({\"feat_ids\": np.array(ids_list, dtype=np.int32),\n \"feat_vals\": np.array(wts_list, dtype=np.float32),\n \"label\": np.array(label_list, dtype=np.float32)\n })\n else:\n test_data_list.append({\"feat_ids\": np.array(ids_list, dtype=np.int32),\n \"feat_vals\": np.array(wts_list, dtype=np.float32),\n \"label\": np.array(label_list, dtype=np.float32)\n })\n if train_data_list and len(train_data_list) % part_rows == 0:\n writer_train.write_raw_data(train_data_list)\n train_data_list.clear()\n train_part_number += 1\n\n if test_data_list and len(test_data_list) % part_rows == 0:\n writer_test.write_raw_data(test_data_list)\n test_data_list.clear()\n test_part_number += 1\n\n ids_list.clear()\n wts_list.clear()\n label_list.clear()\n\n if train_data_list:\n writer_train.write_raw_data(train_data_list)\n if test_data_list:\n writer_test.write_raw_data(test_data_list)\n writer_train.commit()\n writer_test.commit()\n\n print(\"-------------\" * 10)\n print(\"items_error_size_lineCount.size(): {}.\".format(len(items_error_size_lineCount)))\n print(\"-------------\" * 10)\n np.save(\"items_error_size_lineCount.npy\", items_error_size_lineCount)\n\n\nif __name__ == '__main__':\n data_path = config.data_path\n\n target_field_size = config.dense_dim + config.slot_dim\n stats = StatsDict(field_size=target_field_size, dense_dim=config.dense_dim, slot_dim=config.slot_dim,\n skip_id_convert=config.skip_id_convert)\n data_file_path = data_path + \"origin_data/train.txt\"\n stats_output_path = data_path + \"stats_dict/\"\n mkdir_path(stats_output_path)\n statsdata(data_file_path, stats_output_path, stats, dense_dim=config.dense_dim, slot_dim=config.slot_dim)\n\n stats.load_dict(dict_path=stats_output_path, prefix=\"\")\n stats.get_cat2id(threshold=config.threshold)\n\n in_file_path = data_path + \"origin_data/train.txt\"\n output_path = data_path + \"mindrecord/\"\n mkdir_path(output_path)\n 
random_split_trans2mindrecord(in_file_path, output_path, stats, part_rows=2000000,\n train_line_count=config.train_line_count, line_per_sample=1000,\n test_size=0.1, seed=2020, dense_dim=config.dense_dim, slot_dim=config.slot_dim)\n",
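Despite its name, `minmax_scale_value` inside `map_cat2id` divides by the column maximum only; the minimum collected by `stats_vals` is never used in the scaling. A worked instance with an assumed column maximum:

val_max = 1500.0                   # assumed column maximum from the stats pass
raw_val = 300.0
weight = raw_val * 1.0 / val_max   # the weight stored alongside the feature id
assert abs(weight - 0.2) < 1e-9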
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"postprocess.\"\"\"\nimport os\nimport numpy as np\nfrom src.model_utils.config import config\nfrom src.model_utils.moxing_adapter import moxing_wrapper\n\n\ndef modelarts_pre_process():\n pass\n\n\n@moxing_wrapper(pre_process=modelarts_pre_process)\ndef cal_acc():\n \"\"\"\n Calculate the accuracy of inference results.\n \"\"\"\n result_path0 = os.path.join(config.post_result_path, \"h2o_0.bin\")\n result_path1 = os.path.join(config.post_result_path, \"h2o_1.bin\")\n energy = np.fromfile(result_path0, np.float32).reshape(1,)\n atom_ener = np.fromfile(result_path1, np.float32).reshape(192,)\n print('energy:', energy)\n print('atom_energy:', atom_ener)\n\n baseline = np.load(config.baseline_path)\n ae = baseline['e']\n\n if not np.mean((ae - atom_ener.reshape(-1,)) ** 2) < 3e-6:\n raise ValueError(\"Failed to varify atom_ener\")\n\n print('successful')\n\n\nif __name__ == '__main__':\n cal_acc()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n'''\ntrain\n'''\nfrom __future__ import division\n\nimport os\nimport ast\nimport argparse\nimport numpy as np\nfrom mindspore import context, Tensor\nfrom mindspore.context import ParallelMode\nfrom mindspore.communication.management import init\nfrom mindspore.train import Model\nfrom mindspore.train.callback import TimeMonitor, LossMonitor, ModelCheckpoint, CheckpointConfig\nfrom mindspore.nn.optim import Adam\nfrom mindspore.common import set_seed\n\nfrom src.config import config\nfrom src.pose_resnet import GetPoseResNet\nfrom src.network_with_loss import JointsMSELoss, PoseResNetWithLoss\nfrom src.dataset import keypoint_dataset\n\nif config.MODELARTS.IS_MODEL_ARTS:\n import moxing as mox\n\nset_seed(config.GENERAL.TRAIN_SEED)\ndef get_lr(begin_epoch,\n total_epochs,\n steps_per_epoch,\n lr_init=0.1,\n factor=0.1,\n epoch_number_to_drop=(90, 120)\n ):\n '''\n get_lr\n '''\n lr_each_step = []\n total_steps = steps_per_epoch * total_epochs\n step_number_to_drop = [steps_per_epoch * x for x in epoch_number_to_drop]\n for i in range(int(total_steps)):\n if i in step_number_to_drop:\n lr_init = lr_init * factor\n lr_each_step.append(lr_init)\n current_step = steps_per_epoch * begin_epoch\n lr_each_step = np.array(lr_each_step, dtype=np.float32)\n learning_rate = lr_each_step[current_step:]\n return learning_rate\n\ndef parse_args():\n '''\n args\n '''\n parser = argparse.ArgumentParser(description=\"Simplebaseline training\")\n parser.add_argument('--data_url', required=False, default=None, help='Location of data.')\n parser.add_argument('--train_url', required=False, default=None, help='Location of training outputs.')\n parser.add_argument('--device_id', required=False, default=None, type=int, help='Location of training outputs.')\n parser.add_argument(\"--device_target\", type=str, choices=[\"Ascend\", \"GPU\", \"CPU\"], default=\"Ascend\",\n help=\"device target\")\n parser.add_argument('--run_distribute', required=False, default=False, help='Location of training outputs.')\n parser.add_argument('--is_model_arts', type=ast.literal_eval, default=False, help='Location of training outputs.')\n args = parser.parse_args()\n return args\n\ndef main():\n print(\"loading parse...\")\n args = parse_args()\n device_id = args.device_id\n device_target = args.device_target\n config.GENERAL.RUN_DISTRIBUTE = args.run_distribute\n config.MODELARTS.IS_MODEL_ARTS = args.is_model_arts\n if config.GENERAL.RUN_DISTRIBUTE or config.MODELARTS.IS_MODEL_ARTS:\n device_id = int(os.getenv('DEVICE_ID'))\n context.set_context(mode=context.GRAPH_MODE,\n device_target=device_target,\n save_graphs=False,\n device_id=device_id)\n\n if config.GENERAL.RUN_DISTRIBUTE:\n init()\n rank = int(os.getenv('DEVICE_ID'))\n device_num = int(os.getenv('RANK_SIZE'))\n context.set_auto_parallel_context(device_num=device_num,\n 
parallel_mode=ParallelMode.DATA_PARALLEL,\n gradients_mean=True)\n else:\n rank = 0\n device_num = 1\n\n if config.MODELARTS.IS_MODEL_ARTS:\n mox.file.copy_parallel(src_url=args.data_url, dst_url=config.MODELARTS.CACHE_INPUT)\n\n dataset, _ = keypoint_dataset(config,\n rank=rank,\n group_size=device_num,\n train_mode=True,\n num_parallel_workers=config.TRAIN.NUM_PARALLEL_WORKERS,\n )\n net = GetPoseResNet(config)\n loss = JointsMSELoss(config.LOSS.USE_TARGET_WEIGHT)\n net_with_loss = PoseResNetWithLoss(net, loss)\n dataset_size = dataset.get_dataset_size()\n lr = Tensor(get_lr(config.TRAIN.BEGIN_EPOCH,\n config.TRAIN.END_EPOCH,\n dataset_size,\n lr_init=config.TRAIN.LR,\n factor=config.TRAIN.LR_FACTOR,\n epoch_number_to_drop=config.TRAIN.LR_STEP))\n opt = Adam(net.trainable_params(), learning_rate=lr)\n time_cb = TimeMonitor(data_size=dataset_size)\n loss_cb = LossMonitor()\n cb = [time_cb, loss_cb]\n if config.TRAIN.SAVE_CKPT:\n config_ck = CheckpointConfig(save_checkpoint_steps=dataset_size, keep_checkpoint_max=20)\n prefix = ''\n if config.GENERAL.RUN_DISTRIBUTE:\n prefix = 'multi_' + 'train_poseresnet_' + config.GENERAL.VERSION + '_' + os.getenv('DEVICE_ID')\n else:\n prefix = 'single_' + 'train_poseresnet_' + config.GENERAL.VERSION\n\n directory = ''\n if config.MODELARTS.IS_MODEL_ARTS:\n directory = config.MODELARTS.CACHE_OUTPUT + 'device_'+ os.getenv('DEVICE_ID')\n elif config.GENERAL.RUN_DISTRIBUTE:\n directory = config.TRAIN.CKPT_PATH + 'device_'+ os.getenv('DEVICE_ID')\n else:\n directory = config.TRAIN.CKPT_PATH + 'device'\n\n ckpoint_cb = ModelCheckpoint(prefix=prefix, directory=directory, config=config_ck)\n cb.append(ckpoint_cb)\n model = Model(net_with_loss, loss_fn=None, optimizer=opt, amp_level=\"O2\")\n epoch_size = config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH\n print(\"************ Start training now ************\")\n print('start training, epoch size = %d' % epoch_size)\n model.train(epoch_size, dataset, callbacks=cb)\n\n if config.MODELARTS.IS_MODEL_ARTS:\n mox.file.copy_parallel(src_url=config.MODELARTS.CACHE_OUTPUT, dst_url=args.train_url)\n\nif __name__ == '__main__':\n main()\n",
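Example of the piecewise-constant schedule `get_lr` builds: the learning rate is multiplied by `factor` at the first step of each epoch listed in `epoch_number_to_drop`:

lr = get_lr(begin_epoch=0, total_epochs=140, steps_per_epoch=100,
            lr_init=0.1, factor=0.1, epoch_number_to_drop=(90, 120))
assert len(lr) == 14000
assert abs(lr[0] - 0.1) < 1e-7
assert abs(lr[9000] - 0.01) < 1e-8    # dropped at epoch 90
assert abs(lr[12000] - 0.001) < 1e-9  # dropped again at epoch 120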
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport math\nimport os\n\nfrom shapely.geometry import Polygon\nimport numpy as np\nimport cv2\nfrom PIL import Image\n\nimport mindspore.dataset as de\nimport mindspore.dataset.vision.c_transforms as CV\nfrom src.distributed_sampler import DistributedSampler\n\n\ndef cal_distance(x1, y1, x2, y2):\n '''calculate the Euclidean distance'''\n return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n\ndef move_points(vertices, index1, index2, r, coef):\n '''move the two points to shrink edge\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n index1 : offset of point1\n index2 : offset of point2\n r : [r1, r2, r3, r4] in paper\n coef : shrink ratio in paper\n Output:\n vertices: vertices where one edge has been shinked\n '''\n index1 = index1 % 4\n index2 = index2 % 4\n x1_index = index1 * 2 + 0\n y1_index = index1 * 2 + 1\n x2_index = index2 * 2 + 0\n y2_index = index2 * 2 + 1\n\n r1 = r[index1]\n r2 = r[index2]\n length_x = vertices[x1_index] - vertices[x2_index]\n length_y = vertices[y1_index] - vertices[y2_index]\n length = cal_distance(\n vertices[x1_index],\n vertices[y1_index],\n vertices[x2_index],\n vertices[y2_index])\n if length > 1:\n ratio = (r1 * coef) / length\n vertices[x1_index] += ratio * (-length_x)\n vertices[y1_index] += ratio * (-length_y)\n ratio = (r2 * coef) / length\n vertices[x2_index] += ratio * length_x\n vertices[y2_index] += ratio * length_y\n return vertices\n\n\ndef shrink_poly(vertices, coef=0.3):\n '''shrink the text region\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n coef : shrink ratio in paper\n Output:\n v : vertices of shrunk text region <numpy.ndarray, (8,)>\n '''\n x1, y1, x2, y2, x3, y3, x4, y4 = vertices\n r1 = min(cal_distance(x1, y1, x2, y2), cal_distance(x1, y1, x4, y4))\n r2 = min(cal_distance(x2, y2, x1, y1), cal_distance(x2, y2, x3, y3))\n r3 = min(cal_distance(x3, y3, x2, y2), cal_distance(x3, y3, x4, y4))\n r4 = min(cal_distance(x4, y4, x1, y1), cal_distance(x4, y4, x3, y3))\n r = [r1, r2, r3, r4]\n\n # obtain offset to perform move_points() automatically\n if cal_distance(x1, y1, x2, y2) + cal_distance(x3, y3, x4, y4) > \\\n cal_distance(x2, y2, x3, y3) + cal_distance(x1, y1, x4, y4):\n offset = 0 # two longer edges are (x1y1-x2y2) & (x3y3-x4y4)\n else:\n offset = 1 # two longer edges are (x2y2-x3y3) & (x4y4-x1y1)\n\n v = vertices.copy()\n v = move_points(v, 0 + offset, 1 + offset, r, coef)\n v = move_points(v, 2 + offset, 3 + offset, r, coef)\n v = move_points(v, 1 + offset, 2 + offset, r, coef)\n v = move_points(v, 3 + offset, 4 + offset, r, coef)\n return v\n\n\ndef get_rotate_mat(theta):\n '''positive theta value means rotate clockwise'''\n return np.array([[math.cos(theta), -math.sin(theta)],\n [math.sin(theta), math.cos(theta)]])\n\n\ndef rotate_vertices(vertices, theta, anchor=None):\n '''rotate vertices around anchor\n 
Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n theta : angle in radian measure\n anchor : fixed position during rotation\n Output:\n rotated vertices <numpy.ndarray, (8,)>\n '''\n v = vertices.reshape((4, 2)).T\n if anchor is None:\n anchor = v[:, :1]\n rotate_mat = get_rotate_mat(theta)\n res = np.dot(rotate_mat, v - anchor)\n return (res + anchor).T.reshape(-1)\n\n\ndef get_boundary(vertices):\n '''get the tight boundary around given vertices\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n Output:\n the boundary\n '''\n x1, y1, x2, y2, x3, y3, x4, y4 = vertices\n x_min = min(x1, x2, x3, x4)\n x_max = max(x1, x2, x3, x4)\n y_min = min(y1, y2, y3, y4)\n y_max = max(y1, y2, y3, y4)\n return x_min, x_max, y_min, y_max\n\n\ndef cal_error(vertices):\n '''default orientation is x1y1 : left-top, x2y2 : right-top, x3y3 : right-bot, x4y4 : left-bot\n calculate the difference between the vertices orientation and default orientation\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n Output:\n err : difference measure\n '''\n x_min, x_max, y_min, y_max = get_boundary(vertices)\n x1, y1, x2, y2, x3, y3, x4, y4 = vertices\n err = cal_distance(x1, y1, x_min, y_min) + \\\n cal_distance(x2, y2, x_max, y_min) + \\\n cal_distance(x3, y3, x_max, y_max) + \\\n cal_distance(x4, y4, x_min, y_max)\n return err\n\n\ndef find_min_rect_angle(vertices):\n '''find the best angle to rotate poly and obtain min rectangle\n Input:\n vertices: vertices of text region <numpy.ndarray, (8,)>\n Output:\n the best angle <radian measure>\n '''\n angle_interval = 1\n angle_list = list(range(-90, 90, angle_interval))\n area_list = []\n for theta in angle_list:\n rotated = rotate_vertices(vertices, theta / 180 * math.pi)\n x1, y1, x2, y2, x3, y3, x4, y4 = rotated\n temp_area = (max(x1, x2, x3, x4) - min(x1, x2, x3, x4)) * \\\n (max(y1, y2, y3, y4) - min(y1, y2, y3, y4))\n area_list.append(temp_area)\n\n sorted_area_index = sorted(\n list(\n range(\n len(area_list))),\n key=lambda k: area_list[k])\n min_error = float('inf')\n best_index = -1\n rank_num = 10\n # find the best angle with correct orientation\n for index in sorted_area_index[:rank_num]:\n rotated = rotate_vertices(vertices, angle_list[index] / 180 * math.pi)\n temp_error = cal_error(rotated)\n if temp_error < min_error:\n min_error = temp_error\n best_index = index\n return angle_list[best_index] / 180 * math.pi\n\n\ndef is_cross_text(start_loc, length, vertices):\n '''check if the crop image crosses text regions\n Input:\n start_loc: left-top position\n length : length of crop image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n Output:\n True if crop image crosses text region\n '''\n if vertices.size == 0:\n return False\n start_w, start_h = start_loc\n a = np.array([start_w, start_h, start_w +\n length, start_h, start_w +\n length, start_h +\n length, start_w, start_h +\n length]).reshape((4, 2))\n p1 = Polygon(a).convex_hull\n for vertice in vertices:\n p2 = Polygon(vertice.reshape((4, 2))).convex_hull\n inter = p1.intersection(p2).area\n if 0.01 <= inter / p2.area <= 0.99:\n return True\n return False\n\n\ndef crop_img(img, vertices, labels, length):\n '''crop img patches to obtain batch and augment\n Input:\n img : PIL Image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n labels : 1->valid, 0->ignore, <numpy.ndarray, (n,)>\n length : length of cropped image region\n Output:\n region : cropped image region\n new_vertices: new vertices in cropped region\n '''\n h, w = 
img.height, img.width\n # confirm the shortest side of image >= length\n if h >= w and w < length:\n img = img.resize((length, int(h * length / w)), Image.BILINEAR)\n elif h < w and h < length:\n img = img.resize((int(w * length / h), length), Image.BILINEAR)\n ratio_w = img.width / w\n ratio_h = img.height / h\n assert (ratio_w >= 1 and ratio_h >= 1)\n\n new_vertices = np.zeros(vertices.shape)\n if vertices.size > 0:\n new_vertices[:, [0, 2, 4, 6]] = vertices[:, [0, 2, 4, 6]] * ratio_w\n new_vertices[:, [1, 3, 5, 7]] = vertices[:, [1, 3, 5, 7]] * ratio_h\n\n # find random position\n remain_h = img.height - length\n remain_w = img.width - length\n flag = True\n cnt = 0\n while flag and cnt < 1000:\n cnt += 1\n start_w = int(np.random.rand() * remain_w)\n start_h = int(np.random.rand() * remain_h)\n flag = is_cross_text([start_w, start_h], length,\n new_vertices[labels == 1, :])\n box = (start_w, start_h, start_w + length, start_h + length)\n region = img.crop(box)\n if new_vertices.size == 0:\n return region, new_vertices\n\n new_vertices[:, [0, 2, 4, 6]] -= start_w\n new_vertices[:, [1, 3, 5, 7]] -= start_h\n return region, new_vertices\n\n\ndef rotate_all_pixels(rotate_mat, anchor_x, anchor_y, length):\n '''get rotated locations of all pixels for next stages\n Input:\n rotate_mat: rotatation matrix\n anchor_x : fixed x position\n anchor_y : fixed y position\n length : length of image\n Output:\n rotated_x : rotated x positions <numpy.ndarray, (length,length)>\n rotated_y : rotated y positions <numpy.ndarray, (length,length)>\n '''\n x = np.arange(length)\n y = np.arange(length)\n x, y = np.meshgrid(x, y)\n x_lin = x.reshape((1, x.size))\n y_lin = y.reshape((1, x.size))\n coord_mat = np.concatenate((x_lin, y_lin), 0)\n rotated_coord = np.matmul(rotate_mat.astype(np.float16),\n (coord_mat - np.array([[anchor_x],\n [anchor_y]])).astype(np.float16)) + np.array([[anchor_x],\n [anchor_y]])\n rotated_x = rotated_coord[0, :].reshape(x.shape)\n rotated_y = rotated_coord[1, :].reshape(y.shape)\n return rotated_x, rotated_y\n\n\ndef adjust_height(img, vertices, ratio=0.2):\n '''adjust height of image to aug data\n Input:\n img : PIL Image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n ratio : height changes in [0.8, 1.2]\n Output:\n img : adjusted PIL Image\n new_vertices: adjusted vertices\n '''\n ratio_h = 1 + ratio * (np.random.rand() * 2 - 1)\n old_h = img.height\n new_h = int(np.around(old_h * ratio_h))\n img = img.resize((img.width, new_h), Image.BILINEAR)\n\n new_vertices = vertices.copy()\n if vertices.size > 0:\n new_vertices[:, [1, 3, 5, 7]] = vertices[:, [1, 3, 5, 7]] * (new_h / old_h)\n return img, new_vertices\n\n\ndef rotate_img(img, vertices, angle_range=10):\n '''rotate image [-10, 10] degree to aug data\n Input:\n img : PIL Image\n vertices : vertices of text regions <numpy.ndarray, (n,8)>\n angle_range : rotate range\n Output:\n img : rotated PIL Image\n new_vertices: rotated vertices\n '''\n center_x = (img.width - 1) / 2\n center_y = (img.height - 1) / 2\n angle = angle_range * (np.random.rand() * 2 - 1)\n img = img.rotate(angle, Image.BILINEAR)\n new_vertices = np.zeros(vertices.shape)\n for i, vertice in enumerate(vertices):\n new_vertices[i, :] = rotate_vertices(\n vertice, -angle / 180 * math.pi, np.array([[center_x], [center_y]]))\n return img, new_vertices\n\n\ndef get_score_geo(img, vertices, labels, scale, length):\n '''generate score gt and geometry gt\n Input:\n img : PIL Image\n vertices: vertices of text regions <numpy.ndarray, (n,8)>\n labels : 
1->valid, 0->ignore, <numpy.ndarray, (n,)>\n scale : feature map / image\n length : image length\n Output:\n score gt, geo gt, ignored\n '''\n score_map = np.zeros(\n (int(img.height * scale), int(img.width * scale), 1), np.float32)\n geo_map = np.zeros(\n (int(img.height * scale), int(img.width * scale), 5), np.float32)\n ignored_map = np.zeros(\n (int(img.height * scale), int(img.width * scale), 1), np.float32)\n\n index = np.arange(0, length, int(1 / scale))\n index_x, index_y = np.meshgrid(index, index)\n ignored_polys = []\n polys = []\n\n for i, vertice in enumerate(vertices):\n if labels[i] == 0:\n ignored_polys.append(np.around(scale * vertice.reshape((4, 2))).astype(np.int32))\n continue\n\n poly = np.around(scale * shrink_poly(vertice).reshape((4, 2))).astype(np.int32)\n polys.append(poly)\n temp_mask = np.zeros(score_map.shape[:-1], np.float32)\n cv2.fillPoly(temp_mask, [poly], 1)\n\n theta = find_min_rect_angle(vertice)\n rotate_mat = get_rotate_mat(theta)\n\n rotated_vertices = rotate_vertices(vertice, theta)\n x_min, x_max, y_min, y_max = get_boundary(rotated_vertices)\n rotated_x, rotated_y = rotate_all_pixels(rotate_mat, vertice[0], vertice[1], length)\n\n d1 = rotated_y - y_min\n d1[d1 < 0] = 0\n d2 = y_max - rotated_y\n d2[d2 < 0] = 0\n d3 = rotated_x - x_min\n d3[d3 < 0] = 0\n d4 = x_max - rotated_x\n d4[d4 < 0] = 0\n geo_map[:, :, 0] += d1[index_y, index_x] * temp_mask\n geo_map[:, :, 1] += d2[index_y, index_x] * temp_mask\n geo_map[:, :, 2] += d3[index_y, index_x] * temp_mask\n geo_map[:, :, 3] += d4[index_y, index_x] * temp_mask\n geo_map[:, :, 4] += theta * temp_mask\n\n cv2.fillPoly(ignored_map, ignored_polys, 1)\n cv2.fillPoly(score_map, polys, 1)\n return score_map, geo_map, ignored_map\n\n\ndef extract_vertices(lines):\n '''extract vertices info from txt lines\n Input:\n lines : list of string info\n Output:\n vertices: vertices of text regions <numpy.ndarray, (n,8)>\n labels : 1->valid, 0->ignore, <numpy.ndarray, (n,)>\n '''\n labels = []\n vertices = []\n for line in lines:\n vertices.append(list(map(int, line.rstrip('\\n').lstrip('\\ufeff').split(',')[:8])))\n label = 0 if '###' in line else 1\n labels.append(label)\n return np.array(vertices), np.array(labels)\n\n\nclass ICDAREASTDataset:\n def __init__(self, img_path, gt_path, scale=0.25, length=512):\n super(ICDAREASTDataset, self).__init__()\n self.img_files = [os.path.join(\n img_path,\n img_file) for img_file in sorted(os.listdir(img_path))]\n self.gt_files = [\n os.path.join(\n gt_path,\n gt_file) for gt_file in sorted(\n os.listdir(gt_path))]\n self.scale = scale\n self.length = length\n\n def __getitem__(self, index):\n with open(self.gt_files[index], 'r') as f:\n lines = f.readlines()\n vertices, labels = extract_vertices(lines)\n\n img = Image.open(self.img_files[index])\n img, vertices = adjust_height(img, vertices)\n img, vertices = rotate_img(img, vertices)\n img, vertices = crop_img(img, vertices, labels, self.length)\n score_map, geo_map, ignored_map = get_score_geo(\n img, vertices, labels, self.scale, self.length)\n score_map = score_map.transpose(2, 0, 1)\n ignored_map = ignored_map.transpose(2, 0, 1)\n geo_map = geo_map.transpose(2, 0, 1)\n if np.sum(score_map) < 1:\n score_map[0, 0, 0] = 1\n return img, score_map, geo_map, ignored_map\n\n def __len__(self):\n return len(self.img_files)\n\n\ndef create_east_dataset(\n img_root,\n txt_root,\n batch_size,\n device_num,\n rank,\n is_training=True):\n east_data = ICDAREASTDataset(img_path=img_root, gt_path=txt_root)\n distributed_sampler = 
DistributedSampler(\n len(east_data), device_num, 0 if device_num == 1 else rank, shuffle=True)\n\n trans_list = [CV.RandomColorAdjust(0.5, 0.5, 0.5, 0.25),\n CV.Rescale(1 / 255.0, 0),\n CV.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n CV.HWC2CHW()]\n if is_training:\n dataset_column_names = [\n \"image\",\n \"score_map\",\n \"geo_map\",\n \"training_mask\"]\n ds = de.GeneratorDataset(\n east_data,\n column_names=dataset_column_names,\n num_parallel_workers=32,\n sampler=distributed_sampler)\n ds = ds.map(\n operations=trans_list,\n input_columns=[\"image\"],\n num_parallel_workers=8,\n python_multiprocessing=True)\n ds = ds.batch(batch_size, num_parallel_workers=8, drop_remainder=True)\n\n return ds, len(east_data)\n",
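A usage sketch for `create_east_dataset` above (the ICDAR paths are placeholders); with the default crop length 512 and scale 0.25 the four columns should come out with the shapes noted in the comment:

ds, n_images = create_east_dataset(img_root='./ICDAR2015/train_img',
                                   txt_root='./ICDAR2015/train_gt',
                                   batch_size=8, device_num=1, rank=0)
for image, score_map, geo_map, training_mask in ds.create_tuple_iterator():
    # expected: image (8, 3, 512, 512); score/ignore maps (8, 1, 128, 128); geo map (8, 5, 128, 128)
    print(image.shape, score_map.shape, geo_map.shape, training_mask.shape)
    break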
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"EPP-MVSNet's validation process on BlendedMVS dataset\"\"\"\n\nimport os\nimport time\nfrom argparse import ArgumentParser\n\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\n\nimport mindspore.dataset as ds\nfrom mindspore import context\nfrom mindspore.ops import operations as P\n\nfrom src.eppmvsnet import EPPMVSNet\nfrom src.blendedmvs import BlendedMVSDataset\nfrom src.utils import save_pfm, AverageMeter\n\n\ndef get_opts():\n \"\"\"set options\"\"\"\n parser = ArgumentParser()\n parser.add_argument('--gpu_id', type=int, default=0, choices=[0, 1, 2, 3, 4, 5, 6, 7],\n help='which gpu used to inference')\n ## data\n parser.add_argument('--root_dir', type=str,\n default='/home/ubuntu/data/DTU/mvs_training/dtu/',\n help='root directory of dtu dataset')\n parser.add_argument('--dataset_name', type=str, default='blendedmvs',\n choices=['blendedmvs'],\n help='which dataset to train/val')\n parser.add_argument('--split', type=str, default=None,\n help='which split to evaluate')\n parser.add_argument('--scan', type=str, default=None, nargs='+',\n help='specify scan to evaluate (must be in the split)')\n # for depth prediction\n parser.add_argument('--n_views', type=int, default=5,\n help='number of views (including ref) to be used in testing')\n parser.add_argument('--depth_interval', type=float, default=128,\n help='depth interval unit in mm')\n parser.add_argument('--n_depths', nargs='+', type=int, default=[32, 16, 8],\n help='number of depths in each level')\n parser.add_argument('--interval_ratios', nargs='+', type=float, default=[4.0, 2.0, 1.0],\n help='depth interval ratio to multiply with --depth_interval in each level')\n parser.add_argument('--img_wh', nargs=\"+\", type=int, default=[1152, 864],\n help='resolution (img_w, img_h) of the image, must be multiples of 32')\n parser.add_argument('--ckpt_path', type=str, default='ckpts/exp2/_ckpt_epoch_10.ckpt',\n help='pretrained checkpoint path to load')\n parser.add_argument('--save_visual', default=False, action='store_true',\n help='save depth and proba visualization or not')\n parser.add_argument('--entropy_range', action='store_true', default=False,\n help='whether to use entropy range method')\n parser.add_argument('--conf', type=float, default=0.9,\n help='min confidence for pixel to be valid')\n parser.add_argument('--levels', type=int, default=3, choices=[3, 4, 5],\n help='number of FPN levels (fixed to be 3!)')\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = get_opts()\n context.set_context(mode=0, device_target='GPU', device_id=args.gpu_id, save_graphs=False,\n enable_graph_kernel=False)\n\n dataset = BlendedMVSDataset(args.root_dir, args.split, n_views=args.n_views, depth_interval=args.depth_interval,\n img_wh=tuple(args.img_wh), levels=args.levels, scan=args.scan)\n img_wh = args.img_wh\n scans = dataset.scans\n\n 
print(args.n_depths)\n print(args.interval_ratios)\n # Step 1. Create depth estimation and probability for each scan\n EPPMVSNet_eval = EPPMVSNet(n_depths=args.n_depths, interval_ratios=args.interval_ratios,\n entropy_range=args.entropy_range, height=args.img_wh[1], width=args.img_wh[0])\n EPPMVSNet_eval.set_train(False)\n\n depth_dir = f'results/{args.dataset_name}/{args.split}/depth'\n print('Creating depth and confidence predictions...')\n if args.scan:\n data_range = [i for i, x in enumerate(dataset.metas) if x[0] == args.scan]\n else:\n data_range = range(len(dataset))\n test_loader = ds.GeneratorDataset(dataset, column_names=[\"imgs\", \"proj_mats\", \"init_depth_min\", \"depth_interval\",\n \"scan\", \"vid\", \"depth_0\", \"mask_0\", \"fix_depth_interval\"],\n num_parallel_workers=1, shuffle=False)\n test_loader = test_loader.batch(batch_size=1)\n test_data_size = test_loader.get_dataset_size()\n print(\"train dataset length is:\", test_data_size)\n\n pbar = tqdm(enumerate(test_loader.create_tuple_iterator()), dynamic_ncols=True, total=test_data_size)\n\n metrics = ['stage3_l1_loss', 'stage3_less1_acc', 'stage3_less3_acc']\n avg_metrics = {t: AverageMeter() for t in metrics}\n\n forward_time_avg = AverageMeter()\n\n scan_list, vid_list = [], []\n\n depth_folder = f'{img_wh[0]}_{img_wh[1]}_{args.n_views - 1}'\n\n for i, sample in pbar:\n imgs, proj_mats, init_depth_min, depth_interval, scan, vid, depth_0, mask_0, fix_depth_interval = sample\n scan = scan[0].asnumpy()\n scan_str = \"\"\n for num in scan:\n scan_str += chr(num)\n scan = scan_str\n vid = vid[0].asnumpy()\n\n depth_file_dir = os.path.join(depth_dir, scan, depth_folder)\n if not os.path.exists(depth_file_dir):\n os.makedirs(depth_file_dir, exist_ok=True)\n\n begin = time.time()\n\n results = EPPMVSNet_eval(imgs, proj_mats, init_depth_min, depth_interval)\n\n forward_time = time.time() - begin\n if i != 0:\n forward_time_avg.update(forward_time)\n\n depth, proba = results\n depth = P.Squeeze()(depth).asnumpy()\n depth = np.nan_to_num(depth) # change nan to 0\n proba = P.Squeeze()(proba).asnumpy()\n proba = np.nan_to_num(proba) # change nan to 0\n\n save_pfm(os.path.join(depth_dir, f'{scan}/{depth_folder}/depth_{vid:04d}.pfm'), depth)\n save_pfm(os.path.join(depth_dir, f'{scan}/{depth_folder}/proba_{vid:04d}.pfm'), proba)\n\n # record l1 loss of each image\n scan_list.append(scan)\n vid_list.append(vid)\n\n pred_depth = depth\n gt = P.Squeeze()(depth_0).asnumpy()\n mask = P.Squeeze()(mask_0).asnumpy()\n\n abs_err = np.abs(pred_depth - gt)\n abs_err_scaled = abs_err / fix_depth_interval.asnumpy()\n\n l1 = abs_err_scaled[mask].mean()\n less1 = (abs_err_scaled[mask] < 1.).astype(np.float32).mean()\n less3 = (abs_err_scaled[mask] < 3.).astype(np.float32).mean()\n\n avg_metrics[f'stage3_l1_loss'].update(l1)\n avg_metrics[f'stage3_less1_acc'].update(less1)\n avg_metrics[f'stage3_less3_acc'].update(less3)\n\n if args.save_visual:\n mi = np.min(depth[depth > 0])\n ma = np.max(depth)\n depth = (depth - mi) / (ma - mi + 1e-8)\n depth = (255 * depth).astype(np.uint8)\n depth_img = cv2.applyColorMap(depth, cv2.COLORMAP_JET)\n cv2.imwrite(os.path.join(depth_dir, f'{scan}/{depth_folder}/depth_visual_{vid:04d}.jpg'), depth_img)\n cv2.imwrite(os.path.join(depth_dir, f'{scan}/{depth_folder}/proba_visual_{vid:04d}.jpg'),\n (255 * (proba > args.conf)).astype(np.uint8))\n print(f'step {i} time: {forward_time}s')\n print(f'mean forward time: {forward_time_avg.avg}')\n\n with open(f'results/{args.dataset_name}/{args.split}/metrics.txt', 'w') as 
f:\n        for name, meter in avg_metrics.items():\n            f.write(name + ':' + str(np.round(meter.avg, 4)) + '\\n')\n        f.write('mean forward time(s/pic):' + str(np.round(forward_time_avg.avg, 4)) + '\\n')\n    print('Done!')\n",
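The per-view metrics above reduce to an L1 error measured in units of the fixed depth interval plus two threshold accuracies. A stand-alone NumPy restatement of the same arithmetic (hypothetical helper name):

import numpy as np

def depth_metrics(pred, gt, mask, fix_depth_interval):
    # Absolute error scaled by the depth interval, restricted to valid pixels.
    err = np.abs(pred - gt) / fix_depth_interval
    l1 = err[mask].mean()
    less1 = (err[mask] < 1.).astype(np.float32).mean()
    less3 = (err[mask] < 3.).astype(np.float32).mean()
    return l1, less1, less3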
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n##############postprocess#################\n\"\"\"\nimport os\nimport numpy as np\nfrom mindspore.nn.metrics import Accuracy\nfrom model_utils.config import config\n\nif __name__ == '__main__':\n\n file_prefix = 'textcnn_bs' + str(config.batch_size) + '_'\n\n metric = Accuracy()\n metric.clear()\n label_list = np.load(config.label_dir, allow_pickle=True)\n\n for idx, label in enumerate(label_list):\n pred = np.fromfile(os.path.join(config.result_dir, file_prefix + str(idx) + '_0.bin'), np.float32)\n pred = pred.reshape(config.batch_size, int(pred.shape[0]/config.batch_size))\n metric.update(pred, label)\n accuracy = metric.eval()\n print(\"accuracy: \", accuracy)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"SSD dataset\"\"\"\n\nfrom __future__ import division\n\nimport os\nimport json\nimport xml.etree.ElementTree as et\nimport numpy as np\nimport cv2\n\nimport mindspore.dataset as de\nimport mindspore.dataset.vision.c_transforms as C2\nfrom mindspore.mindrecord import FileWriter\nfrom src.model_utils.config import config\nfrom .box_utils import jaccard_numpy, ssd_bboxes_encode\n\n\ndef _rand(a=0., b=1.):\n \"\"\"Generate random.\"\"\"\n return np.random.rand() * (b - a) + a\n\n\ndef get_imageId_from_fileName(filename):\n \"\"\"Get imageID from fileName\"\"\"\n try:\n filename = os.path.splitext(filename)[0]\n return int(filename)\n except:\n raise NotImplementedError(\n 'Filename %s is supposed to be an integer.' % (filename))\n\n\ndef random_sample_crop(image, boxes):\n \"\"\"Random Crop the image and boxes\"\"\"\n height, width, _ = image.shape\n min_iou = np.random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])\n\n if min_iou is None:\n return image, boxes\n\n # max trails (50)\n for _ in range(50):\n image_t = image\n\n w = _rand(0.3, 1.0) * width\n h = _rand(0.3, 1.0) * height\n\n # aspect ratio constraint b/t .5 & 2\n if h / w < 0.5 or h / w > 2:\n continue\n\n left = _rand() * (width - w)\n top = _rand() * (height - h)\n\n rect = np.array([int(top), int(left), int(top+h), int(left+w)])\n overlap = jaccard_numpy(boxes, rect)\n\n # dropout some boxes\n drop_mask = overlap > 0\n if not drop_mask.any():\n continue\n\n if overlap[drop_mask].min() < min_iou and overlap[drop_mask].max() > (min_iou + 0.2):\n continue\n\n image_t = image_t[rect[0]:rect[2], rect[1]:rect[3], :]\n\n centers = (boxes[:, :2] + boxes[:, 2:4]) / 2.0\n\n m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])\n m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])\n\n # mask in that both m1 and m2 are true\n mask = m1 * m2 * drop_mask\n\n # have any valid boxes? 
try again if not\n if not mask.any():\n continue\n\n # take only matching gt boxes\n boxes_t = boxes[mask, :].copy()\n\n boxes_t[:, :2] = np.maximum(boxes_t[:, :2], rect[:2])\n boxes_t[:, :2] -= rect[:2]\n boxes_t[:, 2:4] = np.minimum(boxes_t[:, 2:4], rect[2:4])\n boxes_t[:, 2:4] -= rect[:2]\n\n return image_t, boxes_t\n return image, boxes\n\n\ndef preprocess_fn(img_id, image, box, is_training):\n \"\"\"Preprocess function for dataset.\"\"\"\n def _infer_data(image, input_shape):\n img_h, img_w, _ = image.shape\n input_h, input_w = input_shape\n\n image = cv2.resize(image, (input_w, input_h))\n\n # When the channels of image is 1\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=-1)\n image = np.concatenate([image, image, image], axis=-1)\n\n return img_id, image, np.array((img_h, img_w), np.float32)\n\n def _data_aug(image, box, is_training, image_size=(300, 300)):\n \"\"\"Data augmentation function.\"\"\"\n ih, iw, _ = image.shape\n w, h = image_size\n\n if not is_training:\n return _infer_data(image, image_size)\n\n # Random crop\n box = box.astype(np.float32)\n image, box = random_sample_crop(image, box)\n ih, iw, _ = image.shape\n\n # Resize image\n image = cv2.resize(image, (w, h))\n\n # Flip image or not\n flip = _rand() < .5\n if flip:\n image = cv2.flip(image, 1, dst=None)\n\n # When the channels of image is 1\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=-1)\n image = np.concatenate([image, image, image], axis=-1)\n\n box[:, [0, 2]] = box[:, [0, 2]] / ih\n box[:, [1, 3]] = box[:, [1, 3]] / iw\n\n if flip:\n box[:, [1, 3]] = 1 - box[:, [3, 1]]\n\n box, label, num_match = ssd_bboxes_encode(box)\n return image, box, label, num_match\n return _data_aug(image, box, is_training, image_size=config.img_shape)\n\n\ndef create_voc_label(is_training):\n \"\"\"Get image path and annotation from VOC.\"\"\"\n voc_dir = config.voc_dir\n cls_map = {name: i for i, name in enumerate(config.coco_classes)}\n sub_dir = 'train' if is_training else 'eval'\n voc_dir = os.path.join(voc_dir, sub_dir)\n if not os.path.isdir(voc_dir):\n raise ValueError(f'Cannot find {sub_dir} dataset path.')\n\n image_dir = anno_dir = voc_dir\n if os.path.isdir(os.path.join(voc_dir, 'Images')):\n image_dir = os.path.join(voc_dir, 'Images')\n if os.path.isdir(os.path.join(voc_dir, 'Annotations')):\n anno_dir = os.path.join(voc_dir, 'Annotations')\n\n if not is_training:\n data_dir = config.voc_root\n json_file = os.path.join(\n data_dir, config.instances_set.format(sub_dir))\n file_dir = os.path.split(json_file)[0]\n if not os.path.isdir(file_dir):\n os.makedirs(file_dir)\n json_dict = {\"images\": [], \"type\": \"instances\", \"annotations\": [],\n \"categories\": []}\n bnd_id = 1\n\n image_files_dict = {}\n image_anno_dict = {}\n images = []\n for anno_file in os.listdir(anno_dir):\n print(anno_file)\n if not anno_file.endswith('xml'):\n continue\n tree = et.parse(os.path.join(anno_dir, anno_file))\n root_node = tree.getroot()\n file_name = root_node.find('filename').text\n img_id = get_imageId_from_fileName(file_name)\n image_path = os.path.join(image_dir, file_name)\n print(image_path)\n if not os.path.isfile(image_path):\n print(f'Cannot find image {file_name} according to annotations.')\n continue\n\n labels = []\n for obj in root_node.iter('object'):\n cls_name = obj.find('name').text\n if cls_name not in cls_map:\n print(f'Label \"{cls_name}\" not in \"{config.coco_classes}\"')\n continue\n bnd_box = obj.find('bndbox')\n x_min = int(bnd_box.find('xmin').text) - 1\n y_min = 
int(bnd_box.find('ymin').text) - 1\n x_max = int(bnd_box.find('xmax').text) - 1\n y_max = int(bnd_box.find('ymax').text) - 1\n labels.append([y_min, x_min, y_max, x_max, cls_map[cls_name]])\n\n if not is_training:\n o_width = abs(x_max - x_min)\n o_height = abs(y_max - y_min)\n ann = {'area': o_width * o_height, 'iscrowd': 0, 'image_id':\n img_id, 'bbox': [x_min, y_min, o_width, o_height],\n 'category_id': cls_map[cls_name], 'id': bnd_id,\n 'ignore': 0,\n 'segmentation': []}\n json_dict['annotations'].append(ann)\n bnd_id = bnd_id + 1\n\n if labels:\n images.append(img_id)\n image_files_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(labels)\n\n if not is_training:\n size = root_node.find(\"size\")\n width = int(size.find('width').text)\n height = int(size.find('height').text)\n image = {'file_name': file_name, 'height': height, 'width': width,\n 'id': img_id}\n json_dict['images'].append(image)\n\n if not is_training:\n for cls_name, cid in cls_map.items():\n cat = {'supercategory': 'none', 'id': cid, 'name': cls_name}\n json_dict['categories'].append(cat)\n json_fp = open(json_file, 'w')\n json_str = json.dumps(json_dict)\n json_fp.write(json_str)\n json_fp.close()\n\n return images, image_files_dict, image_anno_dict\n\n\ndef create_coco_label(is_training):\n \"\"\"Get image path and annotation from COCO.\"\"\"\n from pycocotools.coco import COCO\n\n coco_root = os.path.join(config.data_path, \"coco_ori\")\n data_type = config.val_data_type\n if is_training:\n data_type = config.train_data_type\n\n # Classes need to train or test.\n train_cls = config.coco_classes\n train_cls_dict = {}\n for i, cls in enumerate(train_cls):\n train_cls_dict[cls] = i\n\n anno_json = os.path.join(coco_root, config.instances_set.format(data_type))\n\n coco = COCO(anno_json)\n classs_dict = {}\n cat_ids = coco.loadCats(coco.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"id\"]] = cat[\"name\"]\n\n image_ids = coco.getImgIds()\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n\n for img_id in image_ids:\n image_info = coco.loadImgs(img_id)\n file_name = image_info[0][\"file_name\"]\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n image_path = os.path.join(coco_root, data_type, file_name)\n annos = []\n iscrowd = False\n for label in anno:\n bbox = label[\"bbox\"]\n class_name = classs_dict[label[\"category_id\"]]\n iscrowd = iscrowd or label[\"iscrowd\"]\n if class_name in train_cls:\n x_min, x_max = bbox[0], bbox[0] + bbox[2]\n y_min, y_max = bbox[1], bbox[1] + bbox[3]\n annos.append(\n list(map(round, [y_min, x_min, y_max, x_max])) + [train_cls_dict[class_name]])\n\n if not is_training and iscrowd:\n continue\n if len(annos) >= 1:\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(annos)\n\n return images, image_path_dict, image_anno_dict\n\n\ndef anno_parser(annos_str):\n \"\"\"Parse annotation from string to list.\"\"\"\n annos = []\n for anno_str in annos_str:\n anno = list(map(int, anno_str.strip().split(',')))\n annos.append(anno)\n return annos\n\n\ndef filter_valid_data(image_dir, anno_path):\n \"\"\"Filter valid image file, which both in image_dir and anno_path.\"\"\"\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n if not os.path.isdir(image_dir):\n raise RuntimeError(\"Path given is not valid.\")\n if not os.path.isfile(anno_path):\n raise RuntimeError(\"Annotation file is not valid.\")\n\n with open(anno_path, \"rb\") as f:\n lines = f.readlines()\n for img_id, 
line in enumerate(lines):\n line_str = line.decode(\"utf-8\").strip()\n line_split = str(line_str).split(' ')\n file_name = line_split[0]\n image_path = os.path.join(image_dir, file_name)\n if os.path.isfile(image_path):\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = anno_parser(line_split[1:])\n\n return images, image_path_dict, image_anno_dict\n\n\ndef voc_data_to_mindrecord(mindrecord_dir, is_training, prefix=\"ssd.mindrecord\", file_num=8):\n \"\"\"Create MindRecord file by image_dir and anno_path.\"\"\"\n mindrecord_path = os.path.join(mindrecord_dir, prefix)\n writer = FileWriter(mindrecord_path, file_num)\n images, image_path_dict, image_anno_dict = create_voc_label(is_training)\n\n ssd_json = {\n \"img_id\": {\"type\": \"int32\", \"shape\": [1]},\n \"image\": {\"type\": \"bytes\"},\n \"annotation\": {\"type\": \"int32\", \"shape\": [-1, 5]},\n }\n writer.add_schema(ssd_json, \"ssd_json\")\n\n for img_id in images:\n image_path = image_path_dict[img_id]\n with open(image_path, 'rb') as f:\n img = f.read()\n annos = np.array(image_anno_dict[img_id], dtype=np.int32)\n img_id = np.array([img_id], dtype=np.int32)\n row = {\"img_id\": img_id, \"image\": img, \"annotation\": annos}\n writer.write_raw_data([row])\n writer.commit()\n\n\ndef data_to_mindrecord_byte_image(dataset=\"coco\", is_training=True, prefix=\"ssd.mindrecord\", file_num=8):\n \"\"\"Create MindRecord file.\"\"\"\n mindrecord_dir = os.path.join(config.data_path, \"MindRecord_COCO\")\n mindrecord_path = os.path.join(mindrecord_dir, prefix)\n writer = FileWriter(mindrecord_path, file_num)\n if dataset == \"coco\":\n images, image_path_dict, image_anno_dict = create_coco_label(\n is_training)\n else:\n images, image_path_dict, image_anno_dict = filter_valid_data(\n config.image_dir, config.anno_path)\n\n ssd_json = {\n \"img_id\": {\"type\": \"int32\", \"shape\": [1]},\n \"image\": {\"type\": \"bytes\"},\n \"annotation\": {\"type\": \"int32\", \"shape\": [-1, 5]},\n }\n writer.add_schema(ssd_json, \"ssd_json\")\n\n for img_id in images:\n image_path = image_path_dict[img_id]\n with open(image_path, 'rb') as f:\n img = f.read()\n annos = np.array(image_anno_dict[img_id], dtype=np.int32)\n img_id = np.array([img_id], dtype=np.int32)\n row = {\"img_id\": img_id, \"image\": img, \"annotation\": annos}\n writer.write_raw_data([row])\n writer.commit()\n\n\ndef create_ssd_dataset(mindrecord_file, batch_size=32, repeat_num=10, device_num=1, rank=0,\n is_training=True, num_parallel_workers=4):\n \"\"\"Create SSD dataset with MindDataset.\"\"\"\n ds = de.MindDataset(mindrecord_file, columns_list=[\"img_id\", \"image\", \"annotation\"], num_shards=device_num,\n shard_id=rank, num_parallel_workers=num_parallel_workers, shuffle=is_training)\n decode = C2.Decode()\n ds = ds.map(input_columns=[\"image\"], operations=decode)\n change_swap_op = C2.HWC2CHW()\n normalize_op = C2.Normalize(\n mean=[0.485*255, 0.456*255, 0.406*255], std=[0.229*255, 0.224*255, 0.225*255])\n color_adjust_op = C2.RandomColorAdjust(\n brightness=0.4, contrast=0.4, saturation=0.4)\n compose_map_func = (lambda img_id, image, annotation: preprocess_fn(\n img_id, image, annotation, is_training))\n if is_training:\n output_columns = [\"image\", \"box\", \"label\", \"num_match\"]\n trans = [color_adjust_op, normalize_op, change_swap_op]\n else:\n output_columns = [\"img_id\", \"image\", \"image_shape\"]\n trans = [normalize_op, change_swap_op]\n ds = ds.map(input_columns=[\"img_id\", \"image\", \"annotation\"],\n 
output_columns=output_columns, column_order=output_columns,\n operations=compose_map_func, python_multiprocessing=is_training,\n num_parallel_workers=num_parallel_workers)\n ds = ds.map(input_columns=[\"image\"], operations=trans, python_multiprocessing=is_training,\n num_parallel_workers=num_parallel_workers)\n ds = ds.batch(batch_size, drop_remainder=True)\n ds = ds.repeat(repeat_num)\n return ds\n",
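Typical call order for the SSD helpers above: write the MindRecord shards once, then build the training pipeline from the first shard. A sketch, assuming the default `prefix="ssd.mindrecord"` and `file_num=8` naming:

import os

mindrecord_dir = os.path.join(config.data_path, "MindRecord_COCO")
mindrecord_file = os.path.join(mindrecord_dir, "ssd.mindrecord0")
if not os.path.exists(mindrecord_file):
    # writes ssd.mindrecord0 .. ssd.mindrecord7 from the COCO annotations
    data_to_mindrecord_byte_image(dataset="coco", is_training=True)
train_ds = create_ssd_dataset(mindrecord_file, batch_size=32, repeat_num=1,
                              device_num=1, rank=0, is_training=True)
print(train_ds.get_dataset_size())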
"# Copyright 2020 Huawei Technologies Co., Ltd\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport os\nimport math\nimport random\nimport numpy as np\nimport cv2\nfrom pycocotools.coco import COCO as ReadJson\nimport mindspore.dataset as de\nfrom src.model_utils.config import config, JointType\n\n\n\ncv2.setNumThreads(0)\n\nclass txtdataset():\n def __init__(self, train, imgpath, maskpath, insize, mode='train', n_samples=None):\n self.train = train\n self.mode = mode\n self.imgpath = imgpath\n self.maskpath = maskpath\n self.insize = insize\n self.maxtime = 0\n self.catIds = train.getCatIds(catNms=['person'])\n self.imgIds = sorted(train.getImgIds(catIds=self.catIds))\n if self.mode == 'train':\n self.clean_imgIds()\n if self.mode in ['val', 'eval'] and n_samples is not None:\n self.imgIds = random.sample(self.imgIds, n_samples)\n print('{} images: {}'.format(mode, len(self)))\n\n\n def __len__(self):\n return len(self.imgIds)\n\n def clean_imgIds(self):\n print(\"cleaning imgids\")\n\n for img_id in self.imgIds.copy():\n annotations = None\n anno_ids = self.train.getAnnIds(imgIds=[img_id], iscrowd=None)\n\n # annotation for that image\n if anno_ids:\n annotations_for_img = self.train.loadAnns(anno_ids)\n\n person_cnt = 0\n valid_annotations_for_img = []\n for annotation in annotations_for_img:\n # if too few keypoints or too small\n if annotation['num_keypoints'] >= config.min_keypoints and \\\n annotation['area'] > config.min_area:\n person_cnt += 1\n valid_annotations_for_img.append(annotation)\n\n # if person annotation\n if person_cnt > 0:\n annotations = valid_annotations_for_img\n if annotations is None:\n self.imgIds.remove(img_id)\n\n def overlay_paf(self, img, paf):\n hue = ((np.arctan2(paf[1], paf[0]) / np.pi) / -2 + 0.5)\n saturation = np.sqrt(paf[0] ** 2 + paf[1] ** 2)\n saturation[saturation > 1.0] = 1.0\n value = saturation.copy()\n hsv_paf = np.vstack((hue[np.newaxis], saturation[np.newaxis], value[np.newaxis])).transpose(1, 2, 0)\n rgb_paf = cv2.cvtColor((hsv_paf * 255).astype(np.uint8), cv2.COLOR_HSV2BGR)\n img = cv2.addWeighted(img, 0.6, rgb_paf, 0.4, 0)\n return img\n\n def overlay_pafs(self, img, pafs):\n mix_paf = np.zeros((2,) + img.shape[:-1])\n paf_flags = np.zeros(mix_paf.shape) # for constant paf\n\n for paf in pafs.reshape((int(pafs.shape[0]/2), 2,) + pafs.shape[1:]):\n paf_flags = paf != 0\n paf_flags += np.broadcast_to(paf_flags[0] | paf_flags[1], paf.shape)\n mix_paf += paf\n\n mix_paf[paf_flags > 0] /= paf_flags[paf_flags > 0]\n img = self.overlay_paf(img, mix_paf)\n return img\n\n def overlay_heatmap(self, img, heatmap):\n rgb_heatmap = cv2.applyColorMap((heatmap * 255).astype(np.uint8), cv2.COLORMAP_JET)\n img = cv2.addWeighted(img, 0.6, rgb_heatmap, 0.4, 0)\n return img\n\n def overlay_ignore_mask(self, img, ignore_mask):\n img = img * np.repeat((ignore_mask == 0).astype(np.uint8)[:, :, None], 3, axis=2)\n return img\n\n # -------------------- augment code 
--------------------------------\n def get_pose_bboxes(self, poses):\n pose_bboxes = []\n for pose in poses:\n x1 = pose[pose[:, 2] > 0][:, 0].min()\n y1 = pose[pose[:, 2] > 0][:, 1].min()\n x2 = pose[pose[:, 2] > 0][:, 0].max()\n y2 = pose[pose[:, 2] > 0][:, 1].max()\n pose_bboxes.append([x1, y1, x2, y2])\n pose_bboxes = np.array(pose_bboxes)\n return pose_bboxes\n\n def resize_data(self, img, ignore_mask, poses, shape):\n \"\"\"resize img, mask and annotations\"\"\"\n img_h, img_w, _ = img.shape\n\n resized_img = cv2.resize(img, shape)\n ignore_mask = cv2.resize(ignore_mask.astype(np.uint8), shape).astype('bool')\n poses[:, :, :2] = (poses[:, :, :2] * np.array(shape) / np.array((img_w, img_h)))\n return resized_img, ignore_mask, poses\n\n def random_resize_img(self, img, ignore_mask, poses):\n h, w, _ = img.shape\n joint_bboxes = self.get_pose_bboxes(poses)\n bbox_sizes = ((joint_bboxes[:, 2:] - joint_bboxes[:, :2] + 1) ** 2).sum(axis=1) ** 0.5\n\n min_scale = config.min_box_size / bbox_sizes.min()\n max_scale = config.max_box_size / bbox_sizes.max()\n\n min_scale = min(max(min_scale, config.min_scale), 1)\n max_scale = min(max(max_scale, 1), config.max_scale)\n\n scale = float((max_scale - min_scale) * random.random() + min_scale)\n shape = (round(w * scale), round(h * scale))\n\n resized_img, resized_mask, resized_poses = self.resize_data(img, ignore_mask, poses, shape)\n return resized_img, resized_mask, resized_poses\n\n def random_rotate_img(self, img, mask, poses):\n h, w, _ = img.shape\n degree = np.random.randn() / 3 * config.max_rotate_degree\n rad = degree * math.pi / 180\n center = (w / 2, h / 2)\n R = cv2.getRotationMatrix2D(center, degree, 1)\n bbox = (w * abs(math.cos(rad)) + h * abs(math.sin(rad)), w * abs(math.sin(rad)) + h * abs(math.cos(rad)))\n R[0, 2] += bbox[0] / 2 - center[0]\n R[1, 2] += bbox[1] / 2 - center[1]\n rotate_img = cv2.warpAffine(img, R, (int(bbox[0]+0.5), int(bbox[1]+0.5)), flags=cv2.INTER_CUBIC,\n borderMode=cv2.BORDER_CONSTANT, borderValue=[127.5, 127.5, 127.5])\n rotate_mask = cv2.warpAffine(mask.astype('uint8')*255, R, (int(bbox[0]+0.5), int(bbox[1]+0.5))) > 0\n\n tmp_poses = np.ones_like(poses)\n tmp_poses[:, :, :2] = poses[:, :, :2].copy()\n tmp_rotate_poses = np.dot(tmp_poses, R.T) # apply rotation matrix to the poses\n rotate_poses = poses.copy() # to keep visibility flag\n rotate_poses[:, :, :2] = tmp_rotate_poses\n return rotate_img, rotate_mask, rotate_poses\n\n def random_crop_img(self, img, ignore_mask, poses):\n h, w, _ = img.shape\n insize = self.insize\n joint_bboxes = self.get_pose_bboxes(poses)\n bbox = random.choice(joint_bboxes) # select a bbox randomly\n bbox_center = bbox[:2] + (bbox[2:] - bbox[:2]) / 2\n\n r_xy = np.random.rand(2)\n perturb = ((r_xy - 0.5) * 2 * config.center_perterb_max)\n center = (bbox_center + perturb + 0.5).astype('i')\n\n crop_img = np.zeros((insize, insize, 3), 'uint8') + 127.5\n crop_mask = np.zeros((insize, insize), 'bool')\n\n offset = (center - (insize - 1) / 2 + 0.5).astype('i')\n offset_ = (center + (insize - 1) / 2 - (w - 1, h - 1) + 0.5).astype('i')\n\n x1, y1 = (center - (insize-1)/2 + 0.5).astype('i')\n x2, y2 = (center + (insize-1)/2 + 0.5).astype('i')\n\n x1 = max(x1, 0)\n y1 = max(y1, 0)\n x2 = min(x2, w-1)\n y2 = min(y2, h-1)\n\n x_from = -offset[0] if offset[0] < 0 else 0\n y_from = -offset[1] if offset[1] < 0 else 0\n x_to = insize - offset_[0] - 1 if offset_[0] >= 0 else insize - 1\n y_to = insize - offset_[1] - 1 if offset_[1] >= 0 else insize - 1\n\n crop_img[y_from:y_to+1, x_from:x_to+1] 
= img[y1:y2+1, x1:x2+1].copy()\n crop_mask[y_from:y_to+1, x_from:x_to+1] = ignore_mask[y1:y2+1, x1:x2+1].copy()\n\n poses[:, :, :2] -= offset\n return crop_img.astype('uint8'), crop_mask, poses\n\n def distort_color(self, img):\n img_max = np.broadcast_to(np.array(255, dtype=np.uint8), img.shape[:-1])\n img_min = np.zeros(img.shape[:-1], dtype=np.uint8)\n\n hsv_img = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2HSV).astype(np.int32)\n hsv_img[:, :, 0] = np.maximum(np.minimum(hsv_img[:, :, 0] - 10 + np.random.randint(20 + 1), img_max), img_min) # hue\n hsv_img[:, :, 1] = np.maximum(np.minimum(hsv_img[:, :, 1] - 40 + np.random.randint(80 + 1), img_max), img_min) # saturation\n hsv_img[:, :, 2] = np.maximum(np.minimum(hsv_img[:, :, 2] - 30 + np.random.randint(60 + 1), img_max), img_min) # value\n hsv_img = hsv_img.astype(np.uint8)\n\n distorted_img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR)\n return distorted_img\n\n def flip_img(self, img, mask, poses):\n flipped_img = cv2.flip(img, 1)\n flipped_mask = cv2.flip(mask.astype(np.uint8), 1).astype('bool')\n poses[:, :, 0] = img.shape[1] - 1 - poses[:, :, 0]\n\n def swap_joints(poses, joint_type_, joint_type_2):\n tmp = poses[:, joint_type_].copy()\n poses[:, joint_type_] = poses[:, joint_type_2]\n poses[:, joint_type_2] = tmp\n\n swap_joints(poses, JointType.LeftEye, JointType.RightEye)\n swap_joints(poses, JointType.LeftEar, JointType.RightEar)\n swap_joints(poses, JointType.LeftShoulder, JointType.RightShoulder)\n swap_joints(poses, JointType.LeftElbow, JointType.RightElbow)\n swap_joints(poses, JointType.LeftHand, JointType.RightHand)\n swap_joints(poses, JointType.LeftWaist, JointType.RightWaist)\n swap_joints(poses, JointType.LeftKnee, JointType.RightKnee)\n swap_joints(poses, JointType.LeftFoot, JointType.RightFoot)\n return flipped_img, flipped_mask, poses\n\n def augment_data(self, img, ignore_mask, poses):\n aug_img = img.copy()\n aug_img, ignore_mask, poses = self.random_resize_img(aug_img, ignore_mask, poses)\n aug_img, ignore_mask, poses = self.random_rotate_img(aug_img, ignore_mask, poses)\n aug_img, ignore_mask, poses = self.random_crop_img(aug_img, ignore_mask, poses)\n if np.random.randint(2):\n aug_img = self.distort_color(aug_img)\n if np.random.randint(2):\n aug_img, ignore_mask, poses = self.flip_img(aug_img, ignore_mask, poses)\n\n return aug_img, ignore_mask, poses\n # ------------------------------- end -----------------------------------\n\n\n # ------------------------------ Heatmap ------------------------------------\n # return shape: (height, width)\n def generate_gaussian_heatmap(self, shape, joint, sigma):\n x, y = joint\n grid_x = np.tile(np.arange(shape[1]), (shape[0], 1))\n grid_y = np.tile(np.arange(shape[0]), (shape[1], 1)).transpose()\n grid_distance = (grid_x - x) ** 2 + (grid_y - y) ** 2\n gaussian_heatmap = np.exp(-0.5 * grid_distance / sigma**2)\n return gaussian_heatmap\n\n def generate_heatmaps(self, img, poses, heatmap_sigma):\n heatmaps = np.zeros((0,) + img.shape[:-1])\n sum_heatmap = np.zeros(img.shape[:-1])\n for joint_index in range(len(JointType)):\n heatmap = np.zeros(img.shape[:-1])\n for pose in poses:\n if pose[joint_index, 2] > 0:\n jointmap = self.generate_gaussian_heatmap(img.shape[:-1], pose[joint_index][:2], heatmap_sigma)\n heatmap[jointmap > heatmap] = jointmap[jointmap > heatmap]\n sum_heatmap[jointmap > sum_heatmap] = jointmap[jointmap > sum_heatmap]\n heatmaps = np.vstack((heatmaps, heatmap.reshape((1,) + heatmap.shape)))\n bg_heatmap = 1 - sum_heatmap # background channel\n heatmaps = 
np.vstack((heatmaps, bg_heatmap[None]))\n return heatmaps.astype('f')\n\n def generate_gaussian_heatmap_fast(self, shape, joint, sigma):\n x, y = joint\n grid_x = np.tile(np.arange(shape[1]), (shape[0], 1))\n grid_y = np.tile(np.arange(shape[0]), (shape[1], 1)).transpose()\n grid_x = grid_x + 0.4375\n grid_y = grid_y + 0.4375\n grid_distance = (grid_x - x) ** 2 + (grid_y - y) ** 2\n gaussian_heatmap = np.exp(-0.5 * grid_distance / sigma**2)\n return gaussian_heatmap\n\n def generate_heatmaps_fast(self, img, poses, heatmap_sigma):\n resize_shape = (img.shape[0] // 8, img.shape[1] // 8)\n heatmaps = np.zeros((0,) + resize_shape)\n sum_heatmap = np.zeros(resize_shape)\n for joint_index in range(len(JointType)):\n heatmap = np.zeros(resize_shape)\n for pose in poses:\n if pose[joint_index, 2] > 0:\n jointmap = self.generate_gaussian_heatmap_fast(resize_shape, pose[joint_index][:2]/8,\n heatmap_sigma/8)\n index_1 = jointmap > heatmap\n heatmap[index_1] = jointmap[index_1]\n index_2 = jointmap > sum_heatmap\n sum_heatmap[index_2] = jointmap[index_2]\n heatmaps = np.vstack((heatmaps, heatmap.reshape((1,) + heatmap.shape)))\n\n bg_heatmap = 1 - sum_heatmap # background channel\n heatmaps = np.vstack((heatmaps, bg_heatmap[None]))\n return heatmaps.astype('f')\n # ------------------------------ end ------------------------------------\n\n # ------------------------------ PAF ------------------------------------\n # return shape: (2, height, width)\n def generate_constant_paf(self, shape, joint_from, joint_to, paf_width):\n if np.array_equal(joint_from, joint_to): # same joint\n return np.zeros((2,) + shape[:-1])\n\n joint_distance = np.linalg.norm(joint_to - joint_from)\n unit_vector = (joint_to - joint_from) / joint_distance\n rad = np.pi / 2\n # [[0, 1], [-1, 0]]\n rot_matrix = np.array([[np.cos(rad), np.sin(rad)], [-np.sin(rad), np.cos(rad)]])\n # [[u_y], [-u_x]]\n vertical_unit_vector = np.dot(rot_matrix, unit_vector)\n grid_x = np.tile(np.arange(shape[1]), (shape[0], 1))\n grid_y = np.tile(np.arange(shape[0]), (shape[1], 1)).transpose()\n horizontal_inner_product = unit_vector[0] * (grid_x - joint_from[0]) + unit_vector[1] * (grid_y - joint_from[1])\n horizontal_paf_flag = (horizontal_inner_product >= 0) & (horizontal_inner_product <= joint_distance)\n vertical_inner_product = vertical_unit_vector[0] * (grid_x - joint_from[0]) + vertical_unit_vector[1] *\\\n (grid_y - joint_from[1])\n vertical_paf_flag = np.abs(vertical_inner_product) <= paf_width # paf_width : 8\n paf_flag = horizontal_paf_flag & vertical_paf_flag\n constant_paf = np.stack((paf_flag, paf_flag)) *\\\n np.broadcast_to(unit_vector, shape[:-1] + (2,)).transpose(2, 0, 1)\n\n return constant_paf\n\n def generate_pafs(self, img, poses, paf_sigma):\n pafs = np.zeros((0,) + img.shape[:-1])\n\n for limb in config.limbs_point:\n paf = np.zeros((2,) + img.shape[:-1])\n paf_flags = np.zeros(paf.shape) # for constant paf\n\n for pose in poses:\n joint_from, joint_to = pose[limb]\n if joint_from[2] > 0 and joint_to[2] > 0:\n limb_paf = self.generate_constant_paf(img.shape, joint_from[:2], joint_to[:2], paf_sigma) # [2, 368, 368]\n limb_paf_flags = limb_paf != 0\n paf_flags += np.broadcast_to(limb_paf_flags[0] | limb_paf_flags[1], limb_paf.shape)\n\n paf += limb_paf\n\n paf[paf_flags > 0] /= paf_flags[paf_flags > 0]\n pafs = np.vstack((pafs, paf))\n return pafs.astype('f')\n\n def generate_constant_paf_fast(self, shape, joint_from, joint_to, paf_width):\n if np.array_equal(joint_from, joint_to): # same joint\n return np.zeros((2,) + 
shape[:-1])\n\n joint_distance = np.linalg.norm(joint_to - joint_from)\n unit_vector = (joint_to - joint_from) / joint_distance\n rad = np.pi / 2\n # [[0, 1], [-1, 0]]\n rot_matrix = np.array([[np.cos(rad), np.sin(rad)], [-np.sin(rad), np.cos(rad)]])\n # [[u_y], [-u_x]]\n vertical_unit_vector = np.dot(rot_matrix, unit_vector)\n grid_x = np.tile(np.arange(shape[1]), (shape[0], 1))\n grid_y = np.tile(np.arange(shape[0]), (shape[1], 1)).transpose()\n grid_x = grid_x + 0.4375\n grid_y = grid_y + 0.4375\n horizontal_inner_product = unit_vector[0] * (grid_x - joint_from[0]) + unit_vector[1] * (grid_y - joint_from[1])\n horizontal_paf_flag = (horizontal_inner_product >= 0) & (horizontal_inner_product <= joint_distance)\n vertical_inner_product = vertical_unit_vector[0] * (grid_x - joint_from[0]) + vertical_unit_vector[1] *\\\n (grid_y - joint_from[1])\n vertical_paf_flag = np.abs(vertical_inner_product) <= paf_width # paf_width : 8/8 = 1\n paf_flag = horizontal_paf_flag & vertical_paf_flag\n constant_paf = np.stack((paf_flag, paf_flag)) *\\\n np.broadcast_to(unit_vector, shape[:-1] + (2,)).transpose(2, 0, 1)\n\n return constant_paf\n\n def generate_pafs_fast(self, img, poses, paf_sigma):\n resize_shape = (img.shape[0]//8, img.shape[1]//8, 3)\n pafs = np.zeros((0,) + resize_shape[:-1])\n\n for limb in config.limbs_point:\n paf = np.zeros((2,) + resize_shape[:-1])\n paf_flags = np.zeros(paf.shape) # for constant paf\n\n for pose in poses:\n joint_from, joint_to = pose[limb]\n if joint_from[2] > 0 and joint_to[2] > 0:\n limb_paf = self.generate_constant_paf_fast(resize_shape, joint_from[:2]/8, joint_to[:2]/8, paf_sigma/8) # [2, 368, 368]\n limb_paf_flags = limb_paf != 0\n paf_flags += np.broadcast_to(limb_paf_flags[0] | limb_paf_flags[1], limb_paf.shape)\n\n paf += limb_paf\n\n index_1 = paf_flags > 0\n paf[index_1] /= paf_flags[index_1]\n pafs = np.vstack((pafs, paf))\n return pafs.astype('f')\n # ------------------------------ end ------------------------------------\n\n def get_img_annotation(self, ind=None, img_id=None):\n annotations = None\n\n if ind is not None:\n img_id = self.imgIds[ind]\n anno_ids = self.train.getAnnIds(imgIds=[img_id], iscrowd=None)\n\n # annotation for that image\n if anno_ids:\n annotations_for_img = self.train.loadAnns(anno_ids)\n\n person_cnt = 0\n valid_annotations_for_img = []\n for annotation in annotations_for_img:\n # if too few keypoints or too small\n if annotation['num_keypoints'] >= config.min_keypoints and annotation['area'] > config.min_area:\n person_cnt += 1\n valid_annotations_for_img.append(annotation)\n\n # if person annotation\n if person_cnt > 0:\n annotations = valid_annotations_for_img\n\n img_path = os.path.join(self.imgpath, self.train.loadImgs([img_id])[0]['file_name'])\n mask_path = os.path.join(self.maskpath, '{:012d}.png'.format(img_id))\n img = cv2.imread(img_path)\n ignore_mask = cv2.imread(mask_path, 0)\n if ignore_mask is None:\n ignore_mask = np.zeros(img.shape[:2], np.float32)\n else:\n ignore_mask[ignore_mask == 255] = 1\n\n if self.mode == 'eval':\n return img, img_id, annotations_for_img, ignore_mask\n\n return img, img_id, annotations, ignore_mask.astype('f')\n\n def parse_annotation(self, annotations):\n poses = np.zeros((0, len(JointType), 3), dtype=np.int32)\n\n for ann in annotations:\n ann_pose = np.array(ann['keypoints']).reshape(-1, 3)\n pose = np.zeros((1, len(JointType), 3), dtype=np.int32)\n\n # convert poses position\n for i, joint_index in enumerate(config.joint_indices):\n pose[0][joint_index] = ann_pose[i]\n\n # 
compute neck position\n if pose[0][JointType.LeftShoulder][2] > 0 and pose[0][JointType.RightShoulder][2] > 0:\n pose[0][JointType.Neck][0] = int((pose[0][JointType.LeftShoulder][0] +\n pose[0][JointType.RightShoulder][0]) / 2)\n pose[0][JointType.Neck][1] = int((pose[0][JointType.LeftShoulder][1] +\n pose[0][JointType.RightShoulder][1]) / 2)\n pose[0][JointType.Neck][2] = 2\n\n poses = np.vstack((poses, pose))\n return poses\n\n def resize_output(self, input_np, map_h=46, map_w=46):\n if len(input_np.shape) == 3:\n output = np.zeros((input_np.shape[0], map_h, map_w))\n for i in range(input_np.shape[0]):\n output[i] = cv2.resize(input_np[i], (map_w, map_h))\n return output.astype('f')\n\n input_np = input_np.astype('f')\n output = cv2.resize(input_np, (map_h, map_w))\n return output\n\n def generate_labels(self, img, poses, ignore_mask):\n img, ignore_mask, poses = self.augment_data(img, ignore_mask, poses)\n resized_img, ignore_mask, resized_poses = self.resize_data(img, ignore_mask, poses,\n shape=(self.insize, self.insize))\n\n resized_heatmaps = self.generate_heatmaps_fast(resized_img, resized_poses, config.heatmap_sigma)\n\n resized_pafs = self.generate_pafs_fast(resized_img, resized_poses, config.paf_sigma)\n\n ignore_mask = cv2.morphologyEx(ignore_mask.astype('uint8'), cv2.MORPH_DILATE, np.ones((16, 16))).astype('bool')\n resized_ignore_mask = self.resize_output(ignore_mask)\n\n\n return resized_img, resized_pafs, resized_heatmaps, resized_ignore_mask\n\n def preprocess(self, img):\n x_data = img.astype('f')\n x_data /= 255\n x_data -= 0.5\n x_data = x_data.transpose(2, 0, 1)\n return x_data\n\n def __getitem__(self, i):\n img, img_id, annotations, ignore_mask = self.get_img_annotation(ind=i)\n\n if self.mode in ['eval', 'val']:\n # don't need to make heatmaps/pafs\n return img, np.array([img_id])\n\n # if no annotations are available\n while annotations is None:\n print(\"none annotations\", img_id)\n img_id = self.imgIds[np.random.randint(len(self))]\n img, img_id, annotations, ignore_mask = self.get_img_annotation(img_id=img_id)\n\n poses = self.parse_annotation(annotations)\n\n # TEST\n # return img, poses, ignore_mask\n\n resized_img, pafs, heatmaps, ignore_mask = self.generate_labels(img, poses, ignore_mask)\n resized_img = self.preprocess(resized_img)\n ignore_mask = 1. 
- ignore_mask\n\n        # # TEST\n        # print(\"Shape: \", resized_img.dtype, \" \", pafs.dtype, \" \", heatmaps.dtype, \" \", ignore_mask.dtype)\n\n        return resized_img, pafs, heatmaps, ignore_mask\n\n\nclass DistributedSampler():\n    def __init__(self, dataset, rank, group_size, shuffle=True, seed=0):\n        self.dataset = dataset\n        self.rank = rank\n        self.group_size = group_size\n        self.dataset_len = len(self.dataset)\n        self.num_samplers = int(math.ceil(self.dataset_len * 1.0 / self.group_size))\n        self.total_size = self.num_samplers * self.group_size\n        self.shuffle = shuffle\n        self.seed = seed\n\n    def __iter__(self):\n        if self.shuffle:\n            self.seed = (self.seed + 1) & 0xffffffff\n            np.random.seed(self.seed)\n            indices = np.random.permutation(self.dataset_len).tolist()\n        else:\n            indices = list(range(self.dataset_len))\n        indices += indices[:(self.total_size - len(indices))]\n        indices = indices[self.rank::self.group_size]\n        return iter(indices)\n\n    def __len__(self):\n        return self.num_samplers\n\n\ndef valdata(jsonpath, imgpath, rank, group_size, mode='val', maskpath=''):\n    #cv2.setNumThreads(0)\n    val = ReadJson(jsonpath)\n    dataset = txtdataset(val, imgpath, maskpath, config.insize, mode=mode)\n    sampler = DistributedSampler(dataset, rank, group_size)\n    ds = de.GeneratorDataset(dataset, ['img', 'img_id'], num_parallel_workers=8, sampler=sampler)\n    return ds\n\n\ndef create_dataset(jsonpath, imgpath, maskpath, batch_size, rank, group_size, mode='train', repeat_num=1, shuffle=True,\n                   multiprocessing=True, num_worker=20):\n\n    train = ReadJson(jsonpath)\n    dataset = txtdataset(train, imgpath, maskpath, config.insize, mode=mode)\n    if group_size == 1:\n        de_dataset = de.GeneratorDataset(dataset, [\"image\", \"pafs\", \"heatmaps\", \"ignore_mask\"],\n                                         shuffle=shuffle,\n                                         num_parallel_workers=num_worker,\n                                         python_multiprocessing=multiprocessing)\n    else:\n        de_dataset = de.GeneratorDataset(dataset, [\"image\", \"pafs\", \"heatmaps\", \"ignore_mask\"],\n                                         shuffle=shuffle,\n                                         num_parallel_workers=num_worker,\n                                         python_multiprocessing=multiprocessing,\n                                         num_shards=group_size,\n                                         shard_id=rank)\n\n    de_dataset = de_dataset.batch(batch_size=batch_size, drop_remainder=True)\n    de_dataset = de_dataset.repeat(repeat_num)\n\n    return de_dataset\n",
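A quick sanity check of the sampler's shard logic above (the `shuffle=False` branch now ranges over `self.dataset_len` rather than `len(self.dataset_len)`, which raised a TypeError): each rank receives a strided slice of the padded index list, and the ranks together cover every sample. Toy sizes, deterministic order:

sampler_0 = DistributedSampler(list(range(10)), rank=0, group_size=4, shuffle=False)
sampler_1 = DistributedSampler(list(range(10)), rank=1, group_size=4, shuffle=False)
# 10 samples padded to total_size 12, then dealt out round-robin across 4 ranks.
print(list(iter(sampler_0)))  # [0, 4, 8]
print(list(iter(sampler_1)))  # [1, 5, 9]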
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"\nA function that returns a dataset for classification.\n\"\"\"\nimport random\nimport os\n\nfrom PIL import Image, ImageFile\nfrom mindspore import dtype as mstype\nimport mindspore.dataset as de\nimport mindspore.dataset.vision.c_transforms as vision_C\nimport mindspore.dataset.transforms.c_transforms as normal_C\nfrom src.datasets.sampler import DistributedSampler\nfrom src.model_utils.matlab_cp2tform import get_similarity_transform_for_cv2\nimport cv2\nimport numpy as np\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\ndef alignment(src_img, src_pts):\n of = 2\n ref_pts = [[30.2946+of, 51.6963+of], [65.5318+of, 51.5014+of],\n [48.0252+of, 71.7366+of], [33.5493+of, 92.3655+of], [62.7299+of, 92.2041+of]]\n crop_size = (96+of*2, 112+of*2)\n\n s = np.array(src_pts).astype(np.float32)\n r = np.array(ref_pts).astype(np.float32)\n\n tfm = get_similarity_transform_for_cv2(s, r)\n face_img = cv2.warpAffine(src_img, tfm, crop_size)\n return face_img\n\nclass TxtDataset():\n \"\"\"\n read dataset from txt\n \"\"\"\n def __init__(self, root, txt_name):\n super(TxtDataset, self).__init__()\n self.imgs = []\n self.labels = []\n self.ptsrem = []\n self.num = 0\n fin = open(txt_name, \"r\")\n num = 0\n for line in fin:\n src_pts = []\n num = num+1\n lets = line.split('\\t')\n for i in range(5):\n src_pts.append([int(lets[2*i+2]), int(lets[2*i+3])])\n self.ptsrem.append(src_pts)\n img_name = lets[0]\n img_name = img_name\n label = lets[1]\n self.imgs.append(os.path.join(root, img_name))\n self.labels.append(int(label))\n fin.close()\n\n def __getitem__(self, index):\n img = np.array(Image.open(self.imgs[index]).convert('RGB'), np.float32)\n img = img[:, :, ::-1]\n img = alignment(img, self.ptsrem[index])\n if random.random() > 0.5:\n rx = random.randint(0, 2*2)\n ry = random.randint(0, 2*2)\n img = img[ry:ry+112, rx:rx+96, :]\n else:\n img = img[2:2+112, 2:2+96, :]\n img = (img-127.5)/128\n return img, self.labels[index]\n def __len__(self):\n return len(self.imgs)\n\n\ndef classification_dataset_imagenet(data_dir, image_size, per_batch_size, max_epoch, rank, group_size, mode='train',\n input_mode='folder', root='', num_parallel_workers=None, shuffle=None,\n sampler=None, class_indexing=None, drop_remainder=True, transform=None,\n target_transform=None):\n\n if transform is None:\n if mode == 'train':\n transform_img = [\n vision_C.RandomColorAdjust(brightness=0.4, saturation=0.4),\n #vision_C.GaussianBlur((3,3),0.05),\n vision_C.RandomHorizontalFlip(prob=0.5),\n vision_C.HWC2CHW()\n ]\n else:\n transform_img = [\n vision_C.Resize((112, 96)),\n vision_C.HWC2CHW()\n ]\n else:\n transform_img = transform\n\n if target_transform is None:\n transform_label = [\n normal_C.TypeCast(mstype.int32)\n ]\n else:\n transform_label = target_transform\n\n\n dataset = TxtDataset(root, data_dir)\n sampler = DistributedSampler(dataset, rank, 
group_size, shuffle=shuffle)\n    de_dataset = de.GeneratorDataset(dataset, [\"image\", \"label\"], sampler=sampler)\n\n    # honor the caller-supplied worker count instead of silently ignoring the parameter\n    workers = num_parallel_workers if num_parallel_workers is not None else 8\n    de_dataset = de_dataset.map(input_columns=\"image\", num_parallel_workers=workers, operations=transform_img)\n    de_dataset = de_dataset.map(input_columns=\"label\", num_parallel_workers=workers, operations=transform_label)\n\n    columns_to_project = [\"image\", \"label\"]\n    de_dataset = de_dataset.project(columns=columns_to_project)\n\n    de_dataset = de_dataset.batch(per_batch_size, drop_remainder=drop_remainder)\n    return de_dataset\n",
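As a quick, self-contained illustration of the 5-point alignment used in TxtDataset above, the sketch below reproduces the same warp with plain OpenCV: cv2.estimateAffinePartial2D (a least-squares similarity transform) stands in for get_similarity_transform_for_cv2, the reference landmarks are the template from alignment(), and the demo image and source landmarks are invented.

import cv2
import numpy as np

def align_face(src_img, src_pts, offset=2):
    # reference template from alignment(), shifted by the same border offset
    ref_pts = np.array([[30.2946, 51.6963], [65.5318, 51.5014],
                        [48.0252, 71.7366], [33.5493, 92.3655],
                        [62.7299, 92.2041]], dtype=np.float32) + offset
    src = np.array(src_pts, dtype=np.float32)
    # rotation + uniform scale + translation fitted to the 5 landmark pairs
    tfm, _ = cv2.estimateAffinePartial2D(src, ref_pts)
    return cv2.warpAffine(src_img, tfm, (96 + 2 * offset, 112 + 2 * offset))

demo_img = np.zeros((250, 250, 3), dtype=np.uint8)  # placeholder image
demo_pts = [[80, 120], [115, 118], [98, 150], [85, 185], [112, 183]]
print(align_face(demo_img, demo_pts).shape)  # (116, 100, 3)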
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"ResNet.\"\"\"\nimport numpy as np\nfrom scipy.stats import truncnorm\nimport mindspore.nn as nn\nimport mindspore.common.dtype as mstype\nfrom mindspore.ops import operations as P\nfrom mindspore.common.tensor import Tensor\n\nformat_ = \"NHWC\"\n# tranpose shape to NCHW, default init is NHWC.\ndef _trans_shape(shape, shape_format):\n if shape_format == \"NCHW\":\n return (shape[0], shape[3], shape[1], shape[2])\n return shape\n\ndef _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):\n fan_in = in_channel * kernel_size * kernel_size\n scale = 1.0\n scale /= max(1., fan_in)\n stddev = (scale ** 0.5) / .87962566103423978\n mu, sigma = 0, stddev\n weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size)\n weight = np.reshape(weight, (out_channel, kernel_size, kernel_size, in_channel))\n return Tensor(weight, dtype=mstype.float32)\n\ndef _weight_variable(shape, factor=0.01):\n init_value = np.random.randn(*shape).astype(np.float32) * factor\n return Tensor(init_value)\n\n\ndef _conv3x3(in_channel, out_channel, stride=1):\n weight_shape = (out_channel, 3, 3, in_channel)\n weight_shape = _trans_shape(weight_shape, format_)\n weight = _weight_variable(weight_shape)\n return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,\n padding=1, pad_mode='pad', weight_init=weight, data_format=format_)\n\ndef _conv1x1(in_channel, out_channel, stride=1):\n weight_shape = (out_channel, 1, 1, in_channel)\n weight_shape = _trans_shape(weight_shape, format_)\n weight = _weight_variable(weight_shape)\n return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride,\n padding=0, pad_mode='pad', weight_init=weight, data_format=format_)\n\ndef _conv7x7(in_channel, out_channel, stride=1):\n weight_shape = (out_channel, 7, 7, in_channel)\n weight_shape = _trans_shape(weight_shape, format_)\n weight = _weight_variable(weight_shape)\n return nn.Conv2d(in_channel, out_channel, kernel_size=7, stride=stride,\n padding=3, pad_mode='pad', weight_init=weight, data_format=format_)\n\n\ndef _bn(channel):\n return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9, gamma_init=1, beta_init=0,\n moving_mean_init=0, moving_var_init=1, data_format=format_)\n\ndef _bn_last(channel):\n return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9, gamma_init=0, beta_init=0,\n moving_mean_init=0, moving_var_init=1, data_format=format_)\n\ndef _fc(in_channel, out_channel):\n weight_shape = (out_channel, in_channel)\n weight = _weight_variable(weight_shape)\n return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)\n\n\nclass ResidualBlock(nn.Cell):\n \"\"\"\n ResNet V1 residual block definition.\n\n Args:\n in_channel (int): Input channel.\n out_channel (int): Output channel.\n stride (int): Stride size for the first convolutional layer. 
Default: 1.\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> ResidualBlock(3, 256, stride=2)\n \"\"\"\n expansion = 4\n\n def __init__(self,\n in_channel,\n out_channel,\n stride=1):\n super(ResidualBlock, self).__init__()\n self.stride = stride\n channel = out_channel // self.expansion\n self.conv1 = _conv1x1(in_channel, channel, stride=1)\n self.bn1 = _bn(channel)\n self.conv2 = _conv3x3(channel, channel, stride=stride)\n self.bn2 = _bn(channel)\n\n self.conv3 = _conv1x1(channel, out_channel, stride=1)\n self.bn3 = _bn_last(out_channel)\n self.relu = nn.ReLU()\n\n self.down_sample = False\n\n if stride != 1 or in_channel != out_channel:\n self.down_sample = True\n self.down_sample_layer = None\n\n if self.down_sample:\n self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride), _bn(out_channel)])\n self.add = P.Add()\n\n def construct(self, x):\n identity = x\n if self.down_sample:\n identity = self.down_sample_layer(identity)\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv3(out)\n out = self.bn3(out)\n\n out = self.add(identity, out)\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Cell):\n \"\"\"\n ResNet architecture.\n\n Args:\n block (Cell): Block for network.\n layer_nums (list): Numbers of block in different layers.\n in_channels (list): Input channel in each layer.\n out_channels (list): Output channel in each layer.\n strides (list): Stride size in each layer.\n num_classes (int): The number of classes that the training images are belonging to.\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> ResNet(ResidualBlock,\n >>> [3, 4, 6, 3],\n >>> [64, 256, 512, 1024],\n >>> [256, 512, 1024, 2048],\n >>> [1, 2, 2, 2],\n >>> 10)\n \"\"\"\n\n def __init__(self,\n block,\n layer_nums,\n in_channels,\n out_channels,\n strides,\n num_classes):\n super(ResNet, self).__init__()\n\n if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:\n raise ValueError(\"the length of layer_num, in_channels, out_channels list must be 4!\")\n input_data_channel = 4\n if format_ == \"NCHW\":\n input_data_channel = 3\n self.conv1 = _conv7x7(input_data_channel, 64, stride=2)\n self.bn1 = _bn(64)\n self.relu = P.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode=\"same\", data_format=format_)\n self.layer1 = self._make_layer(block,\n layer_nums[0],\n in_channel=in_channels[0],\n out_channel=out_channels[0],\n stride=strides[0])\n self.layer2 = self._make_layer(block,\n layer_nums[1],\n in_channel=in_channels[1],\n out_channel=out_channels[1],\n stride=strides[1])\n self.layer3 = self._make_layer(block,\n layer_nums[2],\n in_channel=in_channels[2],\n out_channel=out_channels[2],\n stride=strides[2])\n self.layer4 = self._make_layer(block,\n layer_nums[3],\n in_channel=in_channels[3],\n out_channel=out_channels[3],\n stride=strides[3])\n\n self.avg_pool = P.AvgPool(7, 1, data_format=format_)\n self.flatten = nn.Flatten()\n self.end_point = _fc(out_channels[3], num_classes)\n\n def _make_layer(self, block, layer_num, in_channel, out_channel, stride):\n \"\"\"\n Make stage network of ResNet.\n\n Args:\n block (Cell): Resnet block.\n layer_num (int): Layer number.\n in_channel (int): Input channel.\n out_channel (int): Output channel.\n stride (int): Stride size for the first convolutional layer.\n Returns:\n SequentialCell, the output layer.\n\n Examples:\n >>> _make_layer(ResidualBlock, 3, 128, 256, 2)\n \"\"\"\n layers = 
[]\n\n        resnet_block = block(in_channel, out_channel, stride=stride)\n        layers.append(resnet_block)\n        for _ in range(1, layer_num):\n            resnet_block = block(out_channel, out_channel, stride=1)\n            layers.append(resnet_block)\n        return nn.SequentialCell(layers)\n\n    def construct(self, x):\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n        c1 = self.maxpool(x)\n\n        c2 = self.layer1(c1)\n        c3 = self.layer2(c2)\n        c4 = self.layer3(c3)\n        c5 = self.layer4(c4)\n\n        out = self.avg_pool(c5)\n        out = self.flatten(out)\n        out = self.end_point(out)\n\n        return out\n\n\ndef resnet50(class_num=1001, dtype=\"fp16\"):\n    \"\"\"\n    Get ResNet50 neural network.\n\n    Args:\n        class_num (int): Class number.\n        dtype (str): Parameter precision; \"fp32\" switches the data layout from the default NHWC to NCHW. Default: \"fp16\".\n\n    Returns:\n        Cell, cell instance of ResNet50 neural network.\n\n    Examples:\n        >>> net = resnet50(1001)\n    \"\"\"\n    global format_\n    if dtype == \"fp32\":\n        format_ = \"NCHW\"\n    return ResNet(ResidualBlock,\n                  [3, 4, 6, 3],\n                  [64, 256, 512, 1024],\n                  [256, 512, 1024, 2048],\n                  [1, 2, 2, 2],\n                  class_num)\n",
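The two helpers at the top of this file are easy to check in isolation. The sketch below, pure NumPy/SciPy and independent of MindSpore, mirrors _trans_shape and _conv_variance_scaling_initializer; the constant 0.87962566103423978 is the standard deviation of a unit normal truncated at two sigmas, so dividing by it restores the intended fan-in std after truncation.

import numpy as np
from scipy.stats import truncnorm

def trans_shape(shape, shape_format):
    # (out, k, k, in) -> (out, in, k, k) when NCHW is requested
    return (shape[0], shape[3], shape[1], shape[2]) if shape_format == "NCHW" else shape

def conv_truncnorm(in_channel, out_channel, kernel_size):
    fan_in = in_channel * kernel_size * kernel_size
    stddev = (1.0 / max(1.0, fan_in)) ** 0.5 / 0.87962566103423978
    w = truncnorm(-2, 2, loc=0, scale=stddev).rvs(out_channel * in_channel * kernel_size ** 2)
    return np.reshape(w, (out_channel, kernel_size, kernel_size, in_channel))

w = conv_truncnorm(3, 64, 7)
print(w.shape, trans_shape(w.shape, "NCHW"))  # (64, 7, 7, 3) (64, 3, 7, 7)
print(round(w.std(), 4))  # close to sqrt(1/147) after the truncation correction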
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"\nsample script of CLUE infer using SDK run in docker\n\"\"\"\n\nimport argparse\nimport glob\nimport os\nimport time\n\nimport MxpiDataType_pb2 as MxpiDataType\nimport numpy as np\nfrom StreamManagerApi import StreamManagerApi, MxDataInput, InProtobufVector, \\\n MxProtobufIn, StringVector\n\ntotal = 0\ntotal_acc = 0\n\n\ndef parse_args():\n \"\"\"set and check parameters.\"\"\"\n parser = argparse.ArgumentParser(description=\"bert process\")\n parser.add_argument(\"--pipeline\", type=str, default=\"\", help=\"SDK infer pipeline\")\n parser.add_argument(\"--data_dir\", type=str, default=\"\",\n help=\"Dataset contain input_ids, input_mask, segment_ids, label_ids\")\n parser.add_argument(\"--label_file\", type=str, default=\"\", help=\"label ids to name\")\n parser.add_argument(\"--output_file\", type=str, default=\"\", help=\"save result to file\")\n parser.add_argument(\"--do_eval\", type=str, default=False, help=\"eval the accuracy of model \")\n args_opt = parser.parse_args()\n return args_opt\n\n\ndef send_source_data(appsrc_id, filename, stream_name, stream_manager):\n \"\"\"\n Construct the input of the stream,\n send inputs data to a specified stream based on streamName.\n\n Returns:\n bool: send data success or not\n \"\"\"\n tensor = np.fromfile(filename, dtype=np.int32)\n tensor = np.expand_dims(tensor, 0)\n tensor_package_list = MxpiDataType.MxpiTensorPackageList()\n tensor_package = tensor_package_list.tensorPackageVec.add()\n array_bytes = tensor.tobytes()\n data_input = MxDataInput()\n data_input.data = array_bytes\n tensor_vec = tensor_package.tensorVec.add()\n tensor_vec.deviceId = 0\n tensor_vec.memType = 0\n for i in tensor.shape:\n tensor_vec.tensorShape.append(i)\n tensor_vec.dataStr = data_input.data\n tensor_vec.tensorDataSize = len(array_bytes)\n\n key = \"appsrc{}\".format(appsrc_id).encode('utf-8')\n protobuf_vec = InProtobufVector()\n protobuf = MxProtobufIn()\n protobuf.key = key\n protobuf.type = b'MxTools.MxpiTensorPackageList'\n protobuf.protobuf = tensor_package_list.SerializeToString()\n protobuf_vec.push_back(protobuf)\n\n ret = stream_manager.SendProtobuf(stream_name, appsrc_id, protobuf_vec)\n if ret < 0:\n print(\"Failed to send data to stream.\")\n return False\n return True\n\n\ndef send_appsrc_data(args, file_name, stream_name, stream_manager):\n \"\"\"\n send three stream to infer model, include input ids, input mask and token type_id.\n\n Returns:\n bool: send data success or not\n \"\"\"\n input_ids = os.path.realpath(os.path.join(args.data_dir, \"00_data\", file_name))\n if not send_source_data(0, input_ids, stream_name, stream_manager):\n return False\n input_mask = os.path.realpath(os.path.join(args.data_dir, \"01_data\", file_name))\n if not send_source_data(1, input_mask, stream_name, stream_manager):\n return False\n token_type_id = 
os.path.realpath(os.path.join(args.data_dir, \"02_data\", file_name))\n if not send_source_data(2, token_type_id, stream_name, stream_manager):\n return False\n return True\n\n\ndef read_label_file(label_file):\n \"\"\"\n Args:\n label_file:\n \"address\"\n \"book\"\n ...\n Returns:\n label list\n \"\"\"\n label_list = [line.strip() for line in open(label_file).readlines()]\n return label_list\n\n\ndef process_infer_to_cluner(args, logit_id, each_label_length=4):\n \"\"\"\n find label and position from the logit_id tensor.\n\n Args:\n args: param of config.\n logit_id: shape is [128], example: [0..32.34..0].\n each_label_length: each label have 4 prefix, [\"S_\", \"B_\", \"M_\", \"E_\"].\n\n Returns:\n dict of visualization result, as 'position': [9, 10]\n \"\"\"\n label_list = read_label_file(os.path.realpath(args.label_file))\n find_cluner = False\n result_list = []\n for i, value in enumerate(logit_id):\n if value > 0:\n if not find_cluner:\n start = i\n cluner_name = label_list[(value - 1) // each_label_length]\n find_cluner = True\n else:\n if label_list[(value - 1) // each_label_length] != cluner_name:\n item = {}\n item[cluner_name] = [start - 1, i - 2]\n result_list.append(item)\n start = i\n cluner_name = label_list[(value - 1) // each_label_length]\n else:\n if find_cluner:\n item = {}\n item[cluner_name] = [start - 1, i - 2]\n result_list.append(item)\n find_cluner = False\n\n return result_list\n\n\ndef count_pred_result(args, file_name, logit_id):\n \"\"\"\n support two method to calc f1 sore, if dataset has two class, suggest using BF1,\n else more than two class, suggest using MF1.\n Args:\n args: param of config.\n file_name: label file name.\n logit_id: output tensor of infer.\n max_seq_length: sentence input length default is 128.\n\n global:\n TP: pred == target\n FP: in pred but not in target\n FN: in target but not in pred\n \"\"\"\n global total, total_acc\n label_file = os.path.realpath(os.path.join(args.data_dir, \"03_data\", file_name))\n label_ids = np.fromfile(label_file, np.int32)\n # label_ids.reshape(max_seq_length, -1)\n acc = (label_ids == logit_id).sum()\n total += len(label_ids)\n total_acc += acc\n\n\ndef post_process(args, file_name, infer_result):\n \"\"\"\n process the result of infer tensor to Visualization results.\n Args:\n args: param of config.\n file_name: label file name.\n infer_result: get logit from infer result\n max_seq_length: sentence input length default is 128.\n \"\"\"\n # print the infer result\n print(\"==============================================================\")\n result = MxpiDataType.MxpiTensorPackageList()\n result.ParseFromString(infer_result[0].messageBuf)\n res = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr, dtype='<f4')\n # res = res.reshape(max_seq_length, -1)\n\n\n logit_id = np.argmax(res, axis=-1)\n logit_id = np.reshape(logit_id, -1)\n print(\"output tensor is: \", logit_id)\n # cluner_list = process_infer_to_cluner(args, logit_id)\n # print(cluner_list)\n with open(args.output_file, \"a\") as file:\n file.write(\"{}: {}\\n\".format(file_name, str(logit_id)))\n\n if args.do_eval == 'True':\n count_pred_result(args, file_name, logit_id)\n\n\ndef run():\n \"\"\"\n read pipeline and do infer\n \"\"\"\n args = parse_args()\n # init stream manager\n stream_manager_api = StreamManagerApi()\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n return\n\n # create streams by pipeline config file\n with open(os.path.realpath(args.pipeline), 
'rb') as f:\n pipeline_str = f.read()\n ret = stream_manager_api.CreateMultipleStreams(pipeline_str)\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % str(ret))\n return\n\n stream_name = b'im_senta'\n infer_total_time = 0\n # input_ids file list, every file content a tensor[1,128]\n file_list = glob.glob(os.path.join(os.path.realpath(args.data_dir), \"00_data\", \"*.bin\"))\n for input_ids in file_list:\n file_name = input_ids.split('/')[-1]\n if not send_appsrc_data(args, file_name, stream_name, stream_manager_api):\n return\n # Obtain the inference result by specifying streamName and uniqueId.\n key_vec = StringVector()\n key_vec.push_back(b'mxpi_tensorinfer0')\n start_time = time.time()\n infer_result = stream_manager_api.GetProtobuf(stream_name, 0, key_vec)\n infer_total_time += time.time() - start_time\n if infer_result.size() == 0:\n print(\"inferResult is null\")\n return\n if infer_result[0].errorCode != 0:\n print(\"GetProtobuf error. errorCode=%d\" % (infer_result[0].errorCode))\n return\n post_process(args, file_name, infer_result)\n\n if args.do_eval == 'True':\n print(\"==============================================================\")\n acc = 0\n if total > 0:\n acc = total_acc / total\n print(\"Acc: \", acc)\n print(\"==============================================================\")\n print(\"Infer images sum: {}, cost total time: {:.6f} sec.\".format(len(file_list), infer_total_time))\n print(\"The throughput: {:.6f} bin/sec.\".format(len(file_list) / infer_total_time))\n # destroy streams\n stream_manager_api.DestroyAllStreams()\n\n\nif __name__ == '__main__':\n run()\n",
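The evaluation path above reduces to a small amount of NumPy: post_process takes an argmax over the per-token logits and count_pred_result accumulates token-level accuracy. A minimal stand-alone rendering of that math, with made-up arrays in place of the .bin files and an assumed label count:

import numpy as np

rng = np.random.default_rng(0)
num_labels, seq_len = 41, 128  # hypothetical sizes
logits = rng.standard_normal((seq_len, num_labels)).astype('<f4')  # stand-in for the infer tensor
label_ids = rng.integers(0, num_labels, size=seq_len, dtype=np.int32)

logit_id = np.reshape(np.argmax(logits, axis=-1), -1)  # as in post_process
total_acc = (label_ids == logit_id).sum()              # as in count_pred_result
total = len(label_ids)
print('Acc:', total_acc / total)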
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"train_dataset.\"\"\"\n\n\nimport os\nimport math\nfrom enum import Enum\nimport numpy as np\nimport pandas as pd\nimport mindspore.dataset.engine as de\nimport mindspore.common.dtype as mstype\n\n\nclass DataType(Enum):\n \"\"\"\n Enumerate supported dataset format.\n \"\"\"\n MINDRECORD = 1\n TFRECORD = 2\n H5 = 3\n\n\nclass H5Dataset():\n \"\"\"\n H5DataSet\n \"\"\"\n input_length = 39\n\n def __init__(self, data_path, train_mode=True, train_num_of_parts=21,\n test_num_of_parts=3):\n self._hdf_data_dir = data_path\n self._is_training = train_mode\n\n if self._is_training:\n self._file_prefix = 'train'\n self._num_of_parts = train_num_of_parts\n else:\n self._file_prefix = 'test'\n self._num_of_parts = test_num_of_parts\n\n self.data_size = self._bin_count(self._hdf_data_dir, self._file_prefix,\n self._num_of_parts)\n print(\"data_size: {}\".format(self.data_size))\n\n def _bin_count(self, hdf_data_dir, file_prefix, num_of_parts):\n size = 0\n for part in range(num_of_parts):\n _y = pd.read_hdf(os.path.join(hdf_data_dir,\n file_prefix + '_output_part_' + str(\n part) + '.h5'))\n size += _y.shape[0]\n return size\n\n def _iterate_hdf_files_(self, num_of_parts=None,\n shuffle_block=False):\n \"\"\"\n iterate among hdf files(blocks). when the whole data set is finished, the iterator restarts\n from the beginning, thus the data stream will never stop\n :param train_mode: True or false,false is eval_mode,\n this file iterator will go through the train set\n :param num_of_parts: number of files\n :param shuffle_block: shuffle block files at every round\n :return: input_hdf_file_name, output_hdf_file_name, finish_flag\n \"\"\"\n parts = np.arange(num_of_parts)\n while True:\n if shuffle_block:\n for _ in range(int(shuffle_block)):\n np.random.shuffle(parts)\n for i, p in enumerate(parts):\n yield os.path.join(self._hdf_data_dir,\n self._file_prefix + '_input_part_' + str(\n p) + '.h5'), \\\n os.path.join(self._hdf_data_dir,\n self._file_prefix + '_output_part_' + str(\n p) + '.h5'), i + 1 == len(parts)\n\n def _generator(self, X, y, batch_size, shuffle=True):\n \"\"\"\n should be accessed only in private\n :param X:\n :param y:\n :param batch_size:\n :param shuffle:\n :return:\n \"\"\"\n number_of_batches = np.ceil(1. 
* X.shape[0] / batch_size)\n counter = 0\n finished = False\n sample_index = np.arange(X.shape[0])\n if shuffle:\n for _ in range(int(shuffle)):\n np.random.shuffle(sample_index)\n assert X.shape[0] > 0\n while True:\n batch_index = sample_index[\n batch_size * counter: batch_size * (counter + 1)]\n X_batch = X[batch_index]\n y_batch = y[batch_index]\n counter += 1\n yield X_batch, y_batch, finished\n if counter == number_of_batches:\n counter = 0\n finished = True\n\n def batch_generator(self, batch_size=1000,\n random_sample=False, shuffle_block=False):\n \"\"\"\n :param train_mode: True or false,false is eval_mode,\n :param batch_size\n :param num_of_parts: number of files\n :param random_sample: if True, will shuffle\n :param shuffle_block: shuffle file blocks at every round\n :return:\n \"\"\"\n\n for hdf_in, hdf_out, _ in self._iterate_hdf_files_(self._num_of_parts,\n shuffle_block):\n start = stop = None\n X_all = pd.read_hdf(hdf_in, start=start, stop=stop).values\n y_all = pd.read_hdf(hdf_out, start=start, stop=stop).values\n data_gen = self._generator(X_all, y_all, batch_size,\n shuffle=random_sample)\n finished = False\n\n while not finished:\n X, y, finished = data_gen.__next__()\n X_id = X[:, 0:self.input_length]\n X_va = X[:, self.input_length:]\n yield np.array(X_id.astype(dtype=np.int32)), np.array(\n X_va.astype(dtype=np.float32)), np.array(\n y.astype(dtype=np.float32))\n\n\ndef _get_h5_dataset(data_dir, train_mode=True, epochs=1, batch_size=1000):\n \"\"\"\n get_h5_dataset\n \"\"\"\n data_para = {\n 'batch_size': batch_size,\n }\n if train_mode:\n data_para['random_sample'] = True\n data_para['shuffle_block'] = True\n\n h5_dataset = H5Dataset(data_path=data_dir, train_mode=train_mode)\n numbers_of_batch = math.ceil(h5_dataset.data_size / batch_size)\n\n def _iter_h5_data():\n train_eval_gen = h5_dataset.batch_generator(**data_para)\n for _ in range(0, numbers_of_batch, 1):\n yield train_eval_gen.__next__()\n\n ds = de.GeneratorDataset(_iter_h5_data(), [\"ids\", \"weights\", \"labels\"])\n ds = ds.repeat(epochs)\n return ds\n\n\ndef _padding_func(batch_size, manual_shape, target_column, field_size=39):\n \"\"\"\n get padding_func\n \"\"\"\n if manual_shape:\n generate_concat_offset = [item[0]+item[1] for item in manual_shape]\n part_size = int(target_column / len(generate_concat_offset))\n filled_value = []\n for i in range(field_size, target_column):\n filled_value.append(generate_concat_offset[i//part_size]-1)\n print(\"Filed Value:\", filled_value)\n\n def padding_func(x, y, z):\n x = np.array(x).flatten().reshape(batch_size, field_size)\n y = np.array(y).flatten().reshape(batch_size, field_size)\n z = np.array(z).flatten().reshape(batch_size, 1)\n\n x_id = np.ones((batch_size, target_column - field_size),\n dtype=np.int32) * filled_value\n x_id = np.concatenate([x, x_id.astype(dtype=np.int32)], axis=1)\n mask = np.concatenate(\n [y, np.zeros((batch_size, target_column-39), dtype=np.float32)], axis=1)\n return (x_id, mask, z)\n else:\n def padding_func(x, y, z):\n x = np.array(x).flatten().reshape(batch_size, field_size)\n y = np.array(y).flatten().reshape(batch_size, field_size)\n z = np.array(z).flatten().reshape(batch_size, 1)\n return (x, y, z)\n return padding_func\n\n\ndef _get_tf_dataset(data_dir, train_mode=True, epochs=1, batch_size=1000,\n line_per_sample=1000, rank_size=None, rank_id=None,\n manual_shape=None, target_column=40):\n \"\"\"\n get_tf_dataset\n \"\"\"\n dataset_files = []\n file_prefix_name = 'train' if train_mode else 'test'\n shuffle = 
train_mode\n for (dirpath, _, filenames) in os.walk(data_dir):\n for filename in filenames:\n if file_prefix_name in filename and \"tfrecord\" in filename:\n dataset_files.append(os.path.join(dirpath, filename))\n schema = de.Schema()\n schema.add_column('feat_ids', de_type=mstype.int32)\n schema.add_column('feat_vals', de_type=mstype.float32)\n schema.add_column('label', de_type=mstype.float32)\n if rank_size is not None and rank_id is not None:\n ds = de.TFRecordDataset(dataset_files=dataset_files, shuffle=shuffle, schema=schema, num_parallel_workers=8,\n num_shards=rank_size, shard_id=rank_id, shard_equal_rows=True)\n else:\n ds = de.TFRecordDataset(dataset_files=dataset_files,\n shuffle=shuffle, schema=schema, num_parallel_workers=8)\n ds = ds.batch(int(batch_size / line_per_sample),\n drop_remainder=True)\n\n ds = ds.map(operations=_padding_func(batch_size, manual_shape, target_column),\n input_columns=['feat_ids', 'feat_vals', 'label'],\n column_order=['feat_ids', 'feat_vals', 'label'], num_parallel_workers=8)\n ds = ds.repeat(epochs)\n return ds\n\n\ndef _get_mindrecord_dataset(directory, train_mode=True, epochs=10, batch_size=16000,\n line_per_sample=1000, rank_size=None, rank_id=None,\n manual_shape=None, target_column=40):\n \"\"\"\n Get dataset with mindrecord format.\n\n Args:\n directory (str): Dataset directory.\n train_mode (bool): Whether dataset is use for train or eval (default=True).\n epochs (int): Dataset epoch size (default=1).\n batch_size (int): Dataset batch size (default=1000).\n line_per_sample (int): The number of sample per line (default=1000).\n rank_size (int): The number of device, not necessary for single device (default=None).\n rank_id (int): Id of device, not necessary for single device (default=None).\n\n Returns:\n Dataset.\n \"\"\"\n file_prefix_name = 'train_input_part.mindrecord' if train_mode else 'test_input_part.mindrecord'\n file_suffix_name = '00' if train_mode else '0'\n shuffle = train_mode\n\n if rank_size is not None and rank_id is not None:\n ds = de.MindDataset(os.path.join(directory, file_prefix_name + file_suffix_name),\n columns_list=['feat_ids', 'feat_vals', 'label'],\n num_shards=rank_size, shard_id=rank_id, shuffle=shuffle,\n num_parallel_workers=8)\n else:\n ds = de.MindDataset(os.path.join(directory, file_prefix_name + file_suffix_name),\n columns_list=['feat_ids', 'feat_vals', 'label'],\n shuffle=shuffle, num_parallel_workers=8)\n ds = ds.batch(int(batch_size / line_per_sample), drop_remainder=True)\n ds = ds.map(_padding_func(batch_size, manual_shape, target_column, target_column-1),\n input_columns=['feat_ids', 'feat_vals', 'label'],\n column_order=['feat_ids', 'feat_vals', 'label'],\n num_parallel_workers=8)\n ds = ds.repeat(epochs)\n return ds\n\n\ndef _get_vocab_size(target_column_number, worker_size, total_vocab_size, multiply=False, per_vocab_size=None):\n \"\"\"\n get_vocab_size\n \"\"\"\n # Only 39\n inidival_vocabs = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 691, 540, 20855, 23639, 182, 15,\n 10091, 347, 4, 16366, 4494, 21293, 3103, 27, 6944, 22366, 11, 3267, 1610,\n 5, 21762, 14, 15, 15030, 61, 12220]\n\n new_vocabs = inidival_vocabs + [1] * \\\n (target_column_number - len(inidival_vocabs))\n part_size = int(target_column_number / worker_size)\n\n # According to the workers, we merge some fields into the same part\n new_vocab_size = []\n for i in range(0, target_column_number, part_size):\n new_vocab_size.append(sum(new_vocabs[i: i + part_size]))\n\n index_offsets = [0]\n\n # The gold feature numbers ared used to 
calculate the offset\n    features = [item for item in new_vocab_size]\n\n    # According to the per_vocab_size, maximize the vocab size\n    if per_vocab_size is not None:\n        new_vocab_size = [per_vocab_size] * worker_size\n    else:\n        # Expands the vocabulary of each field by the multiplier\n        if multiply is True:\n            cur_sum = sum(new_vocab_size)\n            k = total_vocab_size/cur_sum\n            new_vocab_size = [\n                math.ceil(int(item*k)/worker_size)*worker_size for item in new_vocab_size]\n            new_vocab_size = [(item // 8 + 1)*8 for item in new_vocab_size]\n\n        else:\n            if total_vocab_size > sum(new_vocab_size):\n                new_vocab_size[-1] = total_vocab_size - \\\n                    sum(new_vocab_size[:-1])\n                new_vocab_size = [item for item in new_vocab_size]\n            else:\n                raise ValueError(\n                    \"Please provide the correct vocab size, now is {}\".format(total_vocab_size))\n\n    for i in range(worker_size-1):\n        off = index_offsets[i] + features[i]\n        index_offsets.append(off)\n\n    print(\"the offset: \", index_offsets)\n    manual_shape = tuple(\n        ((new_vocab_size[i], index_offsets[i]) for i in range(worker_size)))\n    vocab_total = sum(new_vocab_size)\n    return manual_shape, vocab_total\n\n\ndef compute_manual_shape(config, worker_size):\n    target_column = (config.field_size // worker_size + 1) * worker_size\n    config.field_size = target_column\n    manual_shape, vocab_total = _get_vocab_size(target_column, worker_size, total_vocab_size=config.vocab_size,\n                                                per_vocab_size=None, multiply=False)\n    config.manual_shape = manual_shape\n    config.vocab_size = int(vocab_total)\n\n\ndef create_dataset(data_dir, train_mode=True, epochs=1, batch_size=1000,\n                   data_type=DataType.TFRECORD, line_per_sample=1000,\n                   rank_size=None, rank_id=None, manual_shape=None, target_column=40):\n    \"\"\"\n    create_dataset\n    \"\"\"\n    if data_type == DataType.TFRECORD:\n        return _get_tf_dataset(data_dir, train_mode, epochs, batch_size,\n                               line_per_sample, rank_size=rank_size, rank_id=rank_id,\n                               manual_shape=manual_shape, target_column=target_column)\n    if data_type == DataType.MINDRECORD:\n        return _get_mindrecord_dataset(data_dir, train_mode, epochs, batch_size,\n                                       line_per_sample, rank_size=rank_size, rank_id=rank_id,\n                                       manual_shape=manual_shape, target_column=target_column)\n\n    # rank_size may be None on a single device; guard before comparing\n    if rank_size is not None and rank_size > 1:\n        raise RuntimeError(\"please use tfrecord dataset.\")\n    return _get_h5_dataset(data_dir, train_mode, epochs, batch_size)\n",
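A toy walk-through of the sharding arithmetic in _get_vocab_size may help: fields are grouped into worker_size parts, each part gets a vocabulary budget, and index_offsets are the running sums used to shift embedding ids per part. The numbers below are invented for illustration.

field_vocabs = [5, 9, 2, 7, 11, 6]  # hypothetical per-field vocab sizes
worker_size = 3
part_size = len(field_vocabs) // worker_size

part_vocab = [sum(field_vocabs[i:i + part_size])
              for i in range(0, len(field_vocabs), part_size)]  # [14, 9, 17]

index_offsets = [0]
for i in range(worker_size - 1):
    index_offsets.append(index_offsets[i] + part_vocab[i])      # [0, 14, 23]

manual_shape = tuple(zip(part_vocab, index_offsets))
print(part_vocab, index_offsets, manual_shape)  # ... ((14, 0), (9, 14), (17, 23))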
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n##############export checkpoint file into air, onnx, mindir models#################\npython export.py\n\"\"\"\nimport argparse\nimport numpy as np\n\nfrom mindspore import Tensor, load_checkpoint, load_param_into_net, export, context\nimport mindspore.common.dtype as ms\n\nfrom src.config import config1 as config\nfrom src.glore_resnet200 import glore_resnet200\n\nparser = argparse.ArgumentParser(description='Classification')\nparser.add_argument(\"--device_id\", type=int, default=0, help=\"Device id\")\nparser.add_argument(\"--batch_size\", type=int, default=1, help=\"batch size\")\nparser.add_argument(\"--file_name\", type=str, default=\"glore_resnet200\", help=\"output file name.\")\nparser.add_argument('--file_format', type=str, choices=[\"AIR\", \"ONNX\", \"MINDIR\"], default='AIR', help='file format')\nparser.add_argument(\"--device_target\", type=str, choices=[\"Ascend\", \"GPU\", \"CPU\"], default=\"Ascend\",\n help=\"device target\")\nparser.add_argument(\"--ckpt_path\", type=str, default=None)\n\nargs = parser.parse_args()\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)\nif args.device_target == \"Ascend\":\n context.set_context(device_id=args.device_id)\n\nif __name__ == '__main__':\n net = glore_resnet200(class_num=config.class_num)\n assert args.ckpt_path is not None, \"arg.ckpt_path is None.\"\n param_dict = load_checkpoint(args.ckpt_path)\n load_param_into_net(net, param_dict)\n\n input_arr = Tensor(np.ones([args.batch_size, 3, 224, 224]), ms.float32)\n export(net, input_arr, file_name=args.file_name, file_format=args.file_format)\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n'''\npostprocess script.\n'''\n\nimport os\nimport argparse\nimport numpy as np\nfrom mindspore import Tensor\nfrom src.assessment_method import Accuracy, F1\nfrom run_ernie_classifier import eval_result_print\n\nparser = argparse.ArgumentParser(description=\"postprocess\")\nparser.add_argument(\"--batch_size\", type=int, default=1, help=\"Eval batch size, default is 1\")\nparser.add_argument(\"--label_dir\", type=str, default=\"\", help=\"label data dir\")\nparser.add_argument(\"--result_dir\", type=str, default=\"./result_Files\", help=\"infer result Files\")\nparser.add_argument(\"--task_type\", type=str, default='chnsenticorp', help=\"dataset name\")\n\nargs, _ = parser.parse_known_args()\n\nif __name__ == \"__main__\":\n args.batch_size = 1\n if args.task_type == 'chnsenticorp':\n num_class = 3\n assessment_method = 'accuracy'\n callback = Accuracy()\n elif args.task_type == 'xnli':\n num_class = 3\n assessment_method = 'accuracy'\n callback = Accuracy()\n elif args.task_type == 'dbqa':\n num_class = 2\n assessment_method = 'f1'\n callback = F1(num_class)\n else:\n raise ValueError(\"dataset not supported, support: [chnsenticorp, xnli, dbqa]\")\n\n file_name = os.listdir(args.label_dir)\n for f in file_name:\n f_name = os.path.join(args.result_dir, f.split('.')[0] + '_0.bin')\n logits = np.fromfile(f_name, np.float32).reshape(args.batch_size, num_class)\n logits = Tensor(logits)\n label_ids = np.fromfile(os.path.join(args.label_dir, f), np.int32)\n label_ids = Tensor(label_ids.reshape(args.batch_size, 1))\n callback.update(logits, label_ids)\n\n print(\"==============================================================\")\n eval_result_print(assessment_method, callback)\n print(\"==============================================================\")\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"ResNet.\"\"\"\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore.ops import operations as P\nfrom mindspore.common.tensor import Tensor\nfrom mindspore.common.initializer import initializer\nimport mindspore.common.dtype as mstype\n\n\ndef _weight_variable(shape):\n \"\"\"weight_variable\"\"\"\n return initializer('HeUniform', shape=shape, dtype=mstype.float32)\n\n\ndef _weight_variable_(shape, factor=0.01):\n init_value = np.random.randn(*shape).astype(np.float32) * factor\n return Tensor(init_value)\n\n\ndef _conv3x3(in_channel, out_channel, stride=1):\n return nn.Conv2d(in_channel, out_channel,\n kernel_size=3, stride=stride, padding=0, pad_mode='same', weight_init=\"HeUniform\")\n\n\ndef _conv1x1(in_channel, out_channel, stride=1):\n return nn.Conv2d(in_channel, out_channel,\n kernel_size=1, stride=stride, padding=0, pad_mode='same', weight_init=\"HeUniform\")\n\n\ndef _conv7x7(in_channel, out_channel, stride=1):\n return nn.Conv2d(in_channel, out_channel,\n kernel_size=7, stride=stride, padding=0, pad_mode='same', weight_init=\"HeUniform\")\n\n\ndef _bn(channel, training=True):\n if training:\n return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,\n gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)\n return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,\n gamma_init=1, beta_init=0, moving_mean_init=0,\n moving_var_init=1, use_batch_statistics=training)\n\n\ndef _bn_last(channel, training=True):\n if training:\n return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,\n gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)\n return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,\n gamma_init=0, beta_init=0, moving_mean_init=0,\n moving_var_init=1, use_batch_statistics=training)\n\n\ndef _fc(in_channel, out_channel):\n return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=\"HeUniform\", bias_init=0)\n\n\nclass BasicBlock(nn.Cell):\n \"\"\"\n basic block for resnet18 and resnet34\n \"\"\"\n expansion = 1\n\n def __init__(self,\n in_channel,\n out_channel,\n stride=1\n ):\n super(BasicBlock, self).__init__()\n channel = out_channel // self.expansion\n self.conv1 = _conv3x3(in_channel, channel, stride=1)\n self.bn1 = _bn(channel)\n\n self.conv2 = _conv3x3(channel, out_channel, stride=stride)\n self.bn2 = _bn_last(channel)\n\n self.relu = nn.ReLU()\n\n self.down_sample = False\n\n if stride != 1 or in_channel != out_channel:\n self.down_sample = True\n self.down_sample_layer = None\n\n if self.down_sample:\n self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride),\n _bn(out_channel)])\n self.add = P.TensorAdd()\n\n def construct(self, x):\n \"\"\"forward function\"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.down_sample:\n 
identity = self.down_sample_layer(identity)\n\n out = self.add(out, identity)\n out = self.relu(out)\n\n return out\n\n\nclass ResidualBlock(nn.Cell):\n \"\"\"\n ResNet V1 residual block definition.\n\n Args:\n in_channel (int): Input channel.\n out_channel (int): Output channel.\n stride (int): Stride size for the first convolutional layer. Default: 1.\n\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> ResidualBlock(3, 256, stride=2)\n \"\"\"\n expansion = 4\n\n def __init__(self,\n in_channel,\n out_channel,\n stride=1):\n super(ResidualBlock, self).__init__()\n\n channel = out_channel // self.expansion\n self.conv1 = _conv1x1(in_channel, channel, stride=1)\n self.bn1 = _bn(channel)\n\n self.conv2 = _conv3x3(channel, channel, stride=stride)\n self.bn2 = _bn(channel)\n\n self.conv3 = _conv1x1(channel, out_channel, stride=1)\n self.bn3 = _bn_last(out_channel)\n\n self.relu = nn.ReLU()\n\n self.down_sample = False\n\n if stride != 1 or in_channel != out_channel:\n self.down_sample = True\n self.down_sample_layer = None\n\n if self.down_sample:\n self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride),\n _bn(out_channel)])\n self.add = P.TensorAdd()\n\n def construct(self, x):\n \"\"\"forward function\"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.down_sample:\n identity = self.down_sample_layer(identity)\n\n out = self.add(out, identity)\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Cell):\n \"\"\"\n ResNet architecture.\n\n Args:\n block (Cell): Block for network.\n layer_nums (list): Numbers of block in different layers.\n in_channels (list): Input channel in each layer.\n out_channels (list): Output channel in each layer.\n strides (list): Stride size in each layer.\n low_dims (int): The dimension of outputThe.\n Returns:\n Tensor, output tensor.\n\n Examples:\n >>> ResNet(ResidualBlock,\n >>> [3, 4, 6, 3],\n >>> [64, 256, 512, 1024],\n >>> [256, 512, 1024, 2048],\n >>> [1, 2, 2, 2],\n >>> 128)\n \"\"\"\n\n def __init__(self,\n block,\n layer_nums,\n in_channels,\n out_channels,\n strides,\n low_dims,\n pretrain=True,\n use_MLP=False,\n nclass=10):\n super(ResNet, self).__init__()\n\n if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:\n raise ValueError(\n \"the length of layer_num, in_channels, out_channels list must be 4!\")\n\n self.pretrain = pretrain\n self.use_MLP = use_MLP\n self.concat = P.Concat()\n self.split = P.Split(0, 3)\n self.l2norm = P.L2Normalize(axis=1)\n\n self.conv1 = _conv7x7(3, 64, stride=1)\n self.bn1 = _bn(64)\n self.relu = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode=\"same\")\n\n self.layer1 = self._make_layer(block,\n layer_nums[0],\n in_channel=in_channels[0],\n out_channel=out_channels[0],\n stride=strides[0])\n self.layer2 = self._make_layer(block,\n layer_nums[1],\n in_channel=in_channels[1],\n out_channel=out_channels[1],\n stride=strides[1])\n self.layer3 = self._make_layer(block,\n layer_nums[2],\n in_channel=in_channels[2],\n out_channel=out_channels[2],\n stride=strides[2])\n self.layer4 = self._make_layer(block,\n layer_nums[3],\n in_channel=in_channels[3],\n out_channel=out_channels[3],\n stride=strides[3])\n\n self.mean = P.ReduceMean(keep_dims=True)\n self.flatten = nn.Flatten()\n self.end_point = _fc(block.expansion * 512, low_dims)\n self.end_point_class = _fc(block.expansion * 
512, nclass)\n self.sigmoid = P.Sigmoid()\n self.mlp_layer1 = _fc(block.expansion * 512, block.expansion * 512)\n self.mlp_layer2 = _fc(block.expansion * 512, low_dims)\n\n def _make_layer(self, block, layer_num, in_channel, out_channel, stride):\n \"\"\"\n Make stage network of ResNet.\n\n Args:\n block (Cell): Resnet block.\n layer_num (int): Layer number.\n in_channel (int): Input channel.\n out_channel (int): Output channel.\n stride (int): Stride size for the first convolutional layer.\n\n Returns:\n SequentialCell, the output layer.\n\n Examples:\n >>> _make_layer(ResidualBlock, 3, 128, 256, 2)\n \"\"\"\n layers = []\n\n resnet_block = block(in_channel, out_channel,\n stride=stride)\n layers.append(resnet_block)\n\n for _ in range(1, layer_num):\n resnet_block = block(out_channel, out_channel,\n stride=1)\n layers.append(resnet_block)\n\n return nn.SequentialCell(layers)\n\n def construct(self, x):\n \"\"\"forward function\"\"\"\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n c1 = self.maxpool(x)\n c2 = self.layer1(c1)\n c3 = self.layer2(c2)\n c4 = self.layer3(c3)\n c5 = self.layer4(c4)\n\n out = self.mean(c5, (2, 3))\n out = self.flatten(out)\n\n if self.pretrain:\n if self.use_MLP:\n out = self.mlp_layer1(out)\n out = self.mlp_layer2(out)\n else:\n out = self.end_point(out)\n out = self.l2norm(out)\n out1, out2, out3 = self.split(out)\n return out1, out2, out3\n out = self.end_point_class(out)\n out = self.sigmoid(out)\n return out\n\n\ndef resnet50(low_dims=128, pretrain=True, use_MLP=False, classes=10):\n \"\"\"\n Get ResNet50 neural network.\n\n Args:\n class_num (int): Class number.\n\n Returns:\n Cell, cell instance of ResNet50 neural network.\n\n Examples:\n >>> net = resnet50(128)\n \"\"\"\n return ResNet(ResidualBlock,\n [3, 4, 6, 3],\n [64, 256, 512, 1024],\n [256, 512, 1024, 2048],\n [1, 2, 2, 2],\n low_dims=low_dims,\n pretrain=pretrain,\n use_MLP=use_MLP,\n nclass=classes)\n\n\ndef resnet101(low_dims=128, pretrain=True, use_MLP=False, classes=10):\n \"\"\"\n Get ResNet101 neural network.\n\n Args:\n class_num (int): Class number.\n\n Returns:\n Cell, cell instance of ResNet101 neural network.\n\n Examples:\n >>> net = resnet101(128)\n \"\"\"\n return ResNet(ResidualBlock,\n [3, 4, 23, 3],\n [64, 256, 512, 1024],\n [256, 512, 1024, 2048],\n [1, 2, 2, 2],\n low_dims=low_dims,\n pretrain=pretrain,\n use_MLP=use_MLP,\n nclass=classes)\n\n\ndef resnet18(low_dims=128, pretrain=True, use_MLP=False, classes=10):\n \"\"\"\n Get ResNet18 neural network.\n\n Returns:\n Cell, cell instance of ResNet18 neural network.\n\n Examples:\n >>> net = resnet18(128)\n \"\"\"\n return ResNet(BasicBlock,\n [2, 2, 2, 2],\n [64, 64, 128, 256],\n [64, 128, 256, 512],\n [1, 2, 2, 2],\n low_dims=low_dims,\n pretrain=pretrain,\n use_MLP=use_MLP,\n nclass=classes)\n",
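In pretrain mode the head above L2-normalizes the embeddings and splits the batch into three equal groups along dim 0 (the batch is assumed to stack three augmented views). A NumPy rendering of just that step, with random stand-in embeddings:

import numpy as np

batch, low_dims = 12, 128  # batch must be divisible by 3
out = np.random.default_rng(0).standard_normal((batch, low_dims))

out = out / np.linalg.norm(out, axis=1, keepdims=True)  # P.L2Normalize(axis=1)
out1, out2, out3 = np.split(out, 3, axis=0)             # P.Split(0, 3)
print(out1.shape, np.allclose(np.linalg.norm(out1, axis=1), 1.0))  # (4, 128) True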
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport argparse\nimport os\nimport time\nimport glob\nimport numpy as np\nimport PIL.Image as Image\nfrom tabulate import tabulate\n\n## Params\nparser = argparse.ArgumentParser()\nparser.add_argument('--label_path', type=str\n , help='directory of dataset label')\nparser.add_argument('--output_path', default=None, type=str\n , help='path of the predict files that generated by the model')\nparser.add_argument('--image_height', default=768, type=int, help='image_height')\nparser.add_argument('--image_width', default=768, type=int, help='image_width')\nparser.add_argument('--save_mask', default=0, type=int, help='0 for False, 1 for True')\nparser.add_argument('--mask_result_path', default='./mask_result', type=str\n , help='the folder to save the semantic mask images')\n\nargs = parser.parse_args()\n\ncityspallete = [\n 128, 64, 128,\n 244, 35, 232,\n 70, 70, 70,\n 102, 102, 156,\n 190, 153, 153,\n 153, 153, 153,\n 250, 170, 30,\n 220, 220, 0,\n 107, 142, 35,\n 152, 251, 152,\n 0, 130, 180,\n 220, 20, 60,\n 255, 0, 0,\n 0, 0, 142,\n 0, 0, 70,\n 0, 60, 100,\n 0, 80, 100,\n 0, 0, 230,\n 119, 11, 32,\n]\nclasses = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light',\n 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car',\n 'truck', 'bus', 'train', 'motorcycle', 'bicycle')\n\nclass SegmentationMetric():\n \"\"\"Computes pixAcc and mIoU metric scores\n \"\"\"\n def __init__(self, nclass):\n super(SegmentationMetric, self).__init__()\n self.nclass = nclass\n self.reset()\n\n def update(self, preds, labels):\n \"\"\"Updates the internal evaluation result.\n Parameters\n ----------\n labels : 'NumpyArray' or list of `NumpyArray`\n The labels of the data.\n preds : 'NumpyArray' or list of `NumpyArray`\n Predicted values.\n \"\"\"\n def evaluate_worker(self, pred, label):\n correct, labeled = batch_pix_accuracy(pred, label)\n inter, union = batch_intersection_union(pred, label, self.nclass)\n self.total_correct += correct\n self.total_label += labeled\n self.total_inter += inter\n self.total_union += union\n evaluate_worker(self, preds, labels)\n\n def get(self, return_category_iou=False):\n \"\"\"Gets the current evaluation result.\n Returns\n -------\n metrics : tuple of float\n pixAcc and mIoU\n \"\"\"\n # remove np.spacing(1)\n pixAcc = 1.0 * self.total_correct / (2.220446049250313e-16 + self.total_label)\n IoU = 1.0 * self.total_inter / (2.220446049250313e-16 + self.total_union)\n mIoU = IoU.mean().item()\n if return_category_iou:\n return pixAcc, mIoU, IoU\n return pixAcc, mIoU\n\n def reset(self):\n \"\"\"Resets the internal evaluation result to initial state.\"\"\"\n self.total_inter = np.zeros(self.nclass)\n self.total_union = np.zeros(self.nclass)\n self.total_correct = 0\n self.total_label = 0\n\ndef batch_pix_accuracy(output, target):\n \"\"\"PixAcc\"\"\"\n # inputs are numpy 
array, output 4D NCHW where 'C' means label classes, target 3D NHW\n predict = np.argmax(output.astype(np.int64), 1) + 1\n target = target.astype(np.int64) + 1\n pixel_labeled = (target > 0).sum()\n pixel_correct = ((predict == target) * (target > 0)).sum()\n assert pixel_correct <= pixel_labeled, \"Correct area should be smaller than Labeled\"\n return pixel_correct, pixel_labeled\n\ndef batch_intersection_union(output, target, nclass):\n \"\"\"mIoU\"\"\"\n # inputs are numpy array, output 4D, target 3D\n mini = 1\n maxi = nclass\n nbins = nclass\n predict = np.argmax(output.astype(np.float32), 1) + 1\n target = target.astype(np.float32) + 1\n\n predict = predict.astype(np.float32) * (target > 0).astype(np.float32)\n intersection = predict * (predict == target).astype(np.float32)\n # areas of intersection and union\n # element 0 in intersection occur the main difference from np.bincount. set boundary to -1 is necessary.\n area_inter, _ = np.histogram(intersection, bins=nbins, range=(mini, maxi))\n area_pred, _ = np.histogram(predict, bins=nbins, range=(mini, maxi))\n area_lab, _ = np.histogram(target, bins=nbins, range=(mini, maxi))\n area_union = area_pred + area_lab - area_inter\n assert (area_inter > area_union).sum() == 0, \"Intersection area should be smaller than Union area\"\n return area_inter.astype(np.float32), area_union.astype(np.float32)\n\ndef cal_mIoU():\n file_list = glob.glob(os.path.join(args.label_path, '*'))\n start_time = time.time()\n metric = SegmentationMetric(19)\n metric.reset()\n if args.save_mask and not os.path.exists(args.mask_result_path):\n os.makedirs(args.mask_result_path)\n for index, file in enumerate(sorted(file_list)):\n label = np.fromfile(file, dtype=np.int32)\n label = label.reshape(args.image_height, args.image_width)\n\n filename = file.split(os.sep)[-1][:-10] # get the name of image file\n predict_path = os.path.join(args.output_path, filename + \"_img_0.bin\")\n predict = np.fromfile(predict_path, dtype=np.float32)\n predict = predict.reshape(1, 19, args.image_height, args.image_width)\n metric.update(predict, label)\n pixAcc, mIoU = metric.get()\n print(\"[EVAL] Sample: {:d}, pixAcc: {:.3f}, mIoU: {:.3f}\".format(index + 1, pixAcc * 100, mIoU * 100))\n\n if args.save_mask:\n output = np.argmax(predict[0], axis=0)\n out_img = Image.fromarray(output.astype('uint8'))\n out_img.putpalette(cityspallete)\n outname = str(filename) + '.png'\n out_img.save(os.path.join(args.mask_result_path, outname))\n\n pixAcc, mIoU, category_iou = metric.get(return_category_iou=True)\n print('End validation pixAcc: {:.3f}, mIoU: {:.3f}'.format(pixAcc * 100, mIoU * 100))\n txtName = os.path.join(args.mask_result_path, \"eval_results.txt\")\n with open(txtName, \"w\") as f:\n string = 'validation pixAcc:' + str(pixAcc * 100) + ', mIoU:' + str(mIoU * 100)\n f.write(string)\n f.write('\\n')\n headers = ['class id', 'class name', 'iou']\n table = []\n for i, cls_name in enumerate(classes):\n table.append([cls_name, category_iou[i]])\n string = 'class name: ' + cls_name + ' iou: ' + str(category_iou[i]) + '\\n'\n f.write(string)\n print('Category iou: \\n {}'.format(tabulate(table, headers, \\\n tablefmt='grid', showindex=\"always\", numalign='center', stralign='center')))\n time_used = time.time() - start_time\n print(\"Time cost:\"+str(time_used)+\" seconds!\")\n\nif __name__ == '__main__':\n cal_mIoU()\n",
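A tiny worked example of the metric math above: pixAcc counts correctly classified labeled pixels, and per-class IoU comes from histogramming the intersection, prediction, and label areas over the class range, as batch_pix_accuracy and batch_intersection_union do. Shapes follow the script (NCHW logits, NHW labels); the data is random.

import numpy as np

nclass = 3
output = np.random.default_rng(0).standard_normal((1, nclass, 4, 4)).astype(np.float32)
target = np.random.default_rng(1).integers(0, nclass, size=(1, 4, 4))

predict = np.argmax(output, 1) + 1          # 1-based class ids, as in the script
t = target + 1
pix_acc = ((predict == t) & (t > 0)).sum() / (t > 0).sum()

inter = (predict * (predict == t))[t > 0]   # zeros (mismatches) fall outside the range below
area_inter, _ = np.histogram(inter, bins=nclass, range=(1, nclass))
area_pred, _ = np.histogram(predict[t > 0], bins=nclass, range=(1, nclass))
area_lab, _ = np.histogram(t[t > 0], bins=nclass, range=(1, nclass))
iou = area_inter / np.maximum(area_pred + area_lab - area_inter, 1)
print('pixAcc %.3f  mIoU %.3f' % (pix_acc, iou.mean()))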
"\"\"\"\nCopyright 2021 Huawei Technologies Co., Ltd\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport sys\nimport io\nimport cv2\nimport numpy as np\nimport MxpiDataType_pb2 as MxpiDataType\nfrom PIL import Image\nfrom StreamManagerApi import MxDataInput, StringVector, StreamManagerApi, InProtobufVector, MxProtobufIn\n\nif __name__ == '__main__':\n stream_manager_api = StreamManagerApi()\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n exit()\n\n # create streams by pipeline config file\n with open(\"../pipeline/faceattribute.pipeline\", 'rb') as f:\n pipelineStr = f.read()\n ret = stream_manager_api.CreateMultipleStreams(pipelineStr)\n\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % str(ret))\n exit()\n\n # Construct the input of the stream\n data_input = MxDataInput()\n\n # label_txt index\n label_txt = sys.argv[1]\n\n # Define the required later\n total_data_num_age, total_data_num_gen, total_data_num_mask = 0, 0, 0\n age_num, gen_num, mask_num = 0, 0, 0\n gen_tp_num, mask_tp_num, gen_fp_num = 0, 0, 0\n mask_fp_num, gen_fn_num, mask_fn_num = 0, 0, 0\n with open(label_txt, 'r') as ft:\n lines = ft.readlines()\n for line in lines:\n sline = line.strip().split(\" \")\n image_file = sline[0]\n imgName = image_file.split('/')\n # Get the name of the image\n file_name = imgName[-1]\n gt_age, gt_gen, gt_mask = int(sline[1]), int(sline[2]), int(sline[3])\n # Get the total path to the image\n file_path = sline[0]\n if not (file_name.lower().endswith(\".jpg\") or file_name.lower().endswith(\".jpeg\")):\n continue\n print(\"processing img \", file_path)\n with open(file_path, 'rb') as f:\n img = f.read()\n # Reproduce the preprocessing operations in the eval.py file\n data = io.BytesIO(img)\n img = Image.open(data)\n img = img.convert('RGB')\n img = np.asarray(img)\n img = cv2.resize(img, (112, 112))\n img = (img - 127.5) / 127.5\n img = img.astype(np.float32)\n img = img.transpose(2, 0, 1)\n tensor = img[None]\n\n inPluginId = 0\n tensorPackageList = MxpiDataType.MxpiTensorPackageList()\n tensorPackage = tensorPackageList.tensorPackageVec.add()\n # add feature data begin\n array_bytes = tensor.tobytes()\n dataInput = MxDataInput()\n dataInput.data = array_bytes\n tensorVec = tensorPackage.tensorVec.add()\n tensorVec.deviceId = 0\n tensorVec.memType = 0\n for i in tensor.shape:\n tensorVec.tensorShape.append(i)\n tensorVec.dataStr = dataInput.data\n # compute the number of bytes of feature data\n tensorVec.tensorDataSize = len(array_bytes)\n # add feature data end\n\n key = \"appsrc{}\".format(inPluginId).encode('utf-8')\n protobufVec = InProtobufVector()\n protobuf = MxProtobufIn()\n protobuf.key = key\n protobuf.type = b'MxTools.MxpiTensorPackageList'\n protobuf.protobuf = tensorPackageList.SerializeToString()\n protobufVec.push_back(protobuf)\n\n empty_data = []\n stream_name = b'im_resnet18'\n\n in_plugin_id = 0\n uniqueId = stream_manager_api.SendProtobuf(stream_name, inPluginId, protobufVec)\n if 
uniqueId < 0:\n print(\"Failed to send data to stream.\")\n exit()\n keyVec = StringVector()\n keyVec.push_back(b'mxpi_tensorinfer0')\n # get inference result\n inferResult = stream_manager_api.GetProtobuf(stream_name, 0, keyVec)\n\n if inferResult.size() == 0:\n print(\"inferResult is null\")\n exit()\n if inferResult[0].errorCode != 0:\n print(\"GetProtobuf error. errorCode=%d\" % (\n inferResult[0].errorCode))\n exit()\n result = MxpiDataType.MxpiTensorPackageList()\n result.ParseFromString(inferResult[0].messageBuf)\n resAge = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr, dtype='float32')\n resGender = np.frombuffer(result.tensorPackageVec[0].tensorVec[1].dataStr, dtype='float32')\n resMask = np.frombuffer(result.tensorPackageVec[0].tensorVec[2].dataStr, dtype='float32')\n\n age_result_np = np.empty(shape=(1, 9))\n flag = 0\n for item in resAge:\n age_result_np[0][flag] = item\n flag += 1\n\n gen_result_np = np.empty(shape=(1, 2))\n\n flag = 0\n for item in resGender:\n gen_result_np[0][flag] = item\n flag += 1\n mask_result_np = np.empty(shape=(1, 2))\n\n flag = 0\n for item in resMask:\n mask_result_np[0][flag] = item\n flag += 1\n age_prob = age_result_np[0].tolist()\n gen_prob = gen_result_np[0].tolist()\n mask_prob = mask_result_np[0].tolist()\n age = age_prob.index(max(age_prob))\n gen = gen_prob.index(max(gen_prob))\n mask = mask_prob.index(max(mask_prob))\n if gt_age == age:\n age_num += 1\n if gt_gen == gen:\n gen_num += 1\n if gt_mask == mask:\n mask_num += 1\n\n if gen == 1:\n if gt_gen == 1:\n gen_tp_num += 1\n elif gt_gen == 0:\n gen_fp_num += 1\n elif gen == 0 and gt_gen == 1:\n gen_fn_num += 1\n\n if gt_mask == 1 and mask == 1:\n mask_tp_num += 1\n if gt_mask == 0 and mask == 1:\n mask_fp_num += 1\n if gt_mask == 1 and mask == 0:\n mask_fn_num += 1\n\n if gt_age != -1:\n total_data_num_age += 1\n if gt_gen != -1:\n total_data_num_gen += 1\n if gt_mask != -1:\n total_data_num_mask += 1\n # The following package is not recommended if it has too many parameters\n print(\"age_num is \", age_num)\n age_accuracy = float(age_num) / float(total_data_num_age)\n\n gen_precision = float(gen_tp_num) / (float(gen_tp_num) + float(gen_fp_num))\n gen_recall = float(gen_tp_num) / (float(gen_tp_num) + float(gen_fn_num))\n gen_accuracy = float(gen_num) / float(total_data_num_gen)\n gen_f1 = 2. * gen_precision * gen_recall / (gen_precision + gen_recall)\n\n print(\"mask_tp_num is \" + str(mask_tp_num))\n print(\"mask_fn_num is \" + str(mask_fn_num))\n print(\"mask_fp_num is \" + str(mask_fp_num))\n mask_precision = float(mask_tp_num) / (float(mask_tp_num) + float(mask_fp_num))\n mask_recall = float(mask_tp_num) / (float(mask_tp_num) + float(mask_fn_num))\n mask_accuracy = float(mask_num) / float(total_data_num_mask)\n mask_f1 = 2. 
* mask_precision * mask_recall / (mask_precision + mask_recall)\n\n print('total age num: ', total_data_num_age)\n print('total gen num: ', total_data_num_gen)\n print('total mask num: ', total_data_num_mask)\n print('age accuracy: ', age_accuracy)\n print('gen accuracy: ', gen_accuracy)\n print('mask accuracy: ', mask_accuracy)\n print('gen precision: ', gen_precision)\n print('gen recall: ', gen_recall)\n print('gen f1: ', gen_f1)\n print('mask precision: ', mask_precision)\n print('mask recall: ', mask_recall)\n print('mask f1: ', mask_f1)\n\n result_txt = os.path.join('./result.txt')\n if os.path.exists(result_txt):\n os.remove(result_txt)\n with open(result_txt, 'a') as ft:\n ft.write('total age num: {}\\n'.format(total_data_num_age))\n ft.write('total gen num: {}\\n'.format(total_data_num_gen))\n ft.write('total mask num: {}\\n'.format(total_data_num_mask))\n ft.write('age accuracy: {}\\n'.format(age_accuracy))\n ft.write('gen accuracy: {}\\n'.format(gen_accuracy))\n ft.write('mask accuracy: {}\\n'.format(mask_accuracy))\n ft.write('gen precision: {}\\n'.format(gen_precision))\n ft.write('gen recall: {}\\n'.format(gen_recall))\n ft.write('gen f1: {}\\n'.format(gen_f1))\n ft.write('mask precision: {}\\n'.format(mask_precision))\n ft.write('mask recall: {}\\n'.format(mask_recall))\n ft.write('mask f1: {}\\n'.format(mask_f1))\n # destroy streams\n stream_manager_api.DestroyAllStreams()\n",
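The evaluation above tallies per-attribute TP/FP/FN counts and then derives accuracy, precision, recall, and F1. A minimal sketch of that final step, with zero-denominator guards that the script itself omits (`prf1` is a hypothetical helper name):

```python
# Minimal sketch: precision/recall/F1 from TP/FP/FN tallies, mirroring the
# gender/mask bookkeeping above. Denominators are guarded against zero.
def prf1(tp, fp, fn):
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1

# Hypothetical tallies: 90 true positives, 10 false positives, 5 false negatives.
p, r, f = prf1(90, 10, 5)
print("precision=%.3f recall=%.3f f1=%.3f" % (p, r, f))
```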
"# Copyright 2020 Huawei Technologies Co., Ltd\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport json\nimport os\nimport warnings\nimport sys\nimport numpy as np\nimport cv2\nfrom scipy.ndimage.filters import gaussian_filter\nfrom tqdm import tqdm\nfrom pycocotools.coco import COCO as LoadAnn\nfrom pycocotools.cocoeval import COCOeval as MapEval\nfrom mindspore import context, Tensor\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\nfrom mindspore.communication.management import init\nfrom mindspore.common import dtype as mstype\nfrom src.openposenet import OpenPoseNet\nfrom src.dataset import valdata\nfrom src.model_utils.config import config, JointType\nfrom src.model_utils.moxing_adapter import moxing_wrapper\nfrom src.model_utils.device_adapter import get_device_id, get_rank_id, get_device_num\n\n\nwarnings.filterwarnings(\"ignore\")\ndevid = get_device_id()\ncontext.set_context(mode=context.GRAPH_MODE,\n device_target=config.device_target, save_graphs=False, device_id=devid)\nshow_gt = 0\n\n\ndef evaluate_mAP(res_file, ann_file, ann_type='keypoints', silence=True):\n class NullWriter():\n def write(self, arg):\n pass\n if silence:\n nullwrite = NullWriter()\n oldstdout = sys.stdout\n sys.stdout = nullwrite # disable output\n\n Gt = LoadAnn(ann_file)\n Dt = Gt.loadRes(res_file)\n\n Eval = MapEval(Gt, Dt, ann_type)\n Eval.evaluate()\n Eval.accumulate()\n Eval.summarize()\n\n if silence:\n sys.stdout = oldstdout # enable output\n\n stats_names = ['AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)',\n 'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']\n info_str = {}\n for ind, name in enumerate(stats_names):\n info_str[name] = Eval.stats[ind]\n\n return info_str\n\n\ndef load_model(test_net, model_path):\n assert os.path.exists(model_path)\n param_dict = load_checkpoint(model_path)\n param_dict_new = {}\n for key, values in param_dict.items():\n\n if key.startswith('moment'):\n continue\n elif key.startswith('network'):\n param_dict_new[key[8:]] = values\n\n load_param_into_net(test_net, param_dict_new)\n\n\ndef preprocess(img):\n x_data = img.astype('f')\n x_data /= 255\n x_data -= 0.5\n x_data = x_data.transpose(2, 0, 1)[None]\n return x_data\n\n\ndef getImgsPath(img_dir_path):\n filepaths = []\n dirpaths = []\n pathName = img_dir_path\n\n for root, dirs, files in os.walk(pathName):\n for file in files:\n file_path = os.path.join(root, file)\n filepaths.append(file_path)\n for d in dirs:\n dir_path = os.path.join(root, d)\n dirpaths.append(dir_path)\n return filepaths\n\n\ndef compute_optimal_size(orig_img, img_size, stride=8):\n orig_img_h, orig_img_w, _ = orig_img.shape\n aspect = orig_img_h / orig_img_w\n if orig_img_h < orig_img_w:\n img_h = img_size\n img_w = np.round(img_size / aspect).astype(int)\n surplus = img_w % stride\n if surplus != 0:\n img_w += stride - surplus\n else:\n img_w = img_size\n img_h = np.round(img_size * aspect).astype(int)\n surplus = img_h % stride\n if 
surplus != 0:\n img_h += stride - surplus\n return (img_w, img_h)\n\n\ndef compute_peaks_from_heatmaps(heatmaps):\n\n heatmaps = heatmaps[:-1]\n\n all_peaks = []\n peak_counter = 0\n for i, heatmap in enumerate(heatmaps):\n heatmap = gaussian_filter(heatmap, sigma=config.gaussian_sigma)\n\n map_left = np.zeros(heatmap.shape)\n map_right = np.zeros(heatmap.shape)\n map_top = np.zeros(heatmap.shape)\n map_bottom = np.zeros(heatmap.shape)\n\n map_left[1:, :] = heatmap[:-1, :]\n map_right[:-1, :] = heatmap[1:, :]\n map_top[:, 1:] = heatmap[:, :-1]\n map_bottom[:, :-1] = heatmap[:, 1:]\n\n peaks_binary = np.logical_and.reduce((\n heatmap > config.heatmap_peak_thresh,\n heatmap > map_left,\n heatmap > map_right,\n heatmap > map_top,\n heatmap > map_bottom,\n ))\n\n peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])\n\n peaks_with_score = [(i,) + peak_pos + (heatmap[peak_pos[1], peak_pos[0]],) for peak_pos in peaks]\n\n peaks_id = range(peak_counter, peak_counter + len(peaks_with_score))\n peaks_with_score_and_id = [peaks_with_score[i] + (peaks_id[i],) for i in range(len(peaks_id))]\n\n peak_counter += len(peaks_with_score_and_id)\n all_peaks.append(peaks_with_score_and_id)\n all_peaks = np.array([peak for peaks_each_category in all_peaks for peak in peaks_each_category])\n\n return all_peaks\n\n\ndef compute_candidate_connections(paf, cand_a, cand_b, img_len, params_):\n candidate_connections = []\n for joint_a in cand_a:\n for joint_b in cand_b:\n vector = joint_b[:2] - joint_a[:2]\n norm = np.linalg.norm(vector)\n if norm == 0:\n continue\n ys = np.linspace(joint_a[1], joint_b[1], num=params_.n_integ_points)\n xs = np.linspace(joint_a[0], joint_b[0], num=params_.n_integ_points)\n integ_points = np.stack([ys, xs]).T.round().astype('i')\n\n paf_in_edge = np.hstack([paf[0][np.hsplit(integ_points, 2)], paf[1][np.hsplit(integ_points, 2)]])\n unit_vector = vector / norm\n inner_products = np.dot(paf_in_edge, unit_vector)\n integ_value = inner_products.sum() / len(inner_products)\n integ_value_with_dist_prior = integ_value + min(params_.limb_length_ratio * img_len / norm -\n params_.length_penalty_value, 0)\n n_valid_points = sum(inner_products > params_.inner_product_thresh)\n if n_valid_points > params_.n_integ_points_thresh and integ_value_with_dist_prior > 0:\n candidate_connections.append([int(joint_a[3]), int(joint_b[3]), integ_value_with_dist_prior])\n candidate_connections = sorted(candidate_connections, key=lambda x: x[2], reverse=True)\n return candidate_connections\n\n\ndef compute_connections(pafs, all_peaks, img_len, params_):\n all_connections = []\n for i in range(len(params_.limbs_point)):\n paf_index = [i * 2, i * 2 + 1]\n paf = pafs[paf_index] # shape: (2, 320, 320)\n limb_point = params_.limbs_point[i] # example: [<JointType.Neck: 1>, <JointType.RightWaist: 8>]\n cand_a = all_peaks[all_peaks[:, 0] == limb_point[0]][:, 1:]\n cand_b = all_peaks[all_peaks[:, 0] == limb_point[1]][:, 1:]\n\n if cand_a.shape[0] > 0 and cand_b.shape[0] > 0:\n candidate_connections = compute_candidate_connections(paf, cand_a, cand_b, img_len, params_)\n\n connections = np.zeros((0, 3))\n\n for index_a, index_b, score in candidate_connections:\n if index_a not in connections[:, 0] and index_b not in connections[:, 1]:\n connections = np.vstack([connections, [index_a, index_b, score]])\n if len(connections) >= min(len(cand_a), len(cand_b)):\n break\n all_connections.append(connections)\n else:\n all_connections.append(np.zeros((0, 3)))\n return all_connections\n\ndef 
grouping_key_points(all_connections, candidate_peaks, params_):\n subsets = -1 * np.ones((0, 20))\n\n for l, connections in enumerate(all_connections):\n joint_a, joint_b = params_.limbs_point[l]\n for ind_a, ind_b, score in connections[:, :3]:\n ind_a, ind_b = int(ind_a), int(ind_b)\n joint_found_cnt = 0\n joint_found_subset_index = [-1, -1]\n for subset_ind, subset in enumerate(subsets):\n\n if subset[joint_a] == ind_a or subset[joint_b] == ind_b:\n joint_found_subset_index[joint_found_cnt] = subset_ind\n joint_found_cnt += 1\n\n if joint_found_cnt == 1:\n\n found_subset = subsets[joint_found_subset_index[0]]\n if found_subset[joint_b] != ind_b:\n found_subset[joint_b] = ind_b\n found_subset[-1] += 1 # increment joint count\n found_subset[-2] += candidate_peaks[ind_b, 3] + score\n\n\n elif joint_found_cnt == 2:\n\n found_subset_1 = subsets[joint_found_subset_index[0]]\n found_subset_2 = subsets[joint_found_subset_index[1]]\n\n membership = ((found_subset_1 >= 0).astype(int) + (found_subset_2 >= 0).astype(int))[:-2]\n if not np.any(membership == 2): # merge two subsets when no duplication\n found_subset_1[:-2] += found_subset_2[:-2] + 1 # default is -1\n found_subset_1[-2:] += found_subset_2[-2:]\n found_subset_1[-2] += score\n subsets = np.delete(subsets, joint_found_subset_index[1], axis=0)\n else:\n if found_subset_1[joint_a] == -1:\n found_subset_1[joint_a] = ind_a\n found_subset_1[-1] += 1\n found_subset_1[-2] += candidate_peaks[ind_a, 3] + score\n elif found_subset_1[joint_b] == -1:\n found_subset_1[joint_b] = ind_b\n found_subset_1[-1] += 1\n found_subset_1[-2] += candidate_peaks[ind_b, 3] + score\n if found_subset_2[joint_a] == -1:\n found_subset_2[joint_a] = ind_a\n found_subset_2[-1] += 1\n found_subset_2[-2] += candidate_peaks[ind_a, 3] + score\n elif found_subset_2[joint_b] == -1:\n found_subset_2[joint_b] = ind_b\n found_subset_2[-1] += 1\n found_subset_2[-2] += candidate_peaks[ind_b, 3] + score\n\n elif joint_found_cnt == 0 and l != 9 and l != 13:\n row = -1 * np.ones(20)\n row[joint_a] = ind_a\n row[joint_b] = ind_b\n row[-1] = 2\n row[-2] = sum(candidate_peaks[[ind_a, ind_b], 3]) + score\n subsets = np.vstack([subsets, row])\n elif joint_found_cnt >= 3:\n pass\n\n # delete low score subsets\n keep = np.logical_and(subsets[:, -1] >= params_.n_subset_limbs_thresh,\n subsets[:, -2] / subsets[:, -1] >= params_.subset_score_thresh)\n subsets = subsets[keep]\n return subsets\n\n\ndef subsets_to_pose_array(subsets, all_peaks):\n person_pose_array = []\n for subset in subsets:\n joints = []\n for joint_index in subset[:18].astype('i'):\n if joint_index >= 0:\n joint = all_peaks[joint_index][1:3].tolist()\n joint.append(2)\n joints.append(joint)\n else:\n joints.append([0, 0, 0])\n person_pose_array.append(np.array(joints))\n person_pose_array = np.array(person_pose_array)\n return person_pose_array\n\ndef detect(img, network):\n orig_img = img.copy()\n orig_img_h, orig_img_w, _ = orig_img.shape\n\n input_w, input_h = compute_optimal_size(orig_img, config.inference_img_size) # 368\n map_w, map_h = compute_optimal_size(orig_img, config.inference_img_size)\n\n resized_image = cv2.resize(orig_img, (input_w, input_h))\n x_data = preprocess(resized_image)\n x_data = Tensor(x_data, mstype.float32)\n x_data.requires_grad = False\n\n logit_pafs, logit_heatmap = network(x_data)\n\n logit_pafs = logit_pafs[-1].asnumpy()[0]\n logit_heatmap = logit_heatmap[-1].asnumpy()[0]\n\n pafs = np.zeros((logit_pafs.shape[0], map_h, map_w))\n for i in range(logit_pafs.shape[0]):\n pafs[i] = 
cv2.resize(logit_pafs[i], (map_w, map_h))\n if show_gt:\n save_path = \"./test_output/\" + str(i) + \"pafs.png\"\n cv2.imwrite(save_path, pafs[i]*255)\n\n heatmaps = np.zeros((logit_heatmap.shape[0], map_h, map_w))\n for i in range(logit_heatmap.shape[0]):\n heatmaps[i] = cv2.resize(logit_heatmap[i], (map_w, map_h))\n if show_gt:\n save_path = \"./test_output/\" + str(i) + \"heatmap.png\"\n cv2.imwrite(save_path, heatmaps[i]*255)\n\n all_peaks = compute_peaks_from_heatmaps(heatmaps)\n if all_peaks.shape[0] == 0:\n return np.empty((0, len(JointType), 3)), np.empty(0)\n all_connections = compute_connections(pafs, all_peaks, map_w, config)\n subsets = grouping_key_points(all_connections, all_peaks, config)\n all_peaks[:, 1] *= orig_img_w / map_w\n all_peaks[:, 2] *= orig_img_h / map_h\n poses = subsets_to_pose_array(subsets, all_peaks)\n scores = subsets[:, -2]\n\n return poses, scores\n\ndef draw_person_pose(orig_img, poses):\n orig_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB)\n if poses.shape[0] == 0:\n return orig_img\n\n limb_colors = [\n [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255],\n [0, 85, 255], [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0.],\n [255, 0, 85], [170, 255, 0], [85, 255, 0], [170, 0, 255.], [0, 0, 255],\n [0, 0, 255], [255, 0, 255], [170, 0, 255], [255, 0, 170],\n ]\n\n joint_colors = [\n [255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0],\n [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255],\n [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255],\n [255, 0, 255], [255, 0, 170], [255, 0, 85]]\n\n canvas = orig_img.copy()\n\n # limbs\n for pose in poses.round().astype('i'):\n for i, (limb, color) in enumerate(zip(config.limbs_point, limb_colors)):\n if i not in (9, 13): # don't show ear-shoulder connection\n limb_ind = np.array(limb)\n if np.all(pose[limb_ind][:, 2] != 0):\n joint1, joint2 = pose[limb_ind][:, :2]\n cv2.line(canvas, tuple(joint1), tuple(joint2), color, 2)\n\n # joints\n for pose in poses.round().astype('i'):\n for i, ((x, y, v), color) in enumerate(zip(pose, joint_colors)):\n if v != 0:\n cv2.circle(canvas, (x, y), 3, color, -1)\n return canvas\n\n\ndef depreprocess(img):\n x_data = img[0]\n x_data += 0.5\n x_data *= 255\n x_data = x_data.astype('uint8')\n x_data = x_data.transpose(1, 2, 0)\n return x_data\n\n\n@moxing_wrapper(pre_process=None)\ndef val():\n config.rank = get_rank_id()\n config.group_size = get_device_num()\n\n if config.is_distributed:\n init()\n config.rank = get_rank_id()\n config.group_size = get_device_num()\n if not os.path.exists(config.output_img_path):\n os.mkdir(config.output_img_path)\n network = OpenPoseNet(vgg_with_bn=config.vgg_with_bn)\n network.set_train(False)\n load_model(network, config.model_path)\n\n print(\"load models right\")\n dataset = valdata(config.ann, config.imgpath_val, config.rank, config.group_size, mode='val')\n dataset_size = dataset.get_dataset_size()\n de_dataset = dataset.create_tuple_iterator()\n\n print(\"eval dataset size: \", dataset_size)\n kpt_json = []\n for _, (img, img_id) in tqdm(enumerate(de_dataset), total=dataset_size):\n img = img.asnumpy()\n img_id = int((img_id.asnumpy())[0])\n poses, scores = detect(img, network)\n\n if poses.shape[0] > 0:\n for index, pose in enumerate(poses):\n data = dict()\n\n pose = pose[[0, 15, 14, 17, 16, 5, 2, 6, 3, 7, 4, 11, 8, 12, 9, 13, 10, 1], :].round().astype('i')\n\n keypoints = pose.reshape(-1).tolist()\n keypoints = keypoints[:-3]\n data['image_id'] = img_id\n 
data['score'] = scores[index]\n data['category_id'] = 1\n data['keypoints'] = keypoints\n kpt_json.append(data)\n else:\n print(\"Predict poses size is zero.\", flush=True)\n img = draw_person_pose(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), poses)\n\n save_path = os.path.join(config.output_img_path, str(img_id)+\".png\")\n cv2.imwrite(save_path, img)\n\n result_json = 'eval_result.json'\n with open(os.path.join(config.output_img_path, result_json), 'w') as fid:\n json.dump(kpt_json, fid)\n res = evaluate_mAP(os.path.join(config.output_img_path, result_json), ann_file=config.ann)\n print('result: ', res)\n\n\nif __name__ == \"__main__\":\n val()\n",
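`compute_peaks_from_heatmaps` finds keypoint candidates by comparing each Gaussian-smoothed heatmap against four one-pixel-shifted copies of itself. A self-contained sketch of that local-maximum test; `thresh` and `sigma` stand in for `config.heatmap_peak_thresh` and `config.gaussian_sigma`:

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def local_peaks(heatmap, thresh=0.05, sigma=1.0):
    """Return (row, col) coordinates of strict local maxima above `thresh`,
    using the same shifted-copy comparison as compute_peaks_from_heatmaps."""
    h = gaussian_filter(heatmap, sigma=sigma)
    left, right = np.zeros_like(h), np.zeros_like(h)
    top, bottom = np.zeros_like(h), np.zeros_like(h)
    left[1:, :] = h[:-1, :]
    right[:-1, :] = h[1:, :]
    top[:, 1:] = h[:, :-1]
    bottom[:, :-1] = h[:, 1:]
    mask = (h > thresh) & (h > left) & (h > right) & (h > top) & (h > bottom)
    return np.argwhere(mask)

demo = np.zeros((5, 5))
demo[2, 3] = 1.0
print(local_peaks(demo))  # -> [[2 3]]
```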
"# Copyright 2020-2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Face Quality Assessment eval.\"\"\"\nimport os\nimport time\nimport warnings\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\n\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\nfrom mindspore.ops import operations as P\nfrom mindspore import context\n\nfrom src.face_qa import FaceQABackbone\n\nfrom model_utils.config import config\nfrom model_utils.moxing_adapter import moxing_wrapper\nfrom model_utils.device_adapter import get_device_id, get_device_num\n\nwarnings.filterwarnings('ignore')\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n return np.exp(x) / np.sum(np.exp(x), axis=1)\n\ndef get_md_output(out):\n '''get md output'''\n out_eul = out[0].asnumpy().astype(np.float32)[0]\n heatmap = out[1].asnumpy().astype(np.float32)[0]\n eulers = out_eul * 90\n\n kps_score_sum = 0\n kp_scores = list()\n kp_coord_ori = list()\n\n for i, _ in enumerate(heatmap):\n map_1 = heatmap[i].reshape(1, 48*48)\n map_1 = softmax(map_1)\n\n kp_coor = map_1.argmax()\n max_response = map_1.max()\n kp_scores.append(max_response)\n kps_score_sum += min(max_response, 0.25)\n kp_coor = int((kp_coor % 48) * 2.0), int((kp_coor / 48) * 2.0)\n kp_coord_ori.append(kp_coor)\n\n return kp_scores, kps_score_sum, kp_coord_ori, eulers, 1\n\n\ndef read_gt(txt_path, x_length, y_length):\n '''read gt'''\n txt_line = open(txt_path).readline()\n eulers_txt = txt_line.strip().split(\" \")[:3]\n kp_list = [[-1, -1], [-1, -1], [-1, -1], [-1, -1], [-1, -1]]\n box_cur = txt_line.strip().split(\" \")[3:]\n bndbox = []\n for index in range(len(box_cur) // 2):\n bndbox.append([box_cur[index * 2], box_cur[index * 2 + 1]])\n kp_id = -1\n for box in bndbox:\n kp_id = kp_id + 1\n x_coord = float(box[0])\n y_coord = float(box[1])\n if x_coord < 0 or y_coord < 0:\n continue\n\n kp_list[kp_id][0] = int(float(x_coord) / x_length * 96)\n\n kp_list[kp_id][1] = int(float(y_coord) / y_length * 96)\n\n return eulers_txt, kp_list\n\n\ndef read_img(img_path):\n img_ori = cv2.imread(img_path)\n img = cv2.cvtColor(img_ori, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (96, 96))\n img = img.transpose(2, 0, 1)\n img = np.array([img]).astype(np.float32)/255.\n img = Tensor(img)\n return img, img_ori\n\n\nblur_soft = nn.Softmax(0)\nkps_soft = nn.Softmax(-1)\nreshape = P.Reshape()\nargmax = P.ArgMaxWithValue()\n\n\ndef modelarts_pre_process():\n '''modelarts pre process function.'''\n def unzip(zip_file, save_dir):\n import zipfile\n s_time = time.time()\n if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):\n zip_isexist = zipfile.is_zipfile(zip_file)\n if zip_isexist:\n fz = zipfile.ZipFile(zip_file, 'r')\n data_num = len(fz.namelist())\n print(\"Extract Start...\")\n print(\"unzip file num: 
{}\".format(data_num))\n data_print = int(data_num / 100) if data_num > 100 else 1\n i = 0\n for file in fz.namelist():\n if i % data_print == 0:\n print(\"unzip percent: {}%\".format(int(i * 100 / data_num)), flush=True)\n i += 1\n fz.extract(file, save_dir)\n print(\"cost time: {}min:{}s.\".format(int((time.time() - s_time) / 60),\n int(int(time.time() - s_time) % 60)))\n print(\"Extract Done.\")\n else:\n print(\"This is not zip.\")\n else:\n print(\"Zip has been extracted.\")\n\n if config.need_modelarts_dataset_unzip:\n zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + \".zip\")\n save_dir_1 = os.path.join(config.data_path)\n\n sync_lock = \"/tmp/unzip_sync.lock\"\n\n # Each server contains 8 devices as most.\n if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):\n print(\"Zip file path: \", zip_file_1)\n print(\"Unzip file save dir: \", save_dir_1)\n unzip(zip_file_1, save_dir_1)\n print(\"===Finish extract data synchronization===\")\n try:\n os.mknod(sync_lock)\n except IOError:\n pass\n\n while True:\n if os.path.exists(sync_lock):\n break\n time.sleep(1)\n\n print(\"Device: {}, Finish sync unzip data from {} to {}.\".format(get_device_id(), zip_file_1, save_dir_1))\n\n\n@moxing_wrapper(pre_process=modelarts_pre_process)\ndef run_eval():\n '''run eval'''\n print('----eval----begin----')\n\n model_path = config.pretrained\n result_file = model_path.replace('.ckpt', '.txt')\n if os.path.exists(result_file):\n os.remove(result_file)\n epoch_result = open(result_file, 'a')\n epoch_result.write(model_path + '\\n')\n\n network = FaceQABackbone()\n ckpt_path = model_path\n\n if os.path.isfile(ckpt_path):\n param_dict = load_checkpoint(ckpt_path)\n\n param_dict_new = {}\n for key, values in param_dict.items():\n if key.startswith('moments.'):\n continue\n elif key.startswith('network.'):\n param_dict_new[key[8:]] = values\n else:\n param_dict_new[key] = values\n load_param_into_net(network, param_dict_new)\n\n else:\n print('wrong model path')\n return 1\n\n path = config.eval_dir\n kp_error_all = [[], [], [], [], []]\n eulers_error_all = [[], [], []]\n kp_ipn = []\n\n file_list = os.listdir(path)\n for file_name in tqdm(file_list):\n if file_name.endswith('jpg'):\n img_path = os.path.join(path, file_name)\n img, img_ori = read_img(img_path)\n\n txt_path = img_path.replace('jpg', 'txt')\n\n if os.path.exists(txt_path):\n euler_kps_do = True\n x_length = img_ori.shape[1]\n y_length = img_ori.shape[0]\n eulers_gt, kp_list = read_gt(txt_path, x_length, y_length)\n else:\n euler_kps_do = False\n continue\n\n out = network(img)\n\n _, _, kp_coord_ori, eulers_ori, _ = get_md_output(out)\n\n if euler_kps_do:\n eulgt = list(eulers_gt)\n for euler_id, _ in enumerate(eulers_ori):\n eulori = eulers_ori[euler_id]\n eulers_error_all[euler_id].append(abs(eulori-float(eulgt[euler_id])))\n\n eye01 = kp_list[0]\n eye02 = kp_list[1]\n eye_dis = 1\n cur_flag = True\n if eye01[0] < 0 or eye01[1] < 0 or eye02[0] < 0 or eye02[1] < 0:\n cur_flag = False\n else:\n eye_dis = np.sqrt(np.square(abs(eye01[0]-eye02[0]))+np.square(abs(eye01[1]-eye02[1])))\n cur_error_list = []\n for i in range(5):\n kp_coord_gt = kp_list[i]\n kp_coord_model = kp_coord_ori[i]\n if kp_coord_gt[0] != -1:\n dis = np.sqrt(np.square(\n kp_coord_gt[0] - kp_coord_model[0]) + np.square(kp_coord_gt[1] - kp_coord_model[1]))\n kp_error_all[i].append(dis)\n cur_error_list.append(dis)\n if cur_flag:\n kp_ipn.append(sum(cur_error_list)/len(cur_error_list)/eye_dis)\n\n kp_ave_error = 
[]\n for kps, _ in enumerate(kp_error_all):\n kp_ave_error.append(\"%.3f\" % (sum(kp_error_all[kps])/len(kp_error_all[kps])))\n\n euler_ave_error = []\n elur_mae = []\n for eulers, _ in enumerate(eulers_error_all):\n euler_ave_error.append(\"%.3f\" % (sum(eulers_error_all[eulers])/len(eulers_error_all[eulers])))\n elur_mae.append((sum(eulers_error_all[eulers])/len(eulers_error_all[eulers])))\n\n print(r'5 keypoints average err:'+str(kp_ave_error))\n print(r'3 eulers average err:'+str(euler_ave_error))\n print('IPN of 5 keypoints:'+str(sum(kp_ipn)/len(kp_ipn)*100))\n print('MAE of elur:'+str(sum(elur_mae)/len(elur_mae)))\n\n epoch_result.write(str(sum(kp_ipn)/len(kp_ipn)*100)+'\\t'+str(sum(elur_mae)/len(elur_mae))+'\\t'\n + str(kp_ave_error)+'\\t'+str(euler_ave_error)+'\\n')\n\n print('----eval----end----')\n return 0\n\n\nif __name__ == \"__main__\":\n context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, save_graphs=False)\n if config.device_target == 'Ascend':\n devid = int(os.getenv('DEVICE_ID'))\n context.set_context(device_id=devid)\n run_eval()\n",
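`get_md_output` decodes each 48x48 heatmap by flattening it, applying softmax, and mapping the argmax back into the 96x96 input with a stride of 2. A standalone sketch of that decode:

```python
import numpy as np

def decode_keypoint(heatmap):
    """Flatten a 48x48 heatmap, softmax it, then map the argmax back to
    96x96 input coordinates, mirroring the decoding in get_md_output."""
    flat = heatmap.reshape(1, 48 * 48)
    flat = np.exp(flat) / np.sum(np.exp(flat), axis=1)
    idx = flat.argmax()
    x, y = int((idx % 48) * 2.0), int((idx // 48) * 2.0)
    return (x, y), flat.max()

hm = np.zeros((48, 48), np.float32)
hm[10, 20] = 5.0   # peak at row 10, column 20
print(decode_keypoint(hm))  # -> ((40, 20), ...): x = 20*2, y = 10*2
```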
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Export PINNs (Navier-Stokes) model\"\"\"\nimport numpy as np\nimport mindspore.common.dtype as mstype\nfrom mindspore import (Tensor, context, export, load_checkpoint,\n load_param_into_net)\nfrom src.NavierStokes.dataset import generate_training_set_navier_stokes\nfrom src.NavierStokes.net import PINNs_navier\n\n\ndef export_ns(num_neuron, path, ck_file, batch_size, export_format, export_name):\n \"\"\"\n export PINNs for Navier-Stokes model\n\n Args:\n num_neuron (int): number of neurons for fully connected layer in the network\n path (str): path of the dataset for Navier-Stokes equation\n ck_file (str): path for checkpoint file\n batch_size (int): batch size\n export_format (str): file format to export\n export_name (str): name of exported file\n \"\"\"\n context.set_context(mode=context.GRAPH_MODE, device_target='GPU')\n layers = [3, num_neuron, num_neuron, num_neuron, num_neuron, num_neuron, num_neuron, num_neuron,\n num_neuron, 2]\n\n _, lb, ub = generate_training_set_navier_stokes(10, 10, path, 0)\n\n n = PINNs_navier(layers, lb, ub)\n\n param_dict = load_checkpoint(ck_file)\n load_param_into_net(n, param_dict)\n\n inputs = Tensor(np.ones((batch_size, 3)), mstype.float32)\n export(n, inputs, file_name=export_name, file_format=export_format)\n",
"# coding = utf-8\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport json\nimport numpy as np\n\nnp.set_printoptions(threshold=sys.maxsize)\n\nLABEL_FILE = \"HiAI_label.json\"\n\n\ndef gen_file_name(img_name):\n full_name = img_name.split('/')[-1]\n return os.path.splitext(full_name)\n\n\ndef cre_groundtruth_dict(gtfile_path):\n \"\"\"\n :param filename: file contains the imagename and label number\n :return: dictionary key imagename, value is label number\n \"\"\"\n img_gt_dict = {}\n for gtfile in os.listdir(gtfile_path):\n if gtfile != LABEL_FILE:\n with open(os.path.join(gtfile_path, gtfile), 'r') as f:\n gt = json.load(f)\n ret = gt[\"image\"][\"annotations\"][0][\"category_id\"]\n img_gt_dict[gen_file_name(gtfile)] = ret\n return img_gt_dict\n\n\ndef cre_groundtruth_dict_fromtxt(gtfile_path):\n \"\"\"\n :param filename: file contains the imagename and label number\n :return: dictionary key imagename, value is label number\n \"\"\"\n img_gt_dict = {}\n with open(gtfile_path, 'r')as f:\n for line in f.readlines():\n temp = line.strip().split(\" \")\n img_name = temp[0].split(\".\")[0]\n img_lab = temp[1]\n img_gt_dict[img_name] = img_lab\n return img_gt_dict\n\n\ndef load_statistical_predict_result(filepath):\n \"\"\"\n function:\n the prediction esult file data extraction\n input:\n result file:filepath\n output:\n n_label:numble of label\n data_vec: the probabilitie of prediction in the 1000\n :return: probabilities, numble of label, in_type, color\n \"\"\"\n with open(filepath, 'r')as f:\n data = f.readline()\n temp = data.strip().split(\" \")\n n_label = len(temp)\n data_vec = np.zeros((n_label), dtype=np.float32)\n in_type = ''\n color = ''\n if n_label == 0:\n in_type = f.readline()\n color = f.readline()\n else:\n for ind, cls_ind in enumerate(temp):\n data_vec[ind] = np.int32(cls_ind)\n return data_vec, n_label, in_type, color\n\n\ndef create_visualization_statistical_result(prediction_file_path,\n result_store_path, file_name,\n img_gt_dict, topn=5):\n \"\"\"\n :param prediction_file_path:\n :param result_store_path:\n :param file_name:\n :param img_gt_dict:\n :param topn:\n :return:\n \"\"\"\n writer = open(os.path.join(result_store_path, file_name), 'w')\n table_dict = {\"title\": \"Overall statistical evaluation\", \"value\": []}\n\n count = 0\n res_cnt = 0\n n_labels = \"\"\n count_hit = np.zeros(topn)\n for tfile_name in os.listdir(prediction_file_path):\n count += 1\n temp = tfile_name.split('.')[0]\n index = temp.rfind('_')\n img_name = temp[:index]\n filepath = os.path.join(prediction_file_path, tfile_name)\n\n ret = load_statistical_predict_result(filepath)\n prediction = ret[0]\n n_labels = ret[1]\n\n gt = img_gt_dict[img_name]\n if n_labels == 1000:\n real_label = int(gt)\n elif n_labels == 1001:\n real_label = int(gt) + 1\n else:\n real_label = int(gt)\n\n res_cnt = min(len(prediction), topn)\n for i in range(res_cnt):\n if str(real_label) == str(int(prediction[i])):\n count_hit[i] += 1\n 
break\n if 'value' not in table_dict.keys():\n print(\"the item value does not exist!\")\n else:\n table_dict[\"value\"].extend(\n [{\"key\": \"Number of images\", \"value\": str(count)},\n {\"key\": \"Number of classes\", \"value\": str(n_labels)}])\n if count == 0:\n accuracy = 0\n else:\n accuracy = np.cumsum(count_hit) / count\n for i in range(res_cnt):\n table_dict[\"value\"].append({\"key\": \"Top\" + str(i + 1) + \" accuracy\",\n \"value\": str(\n round(accuracy[i] * 100, 2)) + '%'})\n json.dump(table_dict, writer)\n writer.close()\n\n\nif __name__ == '__main__':\n try:\n # txt file path\n folder_davinci_target = sys.argv[1]\n # annotation files path, \"val_label.txt\"\n annotation_file_path = sys.argv[2]\n # the path to store the results json path\n result_json_path = sys.argv[3]\n # result json file name\n json_file_name = sys.argv[4]\n except IndexError:\n print(\"Please enter target file result folder | ground truth label file\"\n \"| result json file folder | \"\n \"result json file name, such as \"\n \"./result val_label.txt . result.json\")\n exit(1)\n\n if not os.path.exists(folder_davinci_target):\n print(\"Target file folder does not exist.\")\n\n if not os.path.exists(annotation_file_path):\n print(\"Ground truth file does not exist.\")\n\n if not os.path.exists(result_json_path):\n print(\"Result folder doesn't exist.\")\n\n img_label_dict = cre_groundtruth_dict_fromtxt(annotation_file_path)\n create_visualization_statistical_result(folder_davinci_target,\n result_json_path, json_file_name,\n img_label_dict, topn=5)\n",
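Top-N accuracy above is the cumulative per-rank hit count divided by the number of images. A tiny numeric sketch with hypothetical tallies:

```python
import numpy as np

# count_hit[i] = images whose ground-truth label appeared at rank i (0-based).
count_hit = np.array([70, 12, 8, 5, 3])   # hypothetical Top-1..Top-5 hits
count = 100                               # hypothetical image count
accuracy = np.cumsum(count_hit) / count
for i, acc in enumerate(accuracy):
    print("Top" + str(i + 1) + " accuracy: " + str(round(acc * 100, 2)) + "%")
```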
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"servable config for pangu alpha\"\"\"\n\nimport os\nimport time\nfrom easydict import EasyDict\nimport numpy as np\nfrom mindspore_serving.server import register\nfrom mindspore_serving.server import distributed\n\nfrom pangu.tokenization_jieba import JIEBATokenizer\n\ncur_dir = os.path.abspath(os.path.dirname(__file__))\ntokenizer_path = os.path.join(cur_dir, \"tokenizer\")\ntokenizer = JIEBATokenizer(os.path.join(tokenizer_path, \"vocab.vocab\"), os.path.join(tokenizer_path, \"vocab.model\"))\nend_token = tokenizer.eot_id\n\nconfig = EasyDict({\n 'frequency_penalty': 1.5,\n 'presence_penalty': 0.3,\n 'max_generate_length': 500,\n 'top_k_num': 3,\n 'top_p': 1.0,\n 'end_token': 9,\n 'seq_length': 1024,\n 'vocab_size': 40000,\n})\n\n\ndef topk_fun(logits, topk=5):\n \"\"\"Get topk\"\"\"\n target_column = logits[0].tolist()\n sorted_array = [(k, v) for k, v in enumerate(target_column)]\n sorted_array.sort(key=lambda x: x[1], reverse=True)\n topk_array = sorted_array[:topk]\n index, value = zip(*topk_array)\n index = np.array([index])\n value = np.array([value])\n return value, index\n\n\nmodel = distributed.declare_servable(rank_size=8, stage_size=1, with_batch_dim=False)\n\n\ndef predict_stage(input_sentence):\n \"\"\"generate sentence with given input_sentence\"\"\"\n\n print(f\"----------------------------- begin {input_sentence} ---------\", flush=True)\n time_start = time.time()\n\n tokens = tokenizer.tokenize(input_sentence)\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n outputs = generate_increment(input_ids)\n\n return_tokens = tokenizer.convert_ids_to_tokens(outputs)\n reply = \"\".join(return_tokens)\n\n print(f\"time cost {(time.time() - time_start) * 1000}ms, request '{input_sentence}' get reply '{reply}'\",\n flush=True)\n\n return reply\n\n\ndef generate_increment(origin_inputs):\n \"\"\"\n Text generation for incremental inference\n\n Inputs:\n model: the model for inferencing\n origin_inputs: the original inputs based on which the model will continue writing\n config: inference configurations\n\n Returns:\n outputs: the ids for the generated text\n \"\"\"\n # Get configurations for inference\n frequency_penalty = config.frequency_penalty\n presence_penalty = config.presence_penalty\n top_p = config.top_p\n top_k_num = config.top_k_num\n max_generate_length = config.max_generate_length\n seq_length = config.seq_length\n vocab_size = config.vocab_size\n\n # Init outputs with original inputs\n outputs = origin_inputs\n origin_inputs = np.array([origin_inputs])\n _, valid_length = origin_inputs.shape\n # If target length exceeds seq_length, use seq_length instead\n target_length = valid_length + max_generate_length\n target_length = seq_length if target_length > seq_length else target_length\n\n # A list of the frequency of each token\n frequency_list = np.array([[0 for _ in 
range(vocab_size)]])\n pad_length = seq_length - origin_inputs.shape[-1]\n # Pad original inputs to seq_length\n input_ids = np.pad(origin_inputs, ((0, 0), (0, pad_length)), 'constant', constant_values=(0, 0))\n\n # Indicate the exact token position\n current_index = valid_length - 1 if valid_length - 1 > 0 else 0\n current_index = np.array([current_index], np.int32)\n batch_valid_length = np.array([current_index], np.int32)\n # For first graph, not_init should be false\n init_true = True\n init_false = False\n init = init_false\n # Call a single inference with input size of (bs, seq_length)\n logits = model.call(np.array(input_ids, np.int32), current_index, init, batch_valid_length, subgraph=0)\n\n # Claim the second graph and set not_init to true\n init = init_true\n\n # A single loop generates one token, loop until reaching target seq_length or generating eod token\n while valid_length < target_length:\n # Reshape the output logits\n log_probs = logits.reshape(1, vocab_size)\n\n # Get the revised log_probs considering frequency and presence penalty to eliminate duplicate in generated results\n log_probs = log_probs.reshape(1, vocab_size)\n log_probs_revised = log_probs - frequency_list * frequency_penalty - (frequency_list > 0) * presence_penalty\n\n # Convert the log_probs to probability\n logits = np.power(10, np.array(log_probs_revised, np.float32))\n\n # If top_p is less than 1.0, use top_p sampling\n if top_p < 1.0:\n # Only consider the 5000 largest logits to reduce computation\n sorted_logits, index = topk_fun(logits, 5000)\n cumsum_logits = np.cumsum(sorted_logits, 1)\n cumsum_logits = cumsum_logits[0]\n index = index[0]\n sorted_logits = sorted_logits[0]\n top_p_num = sum(cumsum_logits > top_p)\n # In case the probability is smooth, the sum of 5000 largest probabilities are not large enough\n if top_p_num == 0:\n top_p_num = 5000\n # Get the corresponding probs and indices\n probs = sorted_logits[:top_p_num]\n p_args = index[:top_p_num]\n p = probs / sum(probs)\n # if top_p is set to 1.0, use top_k sampling\n else:\n # Get the corresponding probs and indices\n probs, p_args = topk_fun(logits, top_k_num)\n probs = probs[0]\n p_args = p_args[0]\n # Avoid rounding error\n if sum(probs) == 0:\n probs = np.array([1 / top_k_num for _ in range(top_k_num)])\n p = probs / sum(probs)\n\n # Random select a token as final output for this round\n target_index = np.random.choice(len(p), p=p)\n # Stop judgment\n if p_args[target_index] == end_token or valid_length == target_length - 1:\n break\n\n # Update frequency list\n target = p_args[target_index]\n frequency_list[0][target] = frequency_list[0][target] + 1\n valid_length += 1\n\n batch_valid_length = np.array([valid_length - 1], np.int32)\n current_index = np.array([0], np.int32)\n input_id = np.array([[target]], np.int32)\n # Update outputs with current generated token\n outputs.append(int(target))\n\n # Call a single inference with input size of (bs, 1)\n logits = model.call(input_id, current_index, init, batch_valid_length, subgraph=1)\n # Return valid outputs out of padded outputs\n return outputs\n\n\[email protected]_method(output_names=[\"output_sentence\"])\ndef predict(input_sentence):\n reply = register.add_stage(predict_stage, input_sentence, outputs_count=1)\n return reply\n",
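The top-p branch above keeps a prefix of the descending-sorted probabilities whose cumulative mass reaches `top_p`, renormalizes it, and samples one token. A standard nucleus-sampling sketch of that idea (simplified: no frequency/presence penalties, and the servable's own cutoff arithmetic differs in detail):

```python
import numpy as np

def top_p_sample(probs, top_p=0.9, rng=np.random.default_rng(0)):
    """Keep the smallest descending-sorted prefix whose cumulative
    probability reaches top_p, renormalize, and draw one token id."""
    order = np.argsort(probs)[::-1]
    sorted_p = probs[order]
    cutoff = np.searchsorted(np.cumsum(sorted_p), top_p) + 1
    keep_ids, keep_p = order[:cutoff], sorted_p[:cutoff]
    return int(rng.choice(keep_ids, p=keep_p / keep_p.sum()))

vocab_probs = np.array([0.5, 0.3, 0.1, 0.06, 0.04])
print(top_p_sample(vocab_probs))  # draws from tokens {0, 1, 2}
```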
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===========================================================================\n\"\"\"DSCNN export.\"\"\"\nimport os\nimport numpy as np\nfrom mindspore import Tensor\nfrom mindspore.train.serialization import export\nfrom src.ds_cnn import DSCNN\nfrom src.models import load_ckpt\nfrom src.model_utils.config import config\nfrom src.model_utils.moxing_adapter import moxing_wrapper\n\n\ndef modelarts_pre_process():\n config.file_name = os.path.join(config.output_path, config.file_name)\n\n\n@moxing_wrapper(pre_process=None)\ndef model_export():\n network = DSCNN(config, config.model_size_info)\n load_ckpt(network, config.export_ckpt_path, False)\n x = np.random.uniform(0.0, 1.0, size=[config.per_batch_size, 1, config.model_setting_spectrogram_length,\n config.model_setting_dct_coefficient_count]).astype(np.float32)\n export(network, Tensor(x), file_name=config.file_name, file_format=config.file_format)\n\n\nif __name__ == '__main__':\n model_export()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nCallback for eval\n\"\"\"\n\nimport os\nfrom mindspore.train.callback import Callback\nfrom mindspore import save_checkpoint\nimport numpy as np\n\n\nclass EvalCallBack(Callback):\n \"\"\"\n CallBack class\n \"\"\"\n def __init__(self, options, net, eval_dataset, path, rank_id=0):\n self.net = net\n self.eval_dataset = eval_dataset\n self.path = path\n self.avgacc = 0\n self.avgloss = 0\n self.bestacc = 0\n self.options = options\n self.rank_id = rank_id\n\n\n def epoch_begin(self, run_context):\n \"\"\"\n CallBack epoch begin\n \"\"\"\n cb_param = run_context.original_args()\n cur_epoch = cb_param.cur_epoch_num\n print('=========EPOCH {} BEGIN========='.format(cur_epoch))\n\n def epoch_end(self, run_context):\n \"\"\"\n CallBack epoch end\n \"\"\"\n cb_param = run_context.original_args()\n cur_epoch = cb_param.cur_epoch_num\n cur_net = cb_param.network\n # print(cur_net)\n evalnet = self.net\n self.avgacc, self.avgloss = self.eval(self.eval_dataset, evalnet)\n\n if self.avgacc > self.bestacc:\n self.bestacc = self.avgacc\n print('Epoch {}: Avg Accuracy: {}(best) Avg Loss:{}'.format(cur_epoch, self.avgacc, self.avgloss))\n best_path = os.path.join(self.path, f'best_ck_{self.rank_id}.ckpt')\n save_checkpoint(cur_net, best_path)\n\n else:\n print('Epoch {}: Avg Accuracy: {} Avg Loss:{}'.format(cur_epoch, self.avgacc, self.avgloss))\n last_path = os.path.join(self.path, f'last_ck_{self.rank_id}.ckpt')\n save_checkpoint(cur_net, last_path)\n print(\"Best Acc:\", self.bestacc)\n print('=========EPOCH {} END========='.format(cur_epoch))\n\n def eval(self, inp, net):\n \"\"\"\n CallBack eval\n \"\"\"\n avg_acc = list()\n avg_loss = list()\n for _ in range(10):\n for batch in inp.create_dict_iterator():\n x = batch['data']\n y = batch['label']\n classes = batch['classes']\n acc, loss = net(x, y, classes)\n avg_acc.append(acc.asnumpy())\n avg_loss.append(loss.asnumpy())\n avg_acc = np.mean(avg_acc)\n avg_loss = np.mean(avg_loss)\n\n return avg_acc, avg_loss\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"eval script\"\"\"\nimport os\nimport numpy as np\nimport mindspore.dataset as ds\nfrom mindspore import Tensor, context\nfrom mindspore.common import dtype as mstype\nfrom mindspore.train.serialization import load_checkpoint, load_param_into_net\nfrom src.args import args\nimport src.ipt_model as ipt\nfrom src.data.srdata import SRData\nfrom src.metrics import calc_psnr, quantize\n\ndevice_id = int(os.getenv('DEVICE_ID', '0'))\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", device_id=device_id, save_graphs=False)\ncontext.set_context(max_call_depth=10000)\n\ndef sub_mean(x):\n red_channel_mean = 0.4488 * 255\n green_channel_mean = 0.4371 * 255\n blue_channel_mean = 0.4040 * 255\n x[:, 0, :, :] -= red_channel_mean\n x[:, 1, :, :] -= green_channel_mean\n x[:, 2, :, :] -= blue_channel_mean\n return x\n\ndef add_mean(x):\n red_channel_mean = 0.4488 * 255\n green_channel_mean = 0.4371 * 255\n blue_channel_mean = 0.4040 * 255\n x[:, 0, :, :] += red_channel_mean\n x[:, 1, :, :] += green_channel_mean\n x[:, 2, :, :] += blue_channel_mean\n return x\n\ndef eval_net():\n \"\"\"eval\"\"\"\n if args.epochs == 0:\n args.epochs = 1e8\n\n for arg in vars(args):\n if vars(args)[arg] == 'True':\n vars(args)[arg] = True\n elif vars(args)[arg] == 'False':\n vars(args)[arg] = False\n train_dataset = SRData(args, name=args.data_test, train=False, benchmark=False)\n train_de_dataset = ds.GeneratorDataset(train_dataset, ['LR', 'HR', \"idx\", \"filename\"], shuffle=False)\n train_de_dataset = train_de_dataset.batch(1, drop_remainder=True)\n train_loader = train_de_dataset.create_dict_iterator(output_numpy=True)\n\n net_m = ipt.IPT(args)\n if args.pth_path:\n param_dict = load_checkpoint(args.pth_path)\n load_param_into_net(net_m, param_dict)\n net_m.set_train(False)\n idx = Tensor(np.ones(args.task_id), mstype.int32)\n inference = ipt.IPT_post(net_m, args)\n print('load mindspore net successfully.')\n num_imgs = train_de_dataset.get_dataset_size()\n psnrs = np.zeros((num_imgs, 1))\n for batch_idx, imgs in enumerate(train_loader):\n lr = imgs['LR']\n hr = imgs['HR']\n lr = sub_mean(lr)\n lr = Tensor(lr, mstype.float32)\n pred = inference.forward(lr, idx)\n pred_np = add_mean(pred.asnumpy())\n pred_np = quantize(pred_np, 255)\n psnr = calc_psnr(pred_np, hr, args.scale[0], 255.0)\n print(\"current psnr: \", psnr)\n psnrs[batch_idx, 0] = psnr\n if args.denoise:\n print('Mean psnr of %s DN_%s is %.4f' % (args.data_test[0], args.sigma, psnrs.mean(axis=0)[0]))\n elif args.derain:\n print('Mean psnr of Derain is %.4f' % (psnrs.mean(axis=0)))\n else:\n print('Mean psnr of %s x%s is %.4f' % (args.data_test[0], args.scale[0], psnrs.mean(axis=0)[0]))\n\nif __name__ == '__main__':\n print(\"Start eval function!\")\n eval_net()\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"post process for 310 inference\"\"\"\nimport os\nimport numpy as np\nfrom src.model_utils.config import config as cf\n\nbatch_Size = 1\n\n\ndef is_eq(pred_lbl, target):\n pred_diff = len(target) - len(pred_lbl)\n if pred_diff > 0:\n pred_lbl.extend([10] * pred_diff)\n return pred_lbl == target\n\n\ndef get_prediction(y_pred):\n seq_len, batch_size, _ = y_pred.shape\n indices = y_pred.argmax(axis=2)\n lens = [seq_len] * batch_size\n pred_lbl = []\n for i in range(batch_size):\n idx = indices[:, i]\n last_idx = 10\n pred_lbl = []\n for j in range(lens[i]):\n cur_idx = idx[j]\n if cur_idx not in [last_idx, 10]:\n pred_lbl.append(cur_idx)\n last_idx = cur_idx\n return pred_lbl\n\n\ndef calcul_acc(y_pred, y):\n correct_num = 0\n total_num = 0\n for b_idx, target in enumerate(y):\n if is_eq(y_pred[b_idx], target):\n correct_num += 1\n total_num += 1\n if total_num == 0:\n raise RuntimeError('Accuracy can not be calculated, because the number of samples is 0.')\n return correct_num / total_num\n\n\ndef get_result(result_path, label_path):\n files = os.listdir(result_path)\n preds = []\n labels = []\n label_dict = {}\n with open(label_path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n label_dict[line.split(',')[0]] = np.array(\n line.replace('\\n', '').replace('[', '').replace(']', '').split(',')[1:]).astype(dtype=int).tolist()\n for file in files:\n label = label_dict[file]\n labels.append(label)\n resultPath = os.path.join(result_path, file)\n output = np.fromfile(resultPath, dtype=np.float16).reshape((-1, batch_Size, 11))\n preds.append(get_prediction(output))\n acc = round(calcul_acc(preds, labels), 3)\n print(\"Total data: {}, accuracy: {}\".format(len(labels), acc))\n\n\nif __name__ == '__main__':\n get_result(cf.result_path, cf.label_path)\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Learning rate schedule\"\"\"\n\nimport math\nimport numpy as np\n\ndef get_lr(global_step, lr_init, lr_end, lr_max, warmup_epochs, total_epochs, steps_per_epoch):\n \"\"\"\n generate learning rate array\n\n Args:\n global_step(int): total steps of the pre_training\n lr_init(float): init learning rate\n lr_end(float): end learning rate\n lr_max(float): max learning rate\n warmup_epochs(float): number of warmup epochs\n total_epochs(int): total epoch of training\n steps_per_epoch(int): steps of one epoch\n\n Returns:\n np.array, learning rate array\n \"\"\"\n lr_each_step = []\n total_steps = steps_per_epoch * total_epochs\n warmup_steps = steps_per_epoch * warmup_epochs\n for i in range(total_steps):\n if i < warmup_steps:\n lr = lr_init + (lr_max - lr_init) * i / warmup_steps\n else:\n lr = lr_end + \\\n (lr_max - lr_end) * \\\n (1. + math.cos(math.pi * (i - warmup_steps) / (total_steps - warmup_steps))) / 2.\n if lr < 0.0:\n lr = 0.0\n lr_each_step.append(lr)\n\n current_step = global_step\n lr_each_step = np.array(lr_each_step).astype(np.float32)\n learning_rate = lr_each_step[current_step:]\n\n return learning_rate\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\n##############export checkpoint file into air, onnx or mindir model#################\npython export.py\n\"\"\"\nimport argparse\n\nimport numpy as np\nfrom mindspore import Tensor, load_checkpoint, load_param_into_net, export, context\n\nimport src.spnasnet as spnasnet\nfrom src.config import imagenet_cfg\n\nparser = argparse.ArgumentParser(description='single-path-nas export')\nparser.add_argument(\"--device_id\", type=int, default=0, help=\"Device id\")\nparser.add_argument(\"--batch_size\", type=int, default=1, help=\"batch size\")\nparser.add_argument(\"--ckpt_file\", type=str, required=True, help=\"Checkpoint file path.\")\nparser.add_argument(\"--file_name\", type=str, default=\"single-path-nas\", help=\"output file name.\")\nparser.add_argument('--width', type=int, default=224, help='input width')\nparser.add_argument('--height', type=int, default=224, help='input height')\nparser.add_argument(\"--file_format\", type=str, choices=[\"AIR\", \"ONNX\", \"MINDIR\"], default=\"MINDIR\", help=\"file format\")\nparser.add_argument(\"--device_target\", type=str, default=\"Ascend\",\n choices=[\"Ascend\",], help=\"device target(default: Ascend)\")\nargs = parser.parse_args()\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)\nif args.device_target == \"Ascend\":\n context.set_context(device_id=args.device_id)\nelse:\n raise ValueError(\"Unsupported platform.\")\n\nif __name__ == '__main__':\n net = spnasnet.spnasnet(num_classes=imagenet_cfg.num_classes)\n\n assert args.ckpt_file is not None, \"checkpoint_path is None.\"\n\n param_dict = load_checkpoint(args.ckpt_file)\n load_param_into_net(net, param_dict)\n\n input_arr = Tensor(np.zeros([args.batch_size, 3, args.height, args.width], np.float32))\n export(net, input_arr, file_name=args.file_name, file_format=args.file_format)\n",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Convert VOC format dataset to mindrecord for evaluating Face detection.\"\"\"\nimport os\nimport xml.etree.ElementTree as ET\nimport numpy as np\n\nfrom PIL import Image\nfrom mindspore import log as logger\nfrom mindspore.mindrecord import FileWriter\n\ndataset_root_list = [\"Your_VOC_dataset_path1\",\n \"Your_VOC_dataset_path2\",\n \"Your_VOC_dataset_pathN\",\n ]\n\nmindrecord_file_name = \"Your_output_path/data.mindrecord\"\n\nmindrecord_num = 8\nis_train = False\nclass_indexing_1 = {'face': 0}\n\n\ndef prepare_file_paths():\n '''prepare_file_paths'''\n image_files = []\n anno_files = []\n image_names = []\n for dataset_root in dataset_root_list:\n if not os.path.isdir(dataset_root):\n raise ValueError(\"dataset root is invalid!\")\n anno_dir = os.path.join(dataset_root, \"Annotations\")\n image_dir = os.path.join(dataset_root, \"JPEGImages\")\n if is_train:\n valid_txt = os.path.join(dataset_root, \"ImageSets/Main/train.txt\")\n else:\n valid_txt = os.path.join(dataset_root, \"ImageSets/Main/test.txt\")\n\n ret_image_files, ret_anno_files, ret_image_names = filter_valid_files_by_txt(image_dir, anno_dir, valid_txt)\n image_files.extend(ret_image_files)\n anno_files.extend(ret_anno_files)\n image_names.extend(ret_image_names)\n return image_files, anno_files, image_names\n\n\ndef filter_valid_files_by_txt(image_dir, anno_dir, valid_txt):\n '''filter_valid_files_by_txt'''\n with open(valid_txt, \"r\") as txt:\n valid_names = txt.readlines()\n image_files = []\n anno_files = []\n image_names = []\n for name in valid_names:\n strip_name = name.strip(\"\\n\")\n anno_joint_path = os.path.join(anno_dir, strip_name + \".xml\")\n if os.path.isfile(anno_joint_path):\n image_joint_path = os.path.join(image_dir, strip_name + \".jpg\")\n image_name = image_joint_path.split('/')[-1].replace('.jpg', '')\n if os.path.isfile(image_joint_path):\n image_files.append(image_joint_path)\n anno_files.append(anno_joint_path)\n image_names.append(image_name)\n continue\n image_joint_path = os.path.join(image_dir, strip_name + \".png\")\n image_name = image_joint_path.split('/')[-1].replace('.png', '')\n if os.path.isfile(image_joint_path):\n image_files.append(image_joint_path)\n anno_files.append(anno_joint_path)\n image_names.append(image_name)\n return image_files, anno_files, image_names\n\n\ndef deserialize(member, class_indexing):\n '''deserialize'''\n class_name = member[0].text\n if class_name in class_indexing:\n class_num = class_indexing[class_name]\n else:\n return None\n bnx = member.find('bndbox')\n box_x_min = float(bnx.find('xmin').text)\n box_y_min = float(bnx.find('ymin').text)\n box_x_max = float(bnx.find('xmax').text)\n box_y_max = float(bnx.find('ymax').text)\n width = float(box_x_max - box_x_min + 1)\n height = float(box_y_max - box_y_min + 1)\n\n try:\n ignore = float(member.find('ignore').text)\n except 
ValueError:\n ignore = 0.0\n return [class_num, box_x_min, box_y_min, width, height, ignore]\n\n\ndef get_data(image_file, anno_file, image_name):\n '''get_data'''\n count = 0\n annotation = []\n tree = ET.parse(anno_file)\n root = tree.getroot()\n\n with Image.open(image_file) as fd:\n orig_width, orig_height = fd.size\n\n with open(image_file, 'rb') as f:\n img = f.read()\n\n for member in root.findall('object'):\n anno = deserialize(member, class_indexing_1)\n if anno is not None:\n annotation.extend(anno)\n count += 1\n\n for member in root.findall('Object'):\n anno = deserialize(member, class_indexing_1)\n if anno is not None:\n annotation.extend(anno)\n count += 1\n\n if count == 0:\n annotation = np.array([[-1, -1, -1, -1, -1, -1]], dtype='float64')\n count = 1\n\n data = {\n \"image\": img,\n \"annotation\": np.array(annotation, dtype='float64'),\n \"image_name\": image_name,\n \"image_size\": np.array([orig_width, orig_height], dtype='int32')\n }\n return data\n\n\ndef convert_yolo_data_to_mindrecord():\n '''convert_yolo_data_to_mindrecord'''\n\n writer = FileWriter(mindrecord_file_name, mindrecord_num)\n yolo_json = {\n \"image\": {\"type\": \"bytes\"},\n \"annotation\": {\"type\": \"float64\", \"shape\": [-1, 6]},\n \"image_name\": {\"type\": \"string\"},\n \"image_size\": {\"type\": \"int32\", \"shape\": [-1, 2]}\n }\n\n print('Loading eval data...')\n image_files, anno_files, image_names = prepare_file_paths()\n dataset_size = len(anno_files)\n assert dataset_size == len(image_files)\n assert dataset_size == len(image_names)\n logger.info(\"#size of dataset: {}\".format(dataset_size))\n data = []\n for i in range(dataset_size):\n data.append(get_data(image_files[i], anno_files[i], image_names[i]))\n\n print('Writing eval data to mindrecord...')\n writer.add_schema(yolo_json, \"yolo_json\")\n if data is None:\n raise ValueError(\"None needs writing to mindrecord.\")\n writer.write_raw_data(data)\n writer.commit()\n\n\nconvert_yolo_data_to_mindrecord()\n",
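`deserialize` reads one VOC `<object>` node and converts corner coordinates into `[class, x_min, y_min, width, height, ignore]`. A standalone sketch of the bndbox parsing (the XML snippet is hypothetical, and it looks the class up via `<name>` where the converter uses `member[0]`):

```python
import xml.etree.ElementTree as ET

XML = """
<annotation>
  <object>
    <name>face</name>
    <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>50</xmax><ymax>80</ymax></bndbox>
  </object>
</annotation>
"""

root = ET.fromstring(XML)
for member in root.findall('object'):
    bnx = member.find('bndbox')
    x_min = float(bnx.find('xmin').text)
    y_min = float(bnx.find('ymin').text)
    width = float(bnx.find('xmax').text) - x_min + 1
    height = float(bnx.find('ymax').text) - y_min + 1
    print(member.find('name').text, [x_min, y_min, width, height])
# -> face [10.0, 20.0, 41.0, 61.0]
```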
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nchoose samples from the dataset\n\"\"\"\nimport math\nimport numpy as np\n\nclass DistributedSampler():\n \"\"\"\n sampling the dataset.\n\n Args:\n Returns:\n num_samples, number of samples.\n \"\"\"\n def __init__(self, dataset, rank, group_size, shuffle=True, seed=0):\n self.dataset = dataset\n self.rank = rank\n self.group_size = group_size\n self.dataset_length = len(self.dataset)\n self.num_samples = int(math.ceil(self.dataset_length * 1.0 / self.group_size))\n self.total_size = self.num_samples * self.group_size\n self.shuffle = shuffle\n self.seed = seed\n\n def __iter__(self):\n if self.shuffle:\n self.seed = (self.seed + 1) & 0xffffffff\n np.random.seed(self.seed)\n indices = np.random.permutation(self.dataset_length).tolist()\n else:\n indices = list(range(len(self.dataset_length)))\n\n indices += indices[:(self.total_size - len(indices))]\n indices = indices[self.rank::self.group_size]\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n ",
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"GOMO Model\"\"\"\n\nimport numpy as np\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P, functional as F\nfrom mindspore.common.parameter import Parameter\nimport mindspore.common.dtype as mstype\nfrom mindspore.common.initializer import initializer\nfrom src.stencil import AXB, AXF, AYB, AYF, AZB, AZF\nfrom src.stencil import DXB, DXF, DYB, DYF, DZB, DZF\nfrom src.Grid import Grid\n\n\ndef read_init(variable, im, jm, kb):\n \"\"\"\n read init variable from nc file\n\n Args:\n variable(dict): The initial variables from inputs file.\n im (int): The size of x direction.\n jm (int): The size of y direction.\n kb (int): The size of z direction.\n\n Returns:\n tuple[Tensor], The initial variables.\n \"\"\"\n\n dx = Tensor(variable[\"dx\"])\n dy = Tensor(variable[\"dy\"])\n dz = Tensor(variable[\"dz\"])\n tb = Tensor(variable[\"tb\"])\n sb = Tensor(variable[\"sb\"])\n ub = Tensor(variable[\"ub\"])\n vb = Tensor(variable[\"vb\"])\n uab = Tensor(variable[\"uab\"])\n vab = Tensor(variable[\"vab\"])\n elb = Tensor(variable[\"elb\"])\n etb = Tensor(variable[\"etb\"])\n dt = Tensor(variable[\"dt\"])\n h = Tensor(variable[\"h\"])\n vfluxf = Tensor(variable[\"vfluxf\"])\n z = Tensor(variable[\"z\"])\n zz = Tensor(variable[\"zz\"])\n dzz = Tensor(variable[\"dzz\"])\n cor = Tensor(variable[\"cor\"])\n fsm = Tensor(variable[\"fsm\"])\n w = Tensor(np.zeros([im, jm, kb], np.float32))\n wubot = Tensor(np.zeros([im, jm, 1], np.float32))\n wvbot = Tensor(np.zeros([im, jm, 1], np.float32))\n vfluxb = Tensor(np.zeros([im, jm, 1], np.float32))\n utb = Tensor(np.zeros([im, jm, 1], np.float32))\n vtb = Tensor(np.zeros([im, jm, 1], np.float32))\n dhb = Tensor(np.zeros([im, jm, 1], np.float32))\n egb = Tensor(np.zeros([im, jm, 1], np.float32))\n\n return dx, dy, dz, uab, vab, elb, etb, sb, tb, ub, vb, dt, h, w, wubot, wvbot, vfluxb, utb, vtb, dhb, \\\n egb, vfluxf, z, zz, dzz, cor, fsm\n\n\nclass Shift(nn.Cell):\n \"\"\"\n Shift operations\n \"\"\"\n def __init__(self, dims):\n super(Shift, self).__init__()\n\n pad_list = ()\n self.slice_list = ()\n for i, _ in enumerate(dims):\n if dims[i] >= 0:\n pad_list += ((dims[i], 0),)\n self.slice_list += (0,)\n else:\n pad_list += ((0, -dims[i]),)\n self.slice_list += (-dims[i],)\n self.pad = P.Pad((pad_list))\n self.slice = P.Slice()\n self.shape = P.Shape()\n\n def construct(self, x):\n \"\"\"construct\"\"\"\n x1 = self.pad(x)\n x_shape = self.shape(x)\n x1 = self.slice(x1, self.slice_list, x_shape)\n return x1\n\n\nclass GOMO_init(nn.Cell):\n \"\"\"\n Get Ocean Model GOMO init variables\n \"\"\"\n\n def __init__(self, im, jm, kb, stencil_width):\n super(GOMO_init, self).__init__()\n self.Grid = Grid(im, jm, kb)\n self.im = im\n self.jm = jm\n self.kb = kb\n self.kbm1 = self.kb - 1\n self.kbm2 = self.kb - 2\n self.imm1 = self.im - 1\n self.jmm1 
= self.jm - 1\n self.grav = 9.8060\n self.rhoref = 1025.0\n self.z0b = 0.01\n self.small = 1e-9\n self.tbias = 0.0\n self.sbias = 0.0\n self.aam_init = 500.0\n self.cbcmax = 1.0\n self.cbcmin = 0.0025\n self.kappa = 0.40\n self.mat_ones = Tensor(np.float32(np.ones([self.im, self.jm, self.kb])))\n self.rmean = Parameter(Tensor(np.zeros([self.im, self.jm, self.kb], dtype=np.float32)), name=\"rmean\",\n requires_grad=False)\n\n self.AXB = AXB(stencil_width=stencil_width)\n self.AXF = AXF(stencil_width=stencil_width)\n self.AYB = AYB(stencil_width=stencil_width)\n self.AYF = AYF(stencil_width=stencil_width)\n self.AZB = AZB(stencil_width=stencil_width)\n self.AZF = AZF(stencil_width=stencil_width)\n self.DXB = DXB(stencil_width=stencil_width)\n self.DXF = DXF(stencil_width=stencil_width)\n self.DYB = DYB(stencil_width=stencil_width)\n self.DYF = DYF(stencil_width=stencil_width)\n self.DZB = DZB(stencil_width=stencil_width)\n self.DZF = DZF(stencil_width=stencil_width)\n self.abs = P.Abs()\n self.sqrt = P.Sqrt()\n self.log = P.Log()\n self.csum = P.CumSum()\n self.reduce_sum = P.ReduceSum(keep_dims=True)\n self.assign = P.Assign()\n self.pow = P.Pow()\n\n shape3 = (self.im, self.jm, self.kb)\n z_h = np.zeros(shape3, dtype=np.float32)\n z_h[:, :, 0] = 1.\n self.z_h = Tensor(z_h)\n z_e = np.zeros(shape3, dtype=np.float32)\n z_e[:, :, -1] = 1.\n self.z_e = Tensor(z_e)\n z_he1 = np.zeros(shape3, dtype=np.float32)\n z_he1[:, :, 0:self.kb - 1] = 1.\n self.z_he1 = Tensor(z_he1)\n\n def dens(self, si, ti, zz, h, fsm):\n \"\"\"\n density compute function\n Args:\n si: Salinity.\n ti: Potential temperature.\n zz: sigma coordinate, intermediate between Z.\n h: the bottom depth (m)\n fsm: Mask for scalar variables; = 0 over land; = 1 over water\n Returns:\n rhoo[Tensor], density.\n \"\"\"\n tr = ti + self.tbias\n sr = si + self.sbias\n tr2 = tr * tr\n tr3 = tr2 * tr\n tr4 = tr3 * tr\n p = self.grav * self.rhoref * (-1 * zz * self.mat_ones * h) * 1e-5\n rhor2 = -0.157406e0 + 6.793952e-2 * tr - 9.095290e-3 * tr2 + 1.001685e-4 * tr3 - 1.120083e-6 * tr4 + \\\n 6.536332e-9 * tr4 * tr\n rhor1 = rhor2 + (0.824493e0 - 4.0899e-3 * tr + 7.6438e-5 * tr2 - 8.2467e-7 * tr3 + 5.3875e-9 * tr4) * sr + \\\n (-5.72466e-3 + 1.0227e-4 * tr - 1.6546e-6 * tr2) * self.abs(sr) ** 1.5e0 + 4.8314e-4 * sr * sr\n cr1 = 1449.1e0 + .0821e0 * p + 4.55e0 * tr - .045e0 * tr2 + 1.34e0 * (sr - 35.e0)\n cr = p / (cr1 * cr1)\n rhor = rhor1 + 1.e5 * cr * (1.e0 - 2.e0 * cr)\n rhoo = rhor / self.rhoref * fsm\n return rhoo\n\n def bottom_friction(self, zz1, h):\n \"\"\"\n bottom_friction\n \"\"\"\n zz_kbm1 = P.Slice()(zz1, (self.kb - 2,), (1,))\n cbc = (self.kappa / self.log((1.0 + zz_kbm1) * h / self.z0b)) * (self.kappa / self.log((1.0 + zz_kbm1) * h\n / self.z0b))\n cbc_compare = P.Cast()(cbc > self.cbcmax, mstype.float32)\n cbc = cbc * (1 - cbc_compare) + cbc_compare * self.cbcmax\n cbc_compare = P.Cast()(cbc < self.cbcmin, mstype.float32)\n cbc = cbc * (1 - cbc_compare) + cbc_compare * self.cbcmin\n return cbc\n\n def construct(self, dx, dy, dz, uab, vab, elb, etb, sb, tb, ub, vb, h, w, vfluxf, zz, fsm):\n \"\"\"construct\"\"\"\n x_d, y_d, z_d = self.Grid(dx, dy, dz)\n\n rho = self.dens(sb, tb, zz, h, fsm)\n rmean = self.rmean * (1 - self.z_he1) + rho * self.z_he1\n zz1 = zz[0, 0, :]\n ua = uab\n va = vab\n el = elb\n et = etb\n etf = et\n d = h + el\n dt = h + et\n l = dt * 0.1e0\n q2b = self.mat_ones * self.small\n q2lb = l * q2b\n kh = l * self.sqrt(q2b)\n km = kh\n kq = kh\n aam = self.mat_ones * self.aam_init\n w = w * (1 - self.z_h) + 
self.z_h * vfluxf\n q2 = q2b\n q2l = q2lb\n t = tb\n s = sb\n u = ub\n v = vb\n cbc = self.bottom_friction(zz1, h)\n\n return ua, va, el, et, etf, d, dt, l, q2b, q2lb, kh, km, kq, aam, w, q2, q2l, t, s, u, v, cbc, rmean, rho, \\\n x_d, y_d, z_d\n\n\nclass GOMO(nn.Cell):\n \"\"\"\n Get Ocean Model GOMO\n \"\"\"\n\n def __init__(self, im, jm, kb, stencil_width, variable, x_d, y_d, z_d, q2b, q2lb, aam, cbc, rmean):\n super(GOMO, self).__init__()\n self.x_d = x_d\n self.y_d = y_d\n self.z_d = z_d\n self.q2b_init = q2b\n self.q2lb_init = q2lb\n self.aam_init = aam\n self.cbc_init = cbc\n self.rmean_init = rmean\n self.im = im\n self.jm = jm\n self.kb = kb\n self.kbm1 = self.kb - 1\n self.kbm2 = self.kb - 2\n self.imm1 = self.im - 1\n self.jmm1 = self.jm - 1\n self.fclim_flag = False\n self._parameter_init(variable)\n self._boundary_process()\n self._constant_init()\n\n self.AXB = AXB(stencil_width=stencil_width)\n self.AXF = AXF(stencil_width=stencil_width)\n self.AYB = AYB(stencil_width=stencil_width)\n self.AYF = AYF(stencil_width=stencil_width)\n self.AZB = AZB(stencil_width=stencil_width)\n self.AZF = AZF(stencil_width=stencil_width)\n self.DXB = DXB(stencil_width=stencil_width)\n self.DXF = DXF(stencil_width=stencil_width)\n self.DYB = DYB(stencil_width=stencil_width)\n self.DYF = DYF(stencil_width=stencil_width)\n self.DZB = DZB(stencil_width=stencil_width)\n self.DZF = DZF(stencil_width=stencil_width)\n self.abs = P.Abs()\n self.sqrt = P.Sqrt()\n self.log = P.Log()\n self.csum = P.CumSum()\n self.reduce_sum = P.ReduceSum(keep_dims=True)\n self.assign = P.Assign()\n self.pow = P.Pow()\n\n def _boundary_process(self,):\n \"\"\"boundary process\"\"\"\n shape2 = (self.im, self.jm, 1)\n shape3 = (self.im, self.jm, self.kb)\n\n # 2d boundary\n x_he = np.zeros(shape2, dtype=np.float32)\n x_he[[0, -1], :, :] = 1\n self.x_he = Tensor(x_he)\n x_h = np.zeros(shape2, dtype=np.float32)\n x_h[0, :, :] = 1.\n self.x_h = Tensor(x_h)\n x_e = np.zeros(shape2, dtype=np.float32)\n x_e[-1, :, :] = 1.\n self.x_e = Tensor(x_e)\n x_h1 = np.zeros(shape2, dtype=np.float32)\n x_h1[1, :, :] = 1.\n self.x_h1 = Tensor(x_h1)\n x_e1 = np.zeros(shape2, dtype=np.float32)\n x_e1[-2, :, :] = 1.\n self.x_e1 = Tensor(x_e1)\n y_he = np.zeros(shape2, dtype=np.float32)\n y_he[:, [0, -1], :] = 1.\n self.y_he = Tensor(y_he)\n y_h = np.zeros(shape2, dtype=np.float32)\n y_h[:, 0, :] = 1.\n self.y_h = Tensor(y_h)\n y_e = np.zeros(shape2, dtype=np.float32)\n y_e[:, -1, :] = 1.\n self.y_e = Tensor(y_e)\n y_h1 = np.zeros(shape2, dtype=np.float32)\n y_h1[:, 1, :] = 1.\n self.y_h1 = Tensor(y_h1)\n y_e1 = np.zeros(shape2, dtype=np.float32)\n y_e1[:, -2, :] = 1.\n self.y_e1 = Tensor(y_e1)\n\n # 3d boundary\n x_h3d = np.zeros(shape3, dtype=np.float32)\n x_h3d[0, :, :] = 1.\n self.x_h3d = Tensor(x_h3d)\n x_e3d = np.zeros(shape3, dtype=np.float32)\n x_e3d[-1, :, :] = 1.\n self.x_e3d = Tensor(x_e3d)\n x_he3d = np.zeros(shape3, dtype=np.float32)\n x_he3d[[0, -1], :, :] = 1\n self.x_he3d = Tensor(x_he3d)\n x_e13d = np.zeros(shape3, dtype=np.float32)\n x_e13d[-2, :, :] = 1.\n self.x_e13d = Tensor(x_e13d)\n x_h13d = np.zeros(shape3, dtype=np.float32)\n x_h13d[1, :, :] = 1.\n self.x_h13d = Tensor(x_h13d)\n y_h3d = np.zeros(shape3, dtype=np.float32)\n y_h3d[:, 0, :] = 1.\n self.y_h3d = Tensor(y_h3d)\n y_e3d = np.zeros(shape3, dtype=np.float32)\n y_e3d[:, -1, :] = 1.\n self.y_e3d = Tensor(y_e3d)\n y_e13d = np.zeros(shape3, dtype=np.float32)\n y_e13d[:, -2, :] = 1.\n self.y_e13d = Tensor(y_e13d)\n y_he3d = np.zeros(shape3, dtype=np.float32)\n 
y_he3d[:, [0, -1], :] = 1.\n self.y_he3d = Tensor(y_he3d)\n y_h13d = np.zeros(shape3, dtype=np.float32)\n y_h13d[:, 1, :] = 1.\n self.y_h13d = Tensor(y_h13d)\n z_h = np.zeros(shape3, dtype=np.float32)\n z_h[:, :, 0] = 1.\n self.z_h = Tensor(z_h)\n z_h1 = np.zeros(shape3, dtype=np.float32)\n z_h1[:, :, 1] = 1.\n self.z_h1 = Tensor(z_h1)\n z_e = np.zeros(shape3, dtype=np.float32)\n z_e[:, :, -1] = 1.\n self.z_e = Tensor(z_e)\n z_e1 = np.zeros(shape3, dtype=np.float32)\n z_e1[:, :, -2] = 1.\n self.z_e1 = Tensor(z_e1)\n z_he = np.zeros(shape3, dtype=np.float32)\n z_he[:, :, [0, -1]] = 1.\n self.z_he = Tensor(z_he)\n z_he1 = np.zeros(shape3, dtype=np.float32)\n z_he1[:, :, 0:self.kb - 1] = 1.\n self.z_he1 = Tensor(z_he1)\n z_he2 = np.zeros(shape3, dtype=np.float32)\n z_he2[:, :, 0:self.kb - 2] = 1.\n self.z_he2 = Tensor(z_he2)\n z_he3 = np.zeros(shape3, dtype=np.float32)\n z_he3[:, :, 1:self.kb - 2] = 1.\n self.z_he3 = Tensor(z_he3)\n\n self.zslice = ()\n for i in range(self.kb):\n z_k = np.zeros(shape3, dtype=np.float32)\n z_k[:, :, i] = 1.0\n self.zslice += (Tensor(z_k),)\n\n def _constant_init(self,):\n \"\"\"constant init\"\"\"\n self.small = 1e-9\n self.tbias = 0.0\n self.sbias = 0.0\n self.grav = 9.8059999999999992\n self.kappa = 0.40\n self.rhoref = 1025.0\n self.dte = 6.0\n self.isplit = 30\n self.dti = self.dte * self.isplit\n self.dte2 = self.dte * 2e0\n self.dti2 = self.dti * 2e0\n self.z0b = 0.01\n self.npg = 1\n self.nadv = 1\n self.ramp = 1.0\n self.horcon = 0.20\n self.umol = 2.e-5\n self.aam_init = 500.0\n self.nbct = 1\n self.nbcs = 1\n self.hmax = 4500.0\n self.nsbdy = 1\n self.ispi = 1.0 / self.isplit\n self.isp2i = self.ispi / 2.0\n self.ispadv = 5\n self.alpha = 0.2250\n self.smoth = 0.100\n self.vmaxl = 100.0\n self.cbcmax = 1.0\n self.cbcmin = 0.0025\n self.tprni = 0.20\n self.small_debug = 1e-15\n self.matrix_small = Tensor(self.small * np.ones([self.im, self.jm, self.kb]), mstype.float32)\n self.matrix_grav = Tensor(self.grav * np.ones([self.im, self.jm, self.kb]), mstype.float32)\n self.matrix2d_grav = Tensor(self.grav * np.ones([self.im, self.jm, 1]), mstype.float32)\n self.matrix1_dti2 = Tensor(self.dti2 * np.ones([self.im, self.jm, self.kb]), mstype.float32)\n self.matrix2_dti2 = Tensor(2.0 * self.dti2 * np.ones([self.im, self.jm, self.kb]), mstype.float32)\n self.matrix_ispi = Tensor(self.ispi * np.ones([self.im, self.jm, 1]), mstype.float32)\n self.matrix_isp2i = Tensor(self.isp2i * np.ones([self.im, self.jm, 1]), mstype.float32)\n self.matrix2_dte = Tensor(2.0 * self.dte * np.ones([self.im, self.jm, 1]), mstype.float32)\n self.matrix_ramp = Tensor(self.ramp * np.ones([self.im, self.jm, 1]), mstype.float32)\n self.matrix_smoth = Tensor(0.5 * self.smoth * np.ones([self.im, self.jm, 1]), mstype.float32)\n self.matrix_alpha = Tensor(self.alpha * np.ones([self.im, self.jm, 1]), mstype.float32)\n self.mat_ones = Tensor(np.float32(np.ones([self.im, self.jm, self.kb])))\n self.mat_ones_2d = Tensor(np.float32(np.ones([self.im, self.jm, 1])))\n self.mat_twos_2d = Tensor(2 * np.float32(np.ones([self.im, self.jm, 1])))\n self.mat_zeros = Tensor(np.float32(np.zeros([self.im, self.jm, self.kb])))\n self.mat_zeros_im_jm_1 = Tensor(np.float32(np.zeros([self.im, self.jm, 1])))\n\n def _parameter_init(self, variable):\n \"\"\"parameter init\"\"\"\n self.z = Parameter(Tensor(variable[\"z\"]), name=\"z\", requires_grad=False)\n self.zz = Parameter(Tensor(variable[\"zz\"]), name=\"zz\", requires_grad=False)\n self.dzz = Parameter(Tensor(variable[\"dzz\"]), name=\"dzz\", 
requires_grad=False)\n self.dx = Parameter(Tensor(variable[\"dx\"]), name=\"dx\", requires_grad=False)\n self.dy = Parameter(Tensor(variable[\"dy\"]), name=\"dy\", requires_grad=False)\n self.dz = Parameter(Tensor(variable[\"dz\"]), name=\"dz\", requires_grad=False)\n self.cor = Parameter(Tensor(variable[\"cor\"]), name=\"cor\", requires_grad=False)\n self.h = Parameter(Tensor(variable[\"h\"]), name=\"h\", requires_grad=False)\n self.fsm = Parameter(Tensor(variable[\"fsm\"]), name=\"fsm\", requires_grad=False)\n self.dum = Parameter(Tensor(variable[\"dum\"]), name=\"dum\", requires_grad=False)\n self.dvm = Parameter(Tensor(variable[\"dvm\"]), name=\"dvm\", requires_grad=False)\n self.art = Parameter(Tensor(variable[\"art\"]), name=\"art\", requires_grad=False)\n self.aru = Parameter(Tensor(variable[\"aru\"]), name=\"aru\", requires_grad=False)\n self.arv = Parameter(Tensor(variable[\"arv\"]), name=\"arv\", requires_grad=False)\n self.rfe = Parameter(Tensor(variable[\"rfe\"]), name=\"rfe\", requires_grad=False)\n self.rfw = Parameter(Tensor(variable[\"rfw\"]), name=\"rfw\", requires_grad=False)\n self.rfn = Parameter(Tensor(variable[\"rfn\"]), name=\"rfn\", requires_grad=False)\n self.rfs = Parameter(Tensor(variable[\"rfs\"]), name=\"rfs\", requires_grad=False)\n self.east_e = Parameter(Tensor(variable[\"east_e\"]), name=\"east_e\", requires_grad=False)\n self.north_e = Parameter(Tensor(variable[\"north_e\"]), name=\"north_e\", requires_grad=False)\n self.east_c = Parameter(Tensor(variable[\"east_c\"]), name=\"east_c\", requires_grad=False)\n self.north_c = Parameter(Tensor(variable[\"north_c\"]), name=\"north_c\", requires_grad=False)\n self.east_u = Parameter(Tensor(variable[\"east_u\"]), name=\"east_u\", requires_grad=False)\n self.north_u = Parameter(Tensor(variable[\"north_u\"]), name=\"north_u\", requires_grad=False)\n self.east_v = Parameter(Tensor(variable[\"east_v\"]), name=\"east_v\", requires_grad=False)\n self.north_v = Parameter(Tensor(variable[\"north_v\"]), name=\"north_v\", requires_grad=False)\n self.tclim = Parameter(Tensor(variable[\"tclim\"]), name=\"tclim\", requires_grad=False)\n self.sclim = Parameter(Tensor(variable[\"sclim\"]), name=\"sclim\", requires_grad=False)\n self.rot = Parameter(Tensor(variable[\"rot\"]), name=\"rot\", requires_grad=False)\n self.vfluxf = Parameter(Tensor(variable[\"vfluxf\"]), name=\"vfluxf\", requires_grad=False)\n self.wusurf = Parameter(Tensor(variable[\"wusurf\"]), name=\"wusurf\", requires_grad=False)\n self.wvsurf = Parameter(Tensor(variable[\"wvsurf\"]), name=\"wvsurf\", requires_grad=False)\n self.e_atmos = Parameter(Tensor(variable[\"e_atmos\"]), name=\"e_atmos\", requires_grad=False)\n self.uabw = Parameter(Tensor(variable[\"uabw\"]), name=\"uabw\", requires_grad=False)\n self.uabe = Parameter(Tensor(variable[\"uabe\"]), name=\"uabe\", requires_grad=False)\n self.vabs = Parameter(Tensor(variable[\"vabs\"]), name=\"vabs\", requires_grad=False)\n self.vabn = Parameter(Tensor(variable[\"vabn\"]), name=\"vabn\", requires_grad=False)\n self.els = Parameter(Tensor(variable[\"els\"]), name=\"els\", requires_grad=False)\n self.eln = Parameter(Tensor(variable[\"eln\"]), name=\"eln\", requires_grad=False)\n self.ele = Parameter(Tensor(variable[\"ele\"]), name=\"ele\", requires_grad=False)\n self.elw = Parameter(Tensor(variable[\"elw\"]), name=\"elw\", requires_grad=False)\n self.ssurf = Parameter(Tensor(variable[\"ssurf\"]), name=\"ssurf\", requires_grad=False)\n self.tsurf = Parameter(Tensor(variable[\"tsurf\"]), name=\"tsurf\", 
requires_grad=False)\n self.tbe = Parameter(Tensor(variable[\"tbe\"]), name=\"tbe\", requires_grad=False)\n self.sbe = Parameter(Tensor(variable[\"sbe\"]), name=\"sbe\", requires_grad=False)\n self.sbw = Parameter(Tensor(variable[\"sbw\"]), name=\"sbw\", requires_grad=False)\n self.tbw = Parameter(Tensor(variable[\"tbw\"]), name=\"tbw\", requires_grad=False)\n self.tbn = Parameter(Tensor(variable[\"tbn\"]), name=\"tbn\", requires_grad=False)\n self.tbs = Parameter(Tensor(variable[\"tbs\"]), name=\"tbs\", requires_grad=False)\n self.sbn = Parameter(Tensor(variable[\"sbn\"]), name=\"sbn\", requires_grad=False)\n self.sbs = Parameter(Tensor(variable[\"sbs\"]), name=\"sbs\", requires_grad=False)\n self.wtsurf = Parameter(Tensor(variable[\"wtsurf\"]), name=\"wtsurf\", requires_grad=False)\n self.swrad = Parameter(Tensor(variable[\"swrad\"]), name=\"swrad\", requires_grad=False)\n self.z1 = Parameter(Tensor(variable[\"z\"][0, 0, :]), name=\"z1\", requires_grad=False)\n self.dz1 = Parameter(Tensor(variable[\"dz\"][0:1, 0:1, :]), name=\"dz1\", requires_grad=False)\n self.dzz1 = Parameter(Tensor(variable[\"dzz\"][0:1, 0:1, :]), name=\"dzz1\", requires_grad=False)\n self.z_3d = Parameter(Tensor(np.tile(variable[\"z\"], [self.im, self.jm, 1])), name=\"z_3d\", requires_grad=False)\n self.swrad0 = Parameter(Tensor(np.zeros([self.im, self.jm, 1], np.float32)), name=\"swrad0\", requires_grad=False)\n self.wssurf = Parameter(Tensor(np.zeros([self.im, self.jm, 1], np.float32)), name=\"wssurf\", requires_grad=False)\n self.global_step = Parameter(initializer(0, [1], mstype.int32), name='global_step', requires_grad=False)\n self.q2b = Parameter(self.q2b_init, name=\"q2b\", requires_grad=False)\n self.q2lb = Parameter(self.q2lb_init, name=\"q2lb\", requires_grad=False)\n self.aam = Parameter(self.aam_init, name=\"aam\", requires_grad=False)\n self.cbc = Parameter(self.cbc_init, name=\"cbc\", requires_grad=False)\n self.rmean = Parameter(self.rmean_init, name=\"rmean\", requires_grad=False)\n self.elf = Parameter(Tensor(np.zeros([self.im, self.jm, 1], np.float32)), name=\"elf\", requires_grad=False)\n\n def dens(self, si, ti, zz, h, fsm):\n \"\"\"\n density compute function\n Args:\n si: Salinity.\n ti: Potential temperature.\n zz: sigma coordinate, intermediate between Z.\n h: the bottom depth (m)\n fsm: Mask for scalar variables; = 0 over land; = 1 over water\n Returns:\n rhoo[Tensor], density.\n \"\"\"\n tr = ti + self.tbias\n sr = si + self.sbias\n tr2 = tr * tr\n tr3 = tr2 * tr\n tr4 = tr3 * tr\n p = self.grav * self.rhoref * (-1 * zz * self.mat_ones * h) * 1e-5\n rhor2 = -0.157406e0 + 6.793952e-2 * tr - 9.095290e-3 * tr2 + 1.001685e-4 * tr3 - 1.120083e-6 * tr4 + \\\n 6.536332e-9 * tr4 * tr\n rhor1 = rhor2 + (0.824493e0 - 4.0899e-3 * tr + 7.6438e-5 * tr2 - 8.2467e-7 * tr3 + 5.3875e-9 * tr4) * sr + \\\n (-5.72466e-3 + 1.0227e-4 * tr - 1.6546e-6 * tr2) * self.abs(sr) ** 1.5e0 + 4.8314e-4 * sr * sr\n cr1 = 1449.1e0 + .0821e0 * p + 4.55e0 * tr - .045e0 * tr2 + 1.34e0 * (sr - 35.e0)\n cr = p / (cr1 * cr1)\n rhor = rhor1 + 1.e5 * cr * (1.e0 - 2.e0 * cr)\n rhoo = rhor / self.rhoref * fsm\n return rhoo\n\n def advct(self, dx, dy, u, v, dt, aam, ub, x_d, y_d, vb):\n \"\"\"\n compute adv in x, y direction\n Args:\n dx, dy: Increment in x, y direction, respectively.\n u, v: Velocity in x, y direction, respectively.\n ub, vb: Velocity boundary in x, y direction, respectively.\n dt: time step.\n x_d, y_d: Grid increment in x, y direction.\n Returns:\n advx[Tensor], advy[Tensor]\n \"\"\"\n curv = (self.AYF(v) * 
self.DXB(self.AXF(dy)) - self.AXF(u) * self.DYB(self.AYF(dx))) / (dx * dy)\n        tmp23 = self.AXF(self.AXB(dt) * u) * self.AXF(u) - (dt * aam * 2e0 * self.DXF(ub) / x_d[3])\n        tmp23 = tmp23 * (1.0 - self.x_he)\n        advx = self.DXB(tmp23) / x_d[2] + self.DYF(self.AXB(self.AYB(dt) * v) * self.AYB(u) - self.AYB(self.AXB(dt))\n                                                   * self.AYB(self.AXB(aam)) * (self.DYB(ub) / y_d[0] + self.DXB(vb)\n                                                                                / x_d[0])) / y_d[2] - \\\n               self.AXB(curv * dt * self.AYF(v))\n        advx = advx * (1 - self.x_h) * (1 - self.y_he)\n        tmp = self.AYF(self.AYB(dt) * v) * self.AYF(v) - dt * aam * 2e0 * self.DYF(vb) / y_d[3]\n        tmp = tmp * (1 - self.y_he)\n        advy = self.DXF(self.AYB(self.AXB(dt) * u) * self.AXB(v) - self.AYB(self.AXB(dt)) * self.AYB(self.AXB(aam))\n                        * (self.DYB(ub) / y_d[0] + self.DXB(vb) / x_d[0])) / x_d[1] + self.DYB(tmp) / y_d[1] \\\n               + self.AYB(curv * dt * self.AXF(u))\n        advy = advy * (1 - self.y_h) * (1 - self.x_he)\n        return advx, advy\n\n    def baropg(self, x_d, y_d, z_d, zz, rho, rmean, dt, dz, dum, dvm):\n        \"\"\"\n        baropg\n        Args:\n            x_d, y_d, z_d: Grid increment in x, y, z direction.\n            zz: sigma coordinate, intermediate between Z\n            rho: density\n            rmean: Horizontal mean density field in z-coordinate.\n            dt: time step.\n            dz: Increment in z direction.\n            dum, dvm: Mask for the u, v component of velocity; = 0 over land; =1 over water\n\n        Returns:\n            drhox[Tensor], x-component of the internal baroclinic pressure gradient; rmean is subtracted from the\n            density before integrating\n            drhoy[Tensor], y-component of the internal baroclinic pressure gradient; rmean is subtracted from the\n            density before integrating\n        \"\"\"\n        tmp = -1 * self.DZB(zz) * self.DXB(self.AZB(rho - rmean)) / x_d[2] * self.AXB(dt) \\\n              + self.DXB(dt) / x_d[2] * self.DZB(self.AXB(rho - rmean)) / z_d[1] * self.AZB(zz) * self.AZB(dz)\n\n        tmp1 = -1 * zz * self.AXB(dt) * self.DXB(rho - rmean) / x_d[2]\n        tmp = tmp * (1 - self.z_h) + tmp1 * self.z_h\n        tmp = tmp * (1 - self.z_e)\n        drhox = self.ramp * self.grav * self.AXB(dt) * self.csum(tmp, 2) * dum\n        drhox = drhox * (1 - self.z_e)\n        tmp = -1 * self.DZB(zz) * self.DYB(self.AZB(rho - rmean)) / y_d[1] * self.AYB(dt) + \\\n              self.DYB(dt) / y_d[1] * self.DZB(self.AYB(rho - rmean)) / z_d[1] * self.AZB(zz) * self.AZB(dz)\n        tmp1 = -1 * zz * self.AYB(dt) * self.DYB(rho - rmean) / y_d[1]\n        tmp = tmp * (1 - self.z_h) + tmp1 * self.z_h\n        tmp = tmp * (1 - self.z_e)\n\n        drhoy = self.ramp * self.grav * self.AYB(dt) * self.csum(tmp, 2) * dvm\n        drhoy = drhoy * (1 - self.z_e)\n        return drhox, drhoy\n\n    def lateral_viscosity(self, dx, dy, u, v, dt, aam, ub, vb, x_d, y_d, z_d, rho, rmean):\n        \"\"\"\n        lateral viscosity compute function.\n        Args:\n            dx, dy: Increment in x, y direction, respectively.\n            u, v: Velocity in x, y direction, respectively.\n            ub, vb: Velocity boundary in x, y direction, respectively.\n            x_d, y_d, z_d: Grid increment in x, y, z direction.\n            rho: density\n            rmean: Horizontal mean density field in z-coordinate.\n            dt: time step.\n\n        Returns:\n            drhox[Tensor], x-component of the internal baroclinic pressure gradient; rmean is subtracted from the\n            density before integrating.\n            drhoy[Tensor], y-component of the internal baroclinic pressure gradient; rmean is subtracted from the\n            density before integrating.\n            advx[Tensor], advy[Tensor], horizontal advection and diffusion terms.\n            aam[Tensor], horizontal kinematic viscosity.\n        \"\"\"\n        advx, advy = self.advct(dx, dy, u, v, dt, aam, ub, x_d, y_d, vb)\n        drhox, drhoy = self.baropg(x_d, y_d, z_d, self.zz, rho, rmean, dt, self.dz, self.dum, self.dvm)\n        aam = self.horcon * dx * dy * self.sqrt(\n            self.DXF(u) / x_d[3] 
* self.DXF(u) / x_d[3] + self.DYF(v) / y_d[3] * self.DYF(v) / y_d[3] +\n 0.5 * (self.DYB(self.AYF(self.AXF(u))) / y_d[3] + self.DXB(self.AXF(self.AYF(v))) / x_d[3]) *\n (self.DYB(self.AYF(self.AXF(u))) / y_d[3] + self.DXB(self.AXF(self.AYF(v))) / x_d[3]))\n\n aam = aam * (1 - self.x_he3d) + self.aam_init * self.x_he3d\n aam = aam * (1 - self.y_he3d) + self.aam_init * self.y_he3d\n aam = aam * (1 - self.z_e) + self.aam_init * self.z_e\n return advx, advy, drhox, drhoy, aam\n\n def advave(self, x_d, y_d, d, aam2d, uab, vab, ua, va):\n \"\"\"\n advave\n Args:\n x_d, y_d: Grid increment in x, y direction.\n d: Fluid column depth.\n aam2d: vertical average of aam(m2s-1)\n ua, va: Vertical average velocity in x, y direction, respectively.\n uab, vab: Vertical average velocity boundary in x, y direction, respectively.\n\n Returns:\n advua[Tensor], advva[Tensor]\n \"\"\"\n tps = self.AYB(self.AXB(d)) * self.AXB(self.AYB(aam2d)) * (self.DYB(uab) / y_d[0] + self.DXB(vab) / x_d[0])\n tmp = self.AXF(self.AXB(d) * ua) * self.AXF(ua) - 2. * d * aam2d * \\\n self.DXF(uab) / x_d[3]\n tmp = tmp * (1 - self.x_he)\n advua = self.DXB(tmp) / x_d[2] + self.DYF(self.AXB(self.AYB(d) * va) * self.AYB(ua) - tps) / y_d[2]\n advua = advua * (1 - self.x_h)\n advua = advua * (1 - self.y_he)\n\n tmp = self.AYF(self.AYB(d) * va) * self.AYF(va) - 2. * d * aam2d * self.DYF(vab) / y_d[3]\n tmp = tmp * (1 - self.y_he)\n advva = self.DXF(self.AYB(self.AXB(d) * ua) * self.AXB(va) - tps) / x_d[1] + self.DYB(tmp) / y_d[1]\n advva = advva * (1 - self.y_h)\n advva = advva * (1 - self.x_he)\n return advua, advva\n\n def mode_interaction(self, advx, advy, drhox, drhoy, aam, x_d, y_d, d, uab, vab, ua, va, el):\n \"\"\"\n external and internal mode variables interaction\n Args:\n drhox, drhoy: x-component and y-component of the internal baroclinic pressure gradient subtract rmean from\n density before integrating.\n aam: horizontal kinematic viscosity.\n x_d, y_d: Grid increment in x, y direction.\n d: Fluid column depth.\n aam2d: vertical average of aam(m2s-1)\n ua, va: Vertical average velocity in x, y direction, respectively.\n uab, vab: Vertical average velocity boundary in x, y direction, respectively.\n el: the surface elevation as used in the external mode (m).\n\n Returns:\n tuple[Tensor], update variables of external mode\n \"\"\"\n adx2d = self.reduce_sum(advx * self.dz, 2)\n ady2d = self.reduce_sum(advy * self.dz, 2)\n drx2d = self.reduce_sum(drhox * self.dz, 2)\n dry2d = self.reduce_sum(drhoy * self.dz, 2)\n aam2d = self.reduce_sum(aam * self.dz, 2)\n advua, advva = self.advave(x_d, y_d, d, aam2d, uab, vab, ua, va)\n\n adx2d = adx2d - advua\n ady2d = ady2d - advva\n egf = el * self.ispi\n utf = ua * 2.0 * self.AXB(d) * self.isp2i\n vtf = va * 2.0 * self.AYB(d) * self.isp2i\n return adx2d, ady2d, drx2d, dry2d, aam2d, advua, advva, egf, utf, vtf\n\n def external_el(self, x_d, y_d, d, ua, va, elb):\n \"\"\"\n compute surface elevation of external 2d mode\n Args:\n x_d, y_d: Grid increment in x, y direction.\n d: Fluid column depth.\n ua, va: Vertical average velocity in x, y direction, respectively.\n elb: the surface elevation boundary as used in the external mode (m).\n\n Returns:\n elf[Tensor], the surface elevation as used in the external mode (m).\n \"\"\"\n elf = elb - self.matrix2_dte * ((self.DXF(self.AXB(d) * ua) / x_d[3] + self.DYF(self.AYB(d) * va) / y_d[3])\n - self.vfluxf)\n tmp = P.Pad(((0, 1), (0, 0), (0, 0)))(elf)\n tmp = P.Slice()(tmp, (1, 0, 0), P.Shape()(elf))\n elf = elf * (1 - self.x_h) + tmp * self.x_h\n 
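# Each Pad/Slice pair here is a one-cell shift written with graph-friendly ops: padding\n        # a zero row on one side and slicing back to the original shape moves every value by\n        # one cell, and the mask blend then copies that shifted interior neighbour into the\n        # open-boundary row/column (a zero-gradient condition). Hedged sketch for a\n        # hypothetical tensor x: P.Slice()(P.Pad(((0, 1), (0, 0), (0, 0)))(x), (1, 0, 0),\n        # P.Shape()(x)) equals x shifted by one cell along the first axis.\n        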
tmp = P.Pad(((1, 0), (0, 0), (0, 0)))(elf)\n tmp = P.Slice()(tmp, (0, 0, 0), P.Shape()(elf))\n elf = elf * (1 - self.x_e) + tmp * self.x_e\n tmp = P.Pad(((0, 0), (0, 1), (0, 0)))(elf)\n tmp = P.Slice()(tmp, (0, 1, 0), P.Shape()(elf))\n elf = elf * (1 - self.y_h) + tmp * self.y_h\n tmp = P.Pad(((0, 0), (1, 0), (0, 0)))(elf)\n tmp = P.Slice()(tmp, (0, 0, 0), P.Shape()(elf))\n elf = elf * (1 - self.y_e) + tmp * self.y_e\n elf = elf * self.fsm\n return elf\n\n def external_ua(self, iext, x_d, y_d, elf, d, ua, va, uab, vab, el, elb, advua, aam2d, adx2d, drx2d, wubot):\n \"\"\"\n compute ua of external 2d mode\n Args:\n iext: step of external mode loop.\n x_d, y_d: Grid increment in x, y direction.\n elf: the surface elevation as used in the external mode (m).\n d: Fluid column depth.\n ua, va: Vertical average velocity in x, y direction, respectively.\n uab, vab: Vertical average velocity boundary in x, y direction, respectively.\n el: the surface elevation as used in the external mode (m).\n elb: the surface elevation boundary as used in the external mode (m).\n aam2d: vertical average of aam(m2s-1)\n adx2d: vertical integrals of advx.\n drx2d: vertical integrals of drhox.\n wubot: <wu(-1)> momentum fluxes at the bottom (m2s-2)\n\n Returns:\n uaf[Tensor], Vertical average velocity in x direction\n \"\"\"\n if iext % self.ispadv == 0:\n tmp = self.AXF(self.AXB(d) * ua) * self.AXF(ua) - self.mat_twos_2d * d * aam2d * self.DXF(uab) / x_d[3]\n tmp = tmp * (1 - self.x_he)\n advua = self.DXB(tmp) / x_d[2] + self.DYF(self.AXB(self.AYB(d) * va) * self.AYB(ua) - self.AYB(self.AXB(d))\n * self.AXB(self.AYB(aam2d)) * (self.DYB(uab) / y_d[0]\n + self.DXB(vab) / x_d[0])) / y_d[2]\n advua = advua * (1 - self.x_h)\n advua = advua * (1 - self.y_he)\n\n uaf = (self.AXB(self.h + elb) * uab - self.matrix2_dte * (adx2d + advua - self.AXB(self.cor * d * self.AYF(va))\n + self.matrix2d_grav * self.AXB(d) *\n ((self.mat_ones_2d - 2.0 * self.alpha) * self.DXB(el)\n / x_d[2] + self.matrix_alpha\n * (self.DXB(elb) / x_d[2] + self.DXB(elf) / x_d[2])\n + self.DXB(self.e_atmos) / x_d[2]) + drx2d\n + (self.wusurf - wubot))) / self.AXB(self.h + elf)\n\n tmpua = self.mat_zeros_im_jm_1\n tmpel = self.mat_zeros_im_jm_1\n tmpua = tmpua * (1 - self.x_e1) + self.uabe * self.x_e1\n tmpua = tmpua * (1 - self.x_h1) + self.uabw * self.x_h1\n tmpel = tmpel * (1 - self.x_e1) + self.ele * self.x_e1\n tmpel = tmpel * (1 - self.x_h1) + self.elw * self.x_h1\n tmp = self.matrix_ramp * (tmpua + self.rfe * P.Sqrt()(self.grav / self.h) * (el - tmpel))\n tmp0 = P.Pad(((1, 0), (0, 0), (0, 0)))(tmp)\n tmp0 = P.Slice()(tmp0, (0, 0, 0), P.Shape()(tmp))\n uaf = uaf * (1 - self.x_e) + tmp0 * self.x_e\n tmp = self.matrix_ramp * (tmpua - self.rfw * P.Sqrt()(self.grav / self.h) * (el - tmpel))\n uaf = uaf * (1 - self.x_h1) + tmp * self.x_h1\n tmp0 = P.Pad(((0, 1), (0, 0), (0, 0)))(tmp)\n tmp0 = P.Slice()(tmp0, (1, 0, 0), P.Shape()(tmp))\n uaf = uaf * (1 - self.x_h) + tmp0 * self.x_h\n uaf = uaf * (1 - self.y_he)\n uaf = uaf * self.dum\n return advua, uaf\n\n def external_va(self, iext, x_d, y_d, elf, d, ua, va, uab, vab, el, elb, advva, aam2d, ady2d, dry2d, wvbot):\n \"\"\"\n compute va of external 2d mode\n Args:\n iext: step of external mode loop.\n x_d, y_d: Grid increment in x, y direction.\n elf: the surface elevation as used in the external mode (m).\n d: Fluid column depth.\n ua, va: Vertical average velocity in x, y direction, respectively.\n uab, vab: Vertical average velocity boundary in x, y direction, respectively.\n el: the surface 
elevation as used in the external mode (m).\n            elb: the surface elevation boundary as used in the external mode (m).\n            aam2d: vertical average of aam(m2s-1)\n            ady2d: vertical integrals of advy.\n            dry2d: vertical integrals of drhoy.\n            wvbot: <wv(-1)> momentum fluxes at the bottom (m2s-2)\n\n        Returns:\n            vaf[Tensor], Vertical average velocity in y direction\n        \"\"\"\n        if iext % self.ispadv == 0:\n            tmp = self.AYF(self.AYB(d) * va) * self.AYF(va) - self.mat_twos_2d * d * aam2d * self.DYF(vab) / y_d[3]\n            tmp = tmp * (1 - self.y_he)\n            advva = self.DXF(self.AYB(self.AXB(d) * ua) * self.AXB(va) - self.AYB(self.AXB(d))\n                             * self.AXB(self.AYB(aam2d)) * (self.DYB(uab) / y_d[0] +\n                                                            self.DXB(vab) / x_d[0])) / x_d[1] + self.DYB(tmp) / y_d[1]\n            advva = advva * (1 - self.y_h)\n            advva = advva * (1 - self.x_he)\n\n        vaf = (self.AYB(self.h + elb) * vab - self.matrix2_dte * (ady2d + advva + self.AYB(self.cor * d * self.AXF(ua))\n                                                                  + self.matrix2d_grav * self.AYB(d) *\n                                                                  ((self.mat_ones_2d - 2.0 * self.alpha) *\n                                                                   self.DYB(el) / y_d[1] + self.matrix_alpha *\n                                                                   (self.DYB(elb) / y_d[1] + self.DYB(elf) / y_d[1]) +\n                                                                   self.DYB(self.e_atmos) / y_d[1]) + dry2d +\n                                                                  (self.wvsurf - wvbot))) / self.AYB(self.h + elf)\n\n        tmpva = self.mat_zeros_im_jm_1\n        tmpel = self.mat_zeros_im_jm_1\n        tmpva = tmpva * (1 - self.y_e1) + self.vabn * self.y_e1\n        tmpva = tmpva * (1 - self.y_h1) + self.vabs * self.y_h1\n        tmpel = tmpel * (1 - self.y_e1) + self.eln * self.y_e1\n        tmpel = tmpel * (1 - self.y_h1) + self.els * self.y_h1\n        tmp = self.matrix_ramp * (tmpva + self.rfn * P.Sqrt()(self.grav / self.h) * (el - tmpel))\n        tmp0 = P.Pad(((0, 0), (1, 0), (0, 0)))(tmp)\n        tmp0 = P.Slice()(tmp0, (0, 0, 0), P.Shape()(tmp))\n        vaf = vaf * (1 - self.y_e) + tmp0 * self.y_e\n        tmp = self.matrix_ramp * (tmpva - self.rfs * P.Sqrt()(self.grav / self.h) * (el - tmpel))\n        vaf = vaf * (1 - self.y_h1) + tmp * self.y_h1\n        tmp0 = P.Pad(((0, 0), (0, 1), (0, 0)))(tmp)\n        tmp0 = P.Slice()(tmp0, (0, 1, 0), P.Shape()(tmp))\n        vaf = vaf * (1 - self.y_h) + tmp0 * self.y_h\n        vaf = vaf * (1 - self.x_he)\n        vaf = vaf * self.dvm\n        return advva, vaf\n\n    def external_update(self, iext, etf, ua, uab, va, vab, el, elb, elf, uaf, vaf, egf, utf, vtf):\n        \"\"\"\n        update variables of external 2d mode\n        Args:\n            iext: step of external mode loop.\n            ua, va: Vertical average velocity in x, y direction, respectively.\n            uab, vab: Vertical average velocity boundary in x, y direction, respectively.\n            egf: the surface elevation also used in the internal mode for the pressure gradient and derived from el\n            utf, vtf: ua, va time averaged over the interval, DT = dti(ms-1)\n\n        Returns:\n            tuple[Tensor], update variables of external mode\n        \"\"\"\n        vamax = P.ReduceMax()(P.Abs()(vaf))\n        if iext == (self.isplit - 2):\n            etf = 0.25 * self.smoth * elf\n        elif iext == (self.isplit - 1):\n            etf = etf + 0.5 * (1.0 - 0.5 * self.smoth) * elf\n        elif iext == self.isplit:\n            etf = (etf + 0.5 * elf) * self.fsm\n\n        # TODO fix control flow\n        uab = ua + self.matrix_smoth * (uab - self.mat_twos_2d * ua + uaf)\n        ua = F.depend(uaf, uab)\n        vab = va + self.matrix_smoth * (vab - self.mat_twos_2d * va + vaf)\n        va = F.depend(vaf, vab)\n        elb = el + self.matrix_smoth * (elb - self.mat_twos_2d * el + elf)\n        el = F.depend(elf, elb)\n        d = self.h + el\n        if iext != self.isplit:\n            egf = egf + el * self.matrix_ispi\n            utf = utf + self.mat_twos_2d * ua * self.AXB(d) * self.matrix_isp2i\n            vtf = vtf + self.mat_twos_2d * va * self.AYB(d) * self.matrix_isp2i\n        return etf, uab, ua, vab, va, elb, el, d, egf, utf, vtf, vamax\n\n    def internal_w(self, x_d, 
y_d, dt, u, v, etf, etb, vfluxb):\n \"\"\"\n compute velocity in z direction\n Args:\n x_d, y_d: Grid increment in x, y direction.\n dt: time step.\n u, v: Velocity in x, y direction, respectively.\n etf:\n etb:\n vfluxb:\n\n Returns:\n w[Tensor], velocity in z direction\n \"\"\"\n del_w = Shift((0, 0, 1))(P.CumSum()(self.dz * (self.DXF(self.AXB(dt) * u) / x_d[3] + self.DYF(self.AYB(dt) * v)\n / y_d[3] + (etf - etb) / self.dti2), 2))\n w = (0.5 * (vfluxb + self.vfluxf) + del_w)\n w_shape = P.Shape()(w)\n tmp_fsm = P.BroadcastTo(w_shape)(self.fsm)\n w = w * tmp_fsm\n w = w * (1. - self.x_he3d)\n w = w * (1. - self.z_e)\n return w\n\n def adjust_uv(self, u, v, utb, vtb, utf, vtf, dt):\n \"\"\"\n adjust velocity in x, y direction\n Args:\n u, v: Velocity in x, y direction, respectively.\n utb, vtb: ua, va boundary time averaged over the interval, DT = dti(ms-1)\n utf, vtf: ua, va time averaged over the interval, DT = dti(ms-1)\n dt: time step.\n\n Returns:\n tuple[Tensor], velocity in x, y direction\n\n \"\"\"\n u = u - self.reduce_sum(u * self.dz, 2) + (utb + utf) / (2.0 * self.AXB(dt))\n u = u * (1 - self.x_h3d)\n u = u * (1. - self.z_e)\n v = v - self.reduce_sum(v * self.dz, 2) + (vtb + vtf) / (2.0 * self.AYB(dt))\n v = v * (1 - self.y_h3d)\n v = v * (1 - self.z_e)\n return u, v\n\n def internal_q(self, x_d, y_d, z_d, etf, aam, q2b, q2lb, q2, q2l, kq, km, kh, u, v, w, dt, dhb, rho,\n wubot, wvbot, t, s):\n \"\"\"\n Compute turbulence kinetic energy of internal mode. Refer to the paper \"OpenArray v1.0: a simple operator\n library for the decoupling of ocean modeling and parallel computing. \" Appendix B formula B13 and B14.\n Args:\n x_d, y_d, z_d: Grid increment in x, y, z direction, respectively.\n q2: Turbulence kinetic energy.\n q2l: Production of turbulence kinetic energy and turbulence length scale.\n km: Vertical kinematic viscosity.\n kh: Vertical mixing coefficient of heat and salinity.\n kq: Vertical mixing coefficient of turbulence kinetic energy.\n u, v, w: Velocity in x, y, w direction, respectively.\n dt: time step.\n t: Potential temperature.\n s: Salinity.\n\n Returns:\n tuple[Tensor], variables of turbulence kinetic energy.\n \"\"\"\n dhf = self.h + etf\n q2f = (q2b * dhb - self.dti2 * (-self.DZB(self.AZF(w * q2)) / z_d[1] + self.DXF(self.AXB(q2) * self.AXB(dt) *\n self.AZB(u) -\n self.AZB(self.AXB(aam)) *\n self.AXB(self.h) *\n self.DXB(q2b) / x_d[2] *\n self.dum) / x_d[3] +\n self.DYF(self.AYB(q2) * self.AYB(dt) * self.AZB(v) - self.AZB(self.AYB(aam)) *\n self.AYB(self.h) * self.DYB(q2b) / y_d[1] * self.dvm) / y_d[3])) / dhf\n q2lf = (q2lb * dhb - self.dti2 * (-self.DZB(self.AZF(w * q2l)) / z_d[1] + self.DXF(self.AXB(q2l) * self.AXB(dt)\n * self.AZB(u) -\n self.AZB(self.AXB(aam)) *\n self.AXB(self.h) *\n self.DXB(q2lb) / x_d[2] *\n self.dum) / x_d[3] +\n self.DYF(self.AYB(q2l) * self.AYB(dt) * self.AZB(v) - self.AZB(self.AYB(aam))\n * self.AYB(self.h) * self.DYB(q2lb) / y_d[1] * self.dvm) /\n y_d[3])) / dhf\n\n a1 = 0.92\n b1 = 16.6\n a2 = 0.74\n b2 = 10.1\n c1 = 0.08\n e1 = 1.8\n e2 = 1.33\n sef = 1.0\n cbcnst = 100.0\n surfl = 2.e5\n shiw = 0.0\n\n dzz1 = Shift((0, 0, 1))(self.dzz1)\n a = self.dz1 * dzz1 * self.mat_ones\n a = - self.dti2 * (self.AZF(kq) + self.umol) / (a * dhf * dhf + self.small_debug)\n a = a * (1 - self.z_he)\n\n dz1 = Shift((0, 0, 1))(self.dz1)\n c = dz1 * dzz1 * self.mat_ones\n c = - self.dti2 * (self.AZB(kq) + self.umol) / (c * dhf * dhf + self.small_debug)\n c = c * (1 - self.z_he)\n\n utau2 = P.Sqrt()(self.AXF(self.wusurf) * 
self.AXF(self.wusurf) + self.AYF(self.wvsurf) * self.AYF(self.wvsurf))\n\n ee = self.mat_zeros\n gg = (15.8 * cbcnst) ** (2.0 / 3.0) * utau2 * self.z_h * self.mat_ones\n l0 = surfl * utau2 / self.grav * self.mat_ones\n\n tmp = P.Sqrt()(self.AXF(wubot) * self.AXF(wubot) + self.AYF(wvbot) * self.AYF(wvbot)) * (16.6 ** (2.0 / 3.0)) \\\n * sef\n q2f = q2f * (1 - self.z_e) + tmp * self.z_e\n p = self.grav * self.rhoref * (-self.zz * self.mat_ones * self.h) * 1.e-4\n cc = 1449.10 + 0.00821 * p + 4.55 * (t + self.tbias) - 0.045 * (t + self.tbias) * (t + self.tbias) + 1.34 * \\\n (s + self.sbias - 35.0)\n cc = cc / P.Sqrt()((1.0 - 0.01642 * p / cc) * (1.0 - 0.4 * p / (cc * cc)))\n cc = cc * (1 - self.z_e)\n\n q2b = P.Abs()(q2b)\n q2lb = P.Abs()(q2lb)\n boygr = self.matrix_grav * (self.matrix_grav * self.h - self.DZB(rho) * self.AZB(cc * cc) / z_d[1]) / \\\n (self.h * self.AZB(cc * cc))\n boygr = boygr * (1 - self.z_h)\n l_tmp = q2lb / q2b\n kappa_l0 = self.kappa * l0\n\n tmp = P.Cast()(kappa_l0 > l_tmp, mstype.float32) * P.Cast()(self.z_3d > -0.5, mstype.float32)\n l_tmp = kappa_l0 * tmp + l_tmp * (1 - tmp)\n gh = l_tmp * l_tmp * boygr / q2b\n tmp = gh < 0.028\n tmp = P.Cast()(tmp, mstype.float32)\n gh = gh * tmp + 0.028 * (1 - tmp)\n l_tmp = l_tmp * (1 - self.z_h) + self.kappa * l0 * self.z_h\n l_tmp = l_tmp * (1 - self.z_e)\n gh = gh * (1 - self.z_he)\n kn = km * sef * (self.DZB(self.AXF(u)) / z_d[1] * self.DZB(self.AXF(u)) / z_d[1] + self.DZB(self.AYF(v)) / z_d[\n 1] * self.DZB(self.AYF(v)) / z_d[1]) / (dhf * dhf) - shiw * km * boygr + kh * boygr\n kn = kn * (1 - self.x_e3d)\n\n dtef = P.Sqrt()(q2b) / (b1 * l_tmp + self.small)\n dtef = dtef * (1 - self.z_he)\n\n ggtmp = gg\n eetmp = ee\n for k in range(1, self.kbm1):\n gg = 1.0 / (a + c * (1 - Shift((0, 0, 1))(eetmp)) - self.matrix2_dti2 * dtef - 1.0)\n ee = a * gg\n gg = (-self.matrix2_dti2 * kn + c * Shift((0, 0, 1))(ggtmp) - q2f) * gg\n ee = eetmp * (1 - self.zslice[k]) + ee * self.zslice[k]\n gg = ggtmp * (1 - self.zslice[k]) + gg * self.zslice[k]\n ggtmp = gg\n eetmp = ee\n\n q2f = q2f * (1 - self.x_he3d)\n q2ftmp = q2f\n for k in range(self.kbm1 - 1, -1, -1):\n q2f = ee * Shift((0, 0, -1))(q2f) + gg\n q2f = q2ftmp * (1 - self.zslice[k]) + q2f * self.zslice[k]\n q2ftmp = q2f\n q2f = q2f * (1 - self.y_e3d)\n ggtmp = gg\n eetmp = ee\n\n ee = ee * (1 - self.z_h1)\n gg = gg * (1 - self.z_h1)\n q2lf = q2lf * (1 - self.z_he)\n\n dtef = dtef * (1.0 + e2 * ((1.0 / P.Abs()(\n self.z1 - P.Slice()(self.z1, (0,), (1,)) + self.small_debug) +\n 1.0 / P.Abs()(self.z1 - P.Slice()(self.z1, (self.kb - 1,), (1,)) +\n self.small_debug)) * l_tmp / (dhf * self.kappa)) *\n ((1.0 / P.Abs()(self.z1 - P.Slice()(self.z1, (0,), (1,)) + self.small_debug) +\n 1.0 / P.Abs()(self.z1 - P.Slice()(self.z1, (self.kb - 1,), (1,)) + self.small_debug)) *\n l_tmp / (dhf * self.kappa)))\n\n for k in range(1, self.kbm1):\n gg = 1.0 / (a + c * (1 - Shift((0, 0, 1))(eetmp)) - self.matrix1_dti2 * dtef - 1.0)\n ee = a * gg\n gg = (-self.matrix1_dti2 * kn * l_tmp * e1 + c * Shift((0, 0, 1))(ggtmp) - q2lf) * gg\n ee = eetmp * (1 - self.zslice[k]) + ee * self.zslice[k]\n gg = ggtmp * (1 - self.zslice[k]) + gg * self.zslice[k]\n ggtmp = gg\n eetmp = ee\n gg = gg * (1 - self.x_e3d)\n\n q2lftmp = q2lf\n for k in range(self.kbm1 - 1, 0, -1):\n q2lf = ee * Shift((0, 0, -1))(q2lf) + gg\n q2lf = q2lftmp * (1 - self.zslice[k]) + q2lf * self.zslice[k]\n q2lftmp = q2lf\n\n res_compare = P.LogicalOr()((q2f <= self.small), (q2lf <= self.small))\n filters = P.Cast()(res_compare, mstype.float32)\n 
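# filters is a 0/1 mask from the comparison above; the recurring blend\n        # x * (1 - mask) + y * mask emulates the masked in-place update x[mask] = y[mask]\n        # with elementwise ops only. Hedged sketch with hypothetical values: mask = [0., 1.],\n        # x = [1., 2.], y = [9., 9.] gives x * (1 - mask) + y * mask == [1., 9.]\n        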
filters = filters * (1 - self.z_he)\n q2f = q2f * (1 - filters) + self.matrix_small * filters\n q2lf = q2lf * (1 - filters) + 0.1 * dt * self.small * filters\n\n tmp = -gh * (3.0 * a2 * b2 + 18.0 * a1 * a2) + 1\n sh = a2 * (1.0 - 6.0 * a1 / b1) / tmp\n tmp = - gh * (9.0 * a1 * a2) + 1\n sm = (a1 * (1.0 - 3.0 * c1 - 6.0 * a1 / b1) + sh * (18.0 * a1 * a1 + 9.0 * a1 * a2) * gh) / tmp\n\n kn = l_tmp * P.Sqrt()(P.Abs()(q2))\n kq = (kn * sh * 0.41 + kq) * 0.5\n km = (kn * sm + km) * 0.5\n kh = (kn * sh + kh) * 0.5\n\n tmp0 = P.Pad(((0, 0), (1, 0), (0, 0)))(km)\n tmp0 = P.Slice()(tmp0, (0, 0, 0), P.Shape()(km))\n km = km * (1 - self.y_e) + tmp0 * self.fsm * self.y_e\n tmp0 = P.Pad(((0, 0), (1, 0), (0, 0)))(kh)\n tmp0 = P.Slice()(tmp0, (0, 0, 0), P.Shape()(kh))\n kh = kh * (1 - self.y_e) + tmp0 * self.fsm * self.y_e\n tmp0 = P.Pad(((0, 0), (0, 1), (0, 0)))(km)\n tmp0 = P.Slice()(tmp0, (0, 1, 0), P.Shape()(km))\n km = km * (1 - self.y_h) + tmp0 * self.fsm * self.y_h\n tmp0 = P.Pad(((0, 0), (0, 1), (0, 0)))(kh)\n tmp0 = P.Slice()(tmp0, (0, 1, 0), P.Shape()(kh))\n kh = kh * (1 - self.y_h) + tmp0 * self.fsm * self.y_h\n\n tmp0 = P.Pad(((1, 0), (0, 0), (0, 0)))(km)\n tmp0 = P.Slice()(tmp0, (0, 0, 0), P.Shape()(km))\n km = km * (1 - self.x_e) + tmp0 * self.fsm * self.x_e\n tmp0 = P.Pad(((1, 0), (0, 0), (0, 0)))(kh)\n tmp0 = P.Slice()(tmp0, (0, 0, 0), P.Shape()(kh))\n kh = kh * (1 - self.x_e) + tmp0 * self.fsm * self.x_e\n tmp0 = P.Pad(((0, 1), (0, 0), (0, 0)))(km)\n tmp0 = P.Slice()(tmp0, (1, 0, 0), P.Shape()(km))\n km = km * (1 - self.x_h) + tmp0 * self.fsm * self.x_h\n tmp0 = P.Pad(((0, 1), (0, 0), (0, 0)))(kh)\n tmp0 = P.Slice()(tmp0, (1, 0, 0), P.Shape()(kh))\n kh = kh * (1 - self.x_h) + tmp0 * self.fsm * self.x_h\n\n tmpu = u\n tmpv = v\n tmp0 = P.Pad(((0, 1), (0, 0), (0, 0)))(tmpu)\n tmp0 = P.Slice()(tmp0, (1, 0, 0), P.Shape()(tmpu))\n tmpu = tmpu * (1 - self.x_h) + tmp0 * self.x_h\n tmp0 = P.Pad(((0, 0), (0, 1), (0, 0)))(tmpu)\n tmp0 = P.Slice()(tmp0, (0, 1, 0), P.Shape()(tmpu))\n tmpu = tmpu * (1 - self.y_h) + tmp0 * self.y_h\n\n # ==========================EAST\n tmp1 = q2 - self.dti * ((0.5 * (u - P.Abs()(u))) * (self.small - q2) / self.AXB(self.dx) +\n (0.5 * (u + P.Abs()(u))) * self.DXB(q2) / x_d[2])\n q2f = q2f * (1 - self.x_e) + tmp1 * self.x_e\n tmp1 = q2l - self.dti * ((0.5 * (u - P.Abs()(u))) * (self.small - q2l) / self.AXB(self.dx) +\n (0.5 * (u + P.Abs()(u))) * self.DXB(q2l) / x_d[2])\n q2lf = q2lf * (1 - self.x_e) + tmp1 * self.x_e\n\n # ==========================WEST\n tmp1 = q2 - self.dti * ((0.5 * (tmpu + P.Abs()(tmpu))) * (q2 - self.small) / self.AXF(self.dx) +\n (0.5 * (tmpu - P.Abs()(tmpu))) * self.DXF(q2) / x_d[2])\n q2f = q2f * (1 - self.x_h) + tmp1 * self.x_h\n tmp1 = q2l - self.dti * ((0.5 * (tmpu + P.Abs()(tmpu))) * (q2l - self.small) / self.AXF(self.dx) +\n (0.5 * (tmpu - P.Abs()(tmpu))) * self.DXF(q2l) / x_d[2])\n q2lf = q2lf * (1 - self.x_h) + tmp1 * self.x_h\n\n # ==========================NORTH\n tmp1 = q2 - self.dti * ((0.5 * (v - P.Abs()(v))) * (self.small - q2) / self.AYB(self.dy) +\n (0.5 * (v + P.Abs()(v))) * self.DYB(q2) / y_d[1])\n q2f = q2f * (1 - self.y_e) + tmp1 * self.y_e\n tmp1 = q2l - self.dti * ((0.5 * (v - P.Abs()(v))) * (self.small - q2l) / self.AYB(self.dy) +\n (0.5 * (v + P.Abs()(v))) * self.DYB(q2l) / y_d[1])\n q2lf = q2lf * (1 - self.y_e) + tmp1 * self.y_e\n\n # ==========================SOUTH\n tmp1 = q2 - self.dti * ((0.5 * (tmpv + P.Abs()(tmpv))) * (q2 - self.small) / self.AYF(self.dy) +\n (0.5 * (tmpv - P.Abs()(tmpv))) * self.DYF(q2) / 
y_d[1])\n q2f = q2f * (1 - self.y_h) + tmp1 * self.y_h\n tmp1 = q2l - self.dti * ((0.5 * (tmpv + P.Abs()(tmpv))) * (q2l - self.small) / self.AYF(self.dy) +\n (0.5 * (tmpv - P.Abs()(tmpv))) * self.DYF(q2l) / y_d[1])\n q2lf = q2lf * (1 - self.y_h) + tmp1 * self.y_h\n\n q2f = q2f * self.fsm + 1.e-10\n q2lf = q2lf * self.fsm + 1.e-10\n\n q2b = q2 + 0.5 * self.smoth * (q2f + q2b - 2.0 * q2)\n q2 = q2f\n q2lb = q2l + 0.5 * self.smoth * (q2lf + q2lb - 2.0 * q2l)\n q2l = q2lf\n\n return dhf, a, c, gg, ee, kq, km, kh, q2b, q2, q2lb, q2l\n\n def internal_u(self, x_d, z_d, dhf, u, v, w, ub, vb, egf, egb, ee, gg, cbc, km, advx, drhox, dt, dhb):\n \"\"\"\n compute velocity in x direction of internal mode. Refer to the paper \"OpenArray v1.0: a simple operator\n library for the decoupling of ocean modeling and parallel computing. \" Appendix B formula B9.\n Args:\n x_d, z_d: Grid increment in x, z direction, respectively.\n u, v, w: Velocity in x, y, w direction, respectively.\n ub, vb: Velocity boundary in x, y direction, respectively.\n km: Vertical kinematic viscosity.\n egf: the surface elevation also used in the internal mode for the pressure gradient and derived from el\n egb: the surface elevation boundary also used in the internal mode for the pressure gradient and derived\n from el.\n dt: time step.\n\n Returns:\n tuple[Tensor], variables of velocity of x direction.\n \"\"\"\n dh = self.AXB(dhf)\n tmp = self.AXB(w) * self.AZB(u)\n dztmp = z_d[0] * (1 - self.z_e) + self.small_debug * self.z_e\n dzztmp = self.dzz * (1 - self.z_e) + self.small_debug * self.z_e\n\n uf = (self.AXB(dhb) * ub - self.dti2 * (advx + drhox - self.AXB(self.cor * dt * self.AYF(v)) + self.grav *\n self.AXB(dt) * (self.DXB(egf + egb) / x_d[2] + self.DXB(self.e_atmos) /\n x_d[2] * 2.0) * 0.5 - self.DZF(tmp) / dztmp)) / dh\n uf = uf * (1 - self.z_e)\n dh = dh * (1 - self.x_h) + self.x_h\n dh = dh * (1 - self.y_h) + self.y_h\n\n c = self.AXB(km)\n c = c * (1 - self.x_h3d)\n a = -self.dti2 * (Shift((0, 0, -1))(c) + self.umol)\n a = a / (self.dz * self.dzz * dh * dh + self.small_debug)\n a = a * (1 - self.z_e) * (1 - self.z_e1)\n c = -self.dti2 * (c + self.umol) / (dztmp * Shift((0, 0, 1))(dzztmp) * dh * dh + self.small_debug)\n c = c * (1 - self.z_he)\n ee = ee * (1 - self.z_h) + a / (a - 1.0) * self.z_h\n gg = gg * (1 - self.z_h) + (self.dti2 * self.wusurf / (dztmp * dh) - uf) / (a - 1.0) * self.z_h\n\n ggtmp = gg\n eetmp = ee\n for k in range(1, self.kbm2):\n gg = 1.0 / (a + c * (1.0 - Shift((0, 0, 1))(eetmp)) - 1.0)\n ee = a * gg\n gg = (c * Shift((0, 0, 1))(ggtmp) - uf) * gg\n ee = eetmp * (1 - self.zslice[k]) + ee * self.zslice[k]\n gg = ggtmp * (1 - self.zslice[k]) + gg * self.zslice[k]\n ggtmp = gg\n eetmp = ee\n tmp = P.Sqrt()(ub * ub + self.AXB(self.AYF(vb)) * self.AXB(self.AYF(vb)))\n tps = self.AXB(cbc) * P.Slice()(tmp, (0, 0, self.kbm1 - 1), (self.im, self.jm, 1))\n uf = uf * (1 - self.z_e1) + (c * Shift((0, 0, 1))(gg) - uf) / (tps * self.dti2 / (-dztmp * dh) - 1.0 - c *\n (Shift((0, 0, 1))(ee) - 1.0)) * self.z_e1\n uftmp = uf\n for k in range(self.kbm2 - 1, -1, -1):\n uf = ee * Shift((0, 0, -1))(uf) + gg\n uf = uftmp * (1 - self.zslice[k]) + uf * self.zslice[k]\n uftmp = uf\n\n uf = uf * self.dum\n wubot = P.Slice()(-tps * uf, (0, 0, self.kbm2), (self.im, self.jm, 1))\n\n tmph = self.h\n tmp0 = P.Pad(((1, 0), (0, 0), (0, 0)))(tmph)\n tmp0 = P.Slice()(tmp0, (0, 0, 0), P.Shape()(tmph))\n tmph = tmph * (1 - self.x_h13d) + tmp0 * self.x_h13d\n tmpu = u\n tmp0 = P.Pad(((1, 0), (0, 0), (0, 0)))(tmpu)\n tmp0 = 
P.Slice()(tmp0, (0, 0, 0), P.Shape()(tmpu))\n tmpu = tmpu * (1 - self.x_e3d) + tmp0 * self.x_e3d\n tmp0 = P.Pad(((0, 1), (0, 0), (0, 0)))(tmpu)\n tmp0 = P.Slice()(tmp0, (1, 0, 0), P.Shape()(tmpu))\n tmpu = tmpu * (1 - self.x_h13d) + tmp0 * self.x_h13d\n\n tmp = P.Sqrt()(tmph / self.hmax) * self.AYF(self.AYB(tmpu)) + (1.0 - P.Sqrt()(tmph / self.hmax)) * self.AYF(\n self.AYB(u))\n uftmp = uf\n uf = uf * (1 - self.x_e3d) + tmp * self.x_e3d\n tmp0 = P.Pad(((0, 1), (0, 0), (0, 0)))(tmp)\n tmp0 = P.Slice()(tmp0, (1, 0, 0), P.Shape()(tmp))\n uf = uf * (1 - self.x_h3d) + tmp0 * self.x_h3d\n uf = uf * (1 - self.x_h13d) + tmp * self.x_h13d\n uf = uf * (1 - self.y_he3d)\n uf = uf * self.dum\n uf = uf * (1 - self.z_e) + uftmp * self.dum * self.z_e\n\n return uf, a, c, gg, ee, wubot\n\n def internal_v(self, y_d, z_d, dhf, u, v, w, ub, vb, egf, egb, ee, gg, cbc, km, advy, drhoy, dt, dhb):\n \"\"\"\n compute velocity in y direction of internal mode. Refer to the paper \"OpenArray v1.0: a simple operator\n library for the decoupling of ocean modeling and parallel computing. \" Appendix B formula B10.\n Args:\n y_d, z_d: Grid increment in y, z direction, respectively.\n u, v, w: Velocity in x, y, w direction, respectively.\n ub, vb: Velocity boundary in x, y direction, respectively.\n egf: the surface elevation also used in the internal mode for the pressure gradient and derived from el\n egb: the surface elevation boundary also used in the internal mode for the pressure gradient and derived\n from el.\n dt: time step.\n\n Returns:\n tuple[Tensor], variables of velocity of x direction.\n \"\"\"\n dh = self.AYB(dhf)\n tmp = self.AYB(w) * self.AZB(v)\n dztmp = z_d[0] * (1 - self.z_e) + self.small_debug * self.z_e\n dzztmp = self.dzz * (1 - self.z_e) + self.small_debug * self.z_e\n\n vf = (self.AYB(dhb) * vb - self.dti2 * (advy + drhoy + self.AYB(self.cor * dt * self.AXF(u)) + self.grav *\n self.AYB(dt) * (self.DYB(egf + egb) / y_d[1] + self.DYB(self.e_atmos) /\n y_d[1] * 2.0) / 2.0 - self.DZF(tmp) / dztmp)) / dh\n vf = vf * (1 - self.z_e) * (1 - self.x_e3d)\n dh = dh * (1 - self.x_h) + self.x_h\n dh = dh * (1 - self.y_h) + self.y_h\n\n c = self.AYB(km)\n c = c * (1 - self.y_h3d)\n a = -self.dti2 * (Shift((0, 0, -1))(c) + self.umol)\n a = a / (dztmp * dzztmp * dh * dh)\n a = a * (1 - self.z_e) * (1 - self.z_e1)\n c = -self.dti2 * (c + self.umol) / (dztmp * Shift((0, 0, 1))(dzztmp) * dh * dh + self.small_debug)\n c = c * (1 - self.z_he)\n ee = ee * (1 - self.z_h) + a / (a - 1.0) * self.z_h\n gg = gg * (1 - self.z_h) + (self.dti2 * self.wvsurf / (dztmp * dh) - vf) / (a - 1.0) * self.z_h\n\n ggtmp = gg\n eetmp = ee\n for k in range(1, self.kbm2):\n gg = 1.0 / (a + c * (1.0 - Shift((0, 0, 1))(eetmp)) - 1.0)\n ee = a * gg\n gg = (c * Shift((0, 0, 1))(ggtmp) - vf) * gg # * gg\n ee = eetmp * (1 - self.zslice[k]) + ee * self.zslice[k]\n gg = ggtmp * (1 - self.zslice[k]) + gg * self.zslice[k]\n ggtmp = gg\n eetmp = ee\n tmp = P.Sqrt()(self.AYB(self.AXF(ub)) * self.AYB(self.AXF(ub)) + vb * vb)\n tps = self.AYB(cbc) * P.Slice()(tmp, (0, 0, self.kbm1 - 1), (self.im, self.jm, 1))\n tps = tps * (1 - self.x_e)\n vf = vf * (1 - self.z_e1) + (c * Shift((0, 0, 1))(gg) - vf) / (tps * self.dti2 / (-dztmp * dh) - 1.0 - c *\n (Shift((0, 0, 1))(ee) - 1.0)) * self.z_e1\n vftmp = vf\n for k in range(self.kbm2 - 1, -1, -1):\n vf = ee * Shift((0, 0, -1))(vf) + gg\n vf = vftmp * (1 - self.zslice[k]) + vf * self.zslice[k]\n vftmp = vf\n\n vf = vf * self.dvm\n wvbot = P.Slice()(-tps * vf, (0, 0, self.kbm2), (self.im, self.jm, 1))\n\n tmph 
= self.h\n        tmp0 = P.Pad(((0, 0), (1, 0), (0, 0)))(tmph)\n        tmp0 = P.Slice()(tmp0, (0, 0, 0), P.Shape()(tmph))\n        tmph = tmph * (1 - self.y_h13d) + tmp0 * self.y_h13d\n        tmpv = v\n        tmp0 = P.Pad(((0, 0), (1, 0), (0, 0)))(tmpv)\n        tmp0 = P.Slice()(tmp0, (0, 0, 0), P.Shape()(tmpv))\n        tmpv = tmpv * (1 - self.y_e3d) + tmp0 * self.y_e3d\n        tmp0 = P.Pad(((0, 0), (0, 1), (0, 0)))(tmpv)\n        tmp0 = P.Slice()(tmp0, (0, 0, 0), P.Shape()(tmpv))\n        tmpv = tmpv * (1 - self.y_h13d) + tmp0 * self.y_h13d\n\n        tmp = P.Sqrt()(tmph / self.hmax) * self.AXF(self.AXB(tmpv)) + (1.0 - P.Sqrt()(tmph / self.hmax)) * self.AXF(\n            self.AXB(v))\n        vftmp = vf\n        vf = vf * (1 - self.y_e3d) + tmp * self.y_e3d\n        tmp0 = P.Pad(((0, 0), (0, 1), (0, 0)))(tmp)\n        tmp0 = P.Slice()(tmp0, (0, 1, 0), P.Shape()(tmp))\n        vf = vf * (1 - self.y_h3d) + tmp0 * self.y_h3d\n        vf = vf * (1 - self.y_h13d) + tmp * self.y_h13d\n        vf = vf * (1 - self.x_he3d)\n        vf = vf * self.dvm\n        vf = vf * (1 - self.z_e) + vftmp * self.dvm * self.z_e\n\n        return vf, a, c, gg, ee, wvbot\n\n    def adjust_ufvf(self, u, v, uf, vf, ub, vb):\n        \"\"\"\n        adjust velocity in x, y direction\n        Args:\n            u, v: Velocity in x, y direction, respectively.\n            ub, vb: Velocity boundary in x, y direction, respectively.\n            uf, vf: New velocity in x, y direction, respectively.\n\n        Returns:\n            tuple[Tensor], velocity and velocity boundary in x, y direction\n        \"\"\"\n        u = u + 0.5 * self.smoth * (uf + ub - 2.0 * u - self.reduce_sum((uf + ub - 2.0 * u) * self.dz, 2))\n        u = u * (1 - self.z_e)\n\n        v = v + 0.5 * self.smoth * (vf + vb - 2.0 * v - self.reduce_sum((vf + vb - 2.0 * v) * self.dz, 2))\n        v = v * (1 - self.z_e)\n\n        ub = u\n        u = uf\n        vb = v\n        v = vf\n        return u, v, ub, vb\n\n    def internal_update(self, egf, etb, utf, vtf, etf, et):\n        \"\"\"\n        update variables of internal mode\n        Args:\n            egf: the surface elevation also used in the internal mode for the pressure gradient and derived from el.\n            etb: the surface elevation boundary as used in the internal mode and derived from EL (m).\n            utf, vtf: ua, va time averaged over the interval, DT = dti(ms-1).\n            et: the surface elevation as used in the internal mode and derived from EL (m).\n\n        Returns:\n            tuple[Tensor], variables of internal mode\n        \"\"\"\n        egb = egf\n        etb = et\n        et = etf\n        dt = self.h + et\n        dhb = etb + self.h\n        utb = utf\n        vtb = vtf\n        vfluxb = self.vfluxf\n        return egb, etb, dt, dhb, utb, vtb, vfluxb, et\n\n    def internal_t_(self, f, fb, wfsurf, fsurf, frad, fclim, fbe, fbw, fbn, fbs, x_d, y_d, z_d, dt, u, aam, h, dum, v,\n                    dvm, w, dhf, etf, a, kh, dzz, c, dzz1, ee, gg, dx, dz, dy, fsm, dhb):\n        \"\"\"\n        Compute Potential temperature and Salinity of internal mode. Refer to the paper \"OpenArray v1.0: a simple\n        operator library for the decoupling of ocean modeling and parallel computing. 
\" Appendix B formula B11 and B12.\n Args:\n f: Potential temperature or Salinity.\n fb: Potential temperature or Salinity boundary.\n wfsurf: (<w θ (0)>, <ws(0)>) temperature and salinity fluxes at the surface (ms-1 oC, ms-1 psu)\n fclim: Climatology of temperature or salinity.\n fbe, fbw, fbn, fbs: Potential temperature or Salinity boundary in east, west, north, south direction.\n x_d, y_d, z_d: Grid increment in x, y, z direction, respectively.\n kh: Vertical mixing coefficient of heat and salinity.\n u, v, w: Velocity in x, y, w direction, respectively.\n dt: time step.\n aam: horizontal kinematic viscosity.\n dum, dvm: Mask for the u, v component of velocity; = 0 over land; =1 over water\n fsm: Mask for scalar variables; = 0 over land; = 1 over water.\n\n Returns:\n tuple[Tensor], variables of Potential temperature and Salinity.\n \"\"\"\n tmpdz = z_d[0] * (1 - self.z_e) + self.small_debug * self.z_e\n tmpdzz = dzz * (1 - self.z_e) + self.small_debug * self.z_e\n\n tmp = self.AZB(f) * w\n tmp1 = f * w\n tmp = tmp * (1 - self.z_h) + tmp1 * self.z_h\n tmp = tmp * (1 - self.z_e)\n ff = (dhb * fb - self.dti2 * (self.DXF(self.AXB(dt) * self.AXB(f) * u - self.AXB(aam) * self.AYB(h) *\n self.tprni * self.DXB(fb) / x_d[2] * dum) / x_d[3] +\n self.DYF(self.AYB(dt) * self.AYB(f) * v - self.AYB(aam) * self.AYB(h) *\n self.tprni * self.DYB(fb) / y_d[1] * dvm) / y_d[3] - self.DZF(tmp) /\n tmpdz)) / dhf\n ff_shape = P.Shape()(ff)\n tmp_ze = P.BroadcastTo(ff_shape)(self.z_e)\n ff = ff * (1 - tmp_ze) * (1 - self.x_he) * (1 - self.y_he)\n\n rad = self.mat_zeros\n dh = h + etf\n shape_kh = P.Shape()(kh)\n tmp_kh = -1 * self.dti2 * (P.Slice()(kh, (0, 0, 2), (shape_kh[0], shape_kh[1], shape_kh[2] - 2)) + self.umol)\n tmp_kh = P.Pad(((0, 0), (0, 0), (0, 2)))(tmp_kh)\n a = a * (1 - self.z_he2) + tmp_kh * self.z_he2\n a = a / (tmpdz * tmpdzz * dhf * dhf)\n a = a * self.z_he2\n\n shape_c = P.Shape()(c)\n tmp_c_dzz1 = P.BroadcastTo(shape_c)(dzz1)\n tmp_c_dzz1 = P.Slice()(tmp_c_dzz1, (0, 0, 0), (shape_c[0], shape_c[1], shape_c[2] - 2))\n tmp_c_dzz1 = P.Pad(((0, 0), (0, 0), (1, 1)))(tmp_c_dzz1)\n\n c = c * self.z_he + tmp_c_dzz1 * (1 - self.z_he)\n c = -1 * self.dti2 * (kh + self.umol) / (tmpdz * c * dhf * dhf)\n # DIV ===> NAN\n c = P.Slice()(c, (0, 0, 1), (shape_c[0], shape_c[1], shape_c[2] - 2))\n c = P.Pad(((0, 0), (0, 0), (1, 1)))(c)\n\n ee = (a / (a - 1)) * self.z_h + ee * (1 - self.z_h)\n gg = (((self.dti2 * wfsurf) / (tmpdz * dh) - ff) / (a - 1)) * self.z_h + gg * (1 - self.z_h)\n\n for k in range(1, self.kbm1):\n gg = (1e0 / (a + c * (1 - Shift((0, 0, 1))(ee)) - 1e0)) * self.zslice[k] + gg * (1 - self.zslice[k])\n ee = ee * (1 - self.zslice[k]) + a * gg * self.zslice[k]\n gg = ((c * Shift((0, 0, 1))(gg) - ff + self.dti2 * (rad - Shift((0, 0, -1))(rad)) /\n (dh * tmpdz)) * gg) * self.zslice[k] + gg * (1 - self.zslice[k])\n\n fftmp = ((c * Shift((0, 0, 1))(gg) - ff + self.dti2 * (rad - Shift((0, 0, -1))(rad)) / (dh * tmpdz)) /\n (c * (1 - Shift((0, 0, 1))(ee)) - 1e0))\n zslicetmp = P.BroadcastTo(P.Shape()(fftmp))(self.zslice[self.kbm1 - 1])\n ff = ff * (1 - zslicetmp) + fftmp * zslicetmp\n\n for k in range(self.kbm2 - 1, -1, -1):\n ff = ff * (1 - self.zslice[k]) + (ee * Shift((0, 0, -1))(ff) + gg) * self.zslice[k]\n\n # bcond4\n tmptb = self.mat_zeros\n tmptb = fbe * self.x_e + tmptb * (1 - self.x_e)\n tmptb = fbw * self.x_h + tmptb * (1 - self.x_h)\n tmptb = fbn * self.y_e + tmptb * (1 - self.y_e)\n tmptb = fbs * self.y_h + tmptb * (1 - self.y_h)\n tmpu = u\n tmpv = v\n tmpu = Shift((1, 0, 0))(tmpu) * 
self.x_e1 + tmpu * (1 - self.x_e1)\n tmpu = Shift((1, 0, 0))(tmpu) * self.x_h1 + tmpu * (1 - self.x_h1)\n tmpv = Shift((0, 1, 0))(tmpv) * self.y_e1 + tmpv * (1 - self.y_e1)\n tmpv = Shift((0, 1, 0))(tmpv) * self.y_h1 + tmpv * (1 - self.y_h1)\n\n # ==========================EAST\n tmp1 = f - self.dti * ((0.5 * (u - P.Abs()(u))) * (tmptb - f) / self.AXB(dx) + (0.5 * (u + P.Abs()(u))) *\n self.DXB(f) / x_d[2])\n tmp1 = tmp1 * (1 - self.x_h3d)\n ff = ff * (1 - self.x_e3d * (1 - self.z_e)) + tmp1 * self.x_e3d * (1 - self.z_e)\n tmp_test = P.Pad(((0, 1), (0, 0), (0, 0)))(tmp1)\n tmp_test = P.Slice()(tmp_test, (1, 0, 0), P.Shape()(tmp1))\n tmp1 = (1 - self.x_e13d) * tmp1 + tmp_test * self.x_e13d\n tmp2 = tmp1 + 0.5 * self.dti * (tmpu + P.Abs()(tmpu)) / (tmpu + self.small_debug) * self.AZF(w) / dt * \\\n self.DZF(self.AZB(f)) / z_d[0] * dz / self.AZB(dzz)\n tmp2_del_nan = P.Slice()(tmp2, (self.imm1 - 1, 0, 1), (1, self.jm, self.kb - 3))\n tmp2 = P.Pad(((self.im - 1, 0), (0, 0), (1, 2)))(tmp2_del_nan)\n ff = ff * (1 - self.x_e3d * self.z_he3) + tmp2 * self.x_e3d * self.z_he3\n\n # ==========================WEST\n tmp1 = f - self.dti * ((0.5 * (tmpu + P.Abs()(tmpu))) * (f - tmptb) / self.AXF(dx) +\n (0.5 * (tmpu + P.Abs()(tmpu))) * self.DXF(f) / x_d[2] * dx / self.AXF(dx))\n ff = ff * (1 - self.x_h3d * (1 - self.z_e)) + tmp1 * self.x_h3d * (1 - self.z_e)\n tmp_test = P.Pad(((1, 0), (0, 0), (0, 0)))(tmp1)\n tmp_test = P.Slice()(tmp_test, (0, 0, 0), P.Shape()(tmp1))\n tmp1 = (1 - self.x_h13d) * tmp1 + tmp_test * self.x_h13d\n tmp2 = tmp1 + 0.5 * self.dti * (u + P.Abs()(u)) / u * self.AZF(w) / dt * self.DZF(self.AZB(f)) / self.AZB(dzz)\n tmp2 = P.Pad(((0, self.im - 1), (0, 0), (1, 2)))(tmp2[1:2, :, 1:self.kbm2])\n ff = ff * (1 - self.x_h3d * self.z_he3) + tmp2 * self.x_h3d * self.z_he3\n\n # ==========================NORTH\n tmp1 = f - self.dti * ((0.5 * (v - P.Abs()(v))) * (tmptb - f) / self.AYB(dy) + (0.5 * (v + P.Abs()(v))) *\n self.DYB(f) / y_d[1])\n ff = ff * (1 - self.y_e3d * (1 - self.z_e)) + tmp1 * self.y_e3d * (1 - self.z_e)\n tmp1 = Shift((0, -1, 0))(tmp1) * self.y_e13d + tmp1 * (1 - self.y_e13d)\n tmp2 = tmp1 + 0.5 * self.dti * (tmpv + P.Abs()(tmpv)) / tmpv * self.AZF(w) / dt * self.DZF(self.AZB(f)) / z_d[\n 0] * dz / self.AZB(dzz)\n tmp2 = P.Pad(((0, 0), (self.jm - 1, 0), (1, 2)))(tmp2[:, self.jmm1 - 1:self.jmm1, 1:self.kbm2])\n ff = ff * (1 - self.y_e3d * self.z_he3) + tmp2 * self.y_e3d * self.z_he3\n\n # ==========================SOUTH\n tmp1 = f - self.dti * ((0.5 * (tmpv + P.Abs()(tmpv))) * (f - tmptb) / self.AYF(dy) +\n (0.5 * (tmpv + P.Abs()(tmpv))) * self.DYF(f) / y_d[1])\n ff = ff * (1 - self.y_h3d * (1 - self.z_e)) + tmp1 * self.y_h3d * (1 - self.z_e)\n tmp1 = Shift((0, 1, 0))(tmp1) * self.y_h13d + tmp1 * (1 - self.y_h13d)\n tmp2 = tmp1 + 0.5 * self.dti * (v + P.Abs()(v)) / v * self.AZF(w) / dt * self.DZF(self.AZB(f)) / z_d[0] * \\\n dz / self.AZB(dzz)\n tmp2 = P.Pad(((0, 0), (0, self.jm - 1), (1, 2)))(tmp2[:, 1:2, 1:self.kbm2])\n ff = ff * (1 - self.y_h3d * self.z_he3) + tmp2 * self.y_h3d * self.z_he3\n\n tmpff1 = ff[:, :1, :1]\n tmpff2 = ff[:, :1, self.kbm2:]\n tmpzero = P.Fill()(mstype.float32, (self.im, 1, self.kb - 3), 0)\n tmpff = P.Concat(2)((tmpff1, tmpzero, tmpff2))\n ff = P.Concat(1)((tmpff, ff[:, 1:, :]))\n tmpff1 = ff[:, -1:, :1]\n tmpff2 = ff[:, -1:, self.kbm2:]\n tmpff = P.Concat(2)((tmpff1, tmpzero, tmpff2))\n ff = P.Concat(1)((ff[:, :-1, :], tmpff))\n ff = ff * fsm\n\n fb = f + 0.5e0 * self.smoth * (ff + fb - 2e0 * f)\n f = ff\n\n return a, c, ee, gg, fb, f\n\n def 
construct(self, etf, ua, uab, va, vab, el, elb, d, u, v, w, kq, km, kh, q2, q2l, tb, t, sb, s,\n rho, wubot, wvbot, ub, vb, egb, etb, dt, dhb, utb, vtb, vfluxb, et):\n \"\"\"construct\"\"\"\n x_d, y_d, z_d = self.x_d, self.y_d, self.z_d\n q2b, q2lb = self.q2b, self.q2lb\n dx, dy = self.dx, self.dy\n\n # surface forcing\n w = w * (1 - self.z_h) + self.z_h * self.vfluxf\n # lateral_viscosity\n advx, advy, drhox, drhoy, aam = self.lateral_viscosity(dx, dy, u, v, dt, self.aam, ub, vb, x_d, y_d, z_d, rho,\n self.rmean)\n # mode_interaction\n adx2d, ady2d, drx2d, dry2d, aam2d, advua, advva, egf, utf, vtf = self.mode_interaction(advx, advy, drhox, drhoy,\n aam, x_d, y_d, d, uab,\n vab, ua, va, el)\n\n # ===========external===========\n vamax = 0\n elf = 0\n for iext in range(1, 31):\n # external_el\n elf = self.external_el(x_d, y_d, d, ua, va, elb)\n # external_ua\n advua, uaf = self.external_ua(iext, x_d, y_d, elf, d, ua, va, uab, vab, el, elb, advua, aam2d, adx2d, drx2d,\n wubot)\n # external_va\n advva, vaf = self.external_va(iext, x_d, y_d, elf, d, ua, va, uab, vab, el, elb, advva, aam2d, ady2d, dry2d,\n wvbot)\n # external_update\n etf, uab, ua, vab, va, elb, el, d, egf, utf, vtf, vamax = self.external_update(iext, etf, ua, uab, va, vab,\n el, elb, elf, uaf, vaf, egf,\n utf, vtf)\n\n # ===========internal===========\n if self.global_step != 0:\n # adjust_uv\n u, v = self.adjust_uv(u, v, utb, vtb, utf, vtf, dt)\n # internal_w\n w = self.internal_w(x_d, y_d, dt, u, v, etf, etb, vfluxb)\n # internal_q\n dhf, a, c, gg, ee, kq, km, kh, q2b_, q2, q2lb_, q2l = self.internal_q(x_d, y_d, z_d, etf, aam, q2b, q2lb,\n q2, q2l, kq, km, kh, u, v, w, dt, dhb,\n rho, wubot, wvbot, t, s)\n q2b = P.Assign()(self.q2b, q2b_)\n q2lb = P.Assign()(self.q2lb, q2lb_)\n # internal_t_t\n a, c, ee, gg, tb, t = self.internal_t_(t, tb, self.wtsurf, self.tsurf, self.swrad, self.tclim, self.tbe,\n self.tbw, self.tbn, self.tbs, x_d, y_d, z_d, dt, u, aam, self.h,\n self.dum, v, self.dvm, w, dhf, etf, a, kh, self.dzz, c, self.dzz1,\n ee, gg, dx, self.dz, dy, self.fsm, dhb)\n # internal_t_s\n a, c, ee, gg, sb, s = self.internal_t_(s, sb, self.wssurf, self.ssurf, self.swrad0, self.sclim, self.sbe,\n self.sbw, self.sbn, self.sbs, x_d, y_d, z_d, dt, u, aam, self.h,\n self.dum, v, self.dvm, w, dhf, etf, a, kh, self.dzz, c, self.dzz1,\n ee, gg, dx, self.dz, dy, self.fsm, dhb)\n # dense\n rho = self.dens(s, t, self.zz, self.h, self.fsm)\n # internal_u\n uf, a, c, gg, ee, wubot = self.internal_u(x_d, z_d, dhf, u, v, w, ub, vb, egf, egb, ee, gg, self.cbc, km,\n advx, drhox, dt, dhb)\n # internal_v\n vf, a, c, gg, ee, wvbot = self.internal_v(y_d, z_d, dhf, u, v, w, ub, vb, egf, egb, ee, gg, self.cbc, km,\n advy, drhoy, dt, dhb)\n # adjust_ufvf\n u, v, ub, vb = self.adjust_ufvf(u, v, uf, vf, ub, vb)\n # internal_update\n egb, etb, dt, dhb, utb, vtb, vfluxb, et = self.internal_update(egf, etb, utf, vtf, etf, et)\n steps = P.AssignAdd()(self.global_step, 1)\n\n return elf, etf, ua, uab, va, vab, el, elb, d, u, v, w, kq, km, kh, q2, q2l, tb, t, sb, s, rho, wubot, wvbot, \\\n ub, vb, egb, etb, dt, dhb, utb, vtb, vfluxb, et, steps, vamax, q2b, q2lb\n",
"\"\"\"\nMIT License\n\nCopyright (c) 2019 Xingyi Zhou\nAll rights reserved.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport cv2\nimport pycocotools.coco as coco\n\nclass CenterfaceDataset():\n \"\"\"\n Centerface dataset definition.\n \"\"\"\n def __init__(self, config, split='train'):\n self.split = split\n self.config = config\n self.max_objs = config.max_objs\n self.img_dir = self.config.img_dir\n self.annot_path = self.config.annot_path\n\n print('==> getting centerface key point {} data.'.format(split))\n self.coco = coco.COCO(self.annot_path)\n image_ids = self.coco.getImgIds()\n\n if split == 'train':\n self.images = []\n for img_id in image_ids:\n idxs = self.coco.getAnnIds(imgIds=[img_id])\n if idxs:\n self.images.append(img_id)\n else:\n self.images = image_ids\n self.num_samples = len(self.images)\n print('Loaded {} {} samples'.format(split, self.num_samples)) # Loaded train 12671 samples\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n (image, target) (tuple): target is index of the target class.\n \"\"\"\n img_id = self.images[index]\n file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']\n img_path = os.path.join(self.img_dir, file_name)\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\n anns = self.coco.loadAnns(ids=ann_ids)\n num_objs = len(anns)\n if num_objs > self.max_objs:\n num_objs = self.max_objs\n anns = np.random.choice(anns, num_objs)\n # dataType ERROR —— to_list\n target = []\n for ann in anns:\n tmp = []\n tmp.extend(ann['bbox'])\n tmp.extend(ann['keypoints'])\n target.append(tmp)\n\n img = cv2.imread(img_path)\n return img, target\n\n def __len__(self):\n return self.num_samples\n",
"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Metric for accuracy evaluation.\"\"\"\nfrom mindspore import nn\nimport numpy as np\n\nclass SRCNNpsnr(nn.Metric):\n def __init__(self):\n super(SRCNNpsnr).__init__()\n self.clear()\n\n def clear(self):\n self.val = 0\n self.sum = 0\n self.count = 0\n\n def update(self, *inputs):\n if len(inputs) != 2:\n raise ValueError('SRCNNpsnr need 2 inputs (y_pred, y), but got {}'.format(len(inputs)))\n\n y_pred = self._convert_data(inputs[0])\n y = self._convert_data(inputs[1])\n\n n = len(inputs)\n val = 10. * np.log10(1. / np.mean((y_pred - y) ** 2))\n\n self.val = val\n self.sum += val * n\n self.count += n\n\n def eval(self):\n if self.count == 0:\n raise RuntimeError('PSNR can not be calculated, because the number of samples is 0.')\n return self.sum / self.count\n"
] | [
[
"numpy.random.poisson",
"numpy.random.normal",
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.logical_not",
"numpy.hstack",
"pandas.read_csv",
"numpy.log2",
"numpy.reshape",
"numpy.arange",
"numpy.cumsum",
"numpy.argwhere",
"numpy.concatenate",
"numpy.all",
"numpy.greater_equal",
"numpy.iinfo",
"numpy.mod",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.split",
"numpy.log",
"numpy.maximum",
"numpy.minimum",
"numpy.nonzero",
"numpy.clip",
"numpy.squeeze",
"numpy.argmax",
"numpy.exp",
"numpy.array",
"numpy.zeros"
],
[
"numpy.fromfile"
],
[
"numpy.random.random",
"numpy.sqrt",
"numpy.random.choice",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.mean"
],
[
"numpy.zeros",
"numpy.ones"
],
[
"numpy.array"
],
[
"numpy.load",
"numpy.ones"
],
[
"numpy.save"
],
[
"numpy.random.shuffle",
"numpy.array",
"numpy.save",
"numpy.random.seed"
],
[
"numpy.load",
"numpy.fromfile"
],
[
"numpy.array"
],
[
"numpy.dot",
"numpy.arange",
"numpy.around",
"numpy.concatenate",
"numpy.random.rand",
"numpy.array",
"numpy.meshgrid",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.abs",
"numpy.min",
"numpy.nan_to_num",
"numpy.round",
"numpy.max"
],
[
"numpy.load"
],
[
"numpy.expand_dims",
"numpy.maximum",
"numpy.minimum",
"numpy.random.choice",
"numpy.concatenate",
"numpy.random.rand",
"numpy.array"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.arctan2",
"numpy.random.randn",
"numpy.exp",
"numpy.random.randint",
"numpy.ones_like",
"numpy.arange",
"numpy.stack",
"numpy.sin",
"numpy.zeros",
"numpy.random.rand",
"numpy.array",
"numpy.abs",
"numpy.array_equal",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.cos",
"numpy.ones",
"numpy.random.permutation",
"numpy.broadcast_to",
"numpy.vstack"
],
[
"numpy.array"
],
[
"numpy.reshape",
"scipy.stats.truncnorm",
"numpy.random.randn"
],
[
"numpy.fromfile",
"numpy.expand_dims",
"numpy.reshape",
"numpy.frombuffer",
"numpy.argmax"
],
[
"pandas.read_hdf",
"numpy.arange",
"numpy.random.shuffle",
"numpy.ones",
"numpy.ceil",
"numpy.array",
"numpy.zeros"
],
[
"numpy.ones"
],
[
"numpy.fromfile"
],
[
"numpy.random.randn"
],
[
"numpy.fromfile",
"numpy.histogram",
"numpy.argmax",
"numpy.zeros"
],
[
"numpy.asarray",
"numpy.frombuffer",
"numpy.empty"
],
[
"numpy.dot",
"numpy.logical_and",
"numpy.linspace",
"numpy.nonzero",
"numpy.vstack",
"numpy.logical_and.reduce",
"numpy.linalg.norm",
"numpy.hsplit",
"numpy.ones",
"numpy.round",
"numpy.all",
"numpy.delete",
"numpy.stack",
"scipy.ndimage.filters.gaussian_filter",
"numpy.any",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.square",
"numpy.array",
"numpy.exp"
],
[
"numpy.ones"
],
[
"numpy.set_printoptions",
"numpy.int32",
"numpy.zeros",
"numpy.cumsum"
],
[
"numpy.cumsum",
"numpy.array",
"numpy.pad"
],
[
"numpy.random.uniform"
],
[
"numpy.mean"
],
[
"numpy.zeros",
"numpy.ones"
],
[
"numpy.fromfile"
],
[
"numpy.array"
],
[
"numpy.zeros"
],
[
"numpy.array"
],
[
"numpy.random.permutation",
"numpy.random.seed"
],
[
"numpy.zeros",
"numpy.tile",
"numpy.ones"
],
[
"numpy.random.choice"
],
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
iust-projects/Data-Mining-IUST | [
"88f7a5541278f1fe907ca9b70c990a27f60900b2"
] | [
"Project/EnhancedDeepPath/scripts/sl_policy.py"
] | [
"from __future__ import division\nfrom __future__ import print_function\nimport tensorflow as tf \nimport numpy as np\nfrom itertools import count\nimport sys\n\nfrom networks import policy_nn\nfrom utils import *\nfrom env import Env\nfrom BFS.KB import KB\nfrom BFS.BFS import BFS\nimport time\n\nrelation = sys.argv[1]\n# episodes = int(sys.argv[2])\ngraphpath = dataPath + 'tasks/' + relation + '/' + 'graph.txt'\nrelationPath = dataPath + 'tasks/' + relation + '/' + 'train_pos'\n\nclass SupervisedPolicy(object):\n\t\"\"\"docstring for SupervisedPolicy\"\"\"\n\tdef __init__(self, learning_rate = 0.001):\n\t\tself.initializer = tf.contrib.layers.xavier_initializer()\n\t\twith tf.variable_scope('supervised_policy'):\n\t\t\tself.state = tf.placeholder(tf.float32, [None, state_dim], name = 'state')\n\t\t\tself.action = tf.placeholder(tf.int32, [None], name = 'action')\n\t\t\tself.action_prob = policy_nn(self.state, state_dim, action_space, self.initializer)\n\n\t\t\taction_mask = tf.cast(tf.one_hot(self.action, depth = action_space), tf.bool)\n\t\t\tself.picked_action_prob = tf.boolean_mask(self.action_prob, action_mask)\n\n\t\t\tself.loss = tf.reduce_sum(-tf.log(self.picked_action_prob)) + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope = 'supervised_policy'))\n\t\t\tself.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)\n\t\t\tself.train_op = self.optimizer.minimize(self.loss)\n\n\tdef predict(self, state, sess = None):\n\t\tsess = sess or tf.get_default_session()\n\t\treturn sess.run(self.action_prob, {self.state: state})\n\n\tdef update(self, state, action, sess = None):\n\t\tsess = sess or tf.get_default_session()\n\t\t_, loss = sess.run([self.train_op, self.loss], {self.state: state, self.action: action})\n\t\treturn loss\n\ndef train():\n\ttf.reset_default_graph()\n\tpolicy_nn = SupervisedPolicy()\n\n\tf = open(relationPath)\n\ttrain_data = f.readlines()\n\tf.close()\n\n\tnum_samples = len(train_data)\n\n\tsaver = tf.train.Saver()\n\twith tf.Session() as sess:\n\t\tsess.run(tf.global_variables_initializer())\n\t\tif num_samples > 500:\n\t\t\tnum_samples = 500\n\t\telse:\n\t\t\tnum_episodes = num_samples\n\n\t\tfor episode in range(num_samples):\n\t\t\tprint(\"Episode %d\" % episode)\n\t\t\tprint('Training Sample:', train_data[episode%num_samples][:-1])\n\n\t\t\tenv = Env(dataPath, train_data[episode%num_samples])\n\t\t\tsample = train_data[episode%num_samples].split()\n\n\t\t\ttry:\n\t\t\t\tgood_episodes = teacher(sample[0], sample[1], 5, env, graphpath)\n\t\t\texcept Exception as e:\n\t\t\t\tprint('Cannot find a path')\n\t\t\t\tcontinue\n\n\t\t\tfor item in good_episodes:\n\t\t\t\tstate_batch = []\n\t\t\t\taction_batch = []\n\t\t\t\tfor t, transition in enumerate(item):\n\t\t\t\t\tstate_batch.append(transition.state)\n\t\t\t\t\taction_batch.append(transition.action)\n\t\t\t\tstate_batch = np.squeeze(state_batch)\n\t\t\t\tstate_batch = np.reshape(state_batch, [-1, state_dim])\n\t\t\t\tpolicy_nn.update(state_batch, action_batch)\n\n\t\tsaver.save(sess, 'models/policy_supervised_' + relation)\n\t\tprint('Model saved')\n\n\ndef test(test_episodes):\n\ttf.reset_default_graph()\n\tpolicy_nn = SupervisedPolicy()\n\n\tf = open(relationPath)\n\ttest_data = f.readlines()\n\tf.close()\n\n\ttest_num = len(test_data)\n\n\ttest_data = test_data[-test_episodes:]\n\tprint(len(test_data))\n\t\n\tsuccess = 0\n\n\tsaver = tf.train.Saver()\n\twith tf.Session() as sess:\n\t\tsaver.restore(sess, 'models/policy_supervised_'+ relation)\n\t\tprint('Model reloaded')\n\t\tfor 
episode in range(len(test_data)):\n\t\t\tprint('Test sample %d: %s' % (episode, test_data[episode][:-1]))\n\t\t\tenv = Env(dataPath, test_data[episode])\n\t\t\tsample = test_data[episode].split()\n\t\t\tstate_idx = [env.entity2id_[sample[0]], env.entity2id_[sample[1]], 0]\n\t\t\tfor t in count():\n\t\t\t\tstate_vec = env.idx_state(state_idx)\n\t\t\t\taction_probs = policy_nn.predict(state_vec)\n\t\t\t\taction_chosen = np.random.choice(np.arange(action_space), p = np.squeeze(action_probs))\n\t\t\t\treward, new_state, done = env.interact(state_idx, action_chosen)\n\t\t\t\tif done or t == max_steps_test:\n\t\t\t\t\tif done:\n\t\t\t\t\t\tprint('Success')\n\t\t\t\t\t\tsuccess += 1\n\t\t\t\t\tprint('Episode ends\\n')\n\t\t\t\t\tbreak\n\t\t\t\tstate_idx = new_state\n\n\tprint('Success percentage:', success/test_episodes)\n\nif __name__ == \"__main__\":\n\ttrain()\n\t# test(50)\n\n"
] | [
[
"tensorflow.boolean_mask",
"tensorflow.get_default_session",
"numpy.reshape",
"tensorflow.get_collection",
"numpy.squeeze",
"numpy.arange",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"tensorflow.variable_scope",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.Saver",
"tensorflow.one_hot",
"tensorflow.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
AndreasKaratzas/stonne | [
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9",
"2915fcc46cc94196303d81abbd1d79a56d6dd4a9"
] | [
"pytorch-frontend/caffe2/python/operator_test/glu_op_test.py",
"pytorch-frontend/caffe2/python/workspace.py",
"pytorch-frontend/caffe2/python/operator_test/index_hash_ops_test.py",
"pytorch-frontend/caffe2/python/operator_test/image_input_op_test.py",
"pytorch-frontend/test/test_cpp_extensions_aot.py",
"pytorch-frontend/caffe2/python/operator_test/sparse_to_dense_mask_op_test.py",
"pytorch-frontend/caffe2/python/operator_test/reduce_ops_test.py",
"pytorch-frontend/caffe2/contrib/fakelowp/test/test_int8_ops_nnpi.py",
"pytorch-frontend/torch/testing/_internal/jit_metaprogramming_utils.py",
"pytorch-frontend/test/distributed/test_distributed.py",
"pytorch-frontend/caffe2/quantization/server/batch_matmul_dnnlowp_op_test.py",
"pytorch-frontend/test/test_dispatch.py",
"pytorch-frontend/caffe2/python/operator_test/sparse_ops_test.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.python import core\nimport caffe2.python.hypothesis_test_util as hu\nimport caffe2.python.serialized_test.serialized_test_util as serial\nfrom hypothesis import assume, given, settings, HealthCheck\nimport hypothesis.strategies as st\nimport numpy as np\n\nimport unittest\n\n\[email protected]\ndef _glu_old_input(draw):\n dims = draw(st.lists(st.integers(min_value=1, max_value=5), min_size=1, max_size=3))\n axis = draw(st.integers(min_value=0, max_value=len(dims)))\n # The axis dimension must be divisible by two\n axis_dim = 2 * draw(st.integers(min_value=1, max_value=2))\n dims.insert(axis, axis_dim)\n X = draw(hu.arrays(dims, np.float32, None))\n return (X, axis)\n\n\nclass TestGlu(serial.SerializedTestCase):\n @given(\n X_axis=_glu_old_input(),\n **hu.gcs\n )\n @settings(deadline=10000)\n def test_glu_old(self, X_axis, gc, dc):\n X, axis = X_axis\n\n def glu_ref(X):\n x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)\n Y = x1 * (1. / (1. + np.exp(-x2)))\n return [Y]\n\n op = core.CreateOperator(\"Glu\", [\"X\"], [\"Y\"], dim=axis)\n self.assertReferenceChecks(gc, op, [X], glu_ref)\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"## @package workspace\n# Module caffe2.python.workspace\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport collections\nimport contextlib\nfrom google.protobuf.message import Message\nfrom multiprocessing import Process\nimport os\nfrom collections import defaultdict\nimport logging\nimport numpy as np\nfrom past.builtins import basestring\nimport shutil\nimport socket\nimport tempfile\n\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python import scope, utils\nfrom caffe2.python.lazy import TriggerLazyImport\n\nimport caffe2.python._import_c_extension as C\n\nlogger = logging.getLogger(__name__)\n\nBlobs = C.blobs\nResetBlob = C.reset_blob\nCreateBlob = C.create_blob\nCurrentWorkspace = C.current_workspace\nDeserializeBlob = C.deserialize_blob\nGlobalInit = C.global_init\nHasBlob = C.has_blob\nRegisteredOperators = C.registered_operators\nSerializeBlob = C.serialize_blob\nSwitchWorkspace = C.switch_workspace\nRootFolder = C.root_folder\nWorkspaces = C.workspaces\nBenchmarkNet = C.benchmark_net\nBenchmarkNetOnce = C.benchmark_net_once\nGetStats = C.get_stats\nCreateOfflineTensor = C.create_offline_tensor\n\noperator_tracebacks = defaultdict(dict)\n\nis_asan = C.is_asan\nhas_cuda_support = C.has_cuda_support\nhas_hip_support = C.has_hip_support\nhas_gpu_support = C.has_gpu_support\nif has_cuda_support:\n GpuDeviceType = caffe2_pb2.CUDA\n NumCudaDevices = C.num_cuda_devices\n # This is a duplicate of NumCudaDevices. Remove\n # NumCudaDevices once replaced everywhere in the code\n NumGpuDevices = C.num_cuda_devices\n GetCUDAVersion = C.get_cuda_version\n GetCuDNNVersion = C.get_cudnn_version\n\n def GetGpuPeerAccessPattern():\n return np.asarray(C.get_cuda_peer_access_pattern())\n\n GetDeviceProperties = C.get_device_properties\n GetGPUMemoryInfo = C.get_gpu_memory_info\nelse:\n NumCudaDevices = lambda: 0 # noqa\n GetCUDAVersion = lambda: 0 # noqa\n GetCuDNNVersion = lambda: 0 # noqa\n\nif has_hip_support:\n GpuDeviceType = caffe2_pb2.HIP\n NumGpuDevices = C.num_hip_devices\n GetHIPVersion = C.get_hip_version\n\n def GetGpuPeerAccessPattern():\n return np.asarray(C.get_hip_peer_access_pattern())\n GetDeviceProperties = C.get_device_properties\n GetGPUMemoryInfo = C.get_gpu_memory_info\n\nif not has_gpu_support:\n # setting cuda as the default GpuDeviceType as some tests\n # like core, scope tests use GpuDeviceType even without gpu support\n GpuDeviceType = caffe2_pb2.CUDA\n NumGpuDevices = lambda: 0 # noqa\n GetDeviceProperties = lambda x: None # noqa\n GetGpuPeerAccessPattern = lambda: np.array([]) # noqa\n GetGPUMemoryInfo = lambda: None # noqa\n\nIsNUMAEnabled = C.is_numa_enabled\nGetNumNUMANodes = C.get_num_numa_nodes\nGetBlobNUMANode = C.get_blob_numa_node\nGetBlobSizeBytes = C.get_blob_size_bytes\n\n\ndef FillRandomNetworkInputs(net, input_dims, input_types):\n C.fill_random_network_inputs(net.Proto().SerializeToString(), input_dims, input_types)\n\n\ndef _GetFreeFlaskPort():\n \"\"\"Get a free flask port.\"\"\"\n # We will prefer to use 5000. If not, we will then pick a random port.\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex(('127.0.0.1', 5000))\n if result == 0:\n return 5000\n else:\n s = socket.socket()\n s.bind(('', 0))\n port = s.getsockname()[1]\n s.close()\n # Race condition: between the interval we close the socket and actually\n # start a mint process, another process might have occupied the port. 
We\n # don't do much here as this is mostly for convenience in research\n # rather than 24x7 service.\n return port\n\ndef StartMint(root_folder=None, port=None):\n \"\"\"Start a mint instance.\n\n TODO(Yangqing): this does not work well under ipython yet. According to\n https://github.com/ipython/ipython/issues/5862\n writing up some fix is a todo item.\n \"\"\"\n from caffe2.python.mint import app\n if root_folder is None:\n # Get the root folder from the current workspace\n root_folder = C.root_folder()\n if port is None:\n port = _GetFreeFlaskPort()\n process = Process(\n target=app.main,\n args=(\n ['-p', str(port), '-r', root_folder],\n )\n )\n process.start()\n print('Mint running at http://{}:{}'.format(socket.getfqdn(), port))\n return process\n\n\ndef StringifyProto(obj):\n \"\"\"Stringify a protocol buffer object.\n\n Inputs:\n obj: a protocol buffer object, or a Pycaffe2 object that has a Proto()\n function.\n Outputs:\n string: the output protobuf string.\n Raises:\n AttributeError: if the passed in object does not have the right attribute.\n \"\"\"\n if isinstance(obj, basestring):\n return obj\n else:\n if isinstance(obj, Message):\n # First, see if this object is a protocol buffer, which we can\n # simply serialize with the SerializeToString() call.\n return obj.SerializeToString()\n elif hasattr(obj, 'Proto'):\n return obj.Proto().SerializeToString()\n else:\n raise ValueError(\"Unexpected argument to StringifyProto of type \" +\n type(obj).__name__)\n\n\ndef ResetWorkspace(root_folder=None):\n if root_folder is None:\n # Reset the workspace, but keep the current root folder setting.\n return C.reset_workspace(C.root_folder())\n else:\n if not os.path.exists(root_folder):\n os.makedirs(root_folder)\n return C.reset_workspace(root_folder)\n\n\ndef CreateNet(net, overwrite=False, input_blobs=None):\n TriggerLazyImport()\n if input_blobs is None:\n input_blobs = []\n for input_blob in input_blobs:\n C.create_blob(input_blob)\n return CallWithExceptionIntercept(\n C.create_net,\n C.Workspace.current._last_failed_op_net_position,\n GetNetName(net),\n StringifyProto(net), overwrite,\n )\n\n\ndef Predictor(init_net, predict_net):\n return C.Predictor(StringifyProto(init_net), StringifyProto(predict_net))\n\n\ndef GetOperatorCost(operator, blobs):\n return C.get_operator_cost(StringifyProto(operator), blobs)\n\n\ndef RunOperatorOnce(operator):\n return C.run_operator_once(StringifyProto(operator))\n\n\ndef RunOperatorMultiple(operator, num_runs):\n return C.run_operator_multiple(StringifyProto(operator), num_runs)\n\n\ndef RunOperatorsOnce(operators):\n for op in operators:\n success = RunOperatorOnce(op)\n if not success:\n return False\n return True\n\n\ndef ClearGlobalNetObserver():\n return C.clear_global_net_observer()\n\n\ndef CallWithExceptionIntercept(func, op_id_fetcher, net_name, *args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception:\n op_id = op_id_fetcher()\n net_tracebacks = operator_tracebacks.get(net_name, None)\n logger.warning(\n 'Original python traceback for operator `{}` in network '\n '`{}` in exception above (most recent call last):'.format(\n op_id, net_name))\n if net_tracebacks and op_id in net_tracebacks:\n tb = net_tracebacks[op_id]\n for line in reversed(tb):\n logger.warning(' File \"{}\", line {}, in {}'.format(\n line[0], line[1], line[2]))\n raise\n\n\ndef RunNetOnce(net):\n return CallWithExceptionIntercept(\n C.run_net_once,\n C.Workspace.current._last_failed_op_net_position,\n GetNetName(net),\n StringifyProto(net),\n 
)\n\n\ndef RunNet(name, num_iter=1, allow_fail=False):\n \"\"\"Runs a given net.\n\n Inputs:\n name: the name of the net, or a reference to the net.\n num_iter: number of iterations to run\n allow_fail: if True, does not assert on net exec failure but returns False\n Returns:\n True or an exception.\n \"\"\"\n return CallWithExceptionIntercept(\n C.run_net,\n C.Workspace.current._last_failed_op_net_position,\n GetNetName(name),\n StringifyNetName(name), num_iter, allow_fail,\n )\n\n\ndef RunPlan(plan_or_step):\n # TODO(jiayq): refactor core.py/workspace.py to avoid circular deps\n import caffe2.python.core as core\n if isinstance(plan_or_step, core.ExecutionStep):\n plan_or_step = core.Plan(plan_or_step)\n return C.run_plan(StringifyProto(plan_or_step))\n\n\ndef RunPlanInBackground(plan_or_step):\n # TODO(jiayq): refactor core.py/workspace.py to avoid circular deps\n import caffe2.python.core as core\n if isinstance(plan_or_step, core.ExecutionStep):\n plan_or_step = core.Plan(plan_or_step)\n return C.run_plan_in_background(StringifyProto(plan_or_step))\n\n\ndef InferShapesAndTypes(nets, blob_dimensions=None, nets_proto=False,\n blob_types=None):\n \"\"\"Infers the shapes and types for the specified nets.\n\n Inputs:\n nets: the list of nets\n blob_dimensions (optional): a dictionary of blobs and their dimensions.\n If not specified, the workspace blobs are used.\n nets_proto (optional): a boolean flag indicating whether the protobuffer\n representation is passed to the routine.\n Returns:\n A tuple of (shapes, types) dictionaries keyed by blob name.\n \"\"\"\n if nets_proto:\n net_protos = [StringifyProto(n) for n in nets]\n else:\n net_protos = [StringifyProto(n.Proto()) for n in nets]\n if blob_dimensions is None:\n assert blob_types is None\n blobdesc_prototxt = C.infer_shapes_and_types_from_workspace(net_protos)\n elif blob_types is None:\n blobdesc_prototxt = C.infer_shapes_and_types_from_map(\n net_protos, blob_dimensions\n )\n else:\n blobdesc_prototxt = C.infer_shapes_and_types_from_map(\n net_protos, blob_dimensions, blob_types\n )\n blobdesc_proto = caffe2_pb2.TensorShapes()\n blobdesc_proto.ParseFromString(blobdesc_prototxt)\n shapes = {}\n types = {}\n for ts in blobdesc_proto.shapes:\n if not ts.unknown_shape:\n shapes[ts.name] = list(ts.dims)\n types[ts.name] = ts.data_type\n\n return (shapes, types)\n\n\ndef _StringifyName(name, expected_type):\n if isinstance(name, basestring):\n return name\n assert type(name).__name__ == expected_type, \\\n \"Expected a string or %s\" % expected_type\n return str(name)\n\n\ndef StringifyBlobName(name):\n return _StringifyName(name, \"BlobReference\")\n\n\ndef StringifyNetName(name):\n return _StringifyName(name, \"Net\")\n\n\ndef GetNetName(net):\n if isinstance(net, basestring):\n return net\n if type(net).__name__ == \"Net\":\n return net.Name()\n if isinstance(net, caffe2_pb2.NetDef):\n return net.name\n raise Exception(\"Not a Net object: {}\".format(str(net)))\n\n\ndef FeedBlob(name, arr, device_option=None):\n \"\"\"Feeds a blob into the workspace.\n\n Inputs:\n name: the name of the blob.\n arr: either a TensorProto object or a numpy array object to be fed into\n the workspace.\n device_option (optional): the device option to feed the data with.\n Returns:\n True or False, stating whether the feed is successful.\n \"\"\"\n ws = C.Workspace.current\n return _Workspace_feed_blob(ws, name, arr, device_option)\n\n\ndef FetchBlobs(names):\n \"\"\"Fetches a list of blobs from the workspace.\n\n Inputs:\n names: list of names of blobs - 
strings or BlobReferences\n Returns:\n list of fetched blobs\n \"\"\"\n return [FetchBlob(name) for name in names]\n\n\ndef FetchBlob(name):\n \"\"\"Fetches a blob from the workspace.\n\n Inputs:\n name: the name of the blob - a string or a BlobReference\n Returns:\n Fetched blob (numpy array or string) if successful\n \"\"\"\n result = C.fetch_blob(StringifyBlobName(name))\n if isinstance(result, tuple):\n raise TypeError(\n \"Use FetchInt8Blob to fetch Int8 Blob {}\".format(\n StringifyBlobName(name)\n )\n )\n return result\n\n\ndef FetchTorch(name):\n ws = C.Workspace.current\n return ws.blobs[name].to_torch()\n\n\nInt8Tensor = collections.namedtuple(\n 'Int8Tensor', ['data', 'scale', 'zero_point']\n)\n\n\ndef FetchInt8Blob(name):\n \"\"\"Fetches an Int8 blob from the workspace. It shared backend implementation\n with FetchBlob but it is recommended when fetching Int8 Blobs\n\n Inputs:\n name: the name of the Int8 blob - a string or a BlobReference\n Returns:\n data: int8 numpy array, data\n scale: float, fake quantization scale\n zero_point: int, fake quantization offset\n \"\"\"\n result = C.fetch_blob(StringifyBlobName(name))\n assert isinstance(result, tuple), \\\n 'You are not fetching an Int8Blob {}. Please use FetchBlob'.format(\n StringifyBlobName(name))\n return Int8Tensor(*result)\n\n\ndef FetchInt8BlobRealVal(name):\n \"\"\"Fetches an Int8 blob from the workspace and return its real value representation.\n\n Inputs:\n name: the name of the Int8 blob - a string or a BlobReference\n Returns:\n real value representation of int8 numpy array\n \"\"\"\n result = C.fetch_blob(StringifyBlobName(name))\n assert isinstance(result, tuple), \\\n 'You are not fetching an Int8Blob {}. Please use FetchBlob'.format(\n StringifyBlobName(name))\n int8_blob = Int8Tensor(*result)\n return (int8_blob.data.astype(np.int32) - int(int8_blob.zero_point)).astype(\n np.float32) * int8_blob.scale\n\n\ndef _Workspace_fetch_int8_blob(ws, name):\n \"\"\"Fetches an Int8 blob from the workspace. It shared backend implementation\n with FetchBlob but it is recommended when fetching Int8 Blobs\n\n Inputs:\n name: the name of the Int8 blob - a string or a BlobReference\n Returns:\n data: int8 numpy array, data\n scale: float, fake quantization scale\n zero_point: int, fake quantization offset\n \"\"\"\n result = ws.fetch_blob(name)\n assert isinstance(result, tuple), \\\n 'You are not fetching an Int8Blob {}. 
Please use fetch_blob'.format(\n StringifyBlobName(name))\n return Int8Tensor(*result)\n\n\nC.Workspace.fetch_int8_blob = _Workspace_fetch_int8_blob\n\n\ndef ApplyTransform(transform_key, net):\n \"\"\"Apply a Transform to a NetDef protobuf object, and returns the new\n transformed NetDef.\n\n Inputs:\n transform_key: the name of the transform, as it is stored in the registry\n net: a NetDef protobuf object\n Returns:\n Transformed NetDef protobuf object.\n \"\"\"\n transformed_net = caffe2_pb2.NetDef()\n transformed_str = C.apply_transform(\n str(transform_key).encode('utf-8'),\n net.SerializeToString(),\n )\n transformed_net.ParseFromString(transformed_str)\n return transformed_net\n\n\ndef ApplyTransformIfFaster(transform_key, net, init_net, **kwargs):\n \"\"\"Apply a Transform to a NetDef protobuf object, and returns the new\n transformed NetDef, only if it runs faster than the original.\n\n The runs are performed on the current active workspace (gWorkspace).\n You should initialize that workspace before making a call to this function.\n\n Inputs:\n transform_key: the name of the transform, as it is stored in the registry\n net: a NetDef protobuf object\n init_net: The net to initialize the workspace.\n warmup_runs (optional):\n Determines how many times the net is run before testing.\n Will be 5 by default.\n main_runs (optional):\n Determines how many times the net is run during testing.\n Will be 10 by default.\n improvement_threshold (optional):\n Determines the factor which the new net needs to be faster\n in order to replace the old. Will be 1.01 by default.\n\n Returns:\n Either a Transformed NetDef protobuf object, or the original netdef.\n \"\"\"\n\n warmup_runs = kwargs['warmup_runs'] if 'warmup_runs' in kwargs else 5\n main_runs = kwargs['main_runs'] if 'main_runs' in kwargs else 10\n improvement_threshold = kwargs['improvement_threshold'] \\\n if 'improvement_threshold' in kwargs else 1.01\n\n transformed_net = caffe2_pb2.NetDef()\n transformed_str = C.apply_transform_if_faster(\n str(transform_key).encode('utf-8'),\n net.SerializeToString(),\n init_net.SerializeToString(),\n warmup_runs,\n main_runs,\n float(improvement_threshold),\n )\n transformed_net.ParseFromString(transformed_str)\n return transformed_net\n\n\ndef GetNameScope():\n \"\"\"Return the current namescope string. To be used to fetch blobs\"\"\"\n return scope.CurrentNameScope()\n\n\nclass _BlobDict(object):\n \"\"\"Provides python dict compatible way to do fetching and feeding\"\"\"\n\n def __getitem__(self, key):\n return FetchBlob(key)\n\n def __setitem__(self, key, value):\n return FeedBlob(key, value)\n\n def __len__(self):\n return len(C.blobs())\n\n def __iter__(self):\n return C.blobs().__iter__()\n\n def __contains__(self, item):\n return C.has_blob(item)\n\n\nblobs = _BlobDict()\n\n\n################################################################################\n# Utilities for immediate mode\n#\n# Caffe2's immediate mode implements the following behavior: between the two\n# function calls StartImmediate() and StopImmediate(), for any operator that is\n# called through CreateOperator(), we will also run that operator in a workspace\n# that is specific to the immediate mode. The user is explicitly expected to\n# make sure that these ops have proper inputs and outputs, i.e. 
one should not\n# run an op where an external input is not created or fed.\n#\n# Users can use FeedImmediate() and FetchImmediate() to interact with blobs\n# in the immediate workspace.\n#\n# Once StopImmediate() is called, all contents in the immediate workspace is\n# freed up so one can continue using normal runs.\n#\n# The immediate mode is solely for debugging purposes and support will be very\n# sparse.\n################################################################################\n\n_immediate_mode = False\n_immediate_workspace_name = \"_CAFFE2_IMMEDIATE\"\n_immediate_root_folder = ''\n\n\ndef IsImmediate():\n return _immediate_mode\n\n\[email protected]\ndef WorkspaceGuard(workspace_name):\n current = CurrentWorkspace()\n SwitchWorkspace(workspace_name, True)\n yield\n SwitchWorkspace(current)\n\n\ndef StartImmediate(i_know=False):\n global _immediate_mode\n global _immediate_root_folder\n if IsImmediate():\n # already in immediate mode. We will kill the previous one\n # and start from fresh.\n StopImmediate()\n _immediate_mode = True\n with WorkspaceGuard(_immediate_workspace_name):\n _immediate_root_folder = tempfile.mkdtemp()\n ResetWorkspace(_immediate_root_folder)\n if i_know:\n # if the user doesn't want to see the warning message, sure...\n return\n print(\"\"\"\n Enabling immediate mode in caffe2 python is an EXTREMELY EXPERIMENTAL\n feature and may very easily go wrong. This is because Caffe2 uses a\n declarative way of defining operators and models, which is essentially\n not meant to run things in an interactive way. Read the following carefully\n to make sure that you understand the caveats.\n\n (1) You need to make sure that the sequences of operators you create are\n actually runnable sequentially. For example, if you create an op that takes\n an input X, somewhere earlier you should have already created X.\n\n (2) Caffe2 immediate uses one single workspace, so if the set of operators\n you run are intended to be under different workspaces, they will not run.\n To create boundaries between such use cases, you can call FinishImmediate()\n and StartImmediate() manually to flush out everything no longer needed.\n\n (3) Underlying objects held by the immediate mode may interfere with your\n normal run. For example, if there is a leveldb that you opened in immediate\n mode and did not close, your main run will fail because leveldb does not\n support double opening. Immediate mode may also occupy a lot of memory esp.\n on GPUs. Call FinishImmediate() as soon as possible when you no longer\n need it.\n\n (4) Immediate is designed to be slow. Every immediate call implicitly\n creates a temp operator object, runs it, and destroys the operator. This\n slow-speed run is by design to discourage abuse. For most use cases other\n than debugging, do NOT turn on immediate mode.\n\n (5) If there is anything FATAL happening in the underlying C++ code, the\n immediate mode will immediately (pun intended) cause the runtime to crash.\n\n Thus you should use immediate mode with extra care. 
If you still would\n like to, have fun [https://xkcd.com/149/].\n \"\"\")\n\n\ndef StopImmediate():\n \"\"\"Stops an immediate mode run.\"\"\"\n # Phew, that was a dangerous ride.\n global _immediate_mode\n global _immediate_root_folder\n if not IsImmediate():\n return\n with WorkspaceGuard(_immediate_workspace_name):\n ResetWorkspace()\n shutil.rmtree(_immediate_root_folder)\n _immediate_root_folder = ''\n _immediate_mode = False\n\n\ndef ImmediateBlobs():\n with WorkspaceGuard(_immediate_workspace_name):\n return Blobs()\n\n\ndef RunOperatorImmediate(op):\n with WorkspaceGuard(_immediate_workspace_name):\n RunOperatorOnce(op)\n\n\ndef FetchImmediate(*args, **kwargs):\n with WorkspaceGuard(_immediate_workspace_name):\n return FetchBlob(*args, **kwargs)\n\n\ndef FeedImmediate(*args, **kwargs):\n with WorkspaceGuard(_immediate_workspace_name):\n return FeedBlob(*args, **kwargs)\n\n\n# C.Workspace methods.\n\ndef _Workspace_create_net_with_exception_intercept(ws, net, overwrite=False):\n return CallWithExceptionIntercept(\n ws._create_net,\n ws._last_failed_op_net_position,\n GetNetName(net),\n StringifyProto(net), overwrite,\n )\n\n\ndef _Workspace_run(ws, obj):\n if hasattr(obj, 'Proto'):\n obj = obj.Proto()\n if isinstance(obj, caffe2_pb2.PlanDef):\n return ws._run_plan(obj.SerializeToString())\n if isinstance(obj, caffe2_pb2.NetDef):\n return CallWithExceptionIntercept(\n ws._run_net,\n ws._last_failed_op_net_position,\n GetNetName(obj),\n obj.SerializeToString(),\n )\n # return ws._run_net(obj.SerializeToString())\n if isinstance(obj, caffe2_pb2.OperatorDef):\n return ws._run_operator(obj.SerializeToString())\n raise ValueError(\n \"Don't know how to do Workspace.run() on {}\".format(type(obj)))\n\n\ndef _Workspace_feed_blob(ws, name, arr, device_option=None):\n if type(arr) is caffe2_pb2.TensorProto:\n arr = utils.Caffe2TensorToNumpyArray(arr)\n if type(arr) is np.ndarray and arr.dtype.kind in 'SU':\n # Plain NumPy strings are weird, let's use objects instead\n arr = arr.astype(np.object)\n\n if device_option is None:\n device_option = scope.CurrentDeviceScope()\n\n if device_option and device_option.device_type == caffe2_pb2.CUDA:\n if arr.dtype == np.dtype('float64'):\n logger.warning(\n \"CUDA operators do not support 64-bit doubles, \" +\n \"please use arr.astype(np.float32) or np.int32 for ints.\" +\n \" Blob: {}\".format(name) +\n \" type: {}\".format(str(arr.dtype))\n )\n\n name = StringifyBlobName(name)\n if device_option is not None:\n return ws.create_blob(name).feed(arr, device_option)\n else:\n return ws.create_blob(name).feed(arr)\n\n\ndef _Workspace_remove_blob(ws, blob):\n ws._remove_blob(str(blob))\n\n\nWorkspace = C.Workspace\nWorkspace.create_net = _Workspace_create_net_with_exception_intercept\nWorkspace.run = _Workspace_run\nWorkspace.feed_blob = _Workspace_feed_blob\nWorkspace.remove_blob = _Workspace_remove_blob\n\n# C.Blob methods.\n\n\ndef _Blob_feed(blob, arg, device_option=None):\n # conservative type check to avoid unnecessary import\n if type(arg).__name__ == 'Tensor' and type(arg).__module__ == 'torch':\n import torch\n if isinstance(arg, torch.Tensor):\n assert device_option is None, \\\n \"device_option doesn't make sense with PyTorch tensors\"\n handle = torch._C._tensor_impl_raw_handle(arg)\n blob._wrap_tensor_impl(handle)\n return True # _feed() returns True for some reason\n if device_option is not None:\n device_option = StringifyProto(device_option)\n return blob._feed(arg, device_option)\n\n\nC.Blob.feed = _Blob_feed\n\n\ndef 
_Tensor_to_torch(tensor):\n \"\"\"\n PyTorch tensor interop (TensorCPU methods)\n\n Can be accessed as:\n workspace.Workspace.current.blobs['foo'].tensor().to_torch()\n \"\"\"\n # avoiding circular dependency\n import torch\n handle = tensor._tensor_impl_raw_handle()\n return torch._C._wrap_tensor_impl(handle)\n\nC.TensorCPU.to_torch = _Tensor_to_torch\n\n\ndef _Blob_to_torch(blob):\n if not blob.is_tensor():\n raise RuntimeError(\"Blob has to be a tensor\")\n return blob.as_tensor().to_torch()\n\nC.Blob.to_torch = _Blob_to_torch\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.python import core, workspace\nimport caffe2.python.hypothesis_test_util as hu\nimport caffe2.python.serialized_test.serialized_test_util as serial\nimport hypothesis.strategies as st\nfrom hypothesis import given, settings\nimport numpy as np\n\n\nclass TestIndexHashOps(serial.SerializedTestCase):\n @given(\n indices=st.sampled_from([\n np.int32, np.int64\n ]).flatmap(lambda dtype: hu.tensor(min_dim=1, max_dim=1, dtype=dtype)),\n seed=st.integers(min_value=0, max_value=10),\n modulo=st.integers(min_value=100000, max_value=200000),\n **hu.gcs_cpu_only\n )\n @settings(deadline=10000)\n def test_index_hash_ops(self, indices, seed, modulo, gc, dc):\n def index_hash(indices):\n dtype = np.array(indices).dtype\n assert dtype == np.int32 or dtype == np.int64\n hashed_indices = []\n for index in indices:\n hashed = dtype.type(0xDEADBEEF * seed)\n indices_bytes = np.array([index], dtype).view(np.int8)\n for b in indices_bytes:\n hashed = dtype.type(hashed * 65537 + b)\n hashed = (modulo + hashed % modulo) % modulo\n hashed_indices.append(hashed)\n return [hashed_indices]\n\n op = core.CreateOperator(\"IndexHash\",\n [\"indices\"], [\"hashed_indices\"],\n seed=seed, modulo=modulo)\n\n self.assertDeviceChecks(dc, op, [indices], [0])\n self.assertReferenceChecks(gc, op, [indices], index_hash)\n\n # In-place update\n op = core.CreateOperator(\"IndexHash\",\n [\"indices\"], [\"indices\"],\n seed=seed, modulo=modulo)\n\n self.assertDeviceChecks(dc, op, [indices], [0])\n self.assertReferenceChecks(gc, op, [indices], index_hash)\n\n def test_shape_and_type_inference(self):\n with hu.temp_workspace(\"shape_type_inf_int64\"):\n net = core.Net('test_net')\n net.ConstantFill(\n [], \"values\", shape=[64], dtype=core.DataType.INT64,\n )\n net.IndexHash(['values'], ['values_output'])\n (shapes, types) = workspace.InferShapesAndTypes([net], {})\n\n self.assertEqual(shapes[\"values_output\"], [64])\n self.assertEqual(types[\"values_output\"], core.DataType.INT64)\n\n with hu.temp_workspace(\"shape_type_inf_int32\"):\n net = core.Net('test_net')\n net.ConstantFill(\n [], \"values\", shape=[2, 32], dtype=core.DataType.INT32,\n )\n net.IndexHash(['values'], ['values_output'])\n (shapes, types) = workspace.InferShapesAndTypes([net], {})\n\n self.assertEqual(shapes[\"values_output\"], [2, 32])\n self.assertEqual(types[\"values_output\"], core.DataType.INT32)\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport unittest\ntry:\n import cv2\n import lmdb\nexcept ImportError:\n pass # Handled below\n\nfrom PIL import Image\nimport numpy as np\nimport shutil\nimport six\nimport sys\nimport tempfile\n\n# TODO: This test does not test scaling because\n# the algorithms used by OpenCV in the C and Python\n# version seem to differ slightly. It does test\n# most other features\n\nfrom hypothesis import given, settings, Verbosity\nimport hypothesis.strategies as st\n\nfrom caffe2.proto import caffe2_pb2\nimport caffe2.python.hypothesis_test_util as hu\n\nfrom caffe2.python import workspace, core\n\n\n# Verification routines (applies transformations to image to\n# verify if the operator produces same result)\ndef verify_apply_bounding_box(img, box):\n import skimage.util\n if any(type(box[f]) is not int or np.isnan(box[f] or box[f] < 0)\n for f in range(0, 4)):\n return img\n # Box is ymin, xmin, bound_height, bound_width\n y_bounds = (box[0], img.shape[0] - box[0] - box[2])\n x_bounds = (box[1], img.shape[1] - box[1] - box[3])\n c_bounds = (0, 0)\n\n if any(el < 0 for el in list(y_bounds) + list(x_bounds) + list(c_bounds)):\n return img\n\n bboxed = skimage.util.crop(img, (y_bounds, x_bounds, c_bounds))\n return bboxed\n\n\n# This function is called but not used. It will trip on assert False if\n# the arguments are wrong (improper example)\ndef verify_rescale(img, minsize):\n # Here we use OpenCV transformation to match the C code\n scale_amount = float(minsize) / min(img.shape[0], img.shape[1])\n if scale_amount <= 1.0:\n return img\n\n print(\"Scale amount is %f -- should be < 1.0; got shape %s\" %\n (scale_amount, str(img.shape)))\n assert False\n img_cv = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n output_shape = (int(np.ceil(scale_amount * img_cv.shape[0])),\n int(np.ceil(scale_amount * img_cv.shape[1])))\n resized = cv2.resize(img_cv,\n dsize=output_shape,\n interpolation=cv2.INTER_AREA)\n\n resized = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)\n assert resized.shape[0] >= minsize\n assert resized.shape[1] >= minsize\n return resized\n\n\ndef verify_crop(img, crop):\n import skimage.util\n assert img.shape[0] >= crop\n assert img.shape[1] >= crop\n y_offset = 0\n if img.shape[0] > crop:\n y_offset = (img.shape[0] - crop) // 2\n\n x_offset = 0\n if img.shape[1] > crop:\n x_offset = (img.shape[1] - crop) // 2\n\n y_bounds = (y_offset, img.shape[0] - crop - y_offset)\n x_bounds = (x_offset, img.shape[1] - crop - x_offset)\n c_bounds = (0, 0)\n cropped = skimage.util.crop(img, (y_bounds, x_bounds, c_bounds))\n assert cropped.shape[0] == crop\n assert cropped.shape[1] == crop\n return cropped\n\n\ndef verify_color_normalize(img, means, stds):\n # Note the RGB/BGR inversion\n # Operate on integers like the C version\n img = img * 255.0\n img[:, :, 0] = (img[:, :, 0] - means[2]) / stds[2]\n img[:, :, 1] = (img[:, :, 1] - means[1]) / stds[1]\n img[:, :, 2] = (img[:, :, 2] - means[0]) / stds[0]\n return img * (1.0 / 255.0)\n\n\n# Printing function (for debugging)\ndef caffe2_img(img):\n # Convert RGB to BGR\n img = img[:, :, (2, 1, 0)]\n # Convert HWC to CHW\n img = img.swapaxes(1, 2).swapaxes(0, 1)\n img = img * 255.0\n return img.astype(np.int32)\n\n\n# Bounding box is ymin, xmin, height, width\ndef create_test(output_dir, width, height, default_bound, minsize, crop, means,\n stds, count, label_type, num_labels, output1=None,\n output2_size=None):\n 
print(\"Creating a temporary lmdb database of %d pictures...\" % (count))\n\n if default_bound is None:\n default_bound = [-1] * 4\n\n LMDB_MAP_SIZE = 1 << 40\n env = lmdb.open(output_dir, map_size=LMDB_MAP_SIZE, subdir=True)\n index = 0\n # Create images and the expected results\n expected_results = []\n with env.begin(write=True) as txn:\n while index < count:\n img_array = np.random.random_integers(\n 0, 255, [height, width, 3]).astype(np.uint8)\n img_obj = Image.fromarray(img_array)\n img_str = six.BytesIO()\n img_obj.save(img_str, 'PNG')\n\n # Create a random bounding box for every other image\n # ymin, xmin, bound_height, bound_width\n # TODO: To ensure that we never need to scale, we\n # ensure that the bounding-box is larger than the\n # minsize parameter\n bounding_box = list(default_bound)\n do_default_bound = True\n if index % 2 == 0:\n if height > minsize and width > minsize:\n do_default_bound = False\n bounding_box[0:2] = [np.random.randint(a) for a in\n (height - minsize, width - minsize)]\n bounding_box[2:4] = [np.random.randint(a) + minsize for a in\n (height - bounding_box[0] - minsize + 1,\n width - bounding_box[1] - minsize + 1)]\n # print(\"Bounding box is %s\" % (str(bounding_box)))\n # Create expected result\n img_expected = img_array.astype(np.float32) * (1.0 / 255.0)\n # print(\"Orig image: %s\" % (str(caffe2_img(img_expected))))\n img_expected = verify_apply_bounding_box(\n img_expected,\n bounding_box)\n # print(\"Bounded image: %s\" % (str(caffe2_img(img_expected))))\n\n img_expected = verify_rescale(img_expected, minsize)\n\n img_expected = verify_crop(img_expected, crop)\n # print(\"Crop image: %s\" % (str(caffe2_img(img_expected))))\n\n img_expected = verify_color_normalize(img_expected, means, stds)\n # print(\"Color image: %s\" % (str(caffe2_img(img_expected))))\n\n tensor_protos = caffe2_pb2.TensorProtos()\n image_tensor = tensor_protos.protos.add()\n image_tensor.data_type = 4 # string data\n image_tensor.string_data.append(img_str.getvalue())\n img_str.close()\n\n label_tensor = tensor_protos.protos.add()\n label_tensor.data_type = 2 # int32 data\n assert (label_type >= 0 and label_type <= 3)\n if label_type == 0:\n label_tensor.int32_data.append(index)\n expected_label = index\n elif label_type == 1:\n binary_labels = np.random.randint(2, size=num_labels)\n for idx, val in enumerate(binary_labels.tolist()):\n if val == 1:\n label_tensor.int32_data.append(idx)\n expected_label = binary_labels\n elif label_type == 2:\n embedding_label = np.random.randint(100, size=num_labels)\n for _idx, val in enumerate(embedding_label.tolist()):\n label_tensor.int32_data.append(val)\n expected_label = embedding_label\n elif label_type == 3:\n weight_tensor = tensor_protos.protos.add()\n weight_tensor.data_type = 1 # float weights\n binary_labels = np.random.randint(2, size=num_labels)\n expected_label = np.zeros(num_labels).astype(np.float32)\n for idx, val in enumerate(binary_labels.tolist()):\n expected_label[idx] = val * idx\n if val == 1:\n label_tensor.int32_data.append(idx)\n weight_tensor.float_data.append(idx)\n\n if output1:\n output1_tensor = tensor_protos.protos.add()\n output1_tensor.data_type = 1 # float data\n output1_tensor.float_data.append(output1)\n\n output2 = []\n if output2_size:\n output2_tensor = tensor_protos.protos.add()\n output2_tensor.data_type = 2 # int32 data\n values = np.random.randint(1024, size=output2_size)\n for val in values.tolist():\n output2.append(val)\n output2_tensor.int32_data.append(val)\n\n expected_results.append(\n 
[caffe2_img(img_expected), expected_label, output1, output2])\n\n if not do_default_bound:\n bounding_tensor = tensor_protos.protos.add()\n bounding_tensor.data_type = 2 # int32 data\n bounding_tensor.int32_data.extend(bounding_box)\n\n txn.put(\n '{}'.format(index).encode('ascii'),\n tensor_protos.SerializeToString()\n )\n index = index + 1\n # End while\n # End with\n return expected_results\n\n\ndef run_test(\n size_tuple, means, stds, label_type, num_labels, is_test, scale_jitter_type,\n color_jitter, color_lighting, dc, validator, output1=None, output2_size=None):\n # TODO: Does not test on GPU and does not test use_gpu_transform\n # WARNING: Using ModelHelper automatically does NHWC to NCHW\n # transformation if needed.\n width, height, minsize, crop = size_tuple\n means = [float(m) for m in means]\n stds = [float(s) for s in stds]\n out_dir = tempfile.mkdtemp()\n count_images = 2 # One with bounding box and one without\n expected_images = create_test(\n out_dir,\n width=width,\n height=height,\n default_bound=(3, 5, height - 3, width - 5),\n minsize=minsize,\n crop=crop,\n means=means,\n stds=stds,\n count=count_images,\n label_type=label_type,\n num_labels=num_labels,\n output1=output1,\n output2_size=output2_size\n )\n for device_option in dc:\n with hu.temp_workspace():\n reader_net = core.Net('reader')\n reader_net.CreateDB(\n [],\n 'DB',\n db=out_dir,\n db_type=\"lmdb\"\n )\n workspace.RunNetOnce(reader_net)\n outputs = ['data', 'label']\n output_sizes = []\n if output1:\n outputs.append('output1')\n output_sizes.append(1)\n if output2_size:\n outputs.append('output2')\n output_sizes.append(output2_size)\n imageop = core.CreateOperator(\n 'ImageInput',\n ['DB'],\n outputs,\n batch_size=count_images,\n color=3,\n minsize=minsize,\n crop=crop,\n is_test=is_test,\n bounding_ymin=3,\n bounding_xmin=5,\n bounding_height=height - 3,\n bounding_width=width - 5,\n mean_per_channel=means,\n std_per_channel=stds,\n use_gpu_transform=(device_option.device_type == 1),\n label_type=label_type,\n num_labels=num_labels,\n output_sizes=output_sizes,\n scale_jitter_type=scale_jitter_type,\n color_jitter=color_jitter,\n color_lighting=color_lighting\n )\n\n imageop.device_option.CopyFrom(device_option)\n main_net = core.Net('main')\n main_net.Proto().op.extend([imageop])\n workspace.RunNetOnce(main_net)\n validator(expected_images, device_option, count_images)\n # End for\n # End with\n # End for\n shutil.rmtree(out_dir)\n# end run_test\n\n\[email protected]('cv2' not in sys.modules, 'python-opencv is not installed')\[email protected]('lmdb' not in sys.modules, 'python-lmdb is not installed')\nclass TestImport(hu.HypothesisTestCase):\n def validate_image_and_label(\n self, expected_images, device_option, count_images, label_type,\n is_test, scale_jitter_type, color_jitter, color_lighting):\n l = workspace.FetchBlob('label')\n result = workspace.FetchBlob('data').astype(np.int32)\n # If we don't use_gpu_transform, the output is in NHWC\n # Our reference output is CHW so we swap\n if device_option.device_type != 1:\n expected = [img.swapaxes(0, 1).swapaxes(1, 2) for\n (img, _, _, _) in expected_images]\n else:\n expected = [img for (img, _, _, _) in expected_images]\n for i in range(count_images):\n if label_type == 0:\n self.assertEqual(l[i], expected_images[i][1])\n else:\n self.assertEqual(\n (l[i] - expected_images[i][1] > 0).sum(), 0)\n if is_test == 0:\n # when traing data preparation is randomized (e.g. 
random cropping,\n # Inception-style random sized cropping, color jittering,\n # color lightin), we only compare blob shape\n for (s1, s2) in zip(expected[i].shape, result[i].shape):\n self.assertEqual(s1, s2)\n else:\n self.assertEqual((expected[i] - result[i] > 1).sum(), 0)\n # End for\n # end validate_image_and_label\n\n @given(size_tuple=st.tuples(\n st.integers(min_value=8, max_value=4096),\n st.integers(min_value=8, max_value=4096)).flatmap(lambda t: st.tuples(\n st.just(t[0]), st.just(t[1]),\n st.just(min(t[0] - 6, t[1] - 4)),\n st.integers(min_value=1, max_value=min(t[0] - 6, t[1] - 4)))),\n means=st.tuples(st.integers(min_value=0, max_value=255),\n st.integers(min_value=0, max_value=255),\n st.integers(min_value=0, max_value=255)),\n stds=st.tuples(st.floats(min_value=1, max_value=10),\n st.floats(min_value=1, max_value=10),\n st.floats(min_value=1, max_value=10)),\n label_type=st.integers(0, 3),\n num_labels=st.integers(min_value=8, max_value=4096),\n is_test=st.integers(min_value=0, max_value=1),\n scale_jitter_type=st.integers(min_value=0, max_value=1),\n color_jitter=st.integers(min_value=0, max_value=1),\n color_lighting=st.integers(min_value=0, max_value=1),\n **hu.gcs)\n @settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)\n def test_imageinput(\n self, size_tuple, means, stds, label_type,\n num_labels, is_test, scale_jitter_type, color_jitter, color_lighting,\n gc, dc):\n def validator(expected_images, device_option, count_images):\n self.validate_image_and_label(\n expected_images, device_option, count_images, label_type,\n is_test, scale_jitter_type, color_jitter, color_lighting)\n # End validator\n run_test(\n size_tuple, means, stds, label_type, num_labels, is_test,\n scale_jitter_type, color_jitter, color_lighting, dc, validator)\n # End test_imageinput\n\n @given(size_tuple=st.tuples(\n st.integers(min_value=8, max_value=4096),\n st.integers(min_value=8, max_value=4096)).flatmap(lambda t: st.tuples(\n st.just(t[0]), st.just(t[1]),\n st.just(min(t[0] - 6, t[1] - 4)),\n st.integers(min_value=1, max_value=min(t[0] - 6, t[1] - 4)))),\n means=st.tuples(st.integers(min_value=0, max_value=255),\n st.integers(min_value=0, max_value=255),\n st.integers(min_value=0, max_value=255)),\n stds=st.tuples(st.floats(min_value=1, max_value=10),\n st.floats(min_value=1, max_value=10),\n st.floats(min_value=1, max_value=10)),\n label_type=st.integers(0, 3),\n num_labels=st.integers(min_value=8, max_value=4096),\n is_test=st.integers(min_value=0, max_value=1),\n scale_jitter_type=st.integers(min_value=0, max_value=1),\n color_jitter=st.integers(min_value=0, max_value=1),\n color_lighting=st.integers(min_value=0, max_value=1),\n output1=st.floats(min_value=1, max_value=10),\n output2_size=st.integers(min_value=2, max_value=10),\n **hu.gcs)\n @settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)\n def test_imageinput_with_additional_outputs(\n self, size_tuple, means, stds, label_type,\n num_labels, is_test, scale_jitter_type, color_jitter, color_lighting,\n output1, output2_size, gc, dc):\n def validator(expected_images, device_option, count_images):\n self.validate_image_and_label(\n expected_images, device_option, count_images, label_type,\n is_test, scale_jitter_type, color_jitter, color_lighting)\n\n output1_result = workspace.FetchBlob('output1')\n output2_result = workspace.FetchBlob('output2')\n\n for i in range(count_images):\n self.assertEqual(output1_result[i], expected_images[i][2])\n self.assertEqual(\n (output2_result[i] - 
expected_images[i][3] > 0).sum(), 0)\n            # End for\n        # End validator\n        run_test(\n            size_tuple, means, stds, label_type, num_labels, is_test,\n            scale_jitter_type, color_jitter, color_lighting, dc,\n            validator, output1, output2_size)\n    # End test_imageinput_with_additional_outputs\n\n\nif __name__ == '__main__':\n    unittest.main()\n",
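# Side note (annotation between entries): verify_crop in the entry above centers
# the crop window with integer offsets; here is a minimal standalone sketch of
# that arithmetic, using an illustrative 5x6 image (not a value from the test).
import numpy as np

def center_crop(img, crop):
    # integer division biases the window toward the top-left on odd leftovers
    y0 = (img.shape[0] - crop) // 2
    x0 = (img.shape[1] - crop) // 2
    return img[y0:y0 + crop, x0:x0 + crop]

img = np.arange(5 * 6 * 3).reshape(5, 6, 3)
assert center_crop(img, 3).shape[:2] == (3, 3)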
"import os\nimport unittest\n\nimport torch.testing._internal.common_utils as common\nfrom torch.testing._internal.common_utils import IS_WINDOWS\nfrom torch.testing._internal.common_cuda import TEST_CUDA\nimport torch\nimport torch.backends.cudnn\nimport torch.utils.cpp_extension\n\ntry:\n import torch_test_cpp_extension.cpp as cpp_extension\n import torch_test_cpp_extension.msnpu as msnpu_extension\n import torch_test_cpp_extension.rng as rng_extension\nexcept ImportError:\n raise RuntimeError(\n \"test_cpp_extensions_aot.py cannot be invoked directly. Run \"\n \"`python run_test.py -i test_cpp_extensions_aot_ninja` instead.\"\n )\n\n\nclass TestCppExtensionAOT(common.TestCase):\n \"\"\"Tests ahead-of-time cpp extensions\n\n NOTE: run_test.py's test_cpp_extensions_aot_ninja target\n also runs this test case, but with ninja enabled. If you are debugging\n a test failure here from the CI, check the logs for which target\n (test_cpp_extensions_aot_no_ninja vs test_cpp_extensions_aot_ninja)\n failed.\n \"\"\"\n\n def test_extension_function(self):\n x = torch.randn(4, 4)\n y = torch.randn(4, 4)\n z = cpp_extension.sigmoid_add(x, y)\n self.assertEqual(z, x.sigmoid() + y.sigmoid())\n\n def test_extension_module(self):\n mm = cpp_extension.MatrixMultiplier(4, 8)\n weights = torch.rand(8, 4, dtype=torch.double)\n expected = mm.get().mm(weights)\n result = mm.forward(weights)\n self.assertEqual(expected, result)\n\n def test_backward(self):\n mm = cpp_extension.MatrixMultiplier(4, 8)\n weights = torch.rand(8, 4, dtype=torch.double, requires_grad=True)\n result = mm.forward(weights)\n result.sum().backward()\n tensor = mm.get()\n\n expected_weights_grad = tensor.t().mm(torch.ones([4, 4], dtype=torch.double))\n self.assertEqual(weights.grad, expected_weights_grad)\n\n expected_tensor_grad = torch.ones([4, 4], dtype=torch.double).mm(weights.t())\n self.assertEqual(tensor.grad, expected_tensor_grad)\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA not found\")\n def test_cuda_extension(self):\n import torch_test_cpp_extension.cuda as cuda_extension\n\n x = torch.zeros(100, device=\"cuda\", dtype=torch.float32)\n y = torch.zeros(100, device=\"cuda\", dtype=torch.float32)\n\n z = cuda_extension.sigmoid_add(x, y).cpu()\n\n # 2 * sigmoid(0) = 2 * 0.5 = 1\n self.assertEqual(z, torch.ones_like(z))\n\n @unittest.skipIf(IS_WINDOWS, \"Not available on Windows\")\n def test_no_python_abi_suffix_sets_the_correct_library_name(self):\n # For this test, run_test.py will call `python setup.py install` in the\n # cpp_extensions/no_python_abi_suffix_test folder, where the\n # `BuildExtension` class has a `no_python_abi_suffix` option set to\n # `True`. This *should* mean that on Python 3, the produced shared\n # library does not have an ABI suffix like\n # \"cpython-37m-x86_64-linux-gnu\" before the library suffix, e.g. 
\"so\".\n root = os.path.join(\"cpp_extensions\", \"no_python_abi_suffix_test\", \"build\")\n matches = [f for _, _, fs in os.walk(root) for f in fs if f.endswith(\"so\")]\n self.assertEqual(len(matches), 1, msg=str(matches))\n self.assertEqual(matches[0], \"no_python_abi_suffix_test.so\", msg=str(matches))\n\n def test_optional(self):\n has_value = cpp_extension.function_taking_optional(torch.ones(5))\n self.assertTrue(has_value)\n has_value = cpp_extension.function_taking_optional(None)\n self.assertFalse(has_value)\n\n\nclass TestMSNPUTensor(common.TestCase):\n def test_unregistered(self):\n a = torch.arange(0, 10, device='cpu')\n with self.assertRaisesRegex(RuntimeError, \"Could not run\"):\n b = torch.arange(0, 10, device='msnpu')\n\n def test_zeros(self):\n a = torch.empty(5, 5, device='cpu')\n self.assertEqual(a.device, torch.device('cpu'))\n\n b = torch.empty(5, 5, device='msnpu')\n self.assertEqual(b.device, torch.device('msnpu', 0))\n self.assertEqual(msnpu_extension.get_test_int(), 0)\n self.assertEqual(torch.get_default_dtype(), b.dtype)\n\n c = torch.empty((5, 5), dtype=torch.int64, device='msnpu')\n self.assertEqual(msnpu_extension.get_test_int(), 0)\n self.assertEqual(torch.int64, c.dtype)\n\n def test_add(self):\n a = torch.empty(5, 5, device='msnpu', requires_grad=True)\n self.assertEqual(msnpu_extension.get_test_int(), 0)\n\n b = torch.empty(5, 5, device='msnpu')\n self.assertEqual(msnpu_extension.get_test_int(), 0)\n\n c = a + b\n self.assertEqual(msnpu_extension.get_test_int(), 1)\n\n def test_conv_backend_override(self):\n # To simplify tests, we use 4d input here to avoid doing view4d( which\n # needs more overrides) in _convolution.\n input = torch.empty(2, 4, 10, 2, device='msnpu', requires_grad=True)\n weight = torch.empty(6, 4, 2, 2, device='msnpu', requires_grad=True)\n bias = torch.empty(6, device='msnpu')\n\n # Make sure forward is overriden\n out = torch.nn.functional.conv1d(input, weight, bias, 2, 0, 1, 1)\n self.assertEqual(msnpu_extension.get_test_int(), 2)\n self.assertEqual(out.shape[0], input.shape[0])\n self.assertEqual(out.shape[1], weight.shape[0])\n\n # Make sure backward is overriden\n # Double backward is dispatched to _convolution_double_backward.\n # It is not tested here as it involves more computation/overrides.\n grad = torch.autograd.grad(out, input, out, create_graph=True)\n self.assertEqual(msnpu_extension.get_test_int(), 3)\n self.assertEqual(grad[0].shape, input.shape)\n\n\nclass TestRNGExtension(common.TestCase):\n\n def setUp(self):\n super(TestRNGExtension, self).setUp()\n\n def test_rng(self):\n fourty_two = torch.full((10,), 42, dtype=torch.int64)\n\n t = torch.empty(10, dtype=torch.int64).random_()\n self.assertNotEqual(t, fourty_two)\n\n gen = torch.Generator(device='cpu')\n t = torch.empty(10, dtype=torch.int64).random_(generator=gen)\n self.assertNotEqual(t, fourty_two)\n\n self.assertEqual(rng_extension.getInstanceCount(), 0)\n gen = rng_extension.createTestCPUGenerator(42)\n self.assertEqual(rng_extension.getInstanceCount(), 1)\n copy = gen\n self.assertEqual(rng_extension.getInstanceCount(), 1)\n self.assertEqual(gen, copy)\n copy2 = rng_extension.identity(copy)\n self.assertEqual(rng_extension.getInstanceCount(), 1)\n self.assertEqual(gen, copy2)\n t = torch.empty(10, dtype=torch.int64).random_(generator=gen)\n self.assertEqual(rng_extension.getInstanceCount(), 1)\n self.assertEqual(t, fourty_two)\n del gen\n self.assertEqual(rng_extension.getInstanceCount(), 1)\n del copy\n self.assertEqual(rng_extension.getInstanceCount(), 
1)\n        del copy2\n        self.assertEqual(rng_extension.getInstanceCount(), 0)\n\n\nif __name__ == \"__main__\":\n    common.run_tests()\n",
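# Side note (annotation between entries): the entry above tests extensions built
# ahead of time with setup.py; for contrast, a minimal sketch of the inline JIT
# route via torch.utils.cpp_extension.load_inline. It assumes a C++ toolchain is
# available at runtime, and the extension name here is made up for illustration.
import torch
from torch.utils.cpp_extension import load_inline

cpp_source = """
torch::Tensor sigmoid_add(torch::Tensor x, torch::Tensor y) {
  return x.sigmoid() + y.sigmoid();
}
"""

# load_inline adds the torch/extension.h include and the pybind glue itself
ext = load_inline(name="sigmoid_add_inline",
                  cpp_sources=cpp_source,
                  functions=["sigmoid_add"])
x, y = torch.randn(4, 4), torch.randn(4, 4)
assert torch.allclose(ext.sigmoid_add(x, y), x.sigmoid() + y.sigmoid())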
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.python import core\nfrom hypothesis import given, settings\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\n\n\nclass TestFcOperator(hu.HypothesisTestCase):\n\n @given(n=st.integers(1, 10), k=st.integers(1, 5),\n use_length=st.booleans(), **hu.gcs_cpu_only)\n @settings(deadline=1000)\n def test_sparse_to_dense_mask(self, n, k, use_length, gc, dc):\n lengths = np.random.randint(k, size=n).astype(np.int32) + 1\n N = sum(lengths)\n indices = np.random.randint(5, size=N)\n values = np.random.rand(N, 2).astype(np.float32)\n default = np.random.rand(2).astype(np.float32)\n mask = np.arange(3)\n np.random.shuffle(mask)\n\n input_str = ['indices', 'values', 'default']\n input_data = [indices, values, default]\n if use_length and n > 1:\n input_str.append('lengths')\n input_data.append(lengths)\n output_str = ['output']\n\n op = core.CreateOperator(\n 'SparseToDenseMask',\n input_str,\n output_str,\n mask=mask,\n )\n\n # Check over multiple devices\n self.assertDeviceChecks(\n dc, op, input_data, [0])\n # Gradient check for values\n self.assertGradientChecks(\n gc, op, input_data, 1, [0])\n\n @given(n=st.integers(1, 10), k=st.integers(1, 5),\n use_length=st.booleans(), **hu.gcs_cpu_only)\n @settings(deadline=1000)\n def test_sparse_to_dense_mask_with_int64(self, n, k, use_length, gc, dc):\n lengths = np.random.randint(k, size=n).astype(np.int32) + 1\n N = sum(lengths)\n int64_mask = 10000000000\n indices = np.random.randint(5, size=N) + int64_mask\n values = np.random.rand(N, 2).astype(np.float32)\n default = np.random.rand(2).astype(np.float32)\n mask = np.arange(3) + int64_mask\n np.random.shuffle(mask)\n\n input_str = ['indices', 'values', 'default']\n input_data = [indices, values, default]\n if use_length and n > 1:\n input_str.append('lengths')\n input_data.append(lengths)\n output_str = ['output']\n\n op = core.CreateOperator(\n 'SparseToDenseMask',\n input_str,\n output_str,\n mask=mask,\n )\n\n # Check over multiple devices\n self.assertDeviceChecks(\n dc, op, input_data, [0])\n # Gradient check for values\n self.assertGradientChecks(\n gc, op, input_data, 1, [0])\n\n @given(n=st.integers(1, 10), k=st.integers(1, 5),\n dim=st.integers(1, 3), **hu.gcs_cpu_only)\n @settings(deadline=10000)\n def test_sparse_to_dense_mask_high_dim(self, n, k, dim, gc, dc):\n lengths = np.random.randint(k, size=n).astype(np.int32) + 1\n N = sum(lengths)\n indices = np.random.randint(5, size=N)\n shape = np.random.randint(5, size=dim).astype(np.int32) + 1\n values = np.random.rand(*((N,) + tuple(shape))).astype(np.float32)\n default = np.random.rand(*shape).astype(np.float32)\n mask = np.arange(3)\n np.random.shuffle(mask)\n\n op = core.CreateOperator(\n 'SparseToDenseMask',\n ['indices', 'values', 'default', 'lengths'],\n ['output'],\n mask=mask,\n )\n\n # Check over multiple devices\n self.assertDeviceChecks(\n dc, op, [indices, values, default, lengths], [0])\n # Gradient check for values\n self.assertGradientChecks(\n gc, op, [indices, values, default, lengths], 1, [0])\n\n\nif __name__ == \"__main__\":\n import unittest\n unittest.main()\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.python import core, workspace\nfrom hypothesis import given, settings\n\nimport caffe2.python.hypothesis_test_util as hu\nimport caffe2.python.serialized_test.serialized_test_util as serial\nimport hypothesis.strategies as st\nimport numpy as np\nimport itertools as it\nimport unittest\n\n\nclass TestReduceOps(serial.SerializedTestCase):\n def run_reduce_op_test_impl(\n self, op_name, X, axes, keepdims, ref_func, gc, dc):\n if axes is None:\n op = core.CreateOperator(\n op_name,\n [\"X\"],\n [\"Y\"],\n keepdims=keepdims,\n )\n else:\n op = core.CreateOperator(\n op_name,\n [\"X\"],\n [\"Y\"],\n axes=axes,\n keepdims=keepdims,\n )\n\n def ref(X):\n return [ref_func(\n X, axis=None if axes is None else tuple(axes),\n keepdims=keepdims)]\n\n self.assertReferenceChecks(gc, op, [X], ref)\n self.assertDeviceChecks(dc, op, [X], [0])\n self.assertGradientChecks(gc, op, [X], 0, [0])\n\n def run_reduce_op_test(\n self, op_name, X, keepdims, num_axes, ref_func, gc, dc):\n self.run_reduce_op_test_impl(\n op_name, X, None, keepdims, ref_func, gc, dc)\n\n num_dims = len(X.shape)\n if num_dims < num_axes:\n self.run_reduce_op_test_impl(\n op_name, X, range(num_dims), keepdims, ref_func, gc, dc)\n else:\n for axes in it.combinations(range(num_dims), num_axes):\n self.run_reduce_op_test_impl(\n op_name, X, axes, keepdims, ref_func, gc, dc)\n\n @serial.given(\n X=hu.tensor(max_dim=3, dtype=np.float32), keepdims=st.booleans(),\n num_axes=st.integers(1, 3), **hu.gcs)\n def test_reduce_min(self, X, keepdims, num_axes, gc, dc):\n X_dims = X.shape\n X_size = X.size\n X = np.arange(X_size, dtype=np.float32)\n np.random.shuffle(X)\n X = X.reshape(X_dims)\n self.run_reduce_op_test(\n \"ReduceMin\", X, keepdims, num_axes, np.min, gc, dc)\n\n @serial.given(\n X=hu.tensor(max_dim=3, dtype=np.float32), keepdims=st.booleans(),\n num_axes=st.integers(1, 3), **hu.gcs)\n def test_reduce_max(self, X, keepdims, num_axes, gc, dc):\n X_dims = X.shape\n X_size = X.size\n X = np.arange(X_size, dtype=np.float32)\n np.random.shuffle(X)\n X = X.reshape(X_dims)\n self.run_reduce_op_test(\n \"ReduceMax\", X, keepdims, num_axes, np.max, gc, dc)\n\n @given(n=st.integers(0, 5), m=st.integers(0, 5), k=st.integers(0, 5),\n t=st.integers(0, 5), keepdims=st.booleans(),\n num_axes=st.integers(1, 3), **hu.gcs)\n @settings(deadline=10000)\n def test_reduce_sum(self, n, m, k, t, keepdims, num_axes, gc, dc):\n X = np.random.randn(n, m, k, t).astype(np.float32)\n self.run_reduce_op_test(\n \"ReduceSum\", X, keepdims, num_axes, np.sum, gc, dc)\n\n @serial.given(X=hu.tensor(dtype=np.float32), keepdims=st.booleans(),\n num_axes=st.integers(1, 4), **hu.gcs)\n def test_reduce_mean(self, X, keepdims, num_axes, gc, dc):\n self.run_reduce_op_test(\n \"ReduceMean\", X, keepdims, num_axes, np.mean, gc, dc)\n\n @given(n=st.integers(1, 3), m=st.integers(1, 3), k=st.integers(1, 3),\n keepdims=st.booleans(), num_axes=st.integers(1, 3), **hu.gcs_cpu_only)\n @settings(deadline=1000)\n def test_reduce_l1(self, n, m, k, keepdims, num_axes, gc, dc):\n X = np.arange(n * m * k, dtype=np.float32) - 0.5\n np.random.shuffle(X)\n X = X.reshape((m, n, k))\n self.run_reduce_op_test(\n \"ReduceL1\", X, keepdims, num_axes, getNorm(1), gc, dc)\n\n @serial.given(n=st.integers(1, 5), m=st.integers(1, 5), k=st.integers(1, 5),\n keepdims=st.booleans(), num_axes=st.integers(1, 3), **hu.gcs_cpu_only)\n def 
test_reduce_l2(self, n, m, k, keepdims, num_axes, gc, dc):\n X = np.random.randn(n, m, k).astype(np.float32)\n self.run_reduce_op_test(\n \"ReduceL2\", X, keepdims, num_axes, getNorm(2), gc, dc)\n\n\ndef getNorm(p):\n if p == 1:\n def norm(X, axis, keepdims):\n return np.sum(np.abs(X), axis=axis, keepdims=keepdims)\n elif p == 2:\n def norm(X, axis, keepdims):\n return np.sqrt(np.sum(np.power(X, 2), axis=axis, keepdims=keepdims))\n else:\n raise RuntimeError(\"Only L1 and L2 norms supported\")\n return norm\n\n\nclass TestReduceFrontReductions(serial.SerializedTestCase):\n def grad_variant_input_test(self, grad_op_name, X, ref, num_reduce_dim):\n workspace.ResetWorkspace()\n\n Y = np.array(ref(X)[0]).astype(np.float32)\n dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)\n shape = np.array(X.shape).astype(np.int64)\n\n workspace.FeedBlob(\"X\", X)\n workspace.FeedBlob(\"dY\", dY)\n workspace.FeedBlob(\"shape\", shape)\n\n grad_op = core.CreateOperator(\n grad_op_name, [\"dY\", \"X\"], [\"dX\"], num_reduce_dim=num_reduce_dim)\n\n grad_op1 = core.CreateOperator(\n grad_op_name, [\"dY\", \"shape\"], [\"dX1\"],\n num_reduce_dim=num_reduce_dim)\n\n workspace.RunOperatorOnce(grad_op)\n workspace.RunOperatorOnce(grad_op1)\n\n dX = workspace.FetchBlob(\"dX\")\n dX1 = workspace.FetchBlob(\"dX1\")\n np.testing.assert_array_equal(dX, dX1)\n\n def max_op_test(\n self, op_name, num_reduce_dim, gc, dc, in_data, in_names, ref_max):\n\n op = core.CreateOperator(\n op_name,\n in_names,\n [\"outputs\"],\n num_reduce_dim=num_reduce_dim\n )\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=in_data,\n reference=ref_max,\n )\n\n # Skip gradient check because it is too unreliable with max.\n # Just check CPU and CUDA have same results\n Y = np.array(ref_max(*in_data)[0]).astype(np.float32)\n dY = np.array(np.random.rand(*Y.shape)).astype(np.float32)\n if len(in_data) == 2:\n grad_in_names = [\"dY\", in_names[0], \"Y\", in_names[1]]\n grad_in_data = [dY, in_data[0], Y, in_data[1]]\n else:\n grad_in_names = [\"dY\", in_names[0], \"Y\"]\n grad_in_data = [dY, in_data[0], Y]\n\n grad_op = core.CreateOperator(\n op_name + \"Gradient\",\n grad_in_names,\n [\"dX\"],\n num_reduce_dim=num_reduce_dim\n )\n self.assertDeviceChecks(dc, grad_op, grad_in_data, [0])\n\n def reduce_op_test(self, op_name, op_ref, in_data, in_names,\n num_reduce_dims, device):\n op = core.CreateOperator(\n op_name,\n in_names,\n [\"outputs\"],\n num_reduce_dim=num_reduce_dims\n )\n\n self.assertReferenceChecks(\n device_option=device,\n op=op,\n inputs=in_data,\n reference=op_ref\n )\n\n self.assertGradientChecks(\n device, op, in_data, 0, [0], stepsize=1e-2, threshold=1e-2)\n\n @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n @settings(deadline=10000)\n def test_reduce_front_sum(self, num_reduce_dim, gc, dc):\n X = np.random.rand(7, 4, 3, 5).astype(np.float32)\n\n def ref_sum(X):\n return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]\n\n self.reduce_op_test(\n \"ReduceFrontSum\", ref_sum, [X], [\"input\"], num_reduce_dim, gc)\n self.grad_variant_input_test(\n \"ReduceFrontSumGradient\", X, ref_sum, num_reduce_dim)\n\n @given(num_reduce_dim=st.integers(0, 4), seed=st.integers(0, 4), **hu.gcs)\n def test_reduce_front_sum_empty_batch(self, num_reduce_dim, seed, gc, dc):\n np.random.seed(seed)\n X = np.random.rand(0, 4, 3, 5).astype(np.float32)\n\n def ref_sum(X):\n return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]\n\n self.reduce_op_test(\n \"ReduceFrontSum\", ref_sum, [X], [\"input\"], num_reduce_dim, gc)\n 
self.grad_variant_input_test(\n \"ReduceFrontSumGradient\", X, ref_sum, num_reduce_dim)\n\n # test the second iteration\n not_empty_X = np.random.rand(2, 4, 3, 5).astype(np.float32)\n net = core.Net('test')\n with core.DeviceScope(gc):\n net.ReduceFrontSum(\n ['X'], ['output'],\n num_reduce_dim=num_reduce_dim\n )\n workspace.CreateNet(net)\n\n workspace.FeedBlob('X', not_empty_X)\n workspace.RunNet(workspace.GetNetName(net))\n output = workspace.FetchBlob('output')\n np.testing.assert_allclose(\n output, ref_sum(not_empty_X)[0], atol=1e-3)\n\n workspace.FeedBlob('X', X)\n workspace.RunNet(workspace.GetNetName(net))\n output = workspace.FetchBlob('output')\n np.testing.assert_allclose(output, ref_sum(X)[0], atol=1e-3)\n\n @given(**hu.gcs)\n @settings(deadline=1000)\n def test_reduce_front_sum_with_length(self, dc, gc):\n num_reduce_dim = 1\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))\n d = 120 // batch_size\n lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)\n\n def ref_sum(X, lengths):\n Y = X.reshape(d, lengths.size)\n rv = np.zeros((lengths.size, 1)).astype(np.float32)\n for ii in range(lengths.size):\n rv[ii] = np.sum(Y[:lengths[ii], ii])\n return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]\n\n self.reduce_op_test(\n \"ReduceFrontSum\", ref_sum, [X, lengths], [\"input\", \"lengths\"],\n num_reduce_dim, gc)\n\n @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n @settings(deadline=10000)\n def test_reduce_front_mean(self, num_reduce_dim, gc, dc):\n X = np.random.rand(6, 7, 8, 2).astype(np.float32)\n\n def ref_mean(X):\n return [np.mean(X, axis=(tuple(range(num_reduce_dim))))]\n\n self.reduce_op_test(\n \"ReduceFrontMean\", ref_mean, [X], [\"input\"], num_reduce_dim, gc)\n self.grad_variant_input_test(\n \"ReduceFrontMeanGradient\", X, ref_mean, num_reduce_dim)\n\n @given(**hu.gcs)\n @settings(deadline=1000)\n def test_reduce_front_mean_with_length(self, dc, gc):\n num_reduce_dim = 1\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))\n d = 120 // batch_size\n lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)\n\n def ref_mean(X, lengths):\n Y = X.reshape(d, lengths.size)\n rv = np.zeros((lengths.size, 1)).astype(np.float32)\n for ii in range(lengths.size):\n rv[ii] = np.mean(Y[:lengths[ii], ii])\n return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]\n\n self.reduce_op_test(\n \"ReduceFrontMean\", ref_mean, [X, lengths], [\"input\", \"lengths\"],\n num_reduce_dim, gc)\n\n @serial.given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n def test_reduce_front_max(self, num_reduce_dim, gc, dc):\n X = np.random.rand(6, 7, 8, 2).astype(np.float32)\n\n def ref_frontmax(X):\n return [np.max(X, axis=(tuple(range(num_reduce_dim))))]\n\n self.max_op_test(\n \"ReduceFrontMax\", num_reduce_dim, gc, dc, [X], [\"X\"], ref_frontmax)\n\n @given(**hu.gcs)\n def test_reduce_front_max_with_length(self, dc, gc):\n num_reduce_dim = 1\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n batch_size = int(np.prod([2, 3, 4, 5][num_reduce_dim:]))\n d = 120 // batch_size\n lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)\n\n def ref_max(X, lengths):\n Y = X.reshape(d, lengths.size)\n rv = np.zeros((lengths.size, 1)).astype(np.float32)\n for ii in range(lengths.size):\n rv[ii] = np.max(Y[:lengths[ii], ii])\n return [rv.reshape((2, 3, 4, 5)[num_reduce_dim:])]\n\n self.max_op_test(\n \"ReduceFrontMax\", num_reduce_dim, gc, dc, [X, lengths],\n 
[\"X\", \"lengths\"], ref_max)\n\n @serial.given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n def test_reduce_back_max(self, num_reduce_dim, gc, dc):\n X = np.random.rand(6, 7, 8, 2).astype(np.float32)\n\n def ref_backmax(X):\n return [np.max(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]\n\n self.max_op_test(\n \"ReduceBackMax\", num_reduce_dim, gc, dc, [X], [\"X\"], ref_backmax)\n\n @given(**hu.gcs)\n def test_reduce_back_max_with_length(self, gc, dc):\n num_reduce_dim = 1\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))\n d = 120 // batch_size\n lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)\n\n def ref_max(X, lengths):\n Y = X.reshape(lengths.size, d)\n rv = np.zeros((lengths.size, 1)).astype(np.float32)\n for ii in range(lengths.size):\n rv[ii] = np.max(Y[ii, :lengths[ii]])\n return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]\n\n self.max_op_test(\n \"ReduceBackMax\", num_reduce_dim, gc, dc, [X, lengths],\n [\"X\", \"lengths\"], ref_max)\n\n @given(**hu.gcs)\n @settings(deadline=10000)\n def test_reduce_back_sum(self, dc, gc):\n num_reduce_dim = 1\n X = np.random.rand(6, 7, 8, 2).astype(np.float32)\n\n def ref_sum(X):\n return [np.sum(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]\n\n self.reduce_op_test(\n \"ReduceBackSum\", ref_sum, [X], [\"input\"], num_reduce_dim, gc)\n self.grad_variant_input_test(\n \"ReduceBackSumGradient\", X, ref_sum, num_reduce_dim)\n\n @given(**hu.gcs)\n @settings(deadline=10000)\n def test_reduce_back_sum_with_length(self, dc, gc):\n num_reduce_dim = 1\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))\n d = 120 // batch_size\n lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)\n\n def ref_sum(X, lengths):\n Y = X.reshape(lengths.size, d)\n rv = np.zeros((lengths.size, 1)).astype(np.float32)\n for ii in range(lengths.size):\n rv[ii] = np.sum(Y[ii, :lengths[ii]])\n return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]\n\n self.reduce_op_test(\n \"ReduceBackSum\", ref_sum, [X, lengths], [\"input\", \"lengths\"],\n num_reduce_dim, gc)\n\n @given(num_reduce_dim=st.integers(0, 4), **hu.gcs)\n @settings(deadline=10000)\n def test_reduce_back_mean(self, num_reduce_dim, dc, gc):\n X = np.random.rand(6, 7, 8, 2).astype(np.float32)\n\n def ref_mean(X):\n return [np.mean(X, axis=(0, 1, 2, 3)[4 - num_reduce_dim:])]\n\n self.reduce_op_test(\n \"ReduceBackMean\", ref_mean, [X], [\"input\"], num_reduce_dim, gc)\n self.grad_variant_input_test(\n \"ReduceBackMeanGradient\", X, ref_mean, num_reduce_dim)\n\n @given(**hu.gcs)\n @settings(deadline=1000)\n def test_reduce_back_mean_with_length(self, dc, gc):\n num_reduce_dim = 1\n X = np.random.rand(2, 3, 4, 5).astype(np.float32)\n batch_size = int(np.prod([2, 3, 4, 5][:4 - num_reduce_dim]))\n d = 120 // batch_size\n lengths = np.random.randint(1, d, size=batch_size).astype(np.int32)\n\n def ref_mean(X, lengths):\n Y = X.reshape(lengths.size, d)\n rv = np.zeros((lengths.size, 1)).astype(np.float32)\n for ii in range(lengths.size):\n rv[ii] = np.mean(Y[ii, :lengths[ii]])\n return [rv.reshape((2, 3, 4, 5)[:4 - num_reduce_dim])]\n\n self.reduce_op_test(\n \"ReduceBackMean\", ref_mean, [X, lengths], [\"input\", \"lengths\"],\n num_reduce_dim, gc)\n",
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport caffe2.python.fakelowp.init_shared_libs # noqa\nimport numpy as np\nfrom caffe2.python import core, workspace\nfrom caffe2.python.onnx.onnxifi import onnxifi_caffe2_net\nfrom hypothesis import given, strategies as st, settings\nfrom caffe2.python.fakelowp.test_utils import print_test_debug_info\nimport caffe2.python.serialized_test.serialized_test_util as serial\n\ncore.GlobalInit([\"caffe2\", \"--caffe2_log_level=-3\", \"--glow_global_fp16=1\"])\n\n\nclass Int8OpsTest(serial.SerializedTestCase):\n def _get_scale_zp(self, tensor):\n tensor_max = np.max(tensor)\n tensor_min = min(0, np.min(tensor))\n scale = np.float32(np.float16((tensor_max - tensor_min) / 255.0))\n if scale < 1e-6:\n scale = 1e-6\n zero_point = 0 - tensor_min / scale\n zero_point = int(round(np.clip(zero_point, 0, 255.0)))\n return (scale, zero_point)\n\n @settings(max_examples=30, deadline=None)\n @given(\n n=st.integers(2, 1024),\n rand_seed=st.integers(0, 65534),\n non_zero_offset=st.booleans()\n )\n def test_int8_quantize(self, n, rand_seed, non_zero_offset):\n print(\"n={}, rand_seed={}\".format(n, rand_seed))\n np.random.seed(rand_seed)\n workspace.ResetWorkspace()\n\n if non_zero_offset:\n X_fp32 = np.random.uniform(-1, 1, size=(n, n)).astype(np.float16) \\\n .astype(np.float32)\n else:\n X_fp32 = np.random.rand(n, n).astype(np.float16).astype(np.float32)\n\n W_fp32 = np.identity(n, dtype=np.float32)\n b_fp32 = np.zeros((n,), dtype=np.float32)\n\n X_scale, X_zero_point = self._get_scale_zp(X_fp32)\n\n workspace.FeedBlob(\"X\", X_fp32)\n workspace.FeedBlob(\"W\", W_fp32)\n workspace.FeedBlob(\"b\", b_fp32)\n\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"Int8FCPackWeight\",\n [\"W\"],\n [\"W_int8\"],\n engine=\"DNNLOWP\",\n save_unpacked_weights=True,\n in_scale=X_scale,\n )\n )\n\n ref_net = core.Net(\"net\")\n ref_net.Int8QuantizeNNPI(\n [\"X\"],\n [\"X_int8\"],\n Y_scale=X_scale,\n Y_zero_point=X_zero_point\n )\n ref_net.Int8FCFakeAcc32NNPI(\n [\"X_int8\", \"W_int8\", \"b\"],\n [\"Y_int8\"],\n Y_scale=X_scale,\n Y_zero_point=X_zero_point,\n )\n ref_net.Int8DequantizeNNPI(\n [\"Y_int8\"],\n [\"Y\"]\n )\n ref_net.Proto().external_output.append(\"Y\")\n\n # run ref_net\n workspace.RunNetOnce(ref_net)\n Y_fbgemm = workspace.FetchBlob(\"Y\")\n\n # run onnxifi net\n ref_net.Proto().op[0].type = \"Int8Quantize\"\n ref_net.Proto().op[1].type = \"Int8FC\"\n ref_net.Proto().op[2].type = \"Int8Dequantize\"\n net_onnxified = onnxifi_caffe2_net(\n ref_net.Proto(),\n {},\n debug=True,\n adjust_batch=False,\n use_onnx=False,\n weight_names=[\"W_int8\", \"b\"],\n )\n num_onnxified_ops = sum(\n 1 if o.type == \"Onnxifi\" else 0 for o in net_onnxified.op\n )\n np.testing.assert_equal(num_onnxified_ops, 1)\n workspace.CreateNet(net_onnxified)\n workspace.RunNet(net_onnxified.name)\n Y_glow = workspace.FetchBlob(\"Y\")\n\n if not np.allclose(Y_glow, Y_fbgemm):\n diff_Y = np.abs(Y_glow - Y_fbgemm)\n print_test_debug_info(\n \"int8_fc\",\n {\n \"seed\": rand_seed,\n \"n\": n,\n \"X\": X_fp32,\n \"W\": W_fp32,\n \"b\": b_fp32,\n \"Y_fbgemm\": Y_fbgemm,\n \"Y_glow\": Y_glow,\n \"diff\": diff_Y,\n \"maxdiff\": diff_Y.max(axis=1),\n },\n )\n assert 0\n\n @given(\n n=st.integers(1, 1024),\n m=st.integers(1, 1024),\n k=st.integers(1, 1024),\n f=st.integers(1, 1), # TODO: figure a safe number to increase\n rand_seed=st.integers(0, 65534),\n quantize_bias=st.sampled_from([False]),\n )\n @settings(deadline=None, max_examples=30)\n def 
test_int8_fc(\n self, n, m, k, rand_seed, quantize_bias, f\n ):\n print(\n f\"n={n}, m={m}, k={k}, rand_seed={rand_seed}, quantize_bias={quantize_bias}\"\n )\n np.random.seed(rand_seed)\n workspace.ResetWorkspace()\n\n ff = float(f)\n X_fp32 = np.random.uniform(-ff, ff, size=(m, k)).astype(np.float32)\n W_fp32 = np.random.uniform(-ff, ff, size=(n, k)).astype(np.float32)\n b_fp32 = np.random.uniform(-ff, ff, size=(n)).astype(np.float32)\n\n X_scale, X_zero_point = self._get_scale_zp(X_fp32)\n Y_fp32 = np.dot(X_fp32, W_fp32.T) + b_fp32\n Y_scale, Y_zero_point = self._get_scale_zp(Y_fp32)\n\n workspace.FeedBlob(\"X\", X_fp32)\n workspace.FeedBlob(\"W\", W_fp32)\n workspace.FeedBlob(\"b\", b_fp32)\n\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"Int8FCPackWeight\",\n [\"W\", \"b\"] if quantize_bias else [\"W\"],\n [\"W_int8\", \"b_int32\"] if quantize_bias else [\"W_int8\"],\n engine=\"DNNLOWP\",\n save_unpacked_weights=True,\n in_scale=X_scale,\n )\n )\n\n ref_net = core.Net(\"net\")\n ref_net.Int8QuantizeNNPI(\n [\"X\"],\n [\"X_int8\"],\n Y_scale=X_scale,\n Y_zero_point=X_zero_point\n )\n ref_net.Int8FCFakeAcc32NNPI(\n [\"X_int8\", \"W_int8\", \"b_int32\" if quantize_bias else \"b\"],\n [\"Y_int8\"],\n Y_scale=Y_scale,\n Y_zero_point=Y_zero_point,\n )\n ref_net.Int8DequantizeNNPI(\n [\"Y_int8\"],\n [\"Y\"]\n )\n ref_net.Proto().external_output.append(\"Y\")\n\n # run ref_net\n workspace.RunNetOnce(ref_net)\n Y_fbgemm = workspace.FetchBlob(\"Y\")\n\n # run onnxifi net\n ref_net.Proto().op[0].type = \"Int8Quantize\"\n ref_net.Proto().op[1].type = \"Int8FC\"\n ref_net.Proto().op[2].type = \"Int8Dequantize\"\n net_onnxified = onnxifi_caffe2_net(\n ref_net.Proto(),\n {},\n debug=True,\n adjust_batch=False,\n use_onnx=False,\n weight_names=[\"W_int8\", \"b_int32\"] if quantize_bias else [\"W_int8\", \"b\"],\n )\n num_onnxified_ops = sum(\n 1 if o.type == \"Onnxifi\" else 0 for o in net_onnxified.op\n )\n np.testing.assert_equal(num_onnxified_ops, 1)\n workspace.CreateNet(net_onnxified)\n workspace.RunNet(net_onnxified.name)\n Y_glow = workspace.FetchBlob(\"Y\")\n\n if not np.allclose(Y_glow, Y_fbgemm):\n diff_Y = np.abs(Y_glow - Y_fbgemm)\n print_test_debug_info(\n \"int8_fc\",\n {\n \"seed\": rand_seed,\n \"n\": n,\n \"m\": m,\n \"k\": k,\n \"X\": X_fp32,\n \"W\": W_fp32,\n \"b\": b_fp32,\n \"Y_fbgemm\": Y_fbgemm,\n \"Y_glow\": Y_glow,\n \"diff\": diff_Y,\n \"maxdiff\": diff_Y.max(axis=1),\n },\n )\n assert 0\n\n @given(\n n=st.integers(1, 4),\n rand_seed=st.integers(0, 65534)\n )\n @settings(max_examples=100, deadline=None)\n def test_int8_small_input(self, n, rand_seed):\n print(\"n={}, rand_seed={}\".format(n, rand_seed))\n np.random.seed(rand_seed)\n workspace.ResetWorkspace()\n\n X_fp32 = np.random.uniform(0.01, 0.03, size=(n, n)).astype(np.float32)\n W_fp32 = np.identity(n, dtype=np.float32)\n b_fp32 = np.zeros((n,), dtype=np.float32)\n\n X_scale, X_zero_point = self._get_scale_zp(X_fp32)\n\n workspace.FeedBlob(\"X\", X_fp32)\n workspace.FeedBlob(\"W\", W_fp32)\n workspace.FeedBlob(\"b\", b_fp32)\n\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"Int8FCPackWeight\",\n [\"W\"],\n [\"W_int8\"],\n engine=\"DNNLOWP\",\n save_unpacked_weights=True,\n in_scale=X_scale,\n )\n )\n\n ref_net = core.Net(\"net\")\n ref_net.Int8QuantizeNNPI(\n [\"X\"],\n [\"X_int8\"],\n Y_scale=X_scale,\n Y_zero_point=X_zero_point\n )\n ref_net.Int8FCFakeAcc32NNPI(\n [\"X_int8\", \"W_int8\", \"b\"],\n [\"Y_int8\"],\n Y_scale=X_scale,\n Y_zero_point=X_zero_point,\n )\n ref_net.Int8DequantizeNNPI(\n 
[\"Y_int8\"],\n [\"Y\"]\n )\n ref_net.Proto().external_output.append(\"Y\")\n\n # run ref_net\n workspace.RunNetOnce(ref_net)\n Y_fbgemm = workspace.FetchBlob(\"Y\")\n\n # run onnxifi net\n ref_net.Proto().op[0].type = \"Int8Quantize\"\n ref_net.Proto().op[1].type = \"Int8FC\"\n ref_net.Proto().op[2].type = \"Int8Dequantize\"\n net_onnxified = onnxifi_caffe2_net(\n ref_net.Proto(),\n {},\n debug=True,\n adjust_batch=False,\n use_onnx=False,\n weight_names=[\"W_int8\", \"b\"],\n )\n num_onnxified_ops = sum(\n 1 if o.type == \"Onnxifi\" else 0 for o in net_onnxified.op\n )\n np.testing.assert_equal(num_onnxified_ops, 1)\n workspace.CreateNet(net_onnxified)\n workspace.RunNet(net_onnxified.name)\n Y_glow = workspace.FetchBlob(\"Y\")\n\n if not np.allclose(Y_glow, Y_fbgemm):\n diff_Y = np.abs(Y_glow - Y_fbgemm)\n print_test_debug_info(\n \"int8_fc\",\n {\n \"seed\": rand_seed,\n \"n\": n,\n \"X\": X_fp32,\n \"W\": W_fp32,\n \"b\": b_fp32,\n \"Y_fbgemm\": Y_fbgemm,\n \"Y_glow\": Y_glow,\n \"diff\": diff_Y,\n \"maxdiff\": diff_Y.max(axis=1),\n },\n )\n assert 0\n",
"# Torch\nfrom torch.jit.annotations import BroadcastingList2, BroadcastingList3 # noqa: F401\nfrom torch.testing._internal.common_methods_invocations import non_differentiable, create_input, \\\n unpack_variables\nimport torch.nn.functional as F\nimport torch\nimport torch.cuda\nimport torch.jit\nimport torch.jit._logging\nimport torch.jit.frontend\nfrom torch.testing._internal.common_nn import module_tests, new_module_tests\nfrom copy import deepcopy\nimport math # noqa: F401\n\n# Testing utils\nfrom torch._six import inf\ntorch.set_default_dtype(torch.double)\n\nL = 20\nM = 10\nS = 5\n\n# NB: JIT script tests for all nn functional interfaces, script mode does\n# not support in_place operations yet, so no inplace operation tests added.\n# removed all the deprecated functions\n#\n# (\n# method name,\n# input size/constructing fn,\n# args (tuple represents shape of a tensor arg),\n# test variant name(will be used at test name suffix,\n# 'inplace' skips grad tests), // optional\n# (True, nonfusible_nodes, fusible_nodes) for autodiff // optional\n# fn to determine if test should be skipped, // optional\n# fn mapping output to part that should be gradcheck'ed, // optional\n# kwargs for function, // optional\n# )\nnn_functional_tests = [\n ('conv1d', (S, S, S), ((S, S, S),)),\n ('conv2d', (S, S, S, S), ((S, S, S, S),)),\n ('conv3d', (S, S, S, S, S), ((S, S, S, S, S),)),\n ('conv_transpose1d', (S, S, S), ((S, S, S),)),\n ('conv_transpose2d', (S, S, S, S), ((S, S, S, S),)),\n ('conv_transpose3d', (S, S, S, S, S), ((S, S, S, S, S),)),\n ('conv_tbc', (S, S, S), ((S, S, S), (S,), 2)),\n ('avg_pool1d', (S, S, S), (3,)),\n ('avg_pool2d', (S, S, S, S), (3,), '', (True,)),\n ('avg_pool3d', (S, S, S, S, S), (3,)),\n ('fractional_max_pool2d', (S, S, S, S), (3, [2, 3],)),\n ('max_pool1d', (S, S, S), (2, 1)),\n ('max_pool1d', (S, S, S), (2, 1, 1, 1, False, True), 'with_indices'),\n ('max_pool2d', (S, S, S, S), (2, 1), '', (True, 'aten::max_pool2d_with_indices')),\n ('max_pool2d', (S, S, S, S), (2, 1, 1, 1, False, True), 'with_indices', (True, 'aten::max_pool2d_with_indices')),\n ('max_pool3d', (S, S, S, S, S), (2, 1)),\n ('max_unpool1d', torch.tensor([[[2., 4]]]), (torch.tensor([[[1, 3]]]), 2, 2, 0)),\n ('max_unpool2d', torch.tensor([[[[2., 4]]]]), (torch.tensor([[[[1, 3]]]]), 2, 2, 0)),\n ('max_unpool3d', torch.tensor([[[[[2., 4]]]]]), (torch.tensor([[[[[1, 3]]]]]), 2, 2, 0)),\n ('lp_pool1d', (S, S, S), (2., 3, 2,)),\n ('lp_pool2d', (S, S, S, S), (2., 3, 2,)),\n ('adaptive_max_pool1d', (S, S, S), (5,)),\n ('adaptive_max_pool2d', (S, S, S, S), ([5, 7],)),\n ('adaptive_max_pool3d', (S, S, S, S, S), ([3, 2, 2],)),\n ('adaptive_avg_pool1d', (S, S, S), (5,), '', (True,)),\n ('adaptive_avg_pool2d', (S, S, S, S), ([5, 7],), '', (True,)),\n ('adaptive_avg_pool3d', (S, S, S, S, S), ([3, 2, 2],), '', (True,)),\n ('dropout', (S, S, S), (0.5,), '', (True,\n ['aten::bernoulli_',\n 'aten::empty_like', 'aten::mul', 'aten::div'])),\n ('alpha_dropout', (S, S, S), (0.5,)),\n ('dropout2d', (S, S, S), (0.5,)),\n ('dropout3d', (S, S, S), (0.5,)),\n ('feature_alpha_dropout', (S, S, S), (0.5,)),\n ('threshold', (S, S, S), (0.1, 2.), '', (True,)),\n ('threshold', (S, S, S), (0.1, 2., True), 'inplace'),\n ('relu', (S, S, S), (), '', (True,)),\n ('relu', (S, S, S), (), 'inplace'),\n ('glu', (S - 1, S - 1, S - 1), (),),\n ('hardtanh', (S, S, S), (-0.5, 0.5),),\n ('hardtanh', (S, S, S), (-0.5, 0.5, True), 'inplace'),\n ('relu6', (S, S, S), (),),\n ('relu6', (S, S, S), (True), 'inplace'),\n ('elu', (S, S, S), (0.9,),),\n ('elu', (S, 
S, S), (0.9, True), 'inplace'),\n ('selu', (S, S, S), (),),\n ('selu', (S, S, S), (True), 'inplace'),\n ('celu', (S, S, S), (0.9,),),\n ('celu', (S, S, S), (0.9, True), 'inplace'),\n ('leaky_relu', (S, S, S), (0.02,),),\n ('leaky_relu', (S, S, S), (0.02,), 'inplace'),\n ('rrelu', (S, S), (0.1, 0.3, False),),\n ('rrelu', (S, S), (0.1, 0.3, False, True), 'inplace'),\n ('hardshrink', (S, S, S), (0.4,),),\n ('tanhshrink', (S, S, S), (),),\n ('softsign', (S, S, S), (),),\n ('softplus', (S, S, S), (),),\n ('softmin', (S, S, S), (0,),),\n ('softmax', (S, S, S), (0,), '', (True,)),\n ('softmax', (S, S, S), (0, 3, torch.double), 'with_all_args', (True,)),\n ('tanh', (S, S, S), (), '', (True,)),\n ('sigmoid', (S, S, S), (), '', (True,)),\n ('log_softmax', (S, S, S), (0,), '', (True,)),\n ('linear', (S, S), ((M, S),), '', (True, ['aten::t', 'aten::matmul'])),\n ('linear', (S, S), ((M, S), (M,)), 'addmm', (True, ['aten::add', 'aten::mm'])),\n ('bilinear', (S, S, S), ((S, S, M), torch.zeros(M, S, M),),),\n ('embedding', torch.tensor([[1, 2, 4, 5], [4, 3, 2, 5]]), (torch.rand(6, 3), ), '', (True,)),\n ('embedding_bag', torch.tensor([1, 2, 4, 2]), (torch.rand(5, 3), torch.tensor([0, 4]),),),\n ('batch_norm', (S, S), (non_differentiable(torch.randn(S)), non_differentiable(torch.ones(S)), ),\n '', (False, 'aten::_batch_norm_impl_index')),\n ('instance_norm', (S, S, S), (non_differentiable(torch.zeros(S)), non_differentiable(torch.ones(S))),),\n ('layer_norm', (S, S, S, S), ([5],), '',\n (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),\n ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),), 'with_only_weight',\n (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),\n ('layer_norm', (S, S, S, S), ([5], None, non_differentiable(torch.rand(S)),), 'with_only_bias',\n (False, ['aten::contiguous', 'aten::_batch_norm_impl_index'])),\n ('layer_norm', (S, S, S, S), ([5], non_differentiable(torch.rand(S)),\n non_differentiable(torch.rand(S))), 'with_weight_and_bias',\n (False, ['aten::contiguous', 'aten::_batch_norm_impl_index', 'aten::addcmul'])),\n ('group_norm', (S, S, S), (1, torch.rand(5),),),\n ('local_response_norm', (S, S, S), (2, ),),\n ('nll_loss', F.log_softmax(torch.randn(3, 5), dim=0), (torch.tensor([1, 0, 4]),), '', (True, 'aten::nll_loss_forward')),\n ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2),),),\n ('poisson_nll_loss', torch.rand(S, 2), (torch.rand(S, 2), True, True), 'full'),\n ('kl_div', F.log_softmax(torch.randn(S, 10), 1), (F.softmax(torch.randn(S, 10), 1),),),\n ('cross_entropy', (3, S), (torch.randint(S, (3,), dtype=torch.int64),),),\n ('binary_cross_entropy_with_logits', (3,), (torch.empty(3).random_(2), ),),\n ('smooth_l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\n ('l1_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\n ('mse_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\n ('smooth_l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),\n ('l1_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),\n ('mse_loss', (3, S), ((torch.rand(3, S)),), 'with_grad'),\n ('margin_ranking_loss', (3, S), ((3, S), (S,)),),\n ('hinge_embedding_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\n ('soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\n ('multilabel_soft_margin_loss', (3, S), (non_differentiable(torch.rand(3, S)),),),\n ('cosine_embedding_loss', (S, S), ((S, S), non_differentiable(torch.rand(S,))),),\n ('pixel_shuffle', (1, 9, 4, 4), (3,),),\n ('affine_grid', (S, 2, 3), 
(torch.Size([S, 1, 7, 7]),),),\n ('pad', (3, 3, 4, 2), ([1, 1],),),\n ('pairwise_distance', (S, S), ((S, S),),),\n ('pdist', (S, S), (),),\n ('cosine_similarity', (S, S), ((S, S),),),\n ('triplet_margin_loss', (S, S), ((S, S), (S, S)),),\n ('normalize', (S, S, S), (),),\n ('unfold', (S, S, S, S), ([2, 3]),),\n ('fold', (1, 3 * 2 * 2, 12), ([4, 5], [2, 2]),),\n ('grid_sample', (S, S, S, S), (non_differentiable(torch.rand(S, S, S, 2)),),),\n ('gumbel_softmax', (S, S), (2.,), '', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),\n ('gumbel_softmax', (S, S), (2., True,), 'hard', (True, ['aten::softmax', 'aten::add', 'aten::div'], ['aten::neg'])),\n ('multilabel_margin_loss', torch.tensor([[0.2, -0.2, 0.07]]), (torch.tensor([[0, 0, 1]]),),),\n ('multi_margin_loss', (S, S), (non_differentiable(torch.randint(S, (S, ), dtype=torch.int64)),\n 1, 1., non_differentiable(torch.randn(S))),),\n ('binary_cross_entropy', torch.randn(3, 2).sigmoid(), (non_differentiable(torch.rand(3, 2)),\n non_differentiable(torch.randn(3, 2))),),\n ('binary_cross_entropy', torch.randn(3, 2).sigmoid(),\n (non_differentiable(torch.rand(3, 2)),\n non_differentiable(torch.randn(3, 2)), None, None, 'mean'), 'size_average'),\n ('ctc_loss', torch.rand(S, S, S).log_softmax(2).detach().requires_grad_(),\n (torch.randint(1, S, (S, S), dtype=torch.long), torch.full((S,), S, dtype=torch.long),\n torch.randint(1, S, (S,), dtype=torch.long))),\n ('upsample', torch.randn(S, S, M, M), (None, 2.), 'with_scale'),\n ('upsample', torch.randn(S, S, M, M), (4,), 'with_size'),\n ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'nearest_4d'),\n ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'nearest_4d_with_scale'),\n ('interpolate', torch.randn(S, S, M, M), (4,), 'nearest_4d_with_size'),\n ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'area_4d'),\n ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'area_4d_with_scale'),\n ('interpolate', torch.randn(S, S, M, M), (4,), 'area_4d_with_size'),\n ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bilinear_4d'),\n ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bilinear_4d_with_scale'),\n ('interpolate', torch.randn(S, S, M, M), (4,), 'bilinear_4d_with_size'),\n ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2,), 'bicubic_4d'),\n ('interpolate', torch.randn(S, S, M, M), (None, 2.), 'bicubic_4d_with_scale'),\n ('interpolate', torch.randn(S, S, M, M), (4,), 'bicubic_4d_with_size'),\n ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'nearest_3d'),\n ('interpolate', torch.randn(S, M, M), (None, 2.), 'nearest_3d_with_scale'),\n ('interpolate', torch.randn(S, M, M), (4,), 'nearest_3d_with_size'),\n ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'area_3d'),\n ('interpolate', torch.randn(S, M, M), (None, 2.), 'area_3d_with_scale'),\n ('interpolate', torch.randn(S, M, M), (4,), 'area_3d_with_size'),\n ('interpolate', torch.zeros(3, 3).view(1, 3, 3), (2,), 'linear_3d'),\n ('interpolate', torch.randn(S, M, M), (None, 2.), 'linear_3d_with_scale'),\n ('interpolate', torch.randn(S, M, M), (4,), 'linear_3d_with_size'),\n ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'nearest_5d_with_scale'),\n ('interpolate', torch.randn(S, M, M, M, M), (4,), 'nearest_5d_with_size'),\n ('interpolate', torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'area_5d'),\n ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'area_5d_with_scale'),\n ('interpolate', torch.randn(S, M, M, M, M), (4,), 'area_5d_with_size'),\n ('interpolate', 
torch.zeros(3, 3, 3).view(1, 1, 3, 3, 3), (2,), 'trilinear_5d'),\n    ('interpolate', torch.randn(S, M, M, M, M), (None, 2.), 'trilinear_5d_with_scale'),\n    ('interpolate', torch.randn(S, M, M, M, M), (4,), 'trilinear_5d_with_size'),\n    ('interpolate', torch.zeros(3, 3).view(1, 1, 3, 3), (2, None, 'nearest', None, False),\n     'nearest_4d_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, S, M, M), (4, None, 'nearest', None, False),\n     'nearest_4d_with_size_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bilinear', None, False),\n     'bilinear_4d_with_scale_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, S, M, M), (4, None, 'bilinear', None, False),\n     'bilinear_4d_with_size_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, S, M, M), (None, 2., 'bicubic', None, False),\n     'bicubic_4d_with_scale_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, S, M, M), (4, None, 'bicubic', None, False),\n     'bicubic_4d_with_size_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, M, M), (None, 2., 'nearest', None, False),\n     'nearest_3d_with_scale_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, M, M), (4, None, 'nearest', None, False),\n     'nearest_3d_with_size_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, M, M), (None, 2., 'linear', None, False),\n     'linear_3d_with_scale_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, M, M), (4, None, 'linear', None, False),\n     'linear_3d_with_size_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'nearest', None, False),\n     'nearest_5d_with_scale_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'nearest', None, False),\n     'nearest_5d_with_size_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, M, M, M, M), (None, 2., 'trilinear', None, False),\n     'trilinear_5d_with_scale_not_recompute_scale_factor'),\n    ('interpolate', torch.randn(S, M, M, M, M), (4, None, 'trilinear', None, False),\n     'trilinear_5d_with_size_not_recompute_scale_factor'),\n]\n\nscript_template = '''\ndef the_method({}):\n    return {}\n'''\n\ndef get_call(method_name, func_type, args, kwargs):\n    kwargs_str = ', '.join([k + '=' + str(v) for k, v in kwargs.items()])\n    self_arg = args[0]\n    if func_type == 'method':\n        args = args[1:]\n\n    argument_str = ', '.join(args)\n    argument_str += ', ' if len(args) and len(kwargs) else ''\n    argument_str += kwargs_str\n\n    if func_type == 'functional':\n        call = 'torch.{}({})'.format(method_name, argument_str)\n    elif func_type == 'method':\n        call = '{}.{}({})'.format(self_arg, method_name, argument_str)\n    elif func_type == 'nn_functional':\n        call = 'torch.nn.functional.{}({})'.format(method_name, argument_str)\n    else:\n        raise RuntimeError('Unsupported function type')\n\n    return call\n\ndef get_constant(x):\n    if x == inf:\n        return 'math.inf'\n    if x == -inf:\n        return '-math.inf'\n    return x\n\ndef get_script_args(args):\n    formals = []\n    tensors = []\n    actuals = []\n    for arg in args:\n        if isinstance(arg, torch.Tensor):\n            name = 'i{}'.format(len(formals))\n            formals.append(name)\n            actuals.append(name)\n            tensors.append(arg)\n        elif isinstance(arg, str):\n            actuals.append(\"'{}'\".format(arg))\n        else:\n            actuals.append(str(get_constant(arg)))\n    return (formals, tensors, actuals)\n\n# create a script function from (name, func_type, output_process_fn),\n# and returns the compiled function and example inputs\ndef gen_script_fn_and_args(method_name, func_type, *args, **kwargs):\n    formals, tensors, 
actuals = get_script_args(args)\n call = get_call(method_name, func_type, actuals, kwargs)\n script = script_template.format(', '.join(formals), call)\n CU = torch.jit.CompilationUnit(script)\n return CU.the_method, tensors\n\n# create a script function from (name, func_type, output_process_fn),\n# returns a function takes in (args, kwargs) and runs the compiled function and\n# then applies the post process fn to the outputs\ndef create_script_fn(self, method_name, func_type, output_process_fn):\n def script_fn(*args, **kwargs):\n fn, tensors = gen_script_fn_and_args(method_name, func_type, *args, **kwargs)\n self.assertExportImport(fn.graph, tensors)\n output = output_process_fn(fn(*tensors))\n script_fn.last_graph = fn.graph_for(*tensors)\n return output\n return script_fn\n\n# make a new function where all non-tensor arguments in 'args' have been partially\n# applied, and all tensor arguments remain.\n# used to trace functions when some arguments are not tensors\ndef partial_apply_nontensors(fn, args, **kwargs):\n source = ['t' if isinstance(arg, torch.Tensor) else 's' for arg in args]\n\n def new_fn(*tensors_):\n tensors = iter(tensors_)\n return fn(*(args[i] if s == 's' else next(tensors) for i, s in enumerate(source)), **kwargs)\n\n return new_fn, [arg for arg in args if isinstance(arg, torch.Tensor)]\n\n# create a trace function from input fn\ndef create_traced_fn(self, fn):\n def traced_fn(*inputs, **kwargs):\n fn_tensors, inputs_tensors = partial_apply_nontensors(fn, inputs, **kwargs)\n # `check_trace` is set to False because check_trace is run with @no_grad\n # Also, `check_against_reference` already does all the checks\n # against python function\n traced = torch.jit.trace(fn_tensors, inputs_tensors, check_trace=False)\n self.assertExportImport(traced.graph, inputs_tensors)\n output = traced(*inputs_tensors)\n traced_fn.last_graph = traced.graph_for(*inputs_tensors)\n return output\n return traced_fn\n\n# known to be failing in script\nEXCLUDE_SCRIPT = {\n 'test_norm_fro_default',\n 'test_norm_fro_cpu',\n 'test_norm_nuc',\n 'test_norm_fro',\n 'test_norm_nuc_batched',\n\n # aten op has additional cudnn argument\n 'test_nn_unfold',\n\n # flaky test - TODO fix\n 'test_nn_ctc_loss',\n\n # unknown builtin op\n 'test_nn_fold',\n\n # jit doesn't support sparse tensors.\n 'test_to_sparse'\n}\n\n# generates a script function and set of example inputs\n# from a specified test in the format of nn_functional_tests\ndef get_nn_functional_compiled_fn_and_inputs(name, self_size, args, variant_name='', *extra_args):\n test_name = 'test_nn_' + name\n\n if variant_name != '':\n test_name = test_name + '_' + variant_name\n\n no_grad = variant_name == 'inplace'\n\n self_variable = create_input((self_size,))[0][0]\n kwargs = None\n\n # need to record this because methods can change the size (e.g. 
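# Editor's sketch (not part of the original suite): the essence of the
# template-driven scripting used by gen_script_fn_and_args above. A call
# string is spliced into script_template and compiled via
# torch.jit.CompilationUnit, exactly as the helper does; the names
# `demo_source` and `_demo_compile_from_template` are hypothetical.
def _demo_compile_from_template():
    import torch
    demo_source = script_template.format('i0', 'torch.neg(i0)')
    cu = torch.jit.CompilationUnit(demo_source)
    return cu.the_method(torch.randn(3))  # runs the compiled TorchScript fn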
unsqueeze)\n args_variable, kwargs_variable = create_input(args)\n\n self_tensor = deepcopy(self_variable.data)\n args_tensor = deepcopy(unpack_variables(args_variable))\n\n f_args_variable = (self_variable,) + args_variable\n f_args_tensor = (self_tensor,) + args_tensor\n with torch._jit_internal._disable_emit_hooks():\n script_fn, inputs = gen_script_fn_and_args(name, \"nn_functional\", *f_args_variable)\n return script_fn, inputs\n\n\n# additional modules test\n# TODO: delete this list once we make all nn_tests work\nadditional_module_tests = [\n {\n 'module_name': 'Bilinear',\n 'constructor_args': (S, S, M),\n 'input_size': (S, S),\n 'extra_args': ((S, S),)\n },\n {\n 'module_name': 'RNNCell',\n 'constructor_args': (S, S),\n 'input_size': (S, S),\n },\n {\n 'module_name': 'LSTMCell',\n 'constructor_args': (S, S),\n 'input_size': (S, S),\n },\n {\n 'module_name': 'GRUCell',\n 'constructor_args': (S, S),\n 'input_size': (S, S),\n },\n {\n 'module_name': 'MultiheadAttention',\n 'constructor_args': (128, 8),\n 'input_size': (10, 8, 128),\n 'extra_args': (torch.randn(10, 8, 128), torch.randn(10, 8, 128)),\n 'slowTest': True\n },\n {\n 'module_name': 'Transformer',\n 'constructor_args': (1, 1, 1, 1, 2),\n 'input_size': (3, 1, 1),\n 'extra_args': (torch.randn(1, 1, 1),),\n 'slowTest': True\n }\n]\n\nEXCLUDE_SCRIPT_MODULES = {\n 'test_nn_AdaptiveAvgPool2d_tuple_none',\n 'test_nn_AdaptiveAvgPool3d_tuple_none',\n 'test_nn_AdaptiveMaxPool2d_tuple_none',\n 'test_nn_AdaptiveMaxPool3d_tuple_none',\n\n # Doesn't use future division, so this is not supported\n 'test_nn_CrossMapLRN2d',\n}\n\nscript_method_template = '''\ndef forward({}):\n return {}\n'''\n\ndef create_script_module(self, nn_module, constructor_args, *args, **kwargs):\n def script_module(*args, **kwargs):\n formals, tensors, actuals = get_script_args(args)\n\n method_args = ', '.join(['self'] + actuals)\n call_args_str = ', '.join(actuals)\n call = \"self.submodule({})\".format(call_args_str)\n script = script_method_template.format(method_args, call)\n\n submodule_constants = []\n if kwargs.get('is_constant'):\n submodule_constants = ['submodule']\n\n # Create module to use the script method\n class TheModule(torch.jit.ScriptModule):\n __constants__ = submodule_constants\n\n def __init__(self):\n super(TheModule, self).__init__()\n self.submodule = nn_module(*constructor_args)\n\n def make_module(script):\n module = TheModule()\n # check __repr__\n str(module)\n module.define(script)\n return module\n\n module = make_module(script)\n if self:\n self.assertExportImportModule(module, tensors)\n module(*args)\n create_script_module.last_graph = module.graph\n return module\n return script_module\n\ndef get_nn_module_name_from_kwargs(**kwargs):\n if 'module_name' in kwargs:\n return kwargs['module_name']\n elif 'fullname' in kwargs:\n return kwargs['fullname']\n elif 'constructor' in kwargs:\n return kwargs['constructor'].__name__\n\ndef get_nn_mod_test_name(**kwargs):\n name = get_nn_module_name_from_kwargs(**kwargs)\n test_name = name\n if 'desc' in kwargs:\n test_name = \"{}_{}\".format(test_name, kwargs['desc'])\n return 'test_nn_{}'.format(test_name)\n\ndef get_nn_module_class_from_kwargs(**kwargs):\n name = get_nn_module_name_from_kwargs(**kwargs)\n index = name.find(\"_\")\n if index == -1:\n return name\n else:\n return name[0:name.find(\"_\")]\n\ndef try_get_nn_module_compiled_mod_and_inputs(*args, **kwargs):\n name = get_nn_module_name_from_kwargs(**kwargs)\n\n if 'desc' in kwargs and 'eval' in kwargs['desc']:\n # eval() is not 
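# Editor's sketch of the wrapping trick used by create_script_module above,
# in isolation: a ScriptModule holds an eager submodule as an attribute and
# its forward is injected afterwards from a source string via define(),
# mirroring make_module(). `_DemoWrapper` is a hypothetical name.
def _demo_script_module_define():
    import torch
    class _DemoWrapper(torch.jit.ScriptModule):
        def __init__(self):
            super(_DemoWrapper, self).__init__()
            self.submodule = torch.nn.Linear(4, 4)
    m = _DemoWrapper()
    m.define('def forward(self, x):\n    return self.submodule(x)')
    return m(torch.randn(2, 4))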
supported, so skip these tests\n return\n\n test_name = name\n if 'desc' in kwargs:\n test_name = \"{}_{}\".format(test_name, kwargs['desc'])\n test_name = get_nn_mod_test_name(**kwargs)\n\n if test_name in EXCLUDE_SCRIPT_MODULES:\n return\n if 'constructor' in kwargs:\n nn_module = kwargs['constructor']\n else:\n nn_module = getattr(torch.nn, name)\n\n if \"FunctionalModule\" in str(nn_module):\n return\n\n if 'constructor_args_fn' in kwargs:\n constructor_args = kwargs['constructor_args_fn']()\n else:\n constructor_args = kwargs.get('constructor_args', ())\n\n # Set up inputs from tuple of sizes or constructor fn\n if 'input_fn' in kwargs:\n input = kwargs['input_fn']()\n else:\n input = (kwargs['input_size'],)\n\n # Extra parameters to forward()\n if 'extra_args' in kwargs:\n input = input + kwargs['extra_args']\n\n if 'target_size' in kwargs:\n input = input + (kwargs['target_size'],)\n elif 'target_fn' in kwargs:\n if torch.is_tensor(input):\n input = (input,)\n input = input + (kwargs['target_fn'](),)\n\n args_variable, kwargs_variable = create_input(input)\n f_args_variable = deepcopy(unpack_variables(args_variable))\n out_var = deepcopy(f_args_variable)\n\n args, mod = f_args_variable, create_script_module(None, nn_module, constructor_args, *f_args_variable)(*f_args_variable)\n\n return mod, out_var\n\n\ndef get_all_nn_module_tests():\n return module_tests + new_module_tests + additional_module_tests\n",
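# Editor's sketch of the partial-application idea behind create_traced_fn
# above: non-tensor arguments are frozen into a closure so that
# torch.jit.trace only ever sees tensor inputs. `_demo_trace` and
# `only_tensors` are hypothetical names.
def _demo_trace():
    import torch
    def f(x, dim):
        return torch.nn.functional.softmax(x, dim=dim)
    def only_tensors(x):  # `dim` frozen, as partial_apply_nontensors does
        return f(x, 1)
    return torch.jit.trace(only_tensors, (torch.randn(2, 3),), check_trace=False)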
"from __future__ import absolute_import, division, print_function, unicode_literals\nimport copy\nimport errno\nimport fcntl\nimport itertools\nimport math\nimport os\nimport random\nimport sys\nimport time\nimport tempfile\nimport unittest\nfrom contextlib import contextmanager\nfrom datetime import timedelta\nfrom functools import reduce, wraps\nfrom io import StringIO\nfrom typing import Union, NamedTuple\n\nimport torch\nimport torch.cuda\nimport torch.distributed as dist\nfrom torch.utils.data.distributed import DistributedSampler\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.testing._internal.common_utils import TestCase, run_tests, find_free_port\nfrom torch.nn.parallel.distributed import _dump_DDP_relevant_env_vars\nfrom torch.distributed.distributed_c10d import _get_default_group\nfrom torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR\nfrom torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT\nfrom torch.testing._internal.common_distributed import (\n TEST_SKIPS,\n MultiProcessTestCase,\n simple_sparse_reduce_tests,\n skip_if_rocm,\n skip_if_small_worldsize,\n skip_if_lt_x_gpu,\n skip_if_no_gpu,\n require_n_gpus_for_nccl_backend,\n)\n\ntry:\n import torchvision\n HAS_TORCHVISION = True\nexcept ImportError:\n HAS_TORCHVISION = False\n\nclass Foo:\n def __init__(self, x):\n self.x = x\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n\nskipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, \"no torchvision\")\n\nCPP_EXTENSIONS_WARNING = \"\"\"\nNinja (https://ninja-build.org) must be available to run C++ extensions tests,\nbut it could not be found. Install ninja with `pip install ninja`\nor `conda install ninja`.\n\"\"\"\n\nBACKEND = os.environ[\"BACKEND\"]\nTEMP_DIR = os.environ[\"TEMP_DIR\"]\nINIT_METHOD = os.getenv(\"INIT_METHOD\", \"env://\")\n\nDEFAULT_TIMEOUT = 300\nCUSTOMIZED_TIMEOUT = {\"test_DistributedDataParallel\": 500}\n\n\nclass _FC2(nn.Module):\n def __init__(self):\n super(_FC2, self).__init__()\n self.fc = nn.Linear(10, 50, bias=True)\n self.fc.bias.requires_grad = False\n\n def forward(self, x):\n x = self.fc(x)\n return x\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = _FC2()\n self.fc3 = nn.Linear(50, 4, bias=False)\n self.relu = nn.ReLU()\n self.no_grad_param = nn.Parameter(torch.tensor([2, 2]).long(),\n requires_grad=False)\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n x = self.fc3(x)\n return F.softmax(x, dim=1)\n\nclass Task(nn.Module):\n def __init__(self):\n super().__init__()\n self.p = nn.Parameter(torch.ones(2, 2))\n\n def forward(self, x):\n return self.p + x\n\n\nclass BatchNormNet(nn.Module):\n\n def __init__(self):\n super(BatchNormNet, self).__init__()\n self.fc1 = nn.Linear(2, 40, bias=False)\n self.bn = nn.BatchNorm1d(4)\n self.fc2 = nn.Linear(40, 4, bias=False)\n\n def forward(self, x):\n x = torch.reshape(self.fc1(x), (-1, 4, 10))\n x = self.bn(x)\n x = torch.reshape(x, (-1, 40))\n x = self.fc2(x)\n return F.softmax(x, dim=1)\n\n\nDDP_NET = Net()\nBN_NET = BatchNormNet()\nONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99)\n\n\ndef get_timeout(test_id):\n test_name = test_id.split(\".\")[-1]\n if test_name in CUSTOMIZED_TIMEOUT:\n return CUSTOMIZED_TIMEOUT[test_name]\n else:\n return DEFAULT_TIMEOUT\n\n\nif not dist.is_available():\n print(\"Distributed not available, skipping tests\", file=sys.stderr)\n sys.exit(0)\n\n\ndef skip_if_no_ninja(func):\n\n 
@wraps(func)\n    def wrapper(*args, **kwargs):\n        try:\n            import torch.utils.cpp_extension\n            torch.utils.cpp_extension.verify_ninja_availability()\n        except RuntimeError:\n            print(CPP_EXTENSIONS_WARNING)\n            return 0\n\n        return func(*args, **kwargs)\n\n    return wrapper\n\ndef require_backend(backends):\n    if BACKEND not in backends:\n        return unittest.skip(\"Test requires backend to be one of %s\" % backends)\n    return lambda func: func\n\n\ndef require_backends_available(backends):\n    def check(backend):\n        if backend == dist.Backend.GLOO:\n            return dist.is_gloo_available()\n        if backend == dist.Backend.NCCL:\n            return dist.is_nccl_available()\n        if backend == dist.Backend.MPI:\n            return dist.is_mpi_available()\n        return False\n    backends = map(lambda b: dist.Backend(b), backends)\n    if not all(map(check, backends)):\n        return unittest.skip(\n            \"Test requires backends to be available %s\" % backends)\n    return lambda func: func\n\n\ndef require_world_size(world_size):\n    if int(os.environ[\"WORLD_SIZE\"]) < world_size:\n        return unittest.skip(\"Test requires world size of %d\" % world_size)\n    return lambda func: func\n\n\ndef apply_hack_for_nccl():\n    # This is a hack for a known NCCL issue using multiprocess\n    # in conjunction with multiple threads to manage different GPUs which\n    # may cause ncclCommInitRank to fail.\n    # http://docs.nvidia.com/deeplearning/sdk/nccl-release-notes/rel_2.1.4.html#rel_2.1.4\n    # It slows down the performance of collective operations.\n    # Without this setting NCCL might throw unhandled error.\n    os.environ[\"NCCL_MAX_NRINGS\"] = \"1\"\n\n\n@contextmanager\ndef _lock():\n    lockfile = os.path.join(TEMP_DIR, \"lockfile\")\n    with open(lockfile, \"w\") as lf:\n        try:\n            fcntl.flock(lf.fileno(), fcntl.LOCK_EX)\n            yield\n        finally:\n            fcntl.flock(lf.fileno(), fcntl.LOCK_UN)\n            lf.close()\n\n\ndef _build_tensor(size, value=None, dtype=torch.float):\n    if value is None:\n        value = size\n    return torch.empty(size, size, size, dtype=dtype).fill_(value)\n\n\ndef _build_multidim_tensor(dim, dim_size, value=None):\n    if value is None:\n        value = dim_size\n    return torch.FloatTensor(size=[dim_size for _ in range(dim)]).fill_(value)\n\n\nclass Barrier(object):\n    barrier_id = 0\n\n    @classmethod\n    def init(cls):\n        cls.barrier_id = 0\n        barrier_dir = os.path.join(TEMP_DIR, \"barrier\")\n        for f_name in os.listdir(barrier_dir):\n            os.unlink(os.path.join(barrier_dir, f_name))\n\n    @classmethod\n    def sync(cls, wait_for=None, timeout=10):\n        if wait_for is None:\n            wait_for = dist.get_world_size()\n        cls.barrier_id += 1\n        barrier_dir = os.path.join(TEMP_DIR, \"barrier\")\n        pid = str(os.getpid())\n        barrier_file = os.path.join(barrier_dir, pid)\n        with _lock():\n            with open(barrier_file, \"w\") as f:\n                f.write(str(cls.barrier_id))\n\n        start_time = time.time()\n        while True:\n            arrived = 0\n            with _lock():\n                for f_name in os.listdir(barrier_dir):\n                    with open(os.path.join(barrier_dir, f_name), \"r\") as f:\n                        data = f.read()\n                        if int(data) >= cls.barrier_id:\n                            arrived += 1\n            if arrived == wait_for:\n                break\n\n            if time.time() - start_time > timeout:\n                raise RuntimeError(\"barrier timeout\")\n            time.sleep(0.1)\n\n\n@contextmanager\ndef _captured_output():\n    new_out, new_err = StringIO(), StringIO()\n    old_out, old_err = sys.stdout, sys.stderr\n    try:\n        sys.stdout, sys.stderr = new_out, new_err\n        yield sys.stdout, sys.stderr\n    finally:\n        sys.stdout, sys.stderr = old_out, old_err\n\n\nclass _DistTestBase(object):\n    def _barrier(self, *args, **kwargs):\n        Barrier.sync(*args, **kwargs)\n\n    def _init_group_test(self, **kwargs):\n        group = [1, 2]\n        
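# Editor's sketch of the fcntl.flock pattern behind _lock() and Barrier
# above: an exclusive OS-level lock on a shared file serializes the
# read/modify/write of the per-process barrier files. The directory
# argument is hypothetical.
def _demo_file_lock(tmp_dir):
    import fcntl, os
    with open(os.path.join(tmp_dir, 'lockfile'), 'w') as lf:
        fcntl.flock(lf.fileno(), fcntl.LOCK_EX)  # blocks until acquired
        try:
            pass  # critical section: touch barrier files here
        finally:
            fcntl.flock(lf.fileno(), fcntl.LOCK_UN)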
group_id = dist.new_group(group, **kwargs)\n rank = dist.get_rank()\n if rank not in group:\n return ([], None, rank)\n\n return (group, group_id, rank)\n\n def _init_full_group_test(self, **kwargs):\n group = list(range(0, dist.get_world_size()))\n group_id = dist.new_group(**kwargs)\n rank = dist.get_rank()\n return (group, group_id, rank)\n\n def _init_global_test(self):\n group = list(range(0, dist.get_world_size()))\n group_id = dist.group.WORLD\n rank = dist.get_rank()\n return (group, group_id, rank)\n\n # HELPER FOR MULTIGPU TESTS\n def _init_multigpu_helper(self):\n \"\"\"Multigpu tests are designed to simulate the multi nodes with multi\n GPUs on each node. Nccl backend requires equal #GPUs in each process.\n On a single node, all visible GPUs are evenly\n divided to subsets, each process only uses a subset.\n \"\"\"\n nGPUs = torch.cuda.device_count()\n world_size = dist.get_world_size()\n visible_devices = range(nGPUs)\n\n if BACKEND == \"nccl\":\n apply_hack_for_nccl()\n\n nGPUs_per_process = nGPUs // world_size\n rank_to_GPU = {\n i: list(\n visible_devices[i * nGPUs_per_process: (i + 1) * nGPUs_per_process]\n )\n for i in range(world_size)\n }\n return rank_to_GPU\n\n def test_dump_DDP_relevant_env_vars(self):\n with _captured_output() as (out, err):\n _dump_DDP_relevant_env_vars()\n lines = out.getvalue().splitlines()\n\n def format_line(var):\n return \"env:%s=%s\" % (var, os.environ[var] if var in os.environ else \"N/A\")\n\n # Check relevant env vars\n vars = [\n \"MASTER_ADDR\",\n \"MASTER_PORT\",\n \"WORLD_SIZE\",\n \"NCCL_TOPO_DUMP_FILE\", # N/A\n ]\n for var in vars:\n line = format_line(var)\n self.assertIn(line, lines)\n # Check irrelevant env vars\n vars = [\n \"xxx\",\n \"yyy\",\n \"zzz\",\n ]\n for var in vars:\n line = format_line(var)\n self.assertNotIn(line, lines)\n\n # GET RANK\n def test_get_rank(self):\n test_dir = os.path.join(TEMP_DIR, \"test_dir\")\n pid = str(os.getpid())\n num_processes = dist.get_world_size()\n with open(os.path.join(test_dir, pid), \"w\") as f:\n f.write(str(dist.get_rank()))\n\n self._barrier()\n\n all_ranks = set()\n for f_name in os.listdir(test_dir):\n with open(os.path.join(test_dir, f_name), \"r\") as f:\n all_ranks.add(int(f.read()))\n self.assertEqual(len(all_ranks), num_processes)\n\n self._barrier()\n\n if dist.get_rank() == 0:\n for f_name in os.listdir(test_dir):\n os.unlink(os.path.join(test_dir, f_name))\n\n self._barrier()\n\n def test_get_backend(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n backend_str = BACKEND.lower()\n self.assertEqual(dist.get_backend(), backend_str)\n if dist.get_rank() in group:\n self.assertEqual(dist.get_backend(group_id), backend_str)\n else:\n with self.assertRaisesRegex(RuntimeError, \"Invalid process group specified\"):\n dist.get_backend(group_id)\n\n def test_Backend_enum_class(self):\n # test parsing\n backend = BACKEND.lower()\n self.assertEqual(dist.Backend(BACKEND.upper()), backend)\n self.assertEqual(dist.Backend(BACKEND), backend)\n with self.assertRaisesRegex(ValueError, \"Invalid backend: 'undefined'\"):\n dist.Backend(\"undefined\")\n with self.assertRaisesRegex(ValueError, \"Invalid backend: 'xYz'\"):\n dist.Backend(\"xYz\")\n with self.assertRaises(ValueError):\n dist.Backend(None)\n with self.assertRaises(ValueError):\n dist.Backend(3)\n with self.assertRaises(ValueError):\n dist.Backend([\"gloo\"])\n\n # Test destroy\n def test_destroy_group(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n 
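# Editor's worked example of the device partitioning computed by
# _init_multigpu_helper above: with 4 visible GPUs and a world size of 2
# (hypothetical values), each rank owns a disjoint contiguous slice.
def _demo_rank_to_gpu():
    nGPUs, world_size = 4, 2
    per_proc = nGPUs // world_size
    mapping = {i: list(range(nGPUs))[i * per_proc:(i + 1) * per_proc]
               for i in range(world_size)}
    assert mapping == {0: [0, 1], 1: [2, 3]}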
else:\n group = [0, 1]\n group_id = dist.new_group(group)\n self._barrier()\n dist.destroy_process_group(group_id)\n\n # Test get rank and size of group\n def test_get_rank_size_group(self):\n if dist.get_world_size() > 2:\n group = [1, 2]\n else:\n group = [0, 1]\n group_id = dist.new_group(group)\n if dist.get_rank() in group:\n self.assertEqual(dist.get_world_size(group_id), 2)\n self.assertTrue(dist.get_rank(group_id) in list(range(2)))\n else:\n self.assertEqual(dist.get_world_size(group_id), -1)\n self.assertEqual(dist.get_rank(group_id), -1)\n\n # Test destroy full groups\n def test_destroy_full_group(self):\n _, group_id, _ = self._init_full_group_test()\n self._barrier()\n dist.destroy_process_group(group_id)\n\n # Test get rank and size of full group\n def test_get_rank_size_full_group(self):\n _, group_id, _ = self._init_full_group_test()\n self.assertEqual(dist.get_world_size(group_id), dist.get_world_size())\n self.assertEqual(dist.get_rank(group_id), dist.get_rank())\n\n def _test_barrier_timeout(self, group_id, timeout):\n local_rank = dist.get_rank(group_id)\n\n # Only execute barrier on rank == 0, causing it to timeout\n if local_rank == 0:\n expected_time = time.time() + timeout.total_seconds()\n with self.assertRaisesRegex(Exception, \" (Timed out|closed|timeout) \"):\n dist.barrier(group_id)\n self.assertGreaterEqual(time.time(), expected_time)\n else:\n time.sleep(timeout.total_seconds())\n\n @unittest.skipIf(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n @unittest.skipIf(\n not INIT_METHOD.startswith(\"file://\"),\n \"Requires file:// initialization method. \" +\n \"Both tcp:// and env:// rely on the TCP store for which \"\n \"reinitialization has proven racy.\"\n )\n def test_barrier_timeout_global(self):\n dist.destroy_process_group()\n\n # Explicitly pass world size to the barrier because we've\n # just destroyed any state in torch.distributed.\n self._barrier(wait_for=int(WORLD_SIZE))\n\n # Reinitialize global process group\n timeout = timedelta(seconds=1)\n dist.init_process_group(\n init_method=INIT_METHOD,\n backend=BACKEND,\n world_size=int(WORLD_SIZE),\n rank=self.rank,\n timeout=timeout,\n )\n self._test_barrier_timeout(dist.group.WORLD, timeout)\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n def test_barrier_timeout_group(self):\n timeout = timedelta(seconds=1)\n _, group_id, _ = self._init_group_test(timeout=timeout)\n if group_id is not None:\n self._test_barrier_timeout(group_id, timeout)\n\n @unittest.skipIf(BACKEND != \"gloo\", \"Only gloo backend supports timeouts\")\n def test_barrier_timeout_full_group(self):\n timeout = timedelta(seconds=1)\n _, group_id, _ = self._init_full_group_test(timeout=timeout)\n if group_id is not None:\n self._test_barrier_timeout(group_id, timeout)\n\n # This test helper can only be used when using the Gloo or NCCL backend\n # **and** both the Gloo and NCCL backends are available.\n # See the @skip annotations below.\n def _test_group_override_backend(self, initializer):\n if BACKEND == \"gloo\":\n new_backend = \"nccl\"\n if BACKEND == \"nccl\":\n new_backend = \"gloo\"\n\n group, group_id, rank = initializer(backend=new_backend)\n if group_id is None:\n return\n\n if new_backend == \"gloo\":\n self.assertTrue(isinstance(group_id, dist.ProcessGroupGloo))\n if new_backend == \"nccl\":\n self.assertTrue(isinstance(group_id, dist.ProcessGroupNCCL))\n\n self.assertEqual(rank, group[dist.get_rank(group_id)])\n self.assertEqual(len(group), 
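# Editor's note on the dist.Backend contract exercised by
# test_Backend_enum_class above: parsing is case-insensitive for known
# backend names and raises ValueError for anything else.
def _demo_backend_parsing():
    import torch.distributed as dist
    assert dist.Backend('GLOO') == 'gloo'  # normalized to lower case
    try:
        dist.Backend('undefined')
    except ValueError:
        pass  # unknown names are rejected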
dist.get_world_size(group_id))\n\n # Pin device (so we avoid NCCL race conditions/deadlocks).\n group_rank = dist.get_rank(group_id)\n torch.cuda.set_device(group_rank)\n\n # Run broadcast of CUDA tensor (so it works for both Gloo and NCCL).\n tensor = _build_tensor(2, value=group_rank).cuda()\n dist.broadcast(tensor, src=group[0], group=group_id)\n self.assertEqual(_build_tensor(2, value=0), tensor.to(\"cpu\"))\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @require_world_size(3)\n @skip_if_lt_x_gpu(2)\n def test_backend_group(self):\n self._test_group_override_backend(self._init_group_test)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(3)\n def test_backend_full_group(self):\n self._test_group_override_backend(self._init_full_group_test)\n\n # SEND RECV\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support send/recv\")\n def test_send_recv(self):\n rank = dist.get_rank()\n tensor = _build_tensor(rank + 1)\n\n for src in range(0, dist.get_world_size()):\n if src == rank:\n # Send mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n dist.send(tensor, dst)\n else:\n # Recv mode\n expected_tensor = _build_tensor(src + 1)\n output_tensor = _build_tensor(src + 1, value=-1)\n dist.recv(output_tensor, src)\n self.assertEqual(output_tensor, expected_tensor)\n\n self._barrier()\n\n # SEND RECV ANY SOURCE\n @unittest.skipIf(\n BACKEND == \"nccl\", \"Nccl does not support send/recv from any source\"\n )\n def test_send_recv_any_source(self):\n rank = dist.get_rank()\n tensor = _build_tensor(10, value=rank)\n recv_ranks = set()\n\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n # Recv mode\n for dst in range(0, dist.get_world_size()):\n if dst == rank:\n continue\n output_tensor = _build_tensor(10, value=-1)\n sender = dist.recv(output_tensor)\n\n # Assert the scalar value \"sender\" that should be\n # equal to the rank of the sender is equal to all\n # values in the received tensor.\n self.assertTrue(output_tensor.eq(sender).all())\n recv_ranks.add(sender)\n else:\n # Send mode\n dist.send(tensor, dst)\n\n self.assertEqual(len(recv_ranks), dist.get_world_size() - 1)\n self._barrier()\n\n # SEND RECV WITH TAG\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support send/recv\")\n def test_send_recv_with_tag(self):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n tensor = _build_tensor(10, value=rank)\n\n for dst in range(0, world_size):\n if dst == rank:\n # Recv mode\n for src in range(0, world_size):\n if src == rank:\n continue\n output_tensor = _build_tensor(10, value=-1)\n dist.recv(output_tensor, src, tag=src)\n self.assertTrue(output_tensor.eq(src).all())\n else:\n # Send mode\n dist.send(tensor, dst, tag=rank)\n\n # ISEND\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support isend\")\n def test_isend(self):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n\n if rank == 0:\n requests = [\n dist.isend(_build_tensor(dest, 10), dest)\n for dest in range(1, world_size)\n ]\n for request in requests:\n request.wait()\n self.assertTrue(request.is_completed())\n else:\n tensor = _build_tensor(rank, -1)\n dist.recv(tensor, 0)\n self.assertEqual(tensor, _build_tensor(rank, 10))\n\n self._barrier()\n\n # IRECV\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support irecv\")\n def test_irecv(self):\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n\n if rank == 0:\n 
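# Editor's minimal point-to-point sketch of the send/recv pattern tested
# above. It assumes dist.init_process_group(...) has already run with a
# world size of exactly 2; `_demo_ping` is a hypothetical helper.
def _demo_ping():
    import torch
    import torch.distributed as dist
    if dist.get_rank() == 0:
        dist.send(torch.full((3,), 0.0), dst=1)
    else:
        buf = torch.empty(3)
        dist.recv(buf, src=0)  # blocks until rank 0's tensor arrives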
expected_tensors = [_build_tensor(src, -1) for src in range(1, world_size)]\n requests = [\n dist.irecv(expected_tensors[src - 1], src)\n for src in range(1, world_size)\n ]\n\n for src in range(1, world_size):\n requests[src - 1].wait()\n self.assertTrue(requests[src - 1].is_completed())\n self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10))\n else:\n tensor = _build_tensor(rank, 10)\n dist.send(tensor, 0)\n\n self._barrier()\n\n # BROADCAST\n def _test_broadcast_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None\n ):\n for dtype, value, requires_cuda in [\n (torch.float, -1e-10, False),\n (torch.double, -1e-100, False),\n (torch.half, -0.1, True),\n (torch.int8, -2, False),\n (torch.uint8, 129, False),\n (torch.int, -1e5, False),\n (torch.long, -1e15, False),\n ]:\n if requires_cuda and not cuda:\n continue\n for src in group:\n expected_tensor = _build_tensor(src + 1, value, dtype)\n if cuda:\n expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n if rank == src:\n dist.broadcast(expected_tensor, src, group_id)\n else:\n tensor = _build_tensor(src + 1, -1, dtype)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n dist.broadcast(tensor, src, group_id)\n self.assertEqual(tensor.size(), expected_tensor.size())\n self.assertEqual(tensor.ne(expected_tensor).max(), torch.tensor(False))\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast(self):\n group, group_id, rank = self._init_global_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @unittest.skipIf(\n BACKEND != \"gloo\" and BACKEND != \"nccl\",\n \"Only Gloo and Nccl backend supports CUDA allReduce\",\n )\n @skip_if_no_gpu\n def test_broadcast_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_broadcast_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_broadcast_helper(group, group_id, rank)\n\n # REDUCE\n def _test_reduce_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n ):\n for src in group:\n if rank == src:\n tensor = _build_tensor(src + 1).fill_(master_value)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n dist.reduce(tensor, src, op, group_id)\n self.assertEqual(tensor, _build_tensor(src + 1, expected_value))\n else:\n tensor = _build_tensor(src + 1).fill_(worker_value)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n dist.reduce(tensor, src, op, group_id)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_sum(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA reduce\")\n @skip_if_no_gpu\n @skip_if_rocm\n def test_reduce_sum_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n 
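# Editor's sketch of the broadcast semantics that _test_broadcast_helper
# above verifies: every rank passes a same-shaped tensor and, after the
# in-place call, all ranks hold the source rank's values. Assumes an
# initialized default process group.
def _demo_broadcast():
    import torch
    import torch.distributed as dist
    t = torch.full((4,), float(dist.get_rank()))
    dist.broadcast(t, src=0)
    assert t.eq(0).all()  # every rank now holds rank 0's zeros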
self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + 10 * (len(group) - 1),\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_reduce_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_reduce_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10)\n\n # ALL REDUCE\n def _test_all_reduce_helper(\n self,\n group,\n group_id,\n rank,\n op,\n master_value,\n worker_value,\n expected_value,\n cuda=False,\n rank_to_GPU=None,\n ):\n for src in group:\n if rank == src:\n tensor = 
_build_tensor(src + 1).fill_(master_value)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n dist.all_reduce(tensor, op, group_id)\n self.assertEqual(tensor, _build_tensor(src + 1, expected_value))\n else:\n tensor = _build_tensor(src + 1).fill_(worker_value)\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n dist.all_reduce(tensor, op, group_id)\n self.assertEqual(tensor, _build_tensor(src + 1, expected_value))\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_sum(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @unittest.skipIf(\n BACKEND != \"gloo\",\n \"Only Gloo backend will have CUDA allReduce tested\",\n )\n @skip_if_no_gpu\n def test_all_reduce_sum_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_product(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) - 1)),\n )\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n 2,\n 10,\n reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n )\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n )\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_helper(\n group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n )\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_reduce_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n 2,\n 10,\n 2 + (10 * (len(group) 
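# Editor's worked example of the expected values used above. For SUM the
# source rank contributes master_value and each of the world_size - 1
# workers contributes worker_value; for PRODUCT the same counts multiply.
# The world size of 3 is hypothetical.
def _demo_expected_values():
    from functools import reduce
    world_size, master, worker = 3, 2, 10
    assert master + worker * (world_size - 1) == 22
    assert reduce(lambda x, y: x * y, [worker] * (world_size - 1), master) == 200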
- 1)),\n        )\n\n    @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n    def test_all_reduce_full_group_product(self):\n        group, group_id, rank = self._init_full_group_test()\n        self._test_all_reduce_helper(\n            group,\n            group_id,\n            rank,\n            dist.ReduceOp.PRODUCT,\n            2,\n            10,\n            reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),\n        )\n\n    @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n    def test_all_reduce_full_group_min(self):\n        group, group_id, rank = self._init_full_group_test()\n        self._test_all_reduce_helper(\n            group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1\n        )\n\n    @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n    def test_all_reduce_full_group_max(self):\n        group, group_id, rank = self._init_full_group_test()\n        self._test_all_reduce_helper(\n            group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10\n        )\n\n    # SPARSE ALL REDUCE\n    def _test_sparse_all_reduce_sum(self, fn):\n        group, group_id, rank = self._init_global_test()\n\n        tests = simple_sparse_reduce_tests(\n            rank,\n            dist.get_world_size(),\n            num_inputs=1)\n        for (inputs, outputs) in tests:\n            tensors = [fn(input) for input in inputs]\n            dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id)\n            self.assertEqual(tensors[0], outputs[0])\n\n    @unittest.skipIf(BACKEND != \"gloo\", \"Only Gloo backend supports sparse all reduce\")\n    def test_sparse_all_reduce_sum(self):\n        self._test_sparse_all_reduce_sum(lambda t: t)\n\n    @unittest.skipIf(BACKEND != \"gloo\", \"Only Gloo backend supports sparse all reduce\")\n    @skip_if_no_gpu\n    @skip_if_rocm\n    def test_sparse_all_reduce_sum_cuda(self):\n        self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda())\n\n    # ALL REDUCE - COALESCED\n    @staticmethod\n    def _all_reduce_coalesced_sum_test_cases(group_size):\n        return (\n            [2, 3],\n            [10, 11],\n            [2 + 10 * (group_size - 1), 3 + 11 * (group_size - 1)]\n        )\n\n    @staticmethod\n    def _all_reduce_coalesced_product_test_cases(group_size):\n        return (\n            [1, 2],\n            [3, 4],\n            [1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)]\n        )\n\n    @staticmethod\n    def _all_reduce_coalesced_min_test_cases(group_size):\n        return (\n            [1, 4],\n            [2, 3],\n            [1, 3]\n        )\n\n    @staticmethod\n    def _all_reduce_coalesced_max_test_cases(group_size):\n        return (\n            [1, 4],\n            [2, 3],\n            [2, 4]\n        )\n\n    def _test_all_reduce_coalesced_helper(\n        self,\n        group,\n        group_id,\n        rank,\n        op,\n        cuda=False,\n        rank_to_GPU=None,\n    ):\n        test_case_func = {\n            dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases,\n            dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases,\n            dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases,\n            dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases\n        }[op]\n\n        master_values, worker_values, expected_values = test_case_func(len(group))\n\n        for src in group:\n            tensors = [\n                _build_tensor(src + 1, val)\n                for val in (master_values if rank == src else worker_values)\n            ]\n            if cuda:\n                tensors = list(map(lambda t: t.cuda(rank_to_GPU[rank][0]), tensors))\n            dist.all_reduce_coalesced(tensors, op, group_id)\n            self.assertEqual(\n                tensors,\n                [\n                    _build_tensor(src + 1, expected_value)\n                    for expected_value in expected_values\n                ]\n            )\n\n        self._barrier()\n\n    @require_backend({\"gloo\"})\n    def test_all_reduce_coalesced_sum(self):\n        group, group_id, rank = self._init_global_test()\n        self._test_all_reduce_coalesced_helper(\n            group,\n            group_id,\n            rank,\n            dist.ReduceOp.SUM,\n            cuda=False,\n            rank_to_GPU=None,\n        )\n\n    @require_backend({\"gloo\"})\n    def test_all_reduce_coalesced_product(self):\n        group, 
group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_min(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.MIN,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_max(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.MAX,\n cuda=False,\n rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_sum(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n cuda=False,\n rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_product(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_min(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.MIN,\n cuda=False,\n rank_to_GPU=None\n )\n\n @skip_if_small_worldsize\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_group_max(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.MAX,\n cuda=False,\n rank_to_GPU=None\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_sum(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.SUM,\n cuda=False,\n rank_to_GPU=None\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_product(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.PRODUCT,\n cuda=False,\n rank_to_GPU=None\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_min(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.MIN,\n cuda=False,\n rank_to_GPU=None,\n )\n\n @require_backend({\"gloo\"})\n def test_all_reduce_coalesced_full_group_max(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_reduce_coalesced_helper(\n group,\n group_id,\n rank,\n dist.ReduceOp.MAX,\n cuda=False,\n rank_to_GPU=None\n )\n\n # SCATTER\n def _test_scatter_helper(self, group, group_id, rank):\n for dest in group:\n tensor = _build_tensor(dest + 1, -1)\n expected_tensor = _build_tensor(dest + 1, rank)\n tensors = (\n [_build_tensor(dest + 1, i) for i in group] if rank == dest else []\n )\n dist.scatter(tensor, src=dest, scatter_list=tensors, group=group_id)\n self.assertEqual(tensor, expected_tensor)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_scatter_checks(self):\n group, group_id, rank = self._init_global_test()\n one = torch.ones([1])\n\n # Specify 
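# Editor's sketch of the coalesced collective exercised above: several
# small tensors are reduced in a single call, one result per input tensor.
# Assumes an initialized gloo process group; names are hypothetical.
def _demo_all_reduce_coalesced():
    import torch
    import torch.distributed as dist
    r = float(dist.get_rank())
    ts = [torch.full((2,), r), torch.full((3,), r + 1)]
    dist.all_reduce_coalesced(ts, dist.ReduceOp.SUM)  # reduces both at once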
scatter_list argument only on source rank.\n output = one.clone() * -1\n if rank == 0:\n scatter_list = [one.clone() * i for i in group]\n dist.scatter(output, src=0, scatter_list=scatter_list)\n else:\n dist.scatter(output, src=0)\n self.assertEqual(output, one * rank)\n\n # Don't specify src argument.\n output = one.clone() * -1\n if rank == 0:\n scatter_list = [one.clone() * i for i in group]\n dist.scatter(output, scatter_list=scatter_list)\n else:\n dist.scatter(output)\n self.assertEqual(output, one * rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter(self):\n group, group_id, rank = self._init_global_test()\n self._test_scatter_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n @skip_if_small_worldsize\n def test_scatter_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_scatter_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support scatter\")\n def test_scatter_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_scatter_helper(group, group_id, rank)\n\n # GATHER\n def _test_gather_helper(self, group, group_id, rank):\n for dest in group:\n tensor = _build_tensor(dest + 1, rank)\n tensors = (\n [_build_tensor(dest + 1, -1) for i in group] if rank == dest else []\n )\n dist.gather(tensor, dst=dest, gather_list=tensors, group=group_id)\n if rank == dest:\n expected_tensors = [_build_tensor(dest + 1, i) for i in group]\n for t1, t2 in zip(tensors, expected_tensors):\n self.assertEqual(t1, t2)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather_checks(self):\n group, group_id, rank = self._init_global_test()\n one = torch.ones([1])\n\n # Specify gather_list argument only on destination rank.\n if rank == 0:\n gather_list = [one.clone() for _ in group]\n dist.gather(one * rank, dst=0, gather_list=gather_list)\n for i in group:\n self.assertEqual(gather_list[i], one * i)\n else:\n dist.gather(one * rank, dst=0)\n\n # Don't specify dst argument.\n if rank == 0:\n gather_list = [one.clone() for _ in group]\n dist.gather(one * rank, gather_list=gather_list)\n for i in group:\n self.assertEqual(gather_list[i], one * i)\n else:\n dist.gather(one * rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather(self):\n group, group_id, rank = self._init_global_test()\n self._test_gather_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n @skip_if_small_worldsize\n def test_gather_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_gather_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_gather_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_gather_helper(group, group_id, rank)\n\n # ALL GATHER\n def _test_all_gather_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None\n ):\n for dest in group:\n tensor = _build_tensor(dest + 1, rank)\n tensors = [_build_tensor(dest + 1, -1) for i in group]\n if cuda:\n tensor = tensor.cuda(rank_to_GPU[rank][0])\n tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]\n dist.all_gather(tensors, tensor, group_id)\n\n expected_tensors = [_build_tensor(dest + 1, i) for i in group]\n for t1, t2 in zip(tensors, expected_tensors):\n 
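# Editor's sketch of the scatter semantics verified above: only the source
# rank supplies scatter_list and each rank receives exactly one chunk.
# Assumes an initialized default process group.
def _demo_scatter():
    import torch
    import torch.distributed as dist
    rank, world = dist.get_rank(), dist.get_world_size()
    out = torch.empty(1)
    chunks = [torch.tensor([float(i)]) for i in range(world)] if rank == 0 else None
    dist.scatter(out, scatter_list=chunks, src=0)
    assert out.item() == float(rank)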
self.assertEqual(t1, t2)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl supports CUDA all gather\")\n @unittest.skipIf(BACKEND == \"nccl\", \"CUDA all gather skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_gather_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"Nccl does not support CPU tensors\")\n def test_all_gather_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_gather_helper(group, group_id, rank)\n\n def _run_all_gather_coalesced_and_verify(\n self, output_tensor_lists, input_tensors, expected_tensors, group_id\n ):\n \"\"\"\n Helper that runs all_gather_coalesced and returns true if output\n matches expectations.\n \"\"\"\n dist.all_gather_coalesced(\n output_tensor_lists, input_tensors, group_id)\n\n for l1, l2 in zip(output_tensor_lists, expected_tensors):\n for t1, t2 in zip(l1, l2):\n if not torch.equal(t1, t2):\n return False\n return True\n\n def _test_all_gather_coalesced_helper(\n self, group, group_id, rank\n ):\n # TODO: Instead we should probably go through _rank_not_in_group\n # mechanism to disable sending tensors\n if group_id is not None:\n for test_case_id in range(2, 5):\n # Make sure we create tensors of incompatible sizes, e.g.\n # [1], [2x2], [3x3x3] ... 
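# Editor's sketch of the all_gather contract tested above: every rank
# contributes one tensor and receives the full list, ordered by rank.
# Assumes an initialized default process group.
def _demo_all_gather():
    import torch
    import torch.distributed as dist
    rank, world = dist.get_rank(), dist.get_world_size()
    outs = [torch.empty(2) for _ in range(world)]
    dist.all_gather(outs, torch.full((2,), float(rank)))
    assert all(outs[i].eq(i).all() for i in range(world))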
to be sent in one batch\n                input_tensors = [\n                    _build_multidim_tensor(\n                        tensor_id, tensor_id, rank + tensor_id) for tensor_id in range(\n                            1, test_case_id)\n                ]\n                output_tensor_lists = [\n                    [\n                        _build_multidim_tensor(\n                            tensor_id, tensor_id, -1) for tensor_id in range(\n                                1, test_case_id)\n                    ] for _ in group\n                ]\n                expected_tensors = [\n                    [\n                        _build_multidim_tensor(\n                            tensor_id,\n                            tensor_id,\n                            rank_iter + tensor_id) for tensor_id in range(\n                                1, test_case_id)\n                    ] for rank_iter in group\n                ]\n                assert self._run_all_gather_coalesced_and_verify(\n                    output_tensor_lists, input_tensors,\n                    expected_tensors, group_id\n                ), \"output tensors do not match expected outputs\"\n\n        self._barrier()\n\n    @unittest.skipIf(BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\")\n    @unittest.skipIf(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n    def test_all_gather_coalesced_simple(self):\n        group, group_id, rank = self._init_global_test()\n        self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n    @skip_if_small_worldsize\n    @unittest.skipIf(BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\")\n    @unittest.skipIf(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n    def test_all_gather_coalesced_group(self):\n        group, group_id, rank = self._init_group_test()\n        self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n    @unittest.skipIf(BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\")\n    @unittest.skipIf(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n    def test_all_gather_coalesced_full_group(self):\n        group, group_id, rank = self._init_full_group_test()\n        self._test_all_gather_coalesced_helper(group, group_id, rank)\n\n    @unittest.skipIf(BACKEND == \"nccl\", \"all_gather_coalesced does not support NCCL\")\n    @unittest.skipIf(BACKEND == \"mpi\", \"all_gather_coalesced does not support MPI\")\n    def test_all_gather_coalesced_with_empty(self):\n        group, group_id, rank = self._init_global_test()\n        input_tensors = [\n            rank * torch.ones([2, 2]),\n            torch.ones([0]),\n            (rank + 1) * torch.ones([3, 3]),\n            torch.ones([0]),\n            torch.ones([0])\n        ]\n        output_tensors_lists = [\n            [\n                -1 * torch.ones([2, 2]),\n                -1 * torch.ones([0]),\n                -1 * torch.ones([3, 3]),\n                -1 * torch.ones([0]),\n                -1 * torch.ones([0])\n            ] for _ in group\n        ]\n        expected_tensors = [\n            [\n                r * torch.ones([2, 2]),\n                torch.ones([0]),\n                (r + 1) * torch.ones([3, 3]),\n                torch.ones([0]),\n                torch.ones([0])\n            ] for r in group\n        ]\n        assert self._run_all_gather_coalesced_and_verify(\n            output_tensors_lists, input_tensors, expected_tensors, group_id)\n        self._barrier()\n\n    # AllToAll\n    def _test_all_to_all_single_equal_split_helper(\n        self,\n        group,\n        group_id,\n        rank,\n        cuda=False,\n        rank_to_GPU=None,\n    ):\n        if group_id is not None:\n            size = len(group)\n            in_tensor = torch.ones([size, size]) * rank\n            expected_tensor = torch.cat([torch.ones([1, size]) * i for i in group])\n            out_tensor = torch.ones([size, size]) * -1\n            if cuda:\n                in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])\n                expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n                out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])\n            dist.all_to_all_single(out_tensor, in_tensor, group=group_id)\n            self.assertEqual(out_tensor, expected_tensor)\n        self._barrier()\n\n    def _test_all_to_all_single_unequal_split_helper(\n        self,\n        group,\n        group_id,\n        rank,\n        cuda=False,\n        rank_to_GPU=None,\n    ):\n        if group_id is not None:\n            size = len(group)\n            in_splits = [i + 1 for i in group]\n            out_splits = [rank + 1 for _ in group]\n            in_tensor = 
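# Editor's sketch of the equal-split all_to_all_single exchange tested
# above: rank r sends one row to every peer and receives row i from rank i.
# Assumes an initialized gloo or MPI group.
def _demo_all_to_all_single():
    import torch
    import torch.distributed as dist
    world, rank = dist.get_world_size(), dist.get_rank()
    inp = torch.full((world, world), float(rank))
    out = torch.empty(world, world)
    dist.all_to_all_single(out, inp)  # row i of `out` originated at rank i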
torch.ones([sum(in_splits), size]) * rank\n out_tensor = torch.ones([(rank + 1) * size, size])\n expected_tensor = torch.cat([torch.ones([rank + 1, size]) * i for i in group])\n if cuda:\n in_tensor = in_tensor.cuda(rank_to_GPU[rank][0])\n expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])\n out_tensor = out_tensor.cuda(rank_to_GPU[rank][0])\n dist.all_to_all_single(\n out_tensor, in_tensor, out_splits, in_splits, group=group_id)\n self.assertEqual(out_tensor, expected_tensor)\n self._barrier()\n\n def _test_all_to_all_helper(self, group, group_id, rank):\n if group_id is not None:\n size = len(group)\n in_splits = [i + 1 for i in group]\n in_tensors = [\n torch.ones([in_splits[i], size]) * rank for i, _ in enumerate(group)\n ]\n out_tensors = [torch.ones([(rank + 1), size]) for _ in group]\n expected_tensors = [torch.ones([rank + 1, size]) * i for i in group]\n dist.all_to_all(out_tensors, in_tensors, group=group_id)\n for t1, t2 in zip(out_tensors, expected_tensors):\n self.assertEqual(t1, t2)\n self._barrier()\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"gloo\",\n \"Only MPI and Gloo support CPU all_to_all_single\"\n )\n def test_all_to_all_single_equal_split(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL A2A is not enabled for OSS builds\")\n @unittest.skipIf(\n BACKEND != \"gloo\", \"Only Gloo supports CUDA all_to_all_single\"\n )\n @skip_if_no_gpu\n @skip_if_rocm\n def test_all_to_all_single_equal_split_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"gloo\",\n \"Only MPI and Gloo support CPU all_to_all_single\"\n )\n def test_all_to_all_single_unequal_split(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL A2A is not enabled for OSS builds\")\n @unittest.skipIf(\n BACKEND != \"gloo\", \"Only Gloo supports CUDA all_to_all_single\"\n )\n @skip_if_no_gpu\n @skip_if_rocm\n def test_all_to_all_single_unequal_split_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all(self):\n group, group_id, rank = self._init_global_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"gloo\",\n \"Only MPI and Gloo support CPU all_to_all_single\"\n )\n @skip_if_small_worldsize\n def test_all_to_all_single_equal_split_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL A2A is not enabled for OSS builds\")\n @unittest.skipIf(\n BACKEND != \"gloo\", \"Only Gloo supports CUDA all_to_all_single\"\n )\n @skip_if_no_gpu\n @skip_if_rocm\n @skip_if_small_worldsize\n def test_all_to_all_single_equal_split_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n 
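# Editor's worked example of the unequal-split bookkeeping above for a
# hypothetical world size of 3, rank 1: it sends i + 1 rows to rank i and
# receives rank + 1 rows from every peer.
def _demo_split_sizes():
    group, rank = [0, 1, 2], 1
    in_splits = [i + 1 for i in group]      # [1, 2, 3] rows sent out
    out_splits = [rank + 1 for _ in group]  # [2, 2, 2] rows received
    assert sum(out_splits) == (rank + 1) * len(group)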
self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"gloo\",\n \"Only MPI and Gloo support CPU all_to_all_single\"\n )\n @skip_if_small_worldsize\n def test_all_to_all_single_unequal_split_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL A2A is not enabled for OSS builds\")\n @unittest.skipIf(\n BACKEND != \"gloo\", \"Only Gloo supports CUDA all_to_all_single\"\n )\n @skip_if_no_gpu\n @skip_if_rocm\n @skip_if_small_worldsize\n def test_all_to_all_single_unequal_split_group_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n @skip_if_small_worldsize\n def test_all_to_all_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"gloo\",\n \"Only MPI and Gloo support CPU all_to_all_single\"\n )\n def test_all_to_all_single_equal_split_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_single_equal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL A2A is not enabled for OSS builds\")\n @unittest.skipIf(\n BACKEND != \"gloo\", \"Only Gloo supports CUDA all_to_all_single\"\n )\n @skip_if_no_gpu\n @skip_if_rocm\n def test_all_to_all_single_equal_split_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_equal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(\n BACKEND != \"mpi\" and BACKEND != \"gloo\",\n \"Only MPI and Gloo support CPU all_to_all_single\"\n )\n def test_all_to_all_single_unequal_split_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_single_unequal_split_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL A2A is not enabled for OSS builds\")\n @unittest.skipIf(\n BACKEND != \"gloo\", \"Only Gloo supports CUDA all_to_all_single\"\n )\n @skip_if_no_gpu\n @skip_if_rocm\n def test_all_to_all_single_unequal_split_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_to_all_single_unequal_split_helper(\n group,\n group_id,\n rank,\n True,\n rank_to_GPU,\n )\n\n @unittest.skipIf(BACKEND != \"mpi\", \"Only MPI supports all_to_all\")\n def test_all_to_all_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_all_to_all_helper(group, group_id, rank)\n\n # BARRIER\n def _test_barrier_helper(\n self, group, group_id, rank, cuda=False, rank_to_GPU=None):\n WAIT_TIME = 0.3 # seconds\n\n for dest in group:\n expected_time = torch.DoubleTensor(1).fill_(0.0)\n if cuda:\n expected_time = expected_time.cuda(rank_to_GPU[rank][0])\n if dest == rank:\n expected_time.fill_(time.time() + WAIT_TIME)\n dist.broadcast(expected_time, dest, group_id)\n time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer\n dist.barrier(group_id)\n else:\n dist.broadcast(expected_time, dest, group_id)\n 
dist.barrier(group_id)\n self.assertGreaterEqual(\n float(time.time()),\n float(expected_time[0]),\n \"destination rank: %d, my rank: %d\" % (dest, rank) +\n \" (if you see this failure, please report in #14554)\")\n\n # Use higher timeout for the instance where the test runs\n # against a subgroup and uses a CUDA tensor for expected time.\n # The CUDA initialization for the participating processes can\n # take long enough for the barrier timeout to trigger on the\n # process that doesn't participate in the group.\n self._barrier(timeout=20)\n\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't support GPU barrier\")\n def test_barrier_cuda(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't support GPU barrier\")\n @skip_if_rocm\n def test_barrier_group_cuda(self):\n group, group_id, rank = self._init_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @skip_if_small_worldsize\n @skip_if_no_gpu\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't support GPU barrier\")\n def test_barrier_full_group_cuda(self):\n group, group_id, rank = self._init_full_group_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier(self):\n group, group_id, rank = self._init_global_test()\n self._test_barrier_helper(group, group_id, rank)\n\n @skip_if_small_worldsize\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier_group(self):\n group, group_id, rank = self._init_group_test()\n self._test_barrier_helper(group, group_id, rank)\n\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL does not support CPU barrier\")\n def test_barrier_full_group(self):\n group, group_id, rank = self._init_full_group_test()\n self._test_barrier_helper(group, group_id, rank)\n\n def _test_broadcast_multigpu_helper(self, group, group_id, rank, rank_to_GPU):\n for src in group:\n expected_tensor = _build_tensor(src + 1)\n tensors = [\n _build_tensor(src + 1, -1).cuda(device=i) for i in rank_to_GPU[rank]\n ]\n if rank == src:\n tensors[0] = expected_tensor.cuda(device=rank_to_GPU[rank][0])\n\n dist.broadcast_multigpu(tensors, src, group_id)\n for tensor in tensors:\n self.assertEqual(tensor, expected_tensor)\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL broadcast multigpu skipped\")\n @skip_if_no_gpu\n def test_broadcast_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_broadcast_multigpu_helper(group, group_id, rank, rank_to_GPU)\n\n def _test_all_reduce_multigpu_helper(\n self,\n group,\n group_id,\n rank,\n rank_to_GPU,\n op,\n master_value,\n worker_value,\n expected_value,\n ):\n for src in group:\n if rank == src:\n tensors = [\n _build_tensor(src + 1, master_value).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n else:\n tensors = [\n _build_tensor(src + 1, worker_value).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n\n dist.all_reduce_multigpu(tensors, op, group_id)\n expected_tensor = _build_tensor(src + 1, expected_value)\n 
for tensor in tensors:\n self.assertEqual(tensor, expected_tensor)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND == \"mpi\", \"MPI doesn't support broadcast multigpu\")\n @unittest.skipIf(BACKEND == \"nccl\", \"CUDA all_reduce multigpu skipped for NCCL\")\n @skip_if_no_gpu\n def test_all_reduce_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n 2,\n 10,\n (2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),\n )\n\n def _test_reduce_multigpu_helper(\n self,\n group,\n group_id,\n rank,\n rank_to_GPU,\n op,\n master_value,\n worker_value,\n expected_value,\n ):\n for src in group:\n if rank == src:\n tensors = [\n _build_tensor(src + 1, master_value).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n dist.reduce_multigpu(tensors, src, op, group_id)\n expected_tensor = _build_tensor(src + 1, expected_value)\n self.assertEqual(tensors[0], expected_tensor)\n else:\n tensors = [\n _build_tensor(src + 1, worker_value).cuda(device=i)\n for i in rank_to_GPU[rank]\n ]\n dist.reduce_multigpu(tensors, src, op, group_id)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl backend supports reduce multigpu\")\n @skip_if_no_gpu\n @skip_if_rocm\n def test_reduce_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_reduce_multigpu_helper(\n group,\n group_id,\n rank,\n rank_to_GPU,\n dist.ReduceOp.SUM,\n 2,\n 10,\n (2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),\n )\n\n def _test_all_gather_multigpu_helper(self, group, group_id, rank, rank_to_GPU):\n for dest in group:\n tensors = [\n _build_tensor(dest + 1).cuda(device=i) for i in rank_to_GPU[rank]\n ]\n\n # construct expected output along with\n # a place holder to receive all gather results\n output_tensors = []\n expected_output = []\n output_per_gpu = (\n [_build_tensor(dest + 1, -1)] * len(rank_to_GPU[0]) * len(group)\n )\n expected_per_gpu = (\n [_build_tensor(dest + 1)] * len(rank_to_GPU[0]) * len(group)\n )\n for gpu in rank_to_GPU[rank]:\n output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])\n expected_output.append([t.cuda(device=gpu) for t in expected_per_gpu])\n\n dist.all_gather_multigpu(output_tensors, tensors, group_id)\n self.assertEqual(output_tensors, expected_output)\n\n self._barrier()\n\n @unittest.skipIf(BACKEND != \"nccl\", \"Only Nccl backend supports allgather multigpu\")\n @skip_if_no_gpu\n def test_all_gather_multigpu(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n self._test_all_gather_multigpu_helper(group, group_id, rank, rank_to_GPU)\n\n def _model_step(self, model):\n for param in model.parameters():\n if param.grad is not None:\n with torch.no_grad():\n param += param.grad\n param.grad = None\n\n def _prepare_dummy_data(self, local_bs):\n # global_bs for DDP should be divisible by WORLD_SIZE\n global_bs = int(WORLD_SIZE) * local_bs\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 4)\n loss = nn.MSELoss()\n return global_bs, input_cpu, target, loss\n\n # END TO END TEST FOR DISTRIBUTEDDATAPARALLEL\n def _test_DDP_helper(self, model, input_var, target, loss, scale_factor=1.0):\n model.train()\n output = model(input_var)\n l = loss(output, target) * scale_factor\n l.backward()\n\n def _assert_equal_param(self, param_gpu, param_DDP):\n self.assertEqual(len(param_gpu), 
len(param_DDP))\n for p_gpu, p_DDP in zip(param_gpu, param_DDP):\n self.assertEqual(p_gpu, p_DDP)\n\n def _test_DDP_5iter(\n self, model_base, model_DDP, input, target, loss, local_bs, rank, batch_size, test_save, offset=None, world_size=0\n ):\n for idx in range(5):\n # single cpu/gpu training\n self._test_DDP_helper(model_base, input, target, loss)\n\n if offset is None:\n offset = rank * local_bs\n\n # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs\n self._test_DDP_helper(\n model_DDP,\n input[offset: offset + local_bs],\n target[offset: offset + local_bs],\n loss,\n world_size * local_bs / batch_size if world_size != 0 else 1,\n )\n\n # Update weights and run a second iteration to shake out errors\n self._model_step(model_base)\n self._model_step(model_DDP)\n self._assert_equal_param(\n list(model_base.parameters()), list(model_DDP.module.parameters())\n )\n\n # Shuffle the input so that DDP input is different\n input = input[torch.randperm(batch_size)]\n\n # save the model in the middle and reload\n if test_save and idx == 2 and INIT_METHOD.startswith(\"file://\"):\n with tempfile.NamedTemporaryFile() as tmp:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n with tempfile.TemporaryFile() as tmp_file:\n torch.save(model_DDP, tmp_file)\n tmp_file.seek(0)\n saved_model = torch.load(tmp_file)\n for k in model_DDP.state_dict():\n self.assertEqual(model_DDP.state_dict()[k],\n saved_model.state_dict()[k])\n\n def _test_DistributedDataParallel(self, gpu_subset, rank, output_device=None):\n # Run a simple end to end DDP model, use result of single node model\n # as baseline\n\n # cpu training setup\n model = DDP_NET\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpu_subset[0])\n\n # DDP training setup\n model_DDP = copy.deepcopy(model)\n model_DDP.cuda(gpu_subset[0])\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP, device_ids=gpu_subset\n )\n\n # test serializable/unserializable\n with tempfile.NamedTemporaryFile() as tmp:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n # dummy data initialization\n local_bs = len(gpu_subset)\n global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)\n\n # check two model parameters over 5 iterations\n self._test_DDP_5iter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpu_subset[0]),\n target.cuda(gpu_subset[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True\n )\n self._barrier()\n\n @unittest.skipIf(\n BACKEND == \"nccl\", \"nccl does not support DDP on CPU models\"\n )\n def test_DistributedDataParallelCPU(self):\n # Run a simple end to end DDP-CPU model, use result of single node\n # model as baseline\n group, group_id, rank = self._init_global_test()\n\n # cpu training setup\n model_base = DDP_NET\n\n # DDP-CPU training setup\n model_DDP = copy.deepcopy(model_base)\n model_DDP = nn.parallel.DistributedDataParallelCPU(model_DDP)\n\n # dummy data initialization\n local_bs = 2\n global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)\n\n # check two model parameters over 5 iterations\n self._test_DDP_5iter(\n model_base, model_DDP, input_cpu, target, loss, local_bs, rank, global_bs, False\n )\n self._barrier()\n\n @unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',\n \"Only Nccl & Gloo backend support DistributedDataParallel\")\n def test_DistributedDataParallel_requires_grad(self):\n # a module without gradients shouldn't be accepted\n self.assertRaises(AssertionError, lambda: 
nn.parallel.DistributedDataParallel(nn.Module()))\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_DistributedDataParallel_non_default_stream(self):\n stream = torch.cuda.Stream()\n rank = self.rank\n with torch.cuda.stream(stream):\n net = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank]\n )\n for i in range(1000):\n # Clear gradients manually\n grad = net.module.weight.grad\n if grad is not None:\n grad.detach_()\n grad.zero_()\n # Forward + BW\n batch = torch.tensor([rank]).float().cuda(rank)\n loss = net(batch).sum()\n loss.backward()\n # For each worker, the gradient on the weight should be worker_rank.\n grad = net.module.weight.grad\n avg = grad.clone()\n # All-reducing the gradient averages should give us the gradient\n # average. If not, then one of the workers has not correctly\n # written back the averaged gradient before this all-reduce call.\n dist.all_reduce(avg)\n world_size = int(os.environ[\"WORLD_SIZE\"])\n avg.div_(world_size)\n expected_grad = sum(i for i in range(world_size)) / world_size\n self.assertEqual(\n avg[0, 0],\n expected_grad,\n msg=f\"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}\",\n )\n\n @unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',\n \"Only Nccl & Gloo backend support DistributedDataParallel\")\n @skip_if_no_gpu\n @skip_if_rocm\n def test_DistributedDataParallel(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n gpus = list(rank_to_GPU[rank])\n self._test_DistributedDataParallel(gpu_subset=gpus, rank=rank)\n\n # test output_device\n self._test_DistributedDataParallel(gpu_subset=gpus, rank=rank, output_device=torch.device('cuda'))\n\n # test device_ids\n gpus = list(map(lambda i: torch.device('cuda:' + str(i)), gpus))\n self._test_DistributedDataParallel(gpu_subset=gpus, rank=rank, output_device=torch.device('cuda'))\n\n def _test_DistributedDataParallel_SyncBatchNorm(self, gpu_subset, rank, local_bs, global_bs, offset, output_device=None):\n # Run a simple end to end DDP model, use result of single node model\n # as baseline\n\n # cpu training setup\n model = BN_NET\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpu_subset[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpu_subset[0])\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP, device_ids=gpu_subset\n )\n\n # test serializable/unserializable\n with tempfile.NamedTemporaryFile() as tmp:\n torch.save(model_DDP, tmp.name)\n model_DDP = torch.load(tmp.name)\n\n # data initialization\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 4)\n loss = nn.MSELoss()\n\n # check two model parameters over 5 iterations\n self._test_DDP_5iter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpu_subset[0]),\n target.cuda(gpu_subset[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True,\n offset,\n int(WORLD_SIZE)\n )\n self._barrier()\n\n @unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',\n \"Only Nccl & Gloo backend support DistributedDataParallel\")\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, 
hence\n # testing with one module replica per process\n gpus = [rank]\n\n num_processes = int(WORLD_SIZE)\n local_bs = 2\n bs_offset = int(rank * 2)\n global_bs = int(num_processes * 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset)\n\n # test output_device\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n output_device=torch.device('cuda'))\n\n # test device_ids\n gpus = list(map(lambda i: torch.device('cuda:' + str(i)), gpus))\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset,\n output_device=torch.device('cuda'))\n\n @unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',\n \"Only Nccl & Gloo backend support DistributedDataParallel\")\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n model = nn.BatchNorm1d(2)\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpus[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpus[0])\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP, device_ids=gpus\n )\n\n local_bs = len(gpus) * 2\n global_bs = int(WORLD_SIZE) * local_bs\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 2)\n loss = nn.MSELoss()\n\n # disabling cudnn.\n # SyncBatchNorm goes through native_batch_norm kernel, this avoids the\n # numerical issue created by the divergent code path.\n with torch.backends.cudnn.flags(False):\n # check two model parameters over 5 iterations\n self._test_DDP_5iter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpus[0]),\n target.cuda(gpus[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True\n )\n self._barrier()\n\n @unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',\n \"Only Nccl & Gloo backend support DistributedDataParallel\")\n @skip_if_no_gpu\n @require_world_size(2)\n @skip_if_rocm\n def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n # DDP does not support replicating BN layers within a process, hence\n # testing with one module replica per process\n gpus = [rank]\n\n model = nn.BatchNorm1d(2)\n\n # single gpu training setup\n model_gpu = copy.deepcopy(model)\n model_gpu.cuda(gpus[0])\n\n # DDP training setup\n model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model))\n model_DDP.cuda(gpus[0])\n model_DDP = nn.parallel.DistributedDataParallel(\n model_DDP, device_ids=gpus\n )\n\n local_bs = 1\n global_bs = int(WORLD_SIZE)\n input_cpu = torch.randn(global_bs, 2)\n target = torch.randn(global_bs, 2)\n loss = nn.MSELoss()\n\n # disabling cudnn.\n # SyncBatchNorm goes through native_batch_norm kernel, this avoids the\n # numerical issue created by the divergent code path.\n with torch.backends.cudnn.flags(False):\n # check two model parameters over 5 iterations\n self._test_DDP_5iter(\n model_gpu,\n model_DDP,\n input_cpu.cuda(gpus[0]),\n target.cuda(gpus[0]),\n loss,\n local_bs,\n rank,\n global_bs,\n True\n )\n 
self._barrier()\n\n @unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',\n \"Only Nccl & Gloo backend support DistributedDataParallel\")\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value(self):\n group, group_id, rank = self._init_global_test()\n rank_to_GPU = self._init_multigpu_helper()\n model = nn.parallel.DistributedDataParallel(ONLY_SBN_NET.cuda(rank), device_ids=[rank])\n\n input_var = []\n for i in range(int(WORLD_SIZE)):\n input_var_rank = torch.cat([\n torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)),\n torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1))\n ], dim=1)\n input_var.append(input_var_rank)\n\n all_input_var = torch.cat(\n [x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1) for x in input_var],\n dim=1\n ).cuda(rank)\n\n for i in range(100):\n y = model(input_var[rank].cuda(rank))\n y.mean().backward()\n\n running_mean, running_var = model.module.running_mean, model.module.running_var\n torch.testing.assert_allclose(running_mean, all_input_var.mean(1))\n torch.testing.assert_allclose(running_var, all_input_var.var(1))\n\n @unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',\n \"Only Nccl & Gloo backend support DistributedDataParallel\")\n @skip_if_no_gpu\n def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self):\n group, group_id, rank = self._init_global_test()\n # only do single GPU per process\n gpus = [rank]\n\n # cpu training setup\n model = BN_NET\n\n num_processes = int(WORLD_SIZE)\n local_bs = rank + 2\n bs_offset = int((rank + 3) * rank / 2)\n global_bs = int((num_processes + 3) * num_processes / 2)\n\n self._test_DistributedDataParallel_SyncBatchNorm(\n gpu_subset=gpus,\n rank=rank,\n local_bs=local_bs,\n global_bs=global_bs,\n offset=bs_offset)\n\n @skipIfNoTorchVision\n def test_SyncBatchNorm_process_group(self):\n # When adopting `convert_sync_batchnorm` to convert an `nn.Module`,\n # it needs to recursively pass the `process_group` in the module when the `SyncBatchNorm`\n # is nested in a sub-module or sub-sub-module (e.g. 
resnet50 in torchvision.models).\n\n process_ids = 0\n process_group = torch.distributed.new_group([process_ids])\n res50_model = torchvision.models.resnet50()\n res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(res50_model), process_group)\n process_group_sync = res50_model_sync.layer1[0].bn1.process_group\n self.assertEqual(process_group_sync, process_group)\n\n def _run_reduction_test(\n self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None\n ):\n if reduction_fn != dist.all_reduce and dst is None:\n raise ValueError(f\"Reduction fn {reduction_fn} must specify dst!\")\n if dst is not None:\n reduction_fn(tensor, dst, op)\n # Only destination rank tensor is expected to have final result.\n if dist.get_rank() == dst:\n self.assertEqual(tensor, expected_tensor)\n else:\n reduction_fn(tensor, op)\n self.assertEqual(tensor, expected_tensor)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_nccl_backend_bool_allreduce(self):\n torch.cuda.set_device(self.rank)\n # Run all_reduce with PRODUCT\n element = self.rank % 2 == 0\n for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:\n input_tensor = torch.tensor([element, element]).to(self.rank)\n self._run_reduction_test(\n input_tensor, torch.tensor([False, False]).to(self.rank), op\n )\n # Ensure that all ranks contributing True (cast to 1) results in the\n # correct reduction.\n input_tensor = torch.tensor([True, True]).to(self.rank)\n expected_tensor = input_tensor.clone()\n self._run_reduction_test(\n input_tensor, expected_tensor, op\n )\n\n # Run all_reduce with SUM\n for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:\n input_tensor = torch.tensor([element, element]).to(self.rank)\n self._run_reduction_test(\n input_tensor, torch.tensor([True, True]).to(self.rank), op\n )\n # TODO: NCCL backend does not work correctly for bitwise reduction ops\n # (see https://github.com/pytorch/pytorch/issues/41362). 
Add tests for\n # these once it is supported.\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_nccl_backend_bool_allgather(self):\n torch.cuda.set_device(self.rank)\n inp = {0: [True, True], 1: [False, True]}\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n # Preserve a copy of the tensor to compare against after allgather.\n input_tensor_copy = input_tensor.clone()\n tensor_list = [\n torch.tensor([False, False]).to(self.rank)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, input_tensor)\n\n self.assertEqual(len(tensor_list), dist.get_world_size())\n for i, t in enumerate(tensor_list):\n expected = torch.tensor(inp[i % 2]).to(self.rank)\n self.assertEqual(t, expected)\n # Ensure that the input tensor is not modified, since this collective\n # does not modify its input.\n self.assertEqual(input_tensor_copy, input_tensor)\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(int(os.environ[\"WORLD_SIZE\"]))\n @skip_if_rocm\n def test_nccl_backend_bool_reduce(self):\n torch.cuda.set_device(self.rank)\n inp = {0: [True, True], 1: [False, False]}\n # Run reduce() with product op\n for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]:\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n expected = torch.tensor([False, False]).to(self.rank)\n self._run_reduction_test(\n input_tensor, expected, op, dist.reduce, dst=0\n )\n # Ensure that all ranks contributing True (cast to 1) results in the\n # correct reduction.\n input_tensor = torch.tensor([True, True]).to(self.rank)\n expected_tensor = input_tensor.clone()\n self._run_reduction_test(\n input_tensor, expected_tensor, op, dist.reduce, dst=0\n )\n\n for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]:\n input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank)\n expected = (\n torch.tensor([True, True]).to(self.rank)\n if self.rank == 0\n else input_tensor.clone()\n )\n self._run_reduction_test(\n input_tensor, expected, op, dist.reduce, dst=0\n )\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_nccl_backend_bool_broadcast(self):\n tensor_size = 10\n bcast_tensor = torch.tensor(\n [\n (random.random() < 0.5 if self.rank == 0 else False)\n for _ in range(tensor_size)\n ]\n ).to(self.rank)\n dist.broadcast(bcast_tensor, src=0)\n # Now allgather and ensure the tensors are equal.\n tensor_list = [\n torch.tensor([False for _ in range(tensor_size)]).to(self.rank)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, bcast_tensor)\n expected = tensor_list[0]\n for tensor in tensor_list[1:]:\n self.assertEqual(tensor, expected)\n\n @unittest.skipIf(\n BACKEND != \"nccl\" and BACKEND != \"gloo\",\n \"Only NCCL and GLOO backend support DistributedDataParallel\",\n )\n @skip_if_lt_x_gpu(2)\n def test_DistributedSampler_padding(self):\n # Tests padding of distributed sampler.\n world_size = dist.get_world_size()\n dataset_size = 100 + world_size + 1\n dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)]\n\n # Specifying drop_last=True will cause the tail of the data to be dropped.\n dist_sampler = DistributedSampler(dataset=dataset, drop_last=True)\n local_num_samples, local_dataset_size = (\n dist_sampler.num_samples,\n dist_sampler.total_size,\n )\n # The effective dataset size should be the greatest integer that is <=\n # dataset_size that is divisible by the 
world_size. This is to ensure each\n # rank processes the same number of samples.\n effective_dataset_size = (\n math.ceil((dataset_size - world_size) / world_size)\n if dataset_size % world_size != 0\n else dataset_size / world_size\n )\n self.assertEqual(local_num_samples, effective_dataset_size)\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler))\n self.assertEqual(len(indices_list), local_num_samples)\n\n def validate_global_samples(local_num_samples):\n # Ensure that each rank processes the same number of samples.\n world_samples = [\n torch.LongTensor([0]).to(self.rank) for _ in range(world_size)\n ]\n dist.all_gather(world_samples, torch.tensor([local_num_samples]).to(self.rank))\n world_samples = [sample.item() for sample in world_samples]\n self.assertEqual(len(set(world_samples)), 1)\n\n validate_global_samples(local_num_samples)\n\n # drop_last=False is the default and will add additional indices to be sampled,\n # increasing the effective dataset size.\n dist_sampler_added_samples = DistributedSampler(dataset=dataset)\n local_num_samples, local_dataset_size = (\n dist_sampler_added_samples.num_samples,\n dist_sampler_added_samples.total_size,\n )\n # The effective dataset size is the smallest integer that is >= dataset_size\n # and divisible by the world size.\n self.assertEqual(\n local_num_samples, math.ceil(dataset_size / world_size)\n )\n self.assertEqual(local_dataset_size, local_num_samples * world_size)\n indices_list = list(iter(dist_sampler_added_samples))\n self.assertEqual(len(indices_list), local_num_samples)\n\n # Ensure that each rank processes the same number of samples.\n validate_global_samples(local_num_samples)\n\n @require_backend({\"nccl\", \"gloo\"})\n @require_n_gpus_for_nccl_backend(int(os.environ[\"WORLD_SIZE\"]), os.environ[\"BACKEND\"])\n def test_allgather_object(self):\n # Ensure stateful objects can be allgathered\n f = Foo(10)\n f.bar = 1\n gather_objects = [\n {\"key1\": 3, \"key2\": 4, \"key3\": {\"nested\": True}},\n f,\n \"foo\",\n [1, 2, True, \"string\", [4, 5, \"nested\"]],\n ]\n\n output_gathered = [None for _ in range(dist.get_world_size())]\n dist.all_gather_object(\n output_gathered, gather_objects[self.rank % len(gather_objects)]\n )\n\n for i, val in enumerate(output_gathered):\n expected = gather_objects[i % len(gather_objects)]\n self.assertEqual(val, expected)\n\n # Validate errors when objects can't be pickled.\n class Bar:\n pass\n\n b = Bar()\n gather_objects = [b for _ in range(dist.get_world_size())]\n with self.assertRaisesRegex(AttributeError, \"Can't pickle local object\"):\n dist.all_gather_object(\n [None for _ in range(dist.get_world_size())], gather_objects[self.rank]\n )\n\n @require_backend({\"gloo\"})\n @unittest.skipIf(BACKEND == \"nccl\", \"NCCL does not support gather\")\n def test_gather_object(self):\n # Ensure stateful objects can be gathered\n f = Foo(10)\n f.bar = 1\n gather_objects = [\n {\"key1\": 3, \"key2\": 4, \"key3\": {\"nested\": True}},\n f,\n \"example_string\",\n [1, 2, True, \"string\", [4, 5, \"nested\"]],\n ]\n output_gathered = [None for _ in range(dist.get_world_size())]\n gather_on_rank = 0\n my_rank = dist.get_rank()\n dist.gather_object(\n gather_objects[self.rank % len(gather_objects)],\n object_gather_list=output_gathered if my_rank == gather_on_rank else None,\n dst=gather_on_rank,\n )\n if my_rank != gather_on_rank:\n self.assertEqual(\n output_gathered, [None for _ in range(dist.get_world_size())]\n )\n else:\n for i, val in 
enumerate(output_gathered):\n expected = gather_objects[i % len(gather_objects)]\n self.assertEqual(val, expected)\n\n # Validate errors when objects can't be pickled.\n class Bar:\n pass\n\n b = Bar()\n gather_objects = [b for _ in range(dist.get_world_size())]\n with self.assertRaisesRegex(AttributeError, \"Can't pickle local object\"):\n dist.gather_object(\n gather_objects[0],\n object_gather_list=gather_objects\n if my_rank == gather_on_rank\n else None,\n dst=gather_on_rank,\n )\n\n @require_backend({\"nccl\"})\n @require_backends_available({\"nccl\"})\n @skip_if_lt_x_gpu(2)\n def test_nccl_gather_object_err(self):\n output_gathered = [None for _ in range(dist.get_world_size())]\n gather_on_rank = 0\n my_rank = dist.get_rank()\n with self.assertRaisesRegex(\n RuntimeError, \"ProcessGroupNCCL does not support gather\"\n ):\n dist.gather_object(\n \"foo\",\n object_gather_list=output_gathered\n if my_rank == gather_on_rank\n else None,\n dst=gather_on_rank,\n )\n\n def validate_net_equivalence(self, net):\n # Helper to validate synchronization of nets across ranks.\n net_module_states = list(net.module.state_dict().values())\n # Check that all tensors in module's state_dict() are equal.\n for t in net_module_states:\n tensor_list = [\n torch.zeros_like(t) for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, t)\n for tensor in tensor_list:\n self.assertEqual(tensor, t)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_sync_params_and_buffers(self):\n # Test that after calling _sync_params_and_buffers, models across ranks\n # are the same and are equal to the model on the input rank.\n dim = 2\n rank = self.rank\n rank_to_broadcast = 1\n # Seed to ensure that ranks are initialized with different initial models.\n torch.manual_seed(rank)\n model = nn.Linear(dim, dim, bias=False)\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1\n )\n new_model = nn.Linear(dim, dim, bias=False).cuda(rank)\n net.module = copy.deepcopy(new_model)\n # Assert params are different\n net_module_states = list(net.module.state_dict().values())\n for t in net_module_states:\n tensor_list = [\n torch.zeros_like(t) for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, t)\n for i, tensor in enumerate(tensor_list):\n if i == rank:\n self.assertEqual(t, tensor)\n else:\n # tensor from another rank should be different.\n self.assertNotEqual(t, tensor)\n\n net._sync_params_and_buffers(authoritative_rank=rank_to_broadcast)\n # Now all model params should be the same.\n self.validate_net_equivalence(net)\n # Since the network params were broadcast from rank_to_broadcast, validate that\n # they are the same as new_model on rank_to_broadcast.\n if rank == rank_to_broadcast:\n expected_states = new_model.state_dict().values()\n for t, expected in zip(net_module_states, expected_states):\n self.assertEqual(t, expected)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_grad_div_uneven_inputs(self):\n # Test gradient division during training with join() API. 
If\n # divide_by_initial_world_size=False, we scale by the effective world\n # size when allreducing grads.\n dim = 5\n batch = 1\n grad_scale = 50\n rank = self.rank\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.ones(batch, dim, device=self.rank) * grad_scale\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1\n )\n n_iters = 3\n if self.rank > 0:\n n_iters += 2\n\n with net.join(divide_by_initial_world_size=False):\n for _ in range(n_iters):\n loss = net(inp).sum()\n loss.backward()\n # The grad is always expected_grad, since we divide by the number\n # of currently active processes and inactive processes contribute\n # zero gradient. If we kept dividing by static initial world\n # size as processes leave, the grad would be smaller.\n expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale\n param = list(net.parameters())[0]\n self.assertEqual(expected_grad, param.grad)\n # Avoid accumulating grads so that it's the same every iteration\n net.zero_grad()\n torch.cuda.synchronize(device=self.rank)\n\n # If divide_by_initial_world_size=True (default), we always scale grads\n # by the initial world_size.\n with net.join(divide_by_initial_world_size=True):\n for i in range(n_iters):\n loss = net(inp).sum()\n loss.backward()\n effective_ws = dist.get_world_size()\n if i >= 3:\n effective_ws -= 1\n expected_grad = (\n torch.ones(dim, dim, device=self.rank) * grad_scale * effective_ws\n ) / dist.get_world_size()\n param = list(net.parameters())[0]\n self.assertEqual(expected_grad, param.grad)\n # Avoid accumulating grad so that it's the same every iteration.\n net.zero_grad()\n torch.cuda.synchronize(device=self.rank)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_join_model_equivalence(self):\n # Verifies equivalence with model training locally and with DDP under\n # the join context manager.\n batch = 3\n dim = 10\n learning_rate = 0.03\n model = nn.Linear(dim, dim, bias=False)\n inp = torch.rand(batch, dim, device=self.rank)\n local_model = copy.deepcopy(model)\n local_model = local_model.cuda(self.rank)\n rank_to_iter_mapping = {rank : 2 * (rank + 1) for rank in range(dist.get_world_size())}\n # run local model\n local_iters = sum(rank_to_iter_mapping.values())\n local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate)\n for _ in range(local_iters):\n local_optim.zero_grad()\n out = local_model(inp)\n loss = out.sum()\n loss.backward()\n local_optim.step()\n\n # run DDP model with join API\n num_iters = rank_to_iter_mapping[self.rank]\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(self.rank), device_ids=[self.rank]\n )\n ddp_optim = torch.optim.SGD(\n model.parameters(), lr=learning_rate * dist.get_world_size()\n )\n with net.join():\n for i in range(num_iters):\n ddp_optim.zero_grad()\n out = net(inp)\n loss = out.sum()\n loss.backward()\n torch.cuda.synchronize(device=self.rank)\n ddp_optim.step()\n\n # Validate model state dicts are equal\n for (_, local_tensor), (_, dist_tensor) in zip(\n local_model.state_dict().items(), net.module.state_dict().items()\n ):\n self.assertEqual(local_tensor, dist_tensor)\n\n def _run_uneven_inputs_test(\n self, test_case, iteration_mapping, find_unused_params,\n ):\n model = test_case.model\n inp = test_case.inp\n rank = self.rank\n # Ensure all outstanding GPU work is complete so this test runs independently.\n torch.cuda.synchronize()\n # 
Bucket_cap_mb is intentionally low to test allreduce scheduling when\n # there are many buckets.\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(rank),\n device_ids=[rank],\n bucket_cap_mb=1,\n find_unused_parameters=find_unused_params,\n )\n\n # Determine num iters for this rank via the passed in mapping.\n num_iters = iteration_mapping[rank]\n with net.join():\n for _ in range(num_iters):\n if isinstance(inp, tuple):\n loss = net(*inp).sum()\n else:\n loss = net(inp).sum()\n loss.backward()\n self._model_step(net)\n # Ensure completion of GPU kernels (including allreduce). If the\n # join API is not properly implemented, then this should hang\n # since the allreduce will hang.\n torch.cuda.synchronize(device=rank)\n\n # Ensure completion of all GPU kernels.\n torch.cuda.synchronize(device=rank)\n self.assertTrue(net._authoritative_rank)\n # All ranks should have agreed on the same authoritative_rank!\n final_rank_tensor = torch.tensor([net._authoritative_rank], device=self.rank)\n tensor_list = [\n torch.zeros_like(final_rank_tensor)\n for _ in range(dist.get_world_size())\n ]\n dist.all_gather(tensor_list, final_rank_tensor)\n max_rank = dist.get_world_size() - 1\n self.assertSetEqual({max_rank}, set(tensor.item() for tensor in tensor_list))\n # Ensure that all models are the same across ranks after all have joined.\n self.validate_net_equivalence(net)\n dist.barrier()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_uneven_inputs(self):\n class DDPUnevenTestInput(NamedTuple):\n name: str\n model: nn.Module\n inp: Union[torch.tensor, tuple]\n\n dim = 1000\n batch = 1\n # Create a variety of models to run uneven input tests on.\n large_model = nn.Sequential(\n nn.Conv2d(1, 20, 5),\n nn.ReLU(),\n nn.Conv2d(20, 32, 5),\n nn.ReLU(),\n nn.Conv2d(32, 256, 5),\n nn.ReLU(),\n )\n small_model = nn.Linear(dim, dim, bias=False)\n bn_net = BatchNormNet()\n\n class UnusedParamModule(nn.Module):\n def __init__(self, unused_params_rank):\n super().__init__()\n self.t0 = Task()\n self.t1 = Task()\n self.unused_params_rank = unused_params_rank\n\n def task_parameters(self):\n return (self.t0.p, self.t1.p)\n\n def forward(self, x, rank):\n return (\n self.t1(self.t0(x))\n if rank != self.unused_params_rank\n else self.t1(x)\n )\n\n unjoined_rank_with_unused_params_model = UnusedParamModule(1)\n joined_rank_with_unused_params_model = UnusedParamModule(0)\n\n rank = self.rank\n models_to_test = [\n # Network with batchnorm\n DDPUnevenTestInput(\n name=\"batch_norm_net\", model=bn_net, inp=torch.ones(batch, 2, device=rank)\n ),\n DDPUnevenTestInput(\n name=\"large_conv_model\",\n model=large_model,\n inp=torch.ones(batch, batch, dim, dim, device=rank),\n ),\n DDPUnevenTestInput(\n name=\"small_model\",\n model=small_model,\n inp=torch.ones(batch, dim, device=rank),\n ),\n # Unused parameter test where rank that does not join early has unused params\n DDPUnevenTestInput(\n name=\"unjoined_rank_with_unused_params_model\",\n model=unjoined_rank_with_unused_params_model,\n inp=(torch.ones(batch, 2, device=rank), rank),\n ),\n # Unused parameter test where rank that does join early has unused params\n DDPUnevenTestInput(\n name=\"joined_rank_with_unused_params_model\",\n model=joined_rank_with_unused_params_model,\n inp=(torch.ones(batch, 2, device=rank), rank),\n ),\n ]\n\n # Add resnet model if we have torchvision installed.\n if HAS_TORCHVISION:\n resnet_model = torchvision.models.resnet50()\n 
models_to_test.append(\n DDPUnevenTestInput(\n name=\"resnet_model\",\n model=resnet_model,\n inp=torch.ones(1, 3, 1000, 1000),\n )\n )\n\n # 0 iteration tests for when one process does not train model at all, so\n # we must shadow the broadcast calls made when rebuilding buckets.\n baseline_num_iters = [0, 5]\n iteration_offsets = [2, 3, 10]\n num_uneven_ranks = [1]\n if dist.get_world_size() > 2:\n num_uneven_ranks.append(2)\n iteration_mappings = []\n # Generate rank : num_iters mappings for various uneven input scenarios.\n # This includes cases where rank 0 joins early and all other ranks join\n # later, and scenarios where multiple ranks join early, but at different\n # iterations, and later ranks join later.\n for num_early_join_ranks in num_uneven_ranks:\n for baseline_iter in baseline_num_iters:\n for offset in iteration_offsets:\n mapping = {\n rank: baseline_iter for rank in range(0, num_early_join_ranks)\n }\n # if num_early_join_ranks > 1, ranks > 0 that will join early\n # iterate offset//2 more times than rank 0, to test nodes\n # depleting inputs at different times.\n if num_early_join_ranks > 1:\n for rank in mapping.keys():\n if rank > 0:\n mapping[rank] += offset // 2\n mapping.update(\n {\n rank: baseline_iter + offset\n for rank in range(\n num_early_join_ranks, dist.get_world_size()\n )\n }\n )\n iteration_mappings.append(mapping)\n\n for (test_case, iteration_mapping) in itertools.product(\n models_to_test, iteration_mappings\n ):\n if self.rank == 0:\n print(\n f\"Running test: {test_case.name} with iteration mapping {iteration_mapping}\"\n )\n self._run_uneven_inputs_test(\n test_case,\n iteration_mapping,\n find_unused_params=(\"unused_params_model\" in test_case.name),\n )\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_uneven_input_join_disable(self):\n # tests that if net.join() with enable=False is specified, DDP works as\n # expected with even inputs.\n torch.manual_seed(self.rank)\n net = torch.nn.parallel.DistributedDataParallel(\n torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank]\n )\n inp = torch.ones(1) * self.rank\n n_iters = 5\n world_size = dist.get_world_size()\n with net.join(enable=False):\n for _ in range(n_iters):\n # Clear grads\n grad = net.module.weight.grad\n if grad is not None:\n grad.requires_grad_(False)\n grad.zero_()\n out = net(inp)\n loss = out.sum()\n loss.backward()\n # Validate gradients to ensure that we divide by the correct\n # world_size when join mode is disabled.\n expected_grad = sum(i for i in range(world_size)) / world_size\n self.assertEqual(\n net.module.weight.grad.item(), expected_grad\n )\n\n self.assertFalse(net.ddp_join_enabled)\n self.validate_net_equivalence(net)\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_ddp_uneven_input_exception(self):\n # Tests that exceptions during training are correctly propagated by the\n # context manager.\n error_str = \"Intentional error\"\n\n class ExceptionModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.param = nn.Parameter(torch.ones(1, requires_grad=True))\n\n def forward(self, _):\n raise ValueError(error_str)\n\n exception_module = ExceptionModule()\n net = torch.nn.parallel.DistributedDataParallel(\n exception_module.cuda(self.rank), device_ids=[self.rank]\n )\n inp = torch.ones(1)\n with self.assertRaisesRegex(ValueError, error_str):\n with 
net.join():\n out = net(inp)\n loss = out.sum()\n loss.backward()\n\n @require_backend({\"gloo\", \"nccl\"})\n @require_backends_available({\"gloo\", \"nccl\"})\n @skip_if_lt_x_gpu(4)\n @skip_if_rocm\n def test_ddp_uneven_inputs_replicated_error(self):\n # Tests that the context manager errors out in SPMD mode.\n group = dist.new_group([0, 1])\n if self.rank < 2:\n model = nn.Linear(1, 1, bias=False)\n rank_to_device = {0: [0, 1], 1: [2, 3]}\n\n devices = rank_to_device[self.rank]\n net = torch.nn.parallel.DistributedDataParallel(\n model.cuda(devices[0]), device_ids=devices, process_group=group\n )\n with self.assertRaisesRegex(\n ValueError, r\"DDP join\\(\\) API does not support Single-Process Multi-GPU\"\n ):\n with net.join():\n pass\n # We need a barrier since otherwise non-participating processes exit too early\n # and cause a timeout.\n self._barrier(timeout=60)\n\nif BACKEND == \"gloo\" or BACKEND == \"nccl\":\n WORLD_SIZE = os.environ[\"WORLD_SIZE\"]\n\n class TestDistBackend(MultiProcessTestCase, _DistTestBase):\n\n # Needed since MultiProcessTestCase assumes a world_size of 4, but we\n # run these tests under other various world_sizes.\n @property\n def world_size(self):\n return os.environ[\"WORLD_SIZE\"]\n\n @classmethod\n def setUpClass(cls):\n os.environ[\"MASTER_ADDR\"] = str(MASTER_ADDR)\n os.environ[\"MASTER_PORT\"] = str(MASTER_PORT)\n os.environ[\"WORLD_SIZE\"] = str(WORLD_SIZE)\n super().setUpClass()\n\n def setUp(self):\n super().setUp()\n global INIT_METHOD\n # initialize Barrier.\n Barrier.init()\n # We rely on tearDown for deleting the temporary file\n # TODO: this temporary file should be deduped with the file_name\n # in MultiProcessTestCase as part of supporting spawn mode for these tests.\n # https://github.com/pytorch/pytorch/issues/36663\n self.temporary_file = None\n if INIT_METHOD.startswith(\"file://\"):\n self.temporary_file = tempfile.NamedTemporaryFile(delete=False)\n INIT_METHOD = \"file://{}\".format(self.temporary_file.name)\n\n # TODO: enable spawn mode https://github.com/pytorch/pytorch/issues/36663\n self._fork_processes()\n\n def tearDown(self):\n super(MultiProcessTestCase, self).tearDown()\n super(TestDistBackend, self).tearDown()\n\n # Clean up temporary file if we used one.\n if self.temporary_file:\n try:\n os.unlink(self.temporary_file.name)\n except OSError as err:\n # ENOENT is OK because the test is supposed to clean it up.\n if err.errno != errno.ENOENT:\n raise\n\n @classmethod\n def _run(cls, rank, test_name, file_name):\n self = cls(test_name)\n self.rank = rank\n self.file_name = file_name\n try:\n dist.init_process_group(\n init_method=INIT_METHOD,\n backend=BACKEND,\n world_size=int(WORLD_SIZE),\n rank=self.rank\n )\n except RuntimeError as e:\n if \"recompile\" in e.args[0]:\n sys.exit(TEST_SKIPS[\"backend_unavailable\"].exit_code)\n\n raise\n\n # Execute barrier prior to running test to ensure that every process\n # has finished initialization and that the following test\n # immediately exiting due to a skip doesn't cause flakiness.\n self._barrier()\n\n # self.id() == e.g. 
'__main__.TestDistributed.test_get_rank'\n # We're retrieving a corresponding test and executing it.\n getattr(self, test_name)()\n self._barrier()\n dist.destroy_process_group()\n sys.exit(0)\n\n\nelif BACKEND == \"mpi\":\n WORLD_SIZE = os.environ[\"WORLD_SIZE\"]\n dist.init_process_group(init_method=INIT_METHOD, backend=\"mpi\")\n\n class TestMPI(TestCase, _DistTestBase):\n pass\n\nelif BACKEND == \"test\":\n class TestBackendDynamicLoad(TestCase):\n def setUp(self):\n super(TestBackendDynamicLoad, self).setUp()\n\n def _load_test_backend(self):\n temp_dir = tempfile.mkdtemp()\n src = \"{}/../cpp_extensions/cpp_c10d_extension.cpp\".format(os.path.abspath(os.path.dirname(__file__)))\n extension = torch.utils.cpp_extension.load(\n name=\"torch_test\",\n sources=[src],\n build_directory=temp_dir\n )\n\n @skip_if_no_ninja\n def test_backend_apis(self):\n self._load_test_backend()\n\n os.environ['WORLD_SIZE'] = '1'\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = str(find_free_port())\n os.environ['RANK'] = '0'\n\n dist.init_process_group(backend='test', init_method='env://', world_size=1, rank=0)\n self.assertEqual(dist.get_rank(), 0)\n self.assertEqual(dist.get_world_size(), 1)\n\n process_group = _get_default_group()\n work = process_group.allreduce([torch.rand(1), torch.rand(1)])\n self.assertTrue(work.wait())\n self.assertTrue(work.is_completed())\n self.assertTrue(work.is_success())\n\n work = process_group.broadcast([torch.rand(1)])\n self.assertTrue(work.wait())\n self.assertTrue(work.is_completed())\n self.assertTrue(work.is_success())\n\n dist.destroy_process_group()\n\nif __name__ == \"__main__\":\n assert (\n not torch.cuda._initialized\n ), \"test_distributed must not have initialized CUDA context on main process\"\n\n run_tests()\n",
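The tests in the file above all share one lifecycle: initialize a process group, run a collective, synchronize with a barrier, then tear the group down. The following minimal sketch shows that same sequence in a single process; it is an illustration rather than part of the suite, and it assumes the gloo backend with an arbitrarily chosen port (29500).

# Minimal single-process sketch of the init -> collective -> barrier -> destroy
# lifecycle exercised by the tests above. Assumptions: gloo backend available,
# port 29500 free on localhost.
import os
import torch
import torch.distributed as dist

os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "29500"  # assumed free; pick any open port
dist.init_process_group(backend="gloo", init_method="env://", world_size=1, rank=0)
t = torch.ones(2) * dist.get_rank()
dist.all_reduce(t, op=dist.ReduceOp.SUM)  # sum over the (single-rank) group
dist.barrier()  # a no-op with one rank, but mirrors the tests' synchronization
assert dist.get_world_size() == 1
dist.destroy_process_group()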
"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport collections\nfrom itertools import product\n\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\nfrom caffe2.python import core, dyndep, workspace\nfrom caffe2.quantization.server import utils as dnnlowp_utils\nfrom caffe2.quantization.server.dnnlowp_test_utils import (\n avoid_vpmaddubsw_overflow_fc,\n check_quantized_results_close,\n)\nfrom hypothesis import given, settings\n\n\ndyndep.InitOpsLibrary(\"//caffe2/caffe2/quantization/server:dnnlowp_ops\")\nworkspace.GlobalInit([\"caffe2\", \"--caffe2_omp_num_threads=11\"])\n\n\nclass DNNLowPBatchMatMulOpTest(hu.HypothesisTestCase):\n # correctness test with no quantization error in inputs\n @given(\n m=st.integers(0, 32),\n n=st.integers(4, 32),\n k=st.integers(4, 32),\n batch_size=st.integers(0, 4),\n **hu.gcs_cpu_only\n )\n @settings(deadline=10000)\n def test_dnnlowp_batch_matmul_int(self, m, n, k, batch_size, gc, dc):\n # A and B have scale 1, so exactly represented after quantization\n A_min = -77\n A_max = A_min + 255\n A = np.round(np.random.rand(batch_size, m, k) * 255 + A_min)\n A = A.astype(np.float32)\n # input channels 0 and 1 are all A_min to avoid overflow from vpmaddubsw\n # when multiplied with B_min and B_max\n if batch_size > 0 and m > 0:\n A[0, :, 0] = A_min\n A[0, 0, 1] = A_max\n\n B_min = -100\n B_max = B_min + 255\n B = np.round(np.random.rand(batch_size, n, k) * 255 + B_min)\n B = B.astype(np.float32)\n if batch_size > 0:\n B[0, 0, 0] = B_min\n B[0, 1, 0] = B_max\n\n for i in range(batch_size):\n avoid_vpmaddubsw_overflow_fc(\n m, k, n, A[i,], A_min, A_max, B[i,], B_min, B_max\n )\n\n for trans_a, trans_b in product([0, 1], [0, 1]):\n Output = collections.namedtuple(\"Output\", [\"Y\", \"op_type\", \"engine\"])\n outputs = []\n\n op_engine_list = [\n (\"BatchMatMul\", \"\"),\n (\"BatchMatMul\", \"DNNLOWP\"),\n (\"BatchMatMul\", \"DNNLOWP_16\"),\n (\"Int8BatchMatMul\", \"DNNLOWP\"),\n ]\n\n for op_type, engine in op_engine_list:\n net = core.Net(\"test_net\")\n\n if \"DNNLOWP\" in engine:\n quantize_A = core.CreateOperator(\n \"Quantize\", [\"A\"], [\"A_q\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([quantize_A])\n\n quantize_B = core.CreateOperator(\n \"Quantize\", [\"B\"], [\"B_q\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([quantize_B])\n\n batch_matmul = core.CreateOperator(\n op_type,\n [\n \"A_q\" if \"DNNLOWP\" in engine else \"A\",\n \"B_q\" if \"DNNLOWP\" in engine else \"B\",\n ],\n [\"Y_q\" if \"DNNLOWP\" in engine else \"Y\"],\n trans_a=trans_a,\n trans_b=trans_b,\n engine=engine,\n device_option=gc,\n )\n net.Proto().op.extend([batch_matmul])\n\n if \"DNNLOWP\" in engine:\n dequantize = core.CreateOperator(\n \"Dequantize\", [\"Y_q\"], [\"Y\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([dequantize])\n\n self.ws.create_blob(\"A\").feed(\n np.transpose(A, (0, 2, 1)) if trans_a else A, device_option=gc\n )\n self.ws.create_blob(\"B\").feed(\n B if trans_b else np.transpose(B, (0, 2, 1)), device_option=gc\n )\n self.ws.run(net)\n outputs.append(\n Output(Y=self.ws.blobs[\"Y\"].fetch(), op_type=op_type, engine=engine)\n )\n\n check_quantized_results_close(outputs)\n\n # correctness test with no quantization error in inputs\n @given(\n m=st.integers(0, 32),\n n=st.integers(4, 32),\n k=st.integers(4, 32),\n C_1=st.integers(0, 3), # number of batch dims\n C_2=st.integers(0, 3),\n A_quantized=st.booleans(),\n 
B_quantized=st.booleans(),\n out_quantized=st.booleans(),\n **hu.gcs_cpu_only\n )\n @settings(deadline=1000)\n def test_dnnlowp_batch_matmul_int_constant_B(\n self, m, n, k, C_1, C_2, A_quantized, B_quantized, out_quantized, gc, dc\n ):\n batch_dims = tuple(np.random.randint(3, size=max(C_1, C_2)))\n batch_dims_A = batch_dims[-C_1:]\n batch_dims_B = batch_dims[-C_2:]\n A = np.zeros(batch_dims_A + (m, k)).astype(np.float32)\n B = np.zeros(batch_dims_B + (n, k)).astype(np.float32)\n\n if np.prod(batch_dims) > 0:\n for index in np.ndindex(batch_dims_A):\n # When both input and output are float, each input of the batch has\n # scale 1 but with different offset, so input-wise quantization\n # shouldn't have any input quantization error\n # A_min = -77 if (A_quantized or out_quantized) else -77 + i\n A_min = -77\n A_max = A_min + 255\n A[index] = np.round(np.random.rand(m, k) * 255 + A_min)\n # input channels 0 and 1 are all A_min to avoid overflow from vpmaddubsw\n # when multiplied with B_min and B_max\n A[index][:, 0] = A_min\n if m != 0:\n A[index][0, 1] = A_max\n\n i = 0\n for index in np.ndindex(batch_dims_B):\n # When weight is quantized in a lazy manner, each input of the batch has\n # scale 1 but with different offset, so input-wise quantization\n # shouldn't have any input quantization error when weight is quantized\n # in a lazy manner.\n B_min = -100 if B_quantized else -100 + i\n # B_min = -100\n B_max = B_min + 255\n B[index] = np.round(np.random.rand(n, k) * 255 + B_min)\n B[index][0, 0] = B_min\n B[index][1, 0] = B_max\n\n if C_1 > C_2:\n # A has more dims\n for outer_index in np.ndindex(batch_dims_A[: C_1 - C_2]):\n avoid_vpmaddubsw_overflow_fc(\n m,\n k,\n n,\n A[outer_index] if C_2 == 0 else A[outer_index + index],\n A_min,\n A_max,\n B[index],\n B_min,\n B_max,\n )\n else:\n avoid_vpmaddubsw_overflow_fc(\n m, k, n, A[index[-C_1:]], A_min, A_max, B[index], B_min, B_max\n )\n i += 1\n\n for trans_a, trans_b in product([0, 1], [0, 1]):\n Output = collections.namedtuple(\"Output\", [\"Y\", \"op_type\", \"engine\"])\n outputs = []\n\n op_engine_list = [\n (\"BatchMatMul\", \"\"),\n (\"BatchMatMul\", \"DNNLOWP\"),\n (\"Int8BatchMatMul\", \"DNNLOWP\"),\n ]\n\n for op_type, engine in op_engine_list:\n net = core.Net(\"test_net\")\n\n do_quantize_A = \"DNNLOWP\" in engine and A_quantized\n do_quantize_B = \"DNNLOWP\" in engine and B_quantized\n do_dequantize = \"DNNLOWP\" in engine and out_quantized\n\n if do_quantize_A:\n quantize_A = core.CreateOperator(\n \"Quantize\", [\"A\"], [\"A_q\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([quantize_A])\n\n if do_quantize_B:\n int8_given_tensor_fill, B_q_param = dnnlowp_utils.create_int8_given_tensor_fill(\n B if trans_b else B.swapaxes(-1, -2), \"B_q\"\n )\n net.Proto().op.extend([int8_given_tensor_fill])\n\n batch_matmul = core.CreateOperator(\n op_type,\n [\"A_q\" if do_quantize_A else \"A\", \"B_q\" if do_quantize_B else \"B\"],\n [\"Y_q\" if do_dequantize else \"Y\"],\n trans_a=trans_a,\n trans_b=trans_b,\n broadcast=True,\n constant_B=True,\n dequantize_output=not do_dequantize,\n engine=engine,\n device_option=gc,\n )\n if do_quantize_B:\n # When quantized weight is provided, we can't rescale the\n # output dynamically by looking at the range of output of each\n # batch, so here we provide the range of output observed from\n # fp32 reference implementation\n dnnlowp_utils.add_quantization_param_args(\n batch_matmul, outputs[0][0]\n )\n net.Proto().op.extend([batch_matmul])\n\n if do_dequantize:\n dequantize = 
core.CreateOperator(\n \"Dequantize\", [\"Y_q\"], [\"Y\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([dequantize])\n\n self.ws.create_blob(\"A\").feed(\n A.swapaxes(-1, -2) if trans_a else A, device_option=gc\n )\n self.ws.create_blob(\"B\").feed(\n B if trans_b else B.swapaxes(-1, -2), device_option=gc\n )\n self.ws.run(net)\n outputs.append(\n Output(Y=self.ws.blobs[\"Y\"].fetch(), op_type=op_type, engine=engine)\n )\n\n if np.prod(batch_dims) > 0:\n check_quantized_results_close(outputs)\n",
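The batch-matmul tests above work because the inputs are built from whole numbers inside a 255-wide window, so 8-bit affine quantization with scale 1 represents them exactly. The sketch below illustrates that round-trip property; it assumes the textbook affine quantize/dequantize formulas with scale 1 and zero_point = -A_min, and its helper names are illustrative rather than the DNNLOWP internals.

# Illustrative affine uint8 quantize/dequantize round trip (assumed textbook
# formulas, not DNNLOWP's implementation).
import numpy as np

def quantize(x, scale, zero_point):
    # q = clip(round(x / scale) + zero_point, 0, 255)
    return np.clip(np.round(x / scale) + zero_point, 0, 255).astype(np.uint8)

def dequantize(q, scale, zero_point):
    return scale * (q.astype(np.float32) - zero_point)

A_min = -77  # matches the window used by the tests above
scale, zero_point = 1.0, -A_min
A = np.round(np.random.rand(4, 8) * 255 + A_min).astype(np.float32)
A_rt = dequantize(quantize(A, scale, zero_point), scale, zero_point)
assert np.array_equal(A, A_rt)  # exact: integer values within [A_min, A_min + 255]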
"import torch._C as C\nfrom torch.testing._internal.common_utils import TestCase, run_tests\n\nimport itertools\nimport unittest\n\n# TODO: Expand the dispatcher API to be a generic API for interfacing with\n# the dispatcher from Python!\n#\n# These are exhaustive tests for commutativity of dispatch behavior. If you're\n# looking for more usage-info style tests, check op_registration_test.cpp\n#\n# Things not tested here:\n# - Listeners\n# - Top level namespace registrations\n# - Fallback\n# - Exotic overloads of CppFunction/schema\n#\n# Things not directly tested here:\n# - Internal state of Dispatcher makes sense. This is indirectly\n# tested by the invariant testing\n\nclass TestDispatch(TestCase):\n namespace_index = 0\n\n def test_all_invariants(self):\n # Check that the regular stuff is OK!\n C._dispatch_check_all_invariants()\n\n # You probably don't want to call this directly; if your constructors\n # don't commute, you can still run commute with a fixed ctor_order\n # so that you can test that the destructors still commute\n def run_ops(self, name, ops, ctor_order=None, dtor_order=None,\n results=None, expect_raises=False):\n \"\"\"\n Given a list of operator registrations, run the registrations in the\n order specified by ctor_order, and then run the deregistrations in\n dtor_order.\n\n If results is specified, intermediate results are checked for consistency\n with results stored in results (and stored in results if this is the\n first time we've seen them). Results are expected to be equivalent\n modulo commutativity and inverses (thus, results is keyed on a frozenset\n of in effect registrations from ops). Results stores Tuple[str, provenance],\n where provenance is a string that describes how exactly we got this\n string.\n\n If expect_raises is True, it is not an error to raise an exception. Instead,\n we'll store the exception string (instead of the dispatcher state)\n in results. 
In principle we should flag these differently, but it's\n very obvious when you get an error in one case but not another.\n \"\"\"\n # By allocating every test into a fresh namespace, this makes it less\n # likely that a bug in the testing framework will result in tests\n # interfering with each other\n self.__class__.namespace_index += 1\n if results is None:\n results = {}\n if ctor_order is None:\n ctor_order = list(range(len(ops)))\n if dtor_order is None:\n dtor_order = list(reversed(ctor_order))\n # Refs which retain the c10::Module object so we can explicitly control\n # when each deregistration happens (deregistration occurs when the\n # object gets deallocated).\n refs = [None] * len(ops)\n # Keep track of the set \"in effect\" registrations\n active_ops = set()\n\n # double underscore to make it less likely we conflict with something\n # else\n test_namespace = \"__test{}__\".format(self.namespace_index)\n\n def check_invariants(actual_provenance):\n C._dispatch_check_invariants(name)\n # Normalize the test namespace so that expected outputs are stable\n actual = C._dispatch_dump(\n \"{}::{}\".format(test_namespace, name)).replace(test_namespace, \"test\")\n expected, expected_provenance = results.setdefault(\n frozenset(active_ops),\n (actual, actual_provenance)\n )\n self.assertMultiLineEqual(\n expected, actual,\n \"expected from {}; actual from {}\"\n .format(expected_provenance, actual_provenance)\n )\n\n results.setdefault(frozenset(), (\"\", \"hardcoded initial state\"))\n check_invariants(\"initial state\")\n # In the order specified by ctor_order, run registrations\n set_to_report = frozenset(range(len(ops)))\n for i, op_ix in enumerate(ctor_order):\n # It would be better to DEF here, but because we manage\n # lifetime of multiple registrations with multiple Library\n # references (refs), we can't deal with the strict checking\n # from DEF.\n refs[op_ix] = C._dispatch_library(\"FRAGMENT\", test_namespace, \"\")\n active_ops.add(op_ix)\n try:\n ops[op_ix](refs[op_ix])\n check_invariants(\"running ctors {}\".format(ctor_order[:i + 1]))\n except RuntimeError as e:\n if not expect_raises:\n raise\n actual = str(e).replace(test_namespace, \"test\")\n expected, expected_provenance = results.setdefault(\n frozenset(active_ops),\n (actual, \"error after running ctors {}\".format(ctor_order[:i + 1]))\n )\n self.assertMultiLineEqual(expected, actual, expected_provenance)\n set_to_report = frozenset(active_ops)\n active_ops.remove(op_ix)\n # NB: this finally test asserts that if a registrations fails,\n # the dispatcher is left in the same state *that it was before*!\n check_invariants(\n \"running ctors {} and then failing to run ctor {} \"\n \"(did this failure leave the dispatcher in a wedged state? \"\n \"it shouldn't!)\"\n .format(ctor_order[:i], op_ix))\n break\n last_ctor = i\n if expect_raises and len(active_ops) == len(ops):\n # Destroy references first, as some test frameworks (like pytest)\n # will retain references in the exception raised by assertTrue! 
EW!\n refs = None\n self.assertTrue(\n False,\n \"expected exception to be raised, but nothing was raised \"\n \"(after running ctors {})\".format(ctor_order))\n # In the order specified by dtor_order, run deregistrations\n for i, op_ix in enumerate(dtor_order):\n # Trigger a destruction\n refs[op_ix] = None\n # discard not remove, since we may not have actually deregistered\n # anything if there was an error raised\n if expect_raises:\n active_ops.discard(op_ix)\n else:\n active_ops.remove(op_ix)\n check_invariants(\n \"running ctors {}, then running dtors {}\"\n .format(ctor_order[:last_ctor + 1], dtor_order[:i + 1])\n )\n return results[set_to_report][0]\n\n # Operator registrations are commutative (as static initializers can\n # run in any order) and invertible (by deregistration). (Subject\n # to some caveats: some legacy behavior in the system are not commutative--\n # we want to get rid of these!)\n #\n # So while in principle we could simply test a set of operations\n # by just running them one by one in the order specified by the user,\n # we can get more assurance about these extra properties by doing\n # more work:\n #\n # 1. Don't run the registrations once in a fixed order: run every possible\n # permutation. Similarly, run every permutation of deregistration order.\n #\n # 2. Don't just check the end state of the dispatcher: for every\n # subset of operator registrations, ensure that the computed\n # intermediate state is path independent. One thing to note:\n # in this function, we assume each operation is unique. In general,\n # there may be duplicated registrations, but these are usually\n # idempotent or legacy. We test for behavior here separately.\n #\n # NB: checking all permutations means this function is exponential in\n # the length of ops! So don't pass too many ops to this function!\n def commute(self, name, ops, ctor_order=None, expect_raises=False):\n results = {}\n\n def go(ctor_order):\n for dtor_order in itertools.permutations(range(len(ops))):\n self.run_ops(\n name, ops, ctor_order, dtor_order,\n results=results, expect_raises=expect_raises)\n\n if ctor_order is not None:\n go(ctor_order)\n else:\n for ctor_order in itertools.permutations(range(len(ops))):\n go(ctor_order)\n\n # Return the \"full\" state after all operations are run.\n # If this KeyErrors, that means that there did not exist any\n # ordering of ctors which got us to the \"end\". 
That's an\n # error in test construction: it means you could have\n # factored the test into two smaller ones.\n return results[frozenset(range(len(ops)))][0]\n\n def test_def(self):\n r = self.commute(\"foo\", [\n # m.def(\"foo(Tensor x) -> Tensor\")\n lambda m: m.def_(\"foo(Tensor x) -> Tensor\"),\n # m.impl(\"test_def\", [](const Tensor& x) { return x })\n lambda m: m.impl_t_t(\"foo\"),\n # m.impl(\"test_def\", kAutograd, [](const Tensor& x) { return x })\n lambda m: m.impl_t_t(\"foo\", dispatch=\"autograd\")\n ])\n self.assertExpectedInline(r, '''\\\nname: test::foo\nschema: test::foo(Tensor x) -> (Tensor)\ndebug: registered at /dev/null:0\nalias analysis kind: FROM_SCHEMA\nAutograd: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]\ncatchall: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]\n''')\n\n def test_def_impl_schema_mismatch(self):\n # NB: an impl-impl mismatch is not reported eagerly; you'll find out\n # about it because one of them won't match with def\n r = self.commute(\"foo\", [\n # m.def(\"foo(Tensor x, Tensor y) -> Tensor\")\n lambda m: m.def_(\"foo(Tensor x, Tensor y) -> Tensor\"),\n # m.impl(\"foo\", [](const Tensor & x) { return x })\n lambda m: m.impl_t_t(\"foo\"),\n ], expect_raises=True)\n self.assertExpectedInline(r, '''In registration for test::foo: expected schema of operator to be \"test::foo(Tensor x, Tensor y) -> (Tensor)\" (registered at /dev/null:0), but got inferred schema \"(Tensor _0) -> (Tensor _0)\" (impl_t_t). The number of arguments is different. 2 vs 1.''') # noqa\n\n def test_def_with_inference(self):\n r = self.commute(\"foo\", [\n # m.def(\"foo\", [](const Tensor & x) { return x })\n lambda m: m.def_name_t_t(\"foo\"),\n # m.impl(\"foo\", torch::kAutograd, [](const Tensor & x) { return x })\n lambda m: m.impl_t_t(\"foo\", \"autograd\")\n ])\n self.assertExpectedInline(r, '''\\\nname: test::foo\nschema: test::foo(Tensor _0) -> (Tensor _0)\ndebug: registered at /dev/null:0\nalias analysis kind: CONSERVATIVE\nAutograd: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]\ncatchall: default_def_name_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]\n''')\n\n def test_def_only(self):\n r = self.commute(\"foo\", [\n # m.def(\"foo(Tensor x, Tensor y) -> Tensor\")\n lambda m: m.def_(\"foo(Tensor x, Tensor y) -> Tensor\"),\n ])\n self.assertExpectedInline(r, '''\\\nname: test::foo\nschema: test::foo(Tensor x, Tensor y) -> (Tensor)\ndebug: registered at /dev/null:0\nalias analysis kind: FROM_SCHEMA\n''')\n\n def test_impl_only(self):\n r = self.commute(\"foo\", [\n # m.impl(\"foo\", [](const Tensor& x) { return x })\n lambda m: m.impl_t_t(\"foo\"),\n # m.impl(\"foo\", torch::kAutograd, [](const Tensor& x) { return x })\n lambda m: m.impl_t_t(\"foo\", \"autograd\")\n ])\n self.assertExpectedInline(r, '''\\\nname: test::foo\nschema: (none)\nAutograd: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]\ncatchall: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]\n''')\n\n # Can't do this yet for BC reasons\n @unittest.expectedFailure\n def test_multiple_def_error(self):\n r = self.commute(\"foo\", [\n # m.def(\"foo(Tensor x, Tensor y) -> Tensor\")\n lambda m: m.def_(\"foo(Tensor x, Tensor y) -> Tensor\"),\n # m.def(\"foo(Tensor x, Tensor y) -> Tensor\")\n lambda m: m.def_(\"foo(Tensor x, Tensor y) -> Tensor\"),\n ], expect_raises=True)\n # TODO: fill in the error message here\n # self.assertExpectedInline(r, '''''')\n\n def test_def_with_explicit_alias(self):\n r = self.commute(\"foo\", [\n # m.def(torch::schema(\n # 
\"foo(Tensor x, Tensor y) -> Tensor\",\n # AliasAnalysisKind::PURE))\n lambda m: m.def_(\"foo(Tensor x, Tensor y) -> Tensor\",\n alias=\"PURE_FUNCTION\")\n ])\n self.assertExpectedInline(r, '''\\\nname: test::foo\nschema: test::foo(Tensor x, Tensor y) -> (Tensor)\ndebug: registered at /dev/null:0\nalias analysis kind: PURE_FUNCTION\n''')\n\n # TODO: get rid of this test when multiple defs are wrong\n def test_multiple_def_schema_mismatch(self):\n # error message is order dependent\n ops = [\n # m.def(\"foo(Tensor x, Tensor y) -> Tensor\")\n lambda m: m.def_(\"foo(Tensor x, Tensor y) -> Tensor\"),\n # m.def(\"foo(Tensor x) -> Tensor\")\n lambda m: m.def_(\"foo(Tensor x) -> Tensor\"),\n ]\n self.assertExpectedInline(\n self.commute(\"foo\", ops, ctor_order=(0, 1), expect_raises=True),\n '''Tried to register multiple operators with the same name and the same overload name but different schemas: test::foo(Tensor x) -> (Tensor) (registered at /dev/null:0) vs test::foo(Tensor x, Tensor y) -> (Tensor) (registered at /dev/null:0)''' # noqa\n )\n self.assertExpectedInline(\n self.commute(\"foo\", ops, ctor_order=(1, 0), expect_raises=True),\n '''Tried to register multiple operators with the same name and the same overload name but different schemas: test::foo(Tensor x, Tensor y) -> (Tensor) (registered at /dev/null:0) vs test::foo(Tensor x) -> (Tensor) (registered at /dev/null:0)''' # noqa\n )\n\n def test_multiple_def_alias_defaulting(self):\n # TODO: should be an error in both directions soon\n ops = [\n # m.def(torch::schema(\"foo(Tensor x) -> Tensor\",\n # c10::AliasAnalysisKind::PURE_FUNCTION))\n lambda m: m.def_(\"foo(Tensor x) -> Tensor\", alias=\"PURE_FUNCTION\"),\n # RegisterOperators().op(\"foo(Tensor x) -> Tensor\")\n lambda m: m.def_legacy(\"foo(Tensor x) -> Tensor\"),\n ]\n self.assertExpectedInline(\n self.commute(\"foo\", ops, ctor_order=(0, 1)),\n '''\\\nname: test::foo\nschema: test::foo(Tensor x) -> (Tensor)\ndebug: registered at /dev/null:0\nalias analysis kind: PURE_FUNCTION\n'''\n )\n # NB: When run with ctor order (1, 0), the destructors are NOT\n # COMMUTATIVE. 
THIS IS A BUG, however we are purposely leaving the bug\n # in as it is very benign (only leaves us in a bad state during\n # destruction, when no useful work is being done), will be fixed when we\n # make alias defaulting a hard error, and is very nontrivial to fix\n # prior to that.\n\n def test_multiple_def_alias_mismatch(self):\n # error message is order dependent\n ops = [\n # m.def(torch::schema(\"foo(Tensor x) -> Tensor\",\n # c10::AliasAnalysisKind::PURE_FUNCTION))\n lambda m: m.def_(\"foo(Tensor x) -> Tensor\", alias=\"PURE_FUNCTION\"),\n # m.def(torch::schema(\"foo(Tensor x) -> Tensor\",\n # c10::AliasAnalysisKind::CONSERVATIVE))\n lambda m: m.def_(\"foo(Tensor x) -> Tensor\", alias=\"CONSERVATIVE\"),\n ]\n self.assertExpectedInline(\n self.commute(\"foo\", ops, ctor_order=(0, 1), expect_raises=True),\n '''Tried to define the schema for test::foo with different alias analysis kinds: PURE_FUNCTION (registered at /dev/null:0) vs CONSERVATIVE (registered at /dev/null:0)''' # noqa\n )\n self.assertExpectedInline(\n self.commute(\"foo\", ops, ctor_order=(1, 0), expect_raises=True),\n '''Tried to define the schema for test::foo with different alias analysis kinds: CONSERVATIVE (registered at /dev/null:0) vs PURE_FUNCTION (registered at /dev/null:0)''' # noqa\n )\n\n def test_multiple_fallback(self):\n global_m = C._dispatch_library(\"IMPL\", \"_\", \"xla\")\n global_m.fallback_fallthrough(),\n try:\n global_m.fallback_fallthrough(),\n except RuntimeError as e:\n self.assertExpectedInline(\n str(e),\n '''Tried to register multiple backend fallbacks for the same dispatch key XLA; previous registration registered at /dev/null:0, new registration registered at /dev/null:0''' # noqa\n )\n else:\n self.assertTrue(False)\n\n def test_overwrite_catchall(self):\n ops = [\n lambda m: m.impl_t_t(\"foo\", debug=\"fn1\"),\n lambda m: m.impl_t_t(\"foo\", debug=\"fn2\"),\n ]\n # Not commutative\n self.assertExpectedInline(\n self.commute(\"foo\", ops, ctor_order=(0, 1)),\n '''\\\nname: test::foo\nschema: (none)\ncatchall: fn2 :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]\ncatchall (inactive): fn1 :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]\n'''\n )\n\nif __name__ == '__main__':\n run_tests()\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.python import core\nfrom caffe2.python.test_util import rand_array\nimport caffe2.python.hypothesis_test_util as hu\nimport caffe2.python.serialized_test.serialized_test_util as serial\nfrom hypothesis import given, settings\nimport hypothesis.strategies as st\nimport numpy as np\n\nclass TestScatterOps(serial.SerializedTestCase):\n # TODO(dzhulgakov): add test cases for failure scenarios\n @given(num_args=st.integers(1, 5),\n first_dim=st.integers(1, 20),\n index_dim=st.integers(1, 10),\n extra_dims=st.lists(st.integers(1, 4), min_size=0, max_size=3),\n ind_type=st.sampled_from([np.int32, np.int64]),\n **hu.gcs)\n @settings(deadline=10000)\n def testScatterWeightedSum(\n self, num_args, first_dim, index_dim, extra_dims, ind_type, gc, dc):\n ins = ['data', 'w0', 'indices']\n for i in range(1, num_args + 1):\n ins.extend(['x' + str(i), 'w' + str(i)])\n op = core.CreateOperator(\n 'ScatterWeightedSum',\n ins,\n ['data'],\n device_option=gc)\n def ref(d, w0, ind, *args):\n r = d.copy()\n for i in ind:\n r[i] *= w0\n for i in range(0, len(args), 2):\n x = args[i]\n w = args[i+1]\n for i, j in enumerate(ind):\n r[j] += w * x[i]\n return [r]\n\n d = rand_array(first_dim, *extra_dims)\n ind = np.random.randint(0, first_dim, index_dim).astype(ind_type)\n # ScatterWeightedSumOp only supports w0=1.0 in CUDAContext\n if(gc == hu.gpu_do or gc == hu.hip_do):\n w0 = np.array(1.0).astype(np.float32)\n else:\n w0 = rand_array()\n inputs = [d, w0, ind]\n for _ in range(1, num_args + 1):\n x = rand_array(index_dim, *extra_dims)\n w = rand_array()\n inputs.extend([x,w])\n self.assertReferenceChecks(gc, op, inputs, ref, threshold=1e-3)\n\n @given(first_dim=st.integers(1, 20),\n index_dim=st.integers(1, 10),\n extra_dims=st.lists(st.integers(1, 4), min_size=0, max_size=3),\n data_type=st.sampled_from([np.float16, np.float32, np.int32, np.int64]),\n ind_type=st.sampled_from([np.int32, np.int64]),\n **hu.gcs)\n @settings(deadline=10000)\n def testScatterAssign(\n self, first_dim, index_dim, extra_dims, data_type, ind_type, gc, dc):\n op = core.CreateOperator('ScatterAssign',\n ['data', 'indices', 'slices'], ['data'])\n def ref(d, ind, x):\n r = d.copy()\n r[ind] = x\n return [r]\n\n # let's have indices unique\n if first_dim < index_dim:\n first_dim, index_dim = index_dim, first_dim\n d = (rand_array(first_dim, *extra_dims) * 10).astype(data_type)\n ind = np.random.choice(first_dim, index_dim,\n replace=False).astype(ind_type)\n x = (rand_array(index_dim, *extra_dims) * 10).astype(data_type)\n self.assertReferenceChecks(gc, op, [d, ind, x], ref, threshold=1e-3, ensure_outputs_are_inferred=True)\n\nif __name__ == \"__main__\":\n import unittest\n unittest.main()\n"
] | [
[
"numpy.split",
"numpy.exp"
],
[
"torch._C._wrap_tensor_impl",
"numpy.array",
"numpy.dtype",
"torch._C._tensor_impl_raw_handle"
],
[
"numpy.array"
],
[
"numpy.isnan",
"numpy.ceil",
"numpy.random.random_integers",
"numpy.zeros",
"numpy.random.randint"
],
[
"torch.Generator",
"torch.ones",
"torch.empty",
"torch.full",
"torch.zeros",
"torch.randn",
"torch.nn.functional.conv1d",
"torch.rand",
"torch.arange",
"torch.device",
"torch.get_default_dtype",
"torch.testing._internal.common_utils.run_tests",
"torch.ones_like",
"torch.autograd.grad"
],
[
"numpy.arange",
"numpy.random.rand",
"numpy.random.shuffle",
"numpy.random.randint"
],
[
"numpy.abs",
"numpy.random.seed",
"numpy.power",
"numpy.arange",
"numpy.random.shuffle",
"numpy.testing.assert_array_equal",
"numpy.max",
"numpy.random.randn",
"numpy.random.rand",
"numpy.prod",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
],
[
"numpy.testing.assert_equal",
"numpy.dot",
"numpy.allclose",
"numpy.random.seed",
"numpy.min",
"numpy.abs",
"numpy.clip",
"numpy.float16",
"numpy.max",
"numpy.identity",
"numpy.random.rand",
"numpy.random.uniform",
"numpy.zeros"
],
[
"torch.testing._internal.common_methods_invocations.unpack_variables",
"torch.Size",
"torch.jit.trace",
"torch.randint",
"torch.full",
"torch.zeros",
"torch.set_default_dtype",
"torch.randn",
"torch.ones",
"torch.empty",
"torch.is_tensor",
"torch.tensor",
"torch.testing._internal.common_methods_invocations.create_input",
"torch._jit_internal._disable_emit_hooks",
"torch.rand",
"torch.jit.CompilationUnit"
],
[
"torch.distributed.is_nccl_available",
"torch.distributed.scatter",
"torch.nn.functional.softmax",
"torch.distributed.broadcast",
"torch.distributed.reduce_multigpu",
"torch.testing._internal.common_utils.find_free_port",
"torch.load",
"torch.randperm",
"torch.nn.parallel.DistributedDataParallelCPU",
"torch.distributed.distributed_c10d._get_default_group",
"torch.nn.SyncBatchNorm",
"torch.distributed.gather_object",
"torch.no_grad",
"torch.cuda.stream",
"torch.device",
"torch.distributed.get_rank",
"torch.distributed.irecv",
"torch.distributed.is_gloo_available",
"torch.distributed.is_mpi_available",
"torch.save",
"torch.utils.cpp_extension.verify_ninja_availability",
"torch.cuda.synchronize",
"torch.utils.cpp_extension.load",
"torch.ones",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.distributed.all_reduce_multigpu",
"torch.randn",
"torch.reshape",
"torch.distributed.barrier",
"torch.tensor",
"torch.nn.parallel.distributed._dump_DDP_relevant_env_vars",
"torch.equal",
"torch.rand",
"torch.testing._internal.common_distributed.skip_if_lt_x_gpu",
"torch.DoubleTensor",
"torch.distributed.all_to_all",
"torch.nn.BatchNorm1d",
"torch.distributed.all_gather_coalesced",
"torch.LongTensor",
"torch.empty",
"torch.distributed.send",
"torch.distributed.all_gather_multigpu",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.Module",
"torch.nn.Linear",
"torch.distributed.is_available",
"torch.distributed.destroy_process_group",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.testing._internal.common_utils.run_tests",
"torch.distributed.Backend",
"torch.nn.parallel.DistributedDataParallel",
"torch.distributed.get_backend",
"torch.cuda.set_device",
"torch.distributed.all_to_all_single",
"torch.manual_seed",
"torch.distributed.all_gather",
"torch.distributed.recv",
"torch.distributed.broadcast_multigpu",
"torch.distributed.all_reduce_coalesced",
"torch.distributed.reduce",
"torch.distributed.new_group",
"torch.distributed.gather",
"torch.backends.cudnn.flags",
"torch.nn.ReLU",
"torch.distributed.all_reduce",
"torch.nn.MSELoss",
"torch.cuda.Stream"
],
[
"numpy.random.rand",
"numpy.prod",
"numpy.transpose",
"numpy.ndindex",
"numpy.zeros"
],
[
"torch._C._dispatch_check_invariants",
"torch.testing._internal.common_utils.run_tests",
"torch._C._dispatch_check_all_invariants",
"torch._C._dispatch_library"
],
[
"numpy.array",
"numpy.random.choice",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
adshieh/cvxpy | [
"73b696b71dbb2ceb66a805798c922461e33afc6b",
"73b696b71dbb2ceb66a805798c922461e33afc6b"
] | [
"cvxpy/problems/problem.py",
"cvxpy/reductions/solvers/conic_solvers/conic_solver.py"
] | [
"\"\"\"\nCopyright 2013 Steven Diamond, 2017 Akshay Agrawal\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom cvxpy import settings as s\nfrom cvxpy import error\nfrom cvxpy.problems.objective import Minimize, Maximize\nfrom cvxpy.reductions.chain import Chain\nfrom cvxpy.reductions.dgp2dcp.dgp2dcp import Dgp2Dcp\nfrom cvxpy.reductions.dqcp2dcp import dqcp2dcp\nfrom cvxpy.reductions.eval_params import EvalParams\nfrom cvxpy.reductions.flip_objective import FlipObjective\nfrom cvxpy.reductions.solvers.solving_chain import construct_solving_chain\nfrom cvxpy.interface.matrix_utilities import scalar_value\nfrom cvxpy.reductions.solvers import bisection\nfrom cvxpy.reductions.solvers import defines as slv_def\nfrom cvxpy.utilities.deterministic import unique_list\nimport cvxpy.utilities.performance_utils as perf\nfrom cvxpy.constraints import Equality, Inequality, NonPos, Zero, NonNeg\nimport cvxpy.utilities as u\n\nfrom collections import namedtuple\nimport numpy as np\nimport time\n\n\nSolveResult = namedtuple(\n 'SolveResult',\n ['opt_value', 'status', 'primal_values', 'dual_values'])\n\n\nclass Cache(object):\n def __init__(self):\n self.key = None\n self.solving_chain = None\n self.param_prog = None\n self.inverse_data = None\n\n def invalidate(self):\n self.key = None\n self.solving_chain = None\n self.param_prog = None\n self.inverse_data = None\n\n def make_key(self, solver, gp):\n return (solver, gp)\n\n def gp(self):\n return self.key is not None and self.key[1]\n\n\nclass Problem(u.Canonical):\n \"\"\"A convex optimization problem.\n\n Problems are immutable, save for modification through the specification\n of :class:`~cvxpy.expressions.constants.parameters.Parameter`\n\n Arguments\n ---------\n objective : Minimize or Maximize\n The problem's objective.\n constraints : list\n The constraints on the problem variables.\n \"\"\"\n\n # The solve methods available.\n REGISTERED_SOLVE_METHODS = {}\n\n def __init__(self, objective, constraints=None):\n if constraints is None:\n constraints = []\n # Check that objective is Minimize or Maximize.\n if not isinstance(objective, (Minimize, Maximize)):\n raise error.DCPError(\"Problem objective must be Minimize or Maximize.\")\n # Constraints and objective are immutable.\n self._objective = objective\n self._constraints = [c for c in constraints]\n self._value = None\n self._status = None\n self._solution = None\n self._cache = Cache()\n self._solver_cache = {}\n # Information about the shape of the problem and its constituent parts\n self._size_metrics = None\n # Benchmarks reported by the solver:\n self._solver_stats = None\n self.args = [self._objective, self._constraints]\n\n @property\n def value(self):\n \"\"\"float : The value from the last time the problem was solved\n (or None if not solved).\n \"\"\"\n if self._value is None:\n return None\n else:\n return scalar_value(self._value)\n\n @property\n def status(self):\n \"\"\"str : The status from the last time the problem was solved; one\n of optimal, infeasible, or unbounded (with or 
without\n suffix inaccurate).\n \"\"\"\n return self._status\n\n @property\n def solution(self):\n \"\"\"Solution : The solution from the last time the problem was solved.\n \"\"\"\n return self._solution\n\n @property\n def objective(self):\n \"\"\"Minimize or Maximize : The problem's objective.\n\n Note that the objective cannot be reassigned after creation,\n and modifying the objective after creation will result in\n undefined behavior.\n \"\"\"\n return self._objective\n\n @property\n def constraints(self):\n \"\"\"A shallow copy of the problem's constraints.\n\n Note that constraints cannot be reassigned, appended to, or otherwise\n modified after creation, except through parameters.\n \"\"\"\n return self._constraints[:]\n\n @perf.compute_once\n def is_dcp(self, dpp=False):\n \"\"\"Does the problem satisfy DCP rules?\n\n Arguments\n ---------\n dpp : bool, optional\n If True, enforce the disciplined parametrized programming (DPP)\n ruleset; only relevant when the problem involves Parameters.\n DPP is a mild restriction of DCP. When a problem involving\n Parameters is DPP, subsequent solves can be much faster than\n the first one. For more information, consult the documentation at\n\n https://www.cvxpy.org/tutorial/advanced/index.html#disciplined-parametrized-programming\n\n Returns\n -------\n bool\n True if the Expression is DCP, False otherwise.\n \"\"\"\n return all(\n expr.is_dcp(dpp) for expr in self.constraints + [self.objective])\n\n @perf.compute_once\n def is_dgp(self, dpp=False):\n \"\"\"Does the problem satisfy DGP rules?\n\n Arguments\n ---------\n dpp : bool, optional\n If True, enforce the disciplined parametrized programming (DPP)\n ruleset; only relevant when the problem involves Parameters.\n DPP is a mild restriction of DGP. When a problem involving\n Parameters is DPP, subsequent solves can be much faster than\n the first one. For more information, consult the documentation at\n\n https://www.cvxpy.org/tutorial/advanced/index.html#disciplined-parametrized-programming\n\n Returns\n -------\n bool\n True if the Expression is DGP, False otherwise.\n \"\"\"\n return all(\n expr.is_dgp(dpp) for expr in self.constraints + [self.objective])\n\n @perf.compute_once\n def is_dqcp(self):\n \"\"\"Does the problem satisfy the DQCP rules?\n \"\"\"\n return all(\n expr.is_dqcp() for expr in self.constraints + [self.objective])\n\n @perf.compute_once\n def is_dpp(self, context='dcp'):\n \"\"\"Does the problem satisfy DPP rules?\n\n DPP is a mild restriction of DGP. When a problem involving\n Parameters is DPP, subsequent solves can be much faster than\n the first one. For more information, consult the documentation at\n\n https://www.cvxpy.org/tutorial/advanced/index.html#disciplined-parametrized-programming\n\n Arguments\n ---------\n context : str\n Whether to check DPP-compliance for DCP or DGP; ``context`` should\n be either ``'dcp'`` or ``'dgp'``. 
Calling ``problem.is_dpp('dcp')``\n is equivalent to ``problem.is_dcp(dpp=True)``, and\n `problem.is_dpp('dgp')`` is equivalent to\n `problem.is_dgp(dpp=True)`.\n\n Returns\n -------\n bool\n Whether the problem satisfies the DPP rules.\n \"\"\"\n if context.lower() == 'dcp':\n return self.is_dcp(dpp=True)\n elif context.lower() == 'dgp':\n return self.is_dgp(dpp=True)\n else:\n raise ValueError(\"Unsupported context \", context)\n\n @perf.compute_once\n def is_qp(self):\n \"\"\"Is problem a quadratic program?\n \"\"\"\n for c in self.constraints:\n if not (isinstance(c, (Equality, Zero)) or c.args[0].is_pwl()):\n return False\n for var in self.variables():\n if var.is_psd() or var.is_nsd():\n return False\n return (self.is_dcp() and self.objective.args[0].is_qpwa())\n\n @perf.compute_once\n def is_mixed_integer(self):\n return any(v.attributes['boolean'] or v.attributes['integer']\n for v in self.variables())\n\n @perf.compute_once\n def variables(self):\n \"\"\"Accessor method for variables.\n\n Returns\n -------\n list of :class:`~cvxpy.expressions.variable.Variable`\n A list of the variables in the problem.\n \"\"\"\n vars_ = self.objective.variables()\n for constr in self.constraints:\n vars_ += constr.variables()\n return unique_list(vars_)\n\n @perf.compute_once\n def parameters(self):\n \"\"\"Accessor method for parameters.\n\n Returns\n -------\n list of :class:`~cvxpy.expressions.constants.parameter.Parameter`\n A list of the parameters in the problem.\n \"\"\"\n params = self.objective.parameters()\n for constr in self.constraints:\n params += constr.parameters()\n return unique_list(params)\n\n @perf.compute_once\n def constants(self):\n \"\"\"Accessor method for constants.\n\n Returns\n -------\n list of :class:`~cvxpy.expressions.constants.constant.Constant`\n A list of the constants in the problem.\n \"\"\"\n const_dict = {}\n constants_ = self.objective.constants()\n for constr in self.constraints:\n constants_ += constr.constants()\n # Note that numpy matrices are not hashable, so we use the built-in\n # function \"id\"\n const_dict = {id(constant): constant for constant in constants_}\n return list(const_dict.values())\n\n def atoms(self):\n \"\"\"Accessor method for atoms.\n\n Returns\n -------\n list of :class:`~cvxpy.atoms.Atom`\n A list of the atom types in the problem; note that this list\n contains classes, not instances.\n \"\"\"\n atoms = self.objective.atoms()\n for constr in self.constraints:\n atoms += constr.atoms()\n return unique_list(atoms)\n\n @property\n def size_metrics(self):\n \"\"\":class:`~cvxpy.problems.problem.SizeMetrics` : Information about the problem's size.\n \"\"\"\n if self._size_metrics is None:\n self._size_metrics = SizeMetrics(self)\n return self._size_metrics\n\n @property\n def solver_stats(self):\n \"\"\":class:`~cvxpy.problems.problem.SolverStats` : Information returned by the solver.\n \"\"\"\n return self._solver_stats\n\n def solve(self, *args, **kwargs):\n \"\"\"Solves the problem using the specified method.\n\n Populates the :code:`status` and :code:`value` attributes on the\n problem object as a side-effect.\n\n Arguments\n ---------\n solver : str, optional\n The solver to use. 
For example, 'ECOS', 'SCS', or 'OSQP'.\n verbose : bool, optional\n Overrides the default of hiding solver output.\n gp : bool, optional\n If True, parses the problem as a disciplined geometric program\n instead of a disciplined convex program.\n qcp : bool, optional\n If True, parses the problem as a disciplined quasiconvex program\n instead of a disciplined convex program.\n requires_grad : bool, optional\n Makes it possible to compute gradients of a solution with respect to\n Parameters by calling ``problem.backward()`` after solving, or to\n compute perturbations to the variables given perturbations to Parameters by\n calling ``problem.derivative()``.\n\n Gradients are only supported for DCP and DGP problems, not\n quasiconvex problems. When computing gradients (i.e., when\n this argument is True), the problem must satisfy the DPP rules.\n enforce_dpp : bool, optional\n When True, a DPPError will be thrown when trying to solve a non-DPP\n problem (instead of just a warning). Only relevant for problems\n involving Parameters. Defaults to False.\n method : function, optional\n A custom solve method to use.\n kwargs : keywords, optional\n Additional solver specific arguments. See Notes below.\n\n Notes\n ------\n CVXPY interfaces with a wide range of solvers; the algorithms used by these solvers\n have arguments relating to stopping criteria, and strategies to improve solution quality.\n\n There is no one choice of arguments which is perfect for every problem. If you are not\n getting satisfactory results from a solver, you can try changing its arguments. The\n exact way this is done depends on the specific solver. Here are some examples:\n\n prob.solve(solver='ECOS', abstol=1e-6)\n prob.solve(solver='OSQP', max_iter=10000).\n mydict = {\"MSK_DPAR_INTPNT_CO_TOL_NEAR_REL\": 10}\n prob.solve(solver='MOSEK', mosek_params=mydict).\n\n You should refer to CVXPY's web documentation for details on how to pass solver\n solver arguments, available at\n\n https://www.cvxpy.org/tutorial/advanced/index.html#setting-solver-options\n\n Returns\n -------\n float\n The optimal value for the problem, or a string indicating\n why the problem could not be solved.\n\n Raises\n ------\n cvxpy.error.DCPError\n Raised if the problem is not DCP and `gp` is False.\n cvxpy.error.DGPError\n Raised if the problem is not DGP and `gp` is True.\n cvxpy.error.SolverError\n Raised if no suitable solver exists among the installed solvers,\n or if an unanticipated error is encountered.\n \"\"\"\n func_name = kwargs.pop(\"method\", None)\n if func_name is not None:\n solve_func = Problem.REGISTERED_SOLVE_METHODS[func_name]\n else:\n solve_func = Problem._solve\n return solve_func(self, *args, **kwargs)\n\n @classmethod\n def register_solve(cls, name, func):\n \"\"\"Adds a solve method to the Problem class.\n\n Arguments\n ---------\n name : str\n The keyword for the method.\n func : function\n The function that executes the solve method. This function must\n take as its first argument the problem instance to solve.\n \"\"\"\n cls.REGISTERED_SOLVE_METHODS[name] = func\n\n def get_problem_data(self, solver, gp=False, enforce_dpp=False):\n \"\"\"Returns the problem data used in the call to the solver.\n\n When a problem is solved, CVXPY creates a chain of reductions enclosed\n in a :class:`~cvxpy.reductions.solvers.solving_chain.SolvingChain`,\n and compiles it to some low-level representation that is\n compatible with the targeted solver. 
This method returns that low-level\n representation.\n\n For some solving chains, this low-level representation is a dictionary\n that contains exactly those arguments that were supplied to the solver;\n however, for other solving chains, the data is an intermediate\n representation that is compiled even further by the solver interfaces.\n\n A solution to the equivalent low-level problem can be obtained via the\n data by invoking the `solve_via_data` method of the returned solving\n chain, a thin wrapper around the code external to CVXPY that further\n processes and solves the problem. Invoke the unpack_results method\n to recover a solution to the original problem.\n\n For example:\n\n ::\n\n objective = ...\n constraints = ...\n problem = cp.Problem(objective, constraints)\n data, chain, inverse_data = problem.get_problem_data(cp.SCS)\n # calls SCS using `data`\n soln = chain.solve_via_data(problem, data)\n # unpacks the solution returned by SCS into `problem`\n problem.unpack_results(soln, chain, inverse_data)\n\n Alternatively, the `data` dictionary returned by this method\n contains enough information to bypass CVXPY and call the solver\n directly.\n\n For example:\n\n ::\n\n problem = cp.Problem(objective, constraints)\n data, _, _ = problem.get_problem_data(cp.SCS)\n\n import scs\n probdata = {\n 'A': data['A'],\n 'b': data['b'],\n 'c': data['c'],\n }\n cone_dims = data['dims']\n cones = {\n \"f\": cone_dims.zero,\n \"l\": cone_dims.nonpos,\n \"q\": cone_dims.soc,\n \"ep\": cone_dims.exp,\n \"s\": cone_dims.psd,\n }\n soln = scs.solve(data, cones)\n\n The structure of the data dict that CVXPY returns depends on the\n solver. For details, consult the solver interfaces in\n `cvxpy/reductions/solvers`.\n\n Arguments\n ---------\n solver : str\n The solver the problem data is for.\n gp : bool, optional\n If True, then parses the problem as a disciplined geometric program\n instead of a disciplined convex program.\n enforce_dpp : bool, optional\n When True, a DPPError will be thrown when trying to parse a non-DPP\n problem (instead of just a warning). 
Defaults to False.\n\n Returns\n -------\n dict or object\n lowest level representation of problem\n SolvingChain\n The solving chain that created the data.\n list\n The inverse data generated by the chain.\n \"\"\"\n key = self._cache.make_key(solver, gp)\n if key != self._cache.key:\n self._cache.invalidate()\n solving_chain = self._construct_chain(\n solver=solver, gp=gp, enforce_dpp=enforce_dpp)\n self._cache.key = key\n self._cache.solving_chain = solving_chain\n self._solver_cache = {}\n else:\n solving_chain = self._cache.solving_chain\n\n if self._cache.param_prog is not None:\n # fast path, bypasses application of reductions\n if gp:\n dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)\n # Parameters in the param cone prog are the logs\n # of parameters in the original problem (with one exception:\n # parameters appearing as exponents (in power and gmatmul\n # atoms) are unchanged.\n old_params_to_new_params = dgp2dcp.canon_methods._parameters\n for param in self.parameters():\n if param in old_params_to_new_params:\n old_params_to_new_params[param].value = np.log(\n param.value)\n\n data, solver_inverse_data = solving_chain.solver.apply(\n self._cache.param_prog)\n inverse_data = self._cache.inverse_data + [solver_inverse_data]\n else:\n data, inverse_data = solving_chain.apply(self)\n safe_to_cache = (\n isinstance(data, dict)\n and s.PARAM_PROB in data\n and not any(isinstance(reduction, EvalParams)\n for reduction in solving_chain.reductions)\n )\n if safe_to_cache:\n self._cache.param_prog = data[s.PARAM_PROB]\n # the last datum in inverse_data corresponds to the solver,\n # so we shouldn't cache it\n self._cache.inverse_data = inverse_data[:-1]\n return data, solving_chain, inverse_data\n\n def _find_candidate_solvers(self,\n solver=None,\n gp=False):\n \"\"\"\n Find candiate solvers for the current problem. If solver\n is not None, it checks if the specified solver is compatible\n with the problem passed.\n\n Arguments\n ---------\n solver : string\n The name of the solver with which to solve the problem. If no\n solver is supplied (i.e., if solver is None), then the targeted\n solver may be any of those that are installed. 
If the problem\n is variable-free, then this parameter is ignored.\n gp : bool\n If True, the problem is parsed as a Disciplined Geometric Program\n instead of as a Disciplined Convex Program.\n\n Returns\n -------\n dict\n A dictionary of compatible solvers divided in `qp_solvers`\n and `conic_solvers`.\n\n Raises\n ------\n cvxpy.error.SolverError\n Raised if the problem is not DCP and `gp` is False.\n cvxpy.error.DGPError\n Raised if the problem is not DGP and `gp` is True.\n \"\"\"\n candidates = {'qp_solvers': [],\n 'conic_solvers': []}\n\n if solver is not None:\n if solver not in slv_def.INSTALLED_SOLVERS:\n raise error.SolverError(\"The solver %s is not installed.\" % solver)\n if solver in slv_def.CONIC_SOLVERS:\n candidates['conic_solvers'] += [solver]\n if solver in slv_def.QP_SOLVERS:\n candidates['qp_solvers'] += [solver]\n else:\n candidates['qp_solvers'] = [s for s in slv_def.INSTALLED_SOLVERS\n if s in slv_def.QP_SOLVERS]\n candidates['conic_solvers'] = [s for s in slv_def.INSTALLED_SOLVERS\n if s in slv_def.CONIC_SOLVERS]\n\n # If gp we must have only conic solvers\n if gp:\n if solver is not None and solver not in slv_def.CONIC_SOLVERS:\n raise error.SolverError(\n \"When `gp=True`, `solver` must be a conic solver \"\n \"(received '%s'); try calling \" % solver +\n \" `solve()` with `solver=cvxpy.ECOS`.\"\n )\n elif solver is None:\n candidates['qp_solvers'] = [] # No QP solvers allowed\n\n if self.is_mixed_integer():\n if len(slv_def.INSTALLED_MI_SOLVERS) == 0:\n msg = \"\"\"\n\n CVXPY needs additional software (a `mixed-integer solver`) to handle this model.\n The web documentation\n https://www.cvxpy.org/tutorial/advanced/index.html#mixed-integer-programs\n reviews open-source and commercial options for mixed-integer solvers.\n\n Quick fix: if you install the python package CVXOPT (pip install cvxopt),\n then CVXPY can use the open-source mixed-integer solver `GLPK`.\n \"\"\"\n raise error.SolverError(msg)\n candidates['qp_solvers'] = [\n s for s in candidates['qp_solvers']\n if slv_def.SOLVER_MAP_QP[s].MIP_CAPABLE]\n candidates['conic_solvers'] = [\n s for s in candidates['conic_solvers']\n if slv_def.SOLVER_MAP_CONIC[s].MIP_CAPABLE]\n if not candidates['conic_solvers'] and \\\n not candidates['qp_solvers']:\n raise error.SolverError(\n \"Problem is mixed-integer, but candidate \"\n \"QP/Conic solvers (%s) are not MIP-capable.\" %\n (candidates['qp_solvers'] +\n candidates['conic_solvers']))\n\n return candidates\n\n def _construct_chain(self, solver=None, gp=False, enforce_dpp=False):\n \"\"\"\n Construct the chains required to reformulate and solve the problem.\n\n In particular, this function\n\n # finds the candidate solvers\n # constructs the solving chain that performs the\n numeric reductions and solves the problem.\n\n Arguments\n ---------\n solver : str, optional\n The solver to use. 
Defaults to ECOS.\n gp : bool, optional\n If True, the problem is parsed as a Disciplined Geometric Program\n instead of as a Disciplined Convex Program.\n enforce_dpp : bool, optional\n Whether to error on DPP violations.\n\n Returns\n -------\n A solving chain\n \"\"\"\n candidate_solvers = self._find_candidate_solvers(solver=solver, gp=gp)\n return construct_solving_chain(self, candidate_solvers, gp=gp,\n enforce_dpp=enforce_dpp)\n\n def _invalidate_cache(self):\n self._cache_key = None\n self._solving_chain = None\n self._param_prog = None\n self._inverse_data = None\n\n def _solve(self,\n solver=None,\n warm_start=True,\n verbose=False,\n gp=False, qcp=False, requires_grad=False, enforce_dpp=False, **kwargs):\n \"\"\"Solves a DCP compliant optimization problem.\n\n Saves the values of primal and dual variables in the variable\n and constraint objects, respectively.\n\n Arguments\n ---------\n solver : str, optional\n The solver to use. Defaults to ECOS.\n warm_start : bool, optional\n Should the previous solver result be used to warm start?\n verbose : bool, optional\n Overrides the default of hiding solver output.\n gp : bool, optional\n If True, parses the problem as a disciplined geometric program.\n qcp : bool, optional\n If True, parses the problem as a disciplined quasiconvex program.\n requires_grad : bool, optional\n Makes it possible to compute gradients with respect to\n parameters by calling `backward()` after solving, or to compute\n perturbations to the variables by calling `derivative()`. When\n True, the solver must be SCS, and dqcp must be False.\n A DPPError is thrown when problem is not DPP.\n enforce_dpp : bool, optional\n When True, a DPPError will be thrown when trying to solve a non-DPP\n problem (instead of just a warning). Defaults to False.\n kwargs : dict, optional\n A dict of options that will be passed to the specific solver.\n In general, these options will override any default settings\n imposed by cvxpy.\n\n Returns\n -------\n float\n The optimal value for the problem, or a string indicating\n why the problem could not be solved.\n \"\"\"\n for parameter in self.parameters():\n if parameter.value is None:\n raise error.ParameterError(\n \"A Parameter (whose name is '%s') does not have a value \"\n \"associated with it; all Parameter objects must have \"\n \"values before solving a problem.\" % parameter.name())\n\n if requires_grad:\n dpp_context = 'dgp' if gp else 'dcp'\n if qcp:\n raise ValueError(\"Cannot compute gradients of DQCP problems.\")\n elif not self.is_dpp(dpp_context):\n raise error.DPPError(\"Problem is not DPP (when requires_grad \"\n \"is True, problem must be DPP).\")\n elif solver is not None and solver not in [s.SCS, s.DIFFCP]:\n raise ValueError(\"When requires_grad is True, the only \"\n \"supported solver is SCS \"\n \"(received %s).\" % solver)\n elif s.DIFFCP not in slv_def.INSTALLED_SOLVERS:\n raise ImportError(\n \"The Python package diffcp must be installed to \"\n \"differentiate through problems. 
Please follow the \"\n \"installation instructions at \"\n \"https://github.com/cvxgrp/diffcp\")\n else:\n solver = s.DIFFCP\n else:\n if gp and qcp:\n raise ValueError(\"At most one of `gp` and `qcp` can be True.\")\n if qcp and not self.is_dcp():\n if not self.is_dqcp():\n raise error.DQCPError(\"The problem is not DQCP.\")\n reductions = [dqcp2dcp.Dqcp2Dcp()]\n if type(self.objective) == Maximize:\n reductions = [FlipObjective()] + reductions\n chain = Chain(problem=self, reductions=reductions)\n soln = bisection.bisect(\n chain.reduce(), solver=solver, verbose=verbose, **kwargs)\n self.unpack(chain.retrieve(soln))\n return self.value\n\n data, solving_chain, inverse_data = self.get_problem_data(\n solver, gp, enforce_dpp)\n solution = solving_chain.solve_via_data(\n self, data, warm_start, verbose, kwargs)\n self.unpack_results(solution, solving_chain, inverse_data)\n return self.value\n\n def backward(self):\n \"\"\"Compute the gradient of a solution with respect to Parameters.\n\n This method differentiates through the solution map of the problem,\n obtaining the gradient of a solution with respect to the Parameters.\n In other words, it calculates the sensitivities of the Parameters\n with respect to perturbations in the optimal Variable values. This\n can be useful for integrating CVXPY into automatic differentation\n toolkits.\n\n ``backward()`` populates the ``gradient`` attribute of each Parameter\n in the problem as a side-effect. It can only be called after calling\n ``solve()`` with ``requires_grad=True``.\n\n Below is a simple example:\n\n ::\n\n import cvxpy as cp\n import numpy as np\n\n p = cp.Parameter()\n x = cp.Variable()\n quadratic = cp.square(x - 2 * p)\n problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])\n p.value = 3.0\n problem.solve(requires_grad=True, eps=1e-10)\n # backward() populates the gradient attribute of the parameters\n problem.backward()\n # Because x* = 2 * p, dx*/dp = 2\n np.testing.assert_allclose(p.gradient, 2.0)\n\n In the above example, the gradient could easily be computed by hand.\n The ``backward()`` is useful because for almost all problems, the\n gradient cannot be computed analytically.\n\n This method can be used to differentiate through any DCP or DGP\n problem, as long as the problem is DPP compliant (i.e.,\n ``problem.is_dcp(dpp=True)`` or ``problem.is_dgp(dpp=True)`` evaluates to\n ``True``).\n\n This method uses the chain rule to evaluate the gradients of a\n scalar-valued function of the Variables with respect to the Parameters.\n For example, let x be a variable and p a Parameter; x and p might be\n scalars, vectors, or matrices. Let f be a scalar-valued function, with\n z = f(x). Then this method computes dz/dp = (dz/dx) (dx/p). dz/dx\n is chosen as the all-ones vector by default, corresponding to\n choosing f to be the sum function. You can specify a custom value for\n dz/dx by setting the ``gradient`` attribute on your variables. For example,\n\n ::\n\n import cvxpy as cp\n import numpy as np\n\n\n b = cp.Parameter()\n x = cp.Variable()\n quadratic = cp.square(x - 2 * b)\n problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])\n b.value = 3.\n problem.solve(requires_grad=True, eps=1e-10)\n x.gradient = 4.\n problem.backward()\n # dz/dp = dz/dx dx/dp = 4. * 2. 
== 8.\n np.testing.assert_allclose(b.gradient, 8.)\n\n The ``gradient`` attribute on a variable can also be interpreted as a\n perturbation to its optimal value.\n\n Raises\n ------\n ValueError\n if solve was not called with ``requires_grad=True``\n SolverError\n if the problem is infeasible or unbounded\n \"\"\"\n if s.DIFFCP not in self._solver_cache:\n raise ValueError(\"backward can only be called after calling \"\n \"solve with `requires_grad=True`\")\n elif self.status not in s.SOLUTION_PRESENT:\n raise error.SolverError(\"Backpropagating through \"\n \"infeasible/unbounded problems is not \"\n \"yet supported. Please file an issue on \"\n \"Github if you need this feature.\")\n\n # TODO(akshayka): Backpropagate through dual variables as well.\n backward_cache = self._solver_cache[s.DIFFCP]\n DT = backward_cache[\"DT\"]\n zeros = np.zeros(backward_cache[\"s\"].shape)\n del_vars = {}\n\n gp = self._cache.gp()\n for variable in self.variables():\n if variable.gradient is None:\n del_vars[variable.id] = np.ones(variable.shape)\n else:\n del_vars[variable.id] = np.asarray(variable.gradient,\n dtype=np.float64)\n if gp:\n # x_gp = exp(x_cone_program),\n # dx_gp/d x_cone_program = exp(x_cone_program) = x_gp\n del_vars[variable.id] *= variable.value\n\n dx = self._cache.param_prog.split_adjoint(del_vars)\n start = time.time()\n dA, db, dc = DT(dx, zeros, zeros)\n end = time.time()\n backward_cache['DT_TIME'] = end - start\n dparams = self._cache.param_prog.apply_param_jac(dc, -dA, db)\n\n if not gp:\n for param in self.parameters():\n param.gradient = dparams[param.id]\n else:\n dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)\n old_params_to_new_params = dgp2dcp.canon_methods._parameters\n for param in self.parameters():\n # Note: if param is an exponent in a power or gmatmul atom,\n # then the parameter passes through unchanged to the DCP\n # program; if the param is also used elsewhere (not as an\n # exponent), then param will also be in\n # old_params_to_new_params. Therefore, param.gradient =\n # dparams[param.id] (or 0) + 1/param*dparams[new_param.id]\n #\n # Note that param.id is in dparams if and only if\n # param was used as an exponent (because this means that\n # the parameter entered the DCP problem unchanged.)\n grad = 0.0 if param.id not in dparams else dparams[param.id]\n if param in old_params_to_new_params:\n new_param = old_params_to_new_params[param]\n # new_param.value == log(param), apply chain rule\n grad += (1.0 / param.value) * dparams[new_param.id]\n param.gradient = grad\n\n def derivative(self):\n \"\"\"Apply the derivative of the solution map to perturbations in the Parameters\n\n This method applies the derivative of the solution map to perturbations\n in the Parameters to obtain perturbations in the optimal values of the\n Variables. In other words, it tells you how the optimal values of the\n Variables would be changed by small changes to the Parameters.\n\n You can specify perturbations in a Parameter by setting its ``delta``\n attribute (if unspecified, the perturbation defaults to 0).\n\n This method populates the ``delta`` attribute of the Variables as a\n side-effect.\n\n This method can only be called after calling ``solve()`` with\n ``requires_grad=True``. 
It is compatible with both DCP and DGP\n problems (that are also DPP-compliant).\n\n Below is a simple example:\n\n ::\n\n import cvxpy as cp\n import numpy as np\n\n p = cp.Parameter()\n x = cp.Variable()\n quadratic = cp.square(x - 2 * p)\n problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])\n p.value = 3.0\n problem.solve(requires_grad=True, eps=1e-10)\n # derivative() populates the delta attribute of the variables\n problem.derivative()\n p.delta = 1e-3\n # Because x* = 2 * p, dx*/dp = 2, so (dx*/dp)(p.delta) == 2e-3\n np.testing.assert_allclose(x.delta, 2e-3)\n\n Raises\n ------\n ValueError\n if solve was not called with ``requires_grad=True``\n SolverError\n if the problem is infeasible or unbounded\n \"\"\"\n if s.DIFFCP not in self._solver_cache:\n raise ValueError(\"derivative can only be called after calling \"\n \"solve with `requires_grad=True`\")\n elif self.status not in s.SOLUTION_PRESENT:\n raise ValueError(\"Differentiating through infeasible/unbounded \"\n \"problems is not yet supported. Please file an \"\n \"issue on Github if you need this feature.\")\n # TODO(akshayka): Forward differentiate dual variables as well\n backward_cache = self._solver_cache[s.DIFFCP]\n param_prog = self._cache.param_prog\n D = backward_cache[\"D\"]\n param_deltas = {}\n\n gp = self._cache.gp()\n if gp:\n dgp2dcp = self._cache.solving_chain.get(Dgp2Dcp)\n\n if not self.parameters():\n for variable in self.variables():\n variable.delta = np.zeros(variable.shape)\n return\n\n for param in self.parameters():\n delta = param.delta if param.delta is not None else np.zeros(param.shape)\n if gp:\n if param in dgp2dcp.canon_methods._parameters:\n new_param_id = dgp2dcp.canon_methods._parameters[param].id\n else:\n new_param_id = param.id\n param_deltas[new_param_id] = (\n 1.0/param.value * np.asarray(delta, dtype=np.float64))\n if param.id in param_prog.param_id_to_col:\n # here, param generated a new parameter and also\n # passed through to the param cone prog unchanged\n # (because it was an exponent of a power)\n param_deltas[param.id] = np.asarray(delta,\n dtype=np.float64)\n else:\n param_deltas[param.id] = np.asarray(delta, dtype=np.float64)\n dc, _, dA, db = param_prog.apply_parameters(param_deltas,\n zero_offset=True)\n start = time.time()\n dx, _, _ = D(-dA, db, dc)\n end = time.time()\n backward_cache['D_TIME'] = end - start\n dvars = param_prog.split_solution(\n dx, [v.id for v in self.variables()])\n for variable in self.variables():\n variable.delta = dvars[variable.id]\n if gp:\n # x_gp = exp(x_cone_program),\n # dx_gp/d x_cone_program = exp(x_cone_program) = x_gp\n variable.delta *= variable.value\n\n def _clear_solution(self):\n for v in self.variables():\n v.save_value(None)\n for c in self.constraints:\n for dv in c.dual_variables:\n dv.save_value(None)\n self._value = None\n self._status = None\n self._solution = None\n\n def unpack(self, solution):\n \"\"\"Updates the problem state given a Solution.\n\n Updates problem.status, problem.value and value of primal and dual\n variables. 
If solution.status is in cvxpy.settins.ERROR, this method\n is a no-op.\n\n Arguments\n _________\n solution : cvxpy.Solution\n A Solution object.\n\n Raises\n ------\n ValueError\n If the solution object has an invalid status\n \"\"\"\n if solution.status in s.SOLUTION_PRESENT:\n for v in self.variables():\n v.save_value(solution.primal_vars[v.id])\n for c in self.constraints:\n if c.id in solution.dual_vars:\n c.save_dual_value(solution.dual_vars[c.id])\n elif solution.status in s.INF_OR_UNB:\n for v in self.variables():\n v.save_value(None)\n for constr in self.constraints:\n for dv in constr.dual_variables:\n dv.save_value(None)\n else:\n raise ValueError(\"Cannot unpack invalid solution: %s\" % solution)\n\n self._value = solution.opt_val\n self._status = solution.status\n self._solution = solution\n\n def unpack_results(self, solution, chain, inverse_data):\n \"\"\"Updates the problem state given the solver results.\n\n Updates problem.status, problem.value and value of\n primal and dual variables.\n\n Arguments\n _________\n solution : object\n The solution returned by applying the chain to the problem\n and invoking the solver on the resulting data.\n chain : SolvingChain\n A solving chain that was used to solve the problem.\n inverse_data : list\n The inverse data returned by applying the chain to the problem.\n\n Raises\n ------\n cvxpy.error.SolverError\n If the solver failed\n \"\"\"\n\n solution = chain.invert(solution, inverse_data)\n if solution.status in s.ERROR:\n raise error.SolverError(\n \"Solver '%s' failed. \" % chain.solver.name() +\n \"Try another solver, or solve with verbose=True for more \"\n \"information.\")\n self.unpack(solution)\n self._solver_stats = SolverStats(self._solution.attr,\n chain.solver.name())\n\n def __str__(self):\n if len(self.constraints) == 0:\n return str(self.objective)\n else:\n subject_to = \"subject to \"\n lines = [str(self.objective),\n subject_to + str(self.constraints[0])]\n for constr in self.constraints[1:]:\n lines += [len(subject_to) * \" \" + str(constr)]\n return '\\n'.join(lines)\n\n def __repr__(self):\n return \"Problem(%s, %s)\" % (repr(self.objective),\n repr(self.constraints))\n\n def __neg__(self):\n return Problem(-self.objective, self.constraints)\n\n def __add__(self, other):\n if other == 0:\n return self\n elif not isinstance(other, Problem):\n return NotImplemented\n return Problem(self.objective + other.objective,\n unique_list(self.constraints + other.constraints))\n\n def __radd__(self, other):\n if other == 0:\n return self\n else:\n return NotImplemented\n\n def __sub__(self, other):\n if not isinstance(other, Problem):\n return NotImplemented\n return Problem(self.objective - other.objective,\n unique_list(self.constraints + other.constraints))\n\n def __rsub__(self, other):\n if other == 0:\n return -self\n else:\n return NotImplemented\n\n def __mul__(self, other):\n if not isinstance(other, (int, float)):\n return NotImplemented\n return Problem(self.objective * other, self.constraints)\n\n __rmul__ = __mul__\n\n def __div__(self, other):\n if not isinstance(other, (int, float)):\n return NotImplemented\n return Problem(self.objective * (1.0 / other), self.constraints)\n\n def is_constant(self):\n return False\n\n __truediv__ = __div__\n\n\nclass SolverStats(object):\n \"\"\"Reports some of the miscellaneous information that is returned\n by the solver after solving but that is not captured directly by\n the Problem instance.\n\n Attributes\n ----------\n solve_time : double\n The time (in seconds) it 
took for the solver to solve the problem.\n setup_time : double\n The time (in seconds) it took for the solver to set up the problem.\n num_iters : int\n The number of iterations the solver had to go through to find a solution.\n \"\"\"\n def __init__(self, results_dict, solver_name):\n self.solver_name = solver_name\n self.solve_time = None\n self.setup_time = None\n self.num_iters = None\n\n if s.SOLVE_TIME in results_dict:\n self.solve_time = results_dict[s.SOLVE_TIME]\n if s.SETUP_TIME in results_dict:\n self.setup_time = results_dict[s.SETUP_TIME]\n if s.NUM_ITERS in results_dict:\n self.num_iters = results_dict[s.NUM_ITERS]\n\n\nclass SizeMetrics(object):\n \"\"\"Reports various metrics regarding the problem.\n\n Attributes\n ----------\n\n num_scalar_variables : integer\n The number of scalar variables in the problem.\n num_scalar_data : integer\n The number of scalar constants and parameters in the problem, i.e. the number of\n constants used across all matrices and vectors in the problem.\n Some constants are not apparent when the problem is constructed: for example,\n the sum_squares expression is a wrapper for a quad_over_lin expression with a\n constant 1 in the denominator.\n num_scalar_eq_constr : integer\n The number of scalar equality constraints in the problem.\n num_scalar_leq_constr : integer\n The number of scalar inequality constraints in the problem.\n\n max_data_dimension : integer\n The longest dimension of any data block (constant or parameter).\n max_big_small_squared : integer\n The maximum value of (big)(small)^2 over all data blocks of the problem, where\n (big) is the larger dimension and (small) is the smaller dimension\n for each data block.\n \"\"\"\n\n def __init__(self, problem):\n # num_scalar_variables\n self.num_scalar_variables = 0\n for var in problem.variables():\n self.num_scalar_variables += var.size\n\n # num_scalar_data, max_data_dimension, and max_big_small_squared\n self.max_data_dimension = 0\n self.num_scalar_data = 0\n self.max_big_small_squared = 0\n for const in problem.constants()+problem.parameters():\n big = 0\n # Compute the number of scalar data entries\n self.num_scalar_data += const.size\n big = 1 if len(const.shape) == 0 else max(const.shape)\n small = 1 if len(const.shape) == 0 else min(const.shape)\n\n # Get max data dimension:\n if self.max_data_dimension < big:\n self.max_data_dimension = big\n\n max_big_small_squared = float(big)*(float(small)**2)\n if self.max_big_small_squared < max_big_small_squared:\n self.max_big_small_squared = max_big_small_squared\n\n # num_scalar_eq_constr\n self.num_scalar_eq_constr = 0\n for constraint in problem.constraints:\n if isinstance(constraint, (Equality, Zero)):\n self.num_scalar_eq_constr += constraint.expr.size\n\n # num_scalar_leq_constr\n self.num_scalar_leq_constr = 0\n for constraint in problem.constraints:\n if isinstance(constraint, (Inequality, NonPos, NonNeg)):\n self.num_scalar_leq_constr += constraint.expr.size\n",
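The arithmetic dunders above make Problem objects composable. A minimal sketch of those semantics, assuming only the public cvxpy API (cp.square and cp.abs are standard atoms): objectives add, a scalar rescales only the objective, and a constraint object shared by both problems is kept once by unique_list::

    import cvxpy as cp

    x = cp.Variable()
    shared = x >= 1                      # one constraint object used by both problems
    p1 = cp.Problem(cp.Minimize(cp.square(x)), [shared])
    p2 = cp.Problem(cp.Minimize(cp.abs(x - 3)), [shared, x <= 5])

    # p1 + 2 * p2 sums the objectives; unique_list de-duplicates `shared`.
    combined = p1 + 2 * p2
    print(len(combined.constraints))     # 2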
"\"\"\"\nCopyright 2017 Robin Verschueren, 2017 Akshay Agrawal\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport cvxpy.settings as s\nfrom cvxpy.constraints import SOC, ExpCone, PSD, Zero, NonNeg\nfrom cvxpy.reductions.cvx_attr2constr import convex_attributes\nfrom cvxpy.reductions.dcp2cone.cone_matrix_stuffing import ParamConeProg\nfrom cvxpy.reductions.solution import Solution, failure_solution\nfrom cvxpy.reductions.solvers.solver import Solver\nfrom cvxpy.reductions.solvers import utilities\nimport numpy as np\nimport scipy.sparse as sp\n\n\n# NOTE(akshayka): Small changes to this file can lead to drastic\n# performance regressions. If you are making a change to this file,\n# make sure to run cvxpy/tests/test_benchmarks.py to ensure that you have\n# not introduced a regression.\n\nclass LinearOperator(object):\n \"\"\"A wrapper for linear operators.\"\"\"\n def __init__(self, linear_op, shape):\n if sp.issparse(linear_op):\n self._matmul = lambda X: linear_op @ X\n else:\n self._matmul = linear_op\n self.shape = shape\n\n def __call__(self, X):\n return self._matmul(X)\n\n\ndef as_linear_operator(linear_op):\n if isinstance(linear_op, LinearOperator):\n return linear_op\n elif sp.issparse(linear_op):\n return LinearOperator(linear_op, linear_op.shape)\n\n\ndef as_block_diag_linear_operator(matrices):\n \"\"\"Block diag of SciPy sparse matrices or linear operators.\"\"\"\n linear_operators = [as_linear_operator(op) for op in matrices]\n nrows = [op.shape[0] for op in linear_operators]\n ncols = [op.shape[1] for op in linear_operators]\n m, n = sum(nrows), sum(ncols)\n col_indices = np.append(0, np.cumsum(ncols))\n\n def matmul(X):\n outputs = []\n for i, op in enumerate(linear_operators):\n Xi = X[col_indices[i]:col_indices[i + 1]]\n outputs.append(op(Xi))\n return sp.vstack(outputs)\n return LinearOperator(matmul, (m, n))\n\n\nclass ConicSolver(Solver):\n \"\"\"Conic solver class with reduction semantics\n \"\"\"\n # The key that maps to ConeDims in the data returned by apply().\n DIMS = \"dims\"\n\n # Every conic solver must support Zero and NonNeg constraints.\n SUPPORTED_CONSTRAINTS = [Zero, NonNeg]\n\n # Some solvers cannot solve problems that do not have constraints.\n # For such solvers, REQUIRES_CONSTR should be set to True.\n REQUIRES_CONSTR = False\n\n EXP_CONE_ORDER = None\n\n def accepts(self, problem):\n return (isinstance(problem, ParamConeProg)\n and (self.MIP_CAPABLE or not problem.is_mixed_integer())\n and not convex_attributes([problem.x])\n and (len(problem.constraints) > 0 or not self.REQUIRES_CONSTR)\n and all(type(c) in self.SUPPORTED_CONSTRAINTS for c in\n problem.constraints))\n\n @staticmethod\n def get_spacing_matrix(shape, spacing, streak, num_blocks, offset):\n \"\"\"Returns a sparse matrix that spaces out an expression.\n\n Parameters\n ----------\n shape : tuple\n (rows in matrix, columns in matrix)\n spacing : int\n The number of rows between the start of each non-zero block.\n streak: int\n The number of elements in each block.\n num_blocks : int\n The number 
of non-zero blocks.\n offset : int\n The number of zero rows at the beginning of the matrix.\n\n Returns\n -------\n SciPy CSC matrix\n A sparse matrix\n \"\"\"\n num_values = num_blocks * streak\n val_arr = np.ones(num_values, dtype=np.float64)\n streak_plus_spacing = streak + spacing\n row_arr = np.arange(0, num_blocks * streak_plus_spacing).reshape(\n num_blocks, streak_plus_spacing)[:, :streak].flatten() + offset\n col_arr = np.arange(num_values)\n return sp.csc_matrix((val_arr, (row_arr, col_arr)), shape)\n\n def psd_format_mat(self, constr):\n \"\"\"Return a matrix to multiply by PSD constraint coefficients.\n \"\"\"\n # Default is identity.\n return sp.eye(constr.size, format='csc')\n\n def format_constraints(self, problem, exp_cone_order):\n \"\"\"\n Returns a ParamConeProg whose problem data tensors will yield the\n coefficient \"A\" and offset \"b\" for the constraint in the following\n formats:\n Linear equations: (A, b) such that A * x + b == 0,\n Linear inequalities: (A, b) such that A * x + b >= 0,\n Second order cone: (A, b) such that A * x + b in SOC,\n Exponential cone: (A, b) such that A * x + b in EXP,\n Semidefinite cone: (A, b) such that A * x + b in PSD,\n\n The CVXPY standard for the exponential cone is:\n K_e = closure{(x,y,z) | z >= y * exp(x/y), y>0}.\n Whenever a solver uses this convention, EXP_CONE_ORDER should be\n [0, 1, 2].\n\n The CVXPY standard for the second order cone is:\n SOC(n) = { x : x[0] >= norm(x[1:n], 2) }.\n All currently supported solvers use this convention.\n\n Args:\n problem : ParamConeProg\n The problem that is the provenance of the constraint.\n exp_cone_order: list\n A list indicating how the exponential cone arguments are ordered.\n\n Returns:\n ParamConeProg with structured A.\n \"\"\"\n # Create a matrix to reshape constraints, then replicate for each\n # variable entry.\n restruct_mat = [] # Form a block diagonal matrix.\n for constr in problem.constraints:\n total_height = sum([arg.size for arg in constr.args])\n if type(constr) == Zero:\n restruct_mat.append(-sp.eye(constr.size, format='csr'))\n elif type(constr) == NonNeg:\n restruct_mat.append(sp.eye(constr.size, format='csr'))\n elif type(constr) == SOC:\n # Group each t row with appropriate X rows.\n assert constr.axis == 0, 'SOC must be lowered to axis == 0'\n\n # Interleave the rows of coeffs[0] and coeffs[1]:\n # coeffs[0][0, :]\n # coeffs[1][0:gap-1, :]\n # coeffs[0][1, :]\n # coeffs[1][gap-1:2*(gap-1), :]\n t_spacer = ConicSolver.get_spacing_matrix(\n shape=(total_height, constr.args[0].size),\n spacing=constr.args[1].shape[0],\n streak=1,\n num_blocks=constr.args[0].size,\n offset=0,\n )\n X_spacer = ConicSolver.get_spacing_matrix(\n shape=(total_height, constr.args[1].size),\n spacing=1,\n streak=constr.args[1].shape[0],\n num_blocks=constr.args[0].size,\n offset=1,\n )\n restruct_mat.append(sp.hstack([t_spacer, X_spacer]))\n elif type(constr) == ExpCone:\n arg_mats = []\n for i, arg in enumerate(constr.args):\n space_mat = ConicSolver.get_spacing_matrix(\n shape=(total_height, arg.size),\n spacing=len(exp_cone_order) - 1,\n streak=1,\n num_blocks=arg.size,\n offset=exp_cone_order[i],\n )\n arg_mats.append(space_mat)\n restruct_mat.append(sp.hstack(arg_mats))\n elif type(constr) == PSD:\n restruct_mat.append(self.psd_format_mat(constr))\n else:\n raise ValueError(\"Unsupported constraint type.\")\n\n # Form new ParamConeProg\n if restruct_mat:\n # TODO(akshayka): profile to see whether using linear operators\n # or bmat is faster\n restruct_mat = 
as_block_diag_linear_operator(restruct_mat)\n # this is equivalent to but _much_ faster than:\n # restruct_mat_rep = sp.block_diag([restruct_mat]*(problem.x.size + 1))\n # restruct_A = restruct_mat_rep * problem.A\n unspecified, remainder = divmod(problem.A.shape[0] *\n problem.A.shape[1],\n restruct_mat.shape[1])\n reshaped_A = problem.A.reshape(restruct_mat.shape[1],\n unspecified, order='F').tocsr()\n restructured_A = restruct_mat(reshaped_A).tocoo()\n # Because of a bug in scipy versions < 1.20, `reshape`\n # can overflow if indices are int32s.\n restructured_A.row = restructured_A.row.astype(np.int64)\n restructured_A.col = restructured_A.col.astype(np.int64)\n restructured_A = restructured_A.reshape(\n restruct_mat.shape[0] * (problem.x.size + 1),\n problem.A.shape[1], order='F')\n else:\n restructured_A = problem.A\n new_param_cone_prog = ParamConeProg(problem.c,\n problem.x,\n restructured_A,\n problem.variables,\n problem.var_id_to_col,\n problem.constraints,\n problem.parameters,\n problem.param_id_to_col,\n formatted=True)\n return new_param_cone_prog\n\n def invert(self, solution, inverse_data):\n \"\"\"Returns the solution to the original problem given the inverse_data.\n \"\"\"\n status = solution['status']\n\n if status in s.SOLUTION_PRESENT:\n opt_val = solution['value']\n primal_vars = {inverse_data[self.VAR_ID]: solution['primal']}\n eq_dual = utilities.get_dual_values(\n solution['eq_dual'],\n utilities.extract_dual_value,\n inverse_data[Solver.EQ_CONSTR])\n leq_dual = utilities.get_dual_values(\n solution['ineq_dual'],\n utilities.extract_dual_value,\n inverse_data[Solver.NEQ_CONSTR])\n eq_dual.update(leq_dual)\n dual_vars = eq_dual\n return Solution(status, opt_val, primal_vars, dual_vars, {})\n else:\n return failure_solution(status)\n"
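get_spacing_matrix drives the SOC and ExpCone interleaving above. A standalone copy of the same index arithmetic (duplicated here purely for illustration, not imported from cvxpy) makes the row pattern concrete::

    import numpy as np
    import scipy.sparse as sp

    def spacing_matrix(shape, spacing, streak, num_blocks, offset):
        # Same arithmetic as ConicSolver.get_spacing_matrix.
        step = streak + spacing
        row_arr = np.arange(0, num_blocks * step).reshape(
            num_blocks, step)[:, :streak].flatten() + offset
        col_arr = np.arange(num_blocks * streak)
        vals = np.ones(num_blocks * streak, dtype=np.float64)
        return sp.csc_matrix((vals, (row_arr, col_arr)), shape)

    # streak=1, spacing=2, two blocks: the ones land on rows 0 and 3, which is
    # exactly how each SOC 't' entry gets interleaved ahead of its X rows.
    print(spacing_matrix((6, 2), spacing=2, streak=1, num_blocks=2, offset=0).toarray())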
] | [
[
"numpy.asarray",
"numpy.log",
"numpy.zeros",
"numpy.ones"
],
[
"scipy.sparse.csc_matrix",
"scipy.sparse.issparse",
"scipy.sparse.eye",
"numpy.arange",
"numpy.cumsum",
"numpy.ones",
"scipy.sparse.vstack",
"scipy.sparse.hstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
Danielznn16/RoboticHand-in-KG | [
"27e4eee97ea4ecab40fbd13b24a97e1f94c10258"
] | [
"models/pointnet2_part_seg_msg.py"
] | [
"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom models.pointnet_util import PointNetSetAbstractionMsg,PointNetSetAbstraction,PointNetFeaturePropagation\n\n\nclass get_model(nn.Module):\n def __init__(self, num_classes, normal_channel=False):\n super(get_model, self).__init__()\n if normal_channel:\n additional_channel = 3\n else:\n additional_channel = 0\n self.normal_channel = normal_channel\n self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3+additional_channel, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])\n self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])\n self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True)\n self.fp3 = PointNetFeaturePropagation(in_channel=1536, mlp=[256, 256])\n self.fp2 = PointNetFeaturePropagation(in_channel=576, mlp=[256, 128])\n self.fp1 = PointNetFeaturePropagation(in_channel=150+additional_channel, mlp=[128, 128])\n self.conv1 = nn.Conv1d(128, 128, 1)\n self.bn1 = nn.BatchNorm1d(128)\n self.drop1 = nn.Dropout(0.5)\n self.conv2 = nn.Conv1d(128, num_classes, 1)\n\n def forward(self, xyz, cls_label):\n # Set Abstraction layers\n B,C,N = xyz.shape\n if self.normal_channel:\n l0_points = xyz\n l0_xyz = xyz[:,:3,:]\n else:\n l0_points = xyz\n l0_xyz = xyz\n l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)\n l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)\n l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)\n # Feature Propagation layers\n l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)\n l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)\n cls_label_one_hot = cls_label.view(B,16,1).repeat(1,1,N)\n # print(cls_label_one_hot)\n l0_points = self.fp1(l0_xyz, l1_xyz, torch.cat([cls_label_one_hot,l0_xyz,l0_points],1), l1_points)\n # FC layers\n feat = F.relu(self.bn1(self.conv1(l0_points)))\n x = self.drop1(feat)\n x = self.conv2(x)\n x = F.log_softmax(x, dim=1)\n x = x.permute(0, 2, 1)\n return x, l3_points\n\n\nclass get_loss(nn.Module):\n def __init__(self):\n super(get_loss, self).__init__()\n\n def forward(self, pred, target, trans_feat):\n total_loss = F.nll_loss(pred, target)\n\n return total_loss"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.functional.nll_loss",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.nn.Conv1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZvonimirBandic/QuCumber | [
"81f0291951e89346fd8ab5c35cc90341fd8acf35"
] | [
"qucumber/nn_states/density_matrix.py"
] | [
"# Copyright 2019 PIQuIL - All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport warnings\n\nimport torch\nfrom torch.nn import functional as F\n\nfrom qucumber import _warn_on_missing_gpu\nfrom qucumber.utils import cplx, unitaries\nfrom qucumber.rbm import PurificationRBM\nfrom .neural_state import NeuralStateBase\n\n\nclass DensityMatrix(NeuralStateBase):\n r\"\"\"\n :param num_visible: The number of visible units, i.e. the size of the system\n :type num_visible: int\n :param num_hidden: The number of units in the hidden layer\n :type num_hidden: int\n :param num_aux: The number of units in the purification layer\n :type num_aux: int\n :param unitary_dict: A dictionary associating bases with their unitary rotations\n :type unitary_dict: dict[str, torch.Tensor]\n :param gpu: Whether to perform computations on the default gpu.\n :type gpu: bool\n \"\"\"\n\n _rbm_am = None\n _rbm_ph = None\n _device = None\n\n def __init__(\n self,\n num_visible,\n num_hidden=None,\n num_aux=None,\n unitary_dict=None,\n gpu=False,\n module=None,\n ):\n if gpu and torch.cuda.is_available():\n warnings.warn(\n \"Using DensityMatrix on GPU is not recommended due to poor performance compared to CPU.\",\n ResourceWarning,\n 2,\n )\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n\n if module is None:\n self.rbm_am = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)\n self.rbm_ph = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)\n else:\n _warn_on_missing_gpu(gpu)\n self.rbm_am = module.to(self.device)\n self.rbm_am.device = self.device\n self.rbm_ph = module.to(self.device).clone()\n self.rbm_ph.device = self.device\n\n self.num_visible = self.rbm_am.num_visible\n self.num_hidden = self.rbm_am.num_hidden\n self.num_aux = self.rbm_am.num_aux\n self.device = self.rbm_am.device\n\n self.unitary_dict = unitary_dict if unitary_dict else unitaries.create_dict()\n self.unitary_dict = {\n k: v.to(device=self.device) for k, v in self.unitary_dict.items()\n }\n\n @property\n def networks(self):\n return [\"rbm_am\", \"rbm_ph\"]\n\n @property\n def rbm_am(self):\n return self._rbm_am\n\n @rbm_am.setter\n def rbm_am(self, new_val):\n self._rbm_am = new_val\n\n @property\n def rbm_ph(self):\n \"\"\"RBM used to learn the wavefunction phase.\"\"\"\n return self._rbm_ph\n\n @rbm_ph.setter\n def rbm_ph(self, new_val):\n self._rbm_ph = new_val\n\n @property\n def device(self):\n return self._device\n\n @device.setter\n def device(self, new_val):\n self._device = new_val\n\n def pi(self, v, vp, expand=True):\n r\"\"\"Calculates elements of the :math:`\\Pi` matrix.\n If `expand` is `True`, will return a complex matrix\n :math:`A_{ij} = \\langle\\sigma_i|\\Pi|\\sigma'_j\\rangle`.\n Otherwise will return a complex vector\n :math:`A_{i} = \\langle\\sigma_i|\\Pi|\\sigma'_i\\rangle`.\n\n :param v: A batch of visible states, :math:`\\sigma`.\n :type v: torch.Tensor\n :param vp: The other batch of visible state, :math:`\\sigma'`.\n :type vp: torch.Tensor\n 
:param expand: Whether to return a matrix (`True`) or a vector (`False`).\n :type expand: bool\n\n :returns: The matrix elements given by :math:`\\langle\\sigma|\\Pi|\\sigma'\\rangle`\n :rtype: torch.Tensor\n \"\"\"\n m_am = F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias)\n mp_am = F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias)\n\n m_ph = F.linear(v, self.rbm_ph.weights_U)\n mp_ph = F.linear(vp, self.rbm_ph.weights_U)\n\n if expand and v.dim() >= 2:\n m_am = m_am.unsqueeze_(1)\n m_ph = m_ph.unsqueeze_(1)\n if expand and vp.dim() >= 2:\n mp_am = mp_am.unsqueeze_(0)\n mp_ph = mp_ph.unsqueeze_(0)\n\n exp_arg = (m_am + mp_am) / 2\n phase = (m_ph - mp_ph) / 2\n\n real = (\n (1 + 2 * exp_arg.exp() * phase.cos() + (2 * exp_arg).exp())\n .sqrt()\n .log()\n .sum(-1)\n )\n\n imag = torch.atan2(\n (exp_arg.exp() * phase.sin()), (1 + exp_arg.exp() * phase.cos())\n ).sum(-1)\n\n return cplx.make_complex(real, imag)\n\n def pi_grad(self, v, vp, phase=False, expand=False):\n r\"\"\"Calculates the gradient of the :math:`\\Pi` matrix with\n respect to the amplitude RBM parameters for two input states\n\n :param v: One of the visible states, :math:`\\sigma`\n :type v: torch.Tensor\n :param vp: The other visible state, :math:`\\sigma'`\n :type vp: torch.Tensor\n :param phase: Whether to compute the gradients for the phase RBM (`True`)\n or the amplitude RBM (`False`)\n :type phase: bool\n\n :returns: The matrix element of the gradient given by\n :math:`\\langle\\sigma|\\nabla_\\lambda\\Pi|\\sigma'\\rangle`\n :rtype: torch.Tensor\n \"\"\"\n unsqueezed = v.dim() < 2 or vp.dim() < 2\n v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.rbm_am.weights_W)\n vp = (vp.unsqueeze(0) if vp.dim() < 2 else vp).to(self.rbm_am.weights_W)\n\n if expand:\n arg_real = 0.5 * (\n F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(1)\n + F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(\n 0\n )\n )\n arg_imag = 0.5 * (\n F.linear(v, self.rbm_ph.weights_U).unsqueeze_(1)\n - F.linear(vp, self.rbm_ph.weights_U).unsqueeze_(0)\n )\n else:\n arg_real = self.rbm_am.mixing_term(v + vp)\n arg_imag = self.rbm_ph.mixing_term(v - vp)\n\n sig = cplx.sigmoid(arg_real, arg_imag)\n\n batch_sizes = (\n (v.shape[0], vp.shape[0], *v.shape[1:-1]) if expand else (*v.shape[:-1],)\n )\n\n W_grad = torch.zeros_like(self.rbm_am.weights_W).expand(*batch_sizes, -1, -1)\n vb_grad = torch.zeros_like(self.rbm_am.visible_bias).expand(*batch_sizes, -1)\n hb_grad = torch.zeros_like(self.rbm_am.hidden_bias).expand(*batch_sizes, -1)\n\n if phase:\n temp = (v.unsqueeze(1) - vp.unsqueeze(0)) if expand else (v - vp)\n sig = cplx.scalar_mult(sig, cplx.I)\n\n ab_grad_real = torch.zeros_like(self.rbm_ph.aux_bias).expand(\n *batch_sizes, -1\n )\n ab_grad_imag = ab_grad_real.clone()\n else:\n temp = (v.unsqueeze(1) + vp.unsqueeze(0)) if expand else (v + vp)\n\n ab_grad_real = cplx.real(sig)\n ab_grad_imag = cplx.imag(sig)\n\n U_grad = 0.5 * torch.einsum(\"c...j,...k->c...jk\", sig, temp)\n U_grad_real = cplx.real(U_grad)\n U_grad_imag = cplx.imag(U_grad)\n\n vec_real = [\n W_grad.view(*batch_sizes, -1),\n U_grad_real.view(*batch_sizes, -1),\n vb_grad,\n hb_grad,\n ab_grad_real,\n ]\n vec_imag = [\n W_grad.view(*batch_sizes, -1).clone(),\n U_grad_imag.view(*batch_sizes, -1),\n vb_grad.clone(),\n hb_grad.clone(),\n ab_grad_imag,\n ]\n\n if unsqueezed and not expand:\n vec_real = [grad.squeeze_(0) for grad in vec_real]\n vec_imag = [grad.squeeze_(0) for grad in vec_imag]\n\n return cplx.make_complex(\n 
torch.cat(vec_real, dim=-1), torch.cat(vec_imag, dim=-1)\n )\n\n def rho(self, v, vp=None, expand=True):\n r\"\"\"Computes the matrix elements of the (unnormalized) density matrix.\n If `expand` is `True`, will return a complex matrix\n :math:`A_{ij} = \\langle\\sigma_i|\\widetilde{\\rho}|\\sigma'_j\\rangle`.\n Otherwise will return a complex vector\n :math:`A_{i} = \\langle\\sigma_i|\\widetilde{\\rho}|\\sigma'_i\\rangle`.\n\n :param v: One of the visible states, :math:`\\sigma`.\n :type v: torch.Tensor\n :param vp: The other visible state, :math:`\\sigma'`.\n If `None`, will be set to `v`.\n :type vp: torch.Tensor\n :param expand: Whether to return a matrix (`True`) or a vector (`False`).\n :type expand: bool\n\n :returns: The elements of the current density matrix\n :math:`\\langle\\sigma|\\widetilde{\\rho}|\\sigma'\\rangle`\n :rtype: torch.Tensor\n \"\"\"\n if expand is False and vp is None:\n return cplx.make_complex(self.probability(v))\n elif vp is None:\n vp = v\n\n pi_ = self.pi(v, vp, expand=expand)\n amp = (self.rbm_am.gamma(v, vp, eta=+1, expand=expand) + cplx.real(pi_)).exp()\n phase = self.rbm_ph.gamma(v, vp, eta=-1, expand=expand) + cplx.imag(pi_)\n\n return cplx.make_complex(amp * phase.cos(), amp * phase.sin())\n\n def importance_sampling_numerator(self, vp, v):\n return self.rho(vp, v, expand=False)\n\n def importance_sampling_denominator(self, v):\n return cplx.make_complex(self.probability(v))\n\n def rotated_gradient(self, basis, sample):\n r\"\"\"Computes the gradients rotated into the measurement basis\n\n :param basis: The bases in which the measurement is made\n :type basis: numpy.ndarray\n :param sample: The measurement (either 0 or 1)\n :type sample: torch.Tensor\n\n :returns: A list of two tensors, representing the rotated gradients\n of the amplitude and phase RBMs\n :rtype: list[torch.Tensor, torch.Tensor]\n \"\"\"\n UrhoU, UrhoU_v, v = unitaries.rotate_rho_probs(\n self, basis, sample, include_extras=True\n )\n inv_UrhoU = 1 / (UrhoU + 1e-8) # avoid dividing by zero\n\n raw_grads = [self.am_grads(v), self.ph_grads(v)]\n\n rotated_grad = [\n -cplx.einsum(\"ijb,ijbg->bg\", UrhoU_v, g, imag_part=False) for g in raw_grads\n ]\n\n return [torch.einsum(\"b,bg->g\", inv_UrhoU, g) for g in rotated_grad]\n\n def am_grads(self, v):\n r\"\"\"Computes the gradients of the amplitude RBM for given input states\n\n :param v: The first input state, :math:`\\sigma`\n :type v: torch.Tensor\n\n :returns: The gradients of all amplitude RBM parameters\n :rtype: torch.Tensor\n \"\"\"\n return self.rbm_am.gamma_grad(v, v, eta=+1, expand=True) + self.pi_grad(\n v, v, phase=False, expand=True\n )\n\n def ph_grads(self, v):\n r\"\"\"Computes the gradients of the phase RBM for given input states\n\n :param v: The first input state, :math:`\\sigma`\n :type v: torch.Tensor\n\n :returns: The gradients of all phase RBM parameters\n :rtype: torch.Tensor\n \"\"\"\n return cplx.scalar_mult( # need to multiply Gamma- by i\n self.rbm_ph.gamma_grad(v, v, eta=-1, expand=True), cplx.I\n ) + self.pi_grad(v, v, phase=True, expand=True)\n\n def fit(\n self,\n data,\n epochs=100,\n pos_batch_size=100,\n neg_batch_size=None,\n k=1,\n lr=1,\n input_bases=None,\n progbar=False,\n starting_epoch=1,\n time=False,\n callbacks=None,\n optimizer=torch.optim.SGD,\n optimizer_args=None,\n scheduler=None,\n scheduler_args=None,\n **kwargs,\n ):\n if input_bases is None:\n raise ValueError(\"input_bases must be provided to train a DensityMatrix!\")\n else:\n super().fit(\n data=data,\n epochs=epochs,\n 
pos_batch_size=pos_batch_size,\n neg_batch_size=neg_batch_size,\n k=k,\n lr=lr,\n input_bases=input_bases,\n progbar=progbar,\n starting_epoch=starting_epoch,\n time=time,\n callbacks=callbacks,\n optimizer=optimizer,\n optimizer_args=optimizer_args,\n scheduler=scheduler,\n scheduler_args=scheduler_args,\n **kwargs,\n )\n\n @staticmethod\n def autoload(location, gpu=False):\n state_dict = torch.load(location)\n nn_state = DensityMatrix(\n unitary_dict=state_dict[\"unitary_dict\"],\n num_visible=len(state_dict[\"rbm_am\"][\"visible_bias\"]),\n num_hidden=len(state_dict[\"rbm_am\"][\"hidden_bias\"]),\n num_aux=len(state_dict[\"rbm_am\"][\"aux_bias\"]),\n gpu=gpu,\n )\n nn_state.load(location)\n return nn_state\n"
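A minimal usage sketch for the DensityMatrix class above, assuming qucumber is installed; probability() is inherited from NeuralStateBase and is not shown in this file::

    import torch

    nn_state = DensityMatrix(num_visible=4, num_hidden=4, num_aux=4, gpu=False)
    v = torch.zeros(2, 4)                     # two visible configurations
    diag = nn_state.rho(v, expand=False)      # complex diagonal elements of rho
    block = nn_state.rho(v, v, expand=True)   # full 2x2 block of matrix elements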
] | [
[
"torch.load",
"torch.cat",
"torch.einsum",
"torch.zeros_like",
"torch.cuda.is_available",
"torch.device",
"torch.nn.functional.linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gsc2001/ConvexNet | [
"a17609bd5bca0a02b6330b1ad8035f2b280109f0"
] | [
"src/models/densenet/model.py"
] | [
"\"\"\"\nVanilla DenseNet implementation\nPaper: https://arxiv.org/abs/1608.06993\nImplementation taken from: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py\n\"\"\"\nimport re\nfrom collections import OrderedDict\nfrom functools import partial\nfrom typing import Any, List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom torch import Tensor\n\nclass _DenseLayer(nn.Module):\n def __init__(\n self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool = False\n ) -> None:\n super().__init__()\n self.norm1: nn.BatchNorm2d\n self.add_module(\"norm1\", nn.BatchNorm2d(num_input_features))\n self.relu1: nn.ReLU\n self.add_module(\"relu1\", nn.ReLU(inplace=True))\n self.conv1: nn.Conv2d\n self.add_module(\n \"conv1\", nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)\n )\n self.norm2: nn.BatchNorm2d\n self.add_module(\"norm2\", nn.BatchNorm2d(bn_size * growth_rate))\n self.relu2: nn.ReLU\n self.add_module(\"relu2\", nn.ReLU(inplace=True))\n self.conv2: nn.Conv2d\n self.add_module(\n \"conv2\", nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)\n )\n self.drop_rate = float(drop_rate)\n self.memory_efficient = memory_efficient\n\n def bn_function(self, inputs: List[Tensor]) -> Tensor:\n concated_features = torch.cat(inputs, 1)\n bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484\n return bottleneck_output\n\n # todo: rewrite when torchscript supports any\n def any_requires_grad(self, input: List[Tensor]) -> bool:\n for tensor in input:\n if tensor.requires_grad:\n return True\n return False\n\n @torch.jit.unused # noqa: T484\n def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:\n def closure(*inputs):\n return self.bn_function(inputs)\n\n return cp.checkpoint(closure, *input)\n\n @torch.jit._overload_method # noqa: F811\n def forward(self, input: List[Tensor]) -> Tensor: # noqa: F811\n pass\n\n @torch.jit._overload_method # noqa: F811\n def forward(self, input: Tensor) -> Tensor: # noqa: F811\n pass\n\n # torchscript does not yet support *args, so we overload method\n # allowing it to take either a List[Tensor] or single Tensor\n def forward(self, input: Tensor) -> Tensor: # noqa: F811\n if isinstance(input, Tensor):\n prev_features = [input]\n else:\n prev_features = input\n\n if self.memory_efficient and self.any_requires_grad(prev_features):\n if torch.jit.is_scripting():\n raise Exception(\"Memory Efficient not supported in JIT\")\n\n bottleneck_output = self.call_checkpoint_bottleneck(prev_features)\n else:\n bottleneck_output = self.bn_function(prev_features)\n\n new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))\n if self.drop_rate > 0:\n new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)\n return new_features\n\n\nclass _DenseBlock(nn.ModuleDict):\n _version = 2\n\n def __init__(\n self,\n num_layers: int,\n num_input_features: int,\n bn_size: int,\n growth_rate: int,\n drop_rate: float,\n memory_efficient: bool = False,\n ) -> None:\n super().__init__()\n for i in range(num_layers):\n layer = _DenseLayer(\n num_input_features + i * growth_rate,\n growth_rate=growth_rate,\n bn_size=bn_size,\n drop_rate=drop_rate,\n memory_efficient=memory_efficient,\n )\n self.add_module(\"denselayer%d\" % (i + 1), layer)\n\n def forward(self, init_features: Tensor) -> 
Tensor:\n features = [init_features]\n for name, layer in self.items():\n new_features = layer(features)\n features.append(new_features)\n return torch.cat(features, 1)\n\n\nclass _Transition(nn.Sequential):\n def __init__(self, num_input_features: int, num_output_features: int) -> None:\n super().__init__()\n self.add_module(\"norm\", nn.BatchNorm2d(num_input_features))\n self.add_module(\"relu\", nn.ReLU(inplace=True))\n self.add_module(\"conv\", nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))\n self.add_module(\"pool\", nn.AvgPool2d(kernel_size=2, stride=2))\n\n\nclass DenseNet(nn.Module):\n r\"\"\"Densenet-BC model class, based on\n `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_.\n Args:\n growth_rate (int) - how many filters to add each layer (`k` in paper)\n block_config (list of 4 ints) - how many layers in each pooling block\n num_init_features (int) - the number of filters to learn in the first convolution layer\n bn_size (int) - multiplicative factor for number of bottle neck layers\n (i.e. bn_size * k features in the bottleneck layer)\n drop_rate (float) - dropout rate after each dense layer\n num_classes (int) - number of classification classes\n memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n but slower. Default: *False*. See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_.\n \"\"\"\n\n def __init__(\n self,\n growth_rate: int = 32,\n block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),\n num_init_features: int = 64,\n bn_size: int = 4,\n drop_rate: float = 0,\n num_classes: int = 1000,\n memory_efficient: bool = False,\n ) -> None:\n\n super().__init__()\n\n # First convolution\n self.features = nn.Sequential(\n OrderedDict(\n [\n (\"conv0\", nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),\n (\"norm0\", nn.BatchNorm2d(num_init_features)),\n (\"relu0\", nn.ReLU(inplace=True)),\n (\"pool0\", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),\n ]\n )\n )\n\n # Each denseblock\n num_features = num_init_features\n for i, num_layers in enumerate(block_config):\n block = _DenseBlock(\n num_layers=num_layers,\n num_input_features=num_features,\n bn_size=bn_size,\n growth_rate=growth_rate,\n drop_rate=drop_rate,\n memory_efficient=memory_efficient,\n )\n self.features.add_module(\"denseblock%d\" % (i + 1), block)\n num_features = num_features + num_layers * growth_rate\n if i != len(block_config) - 1:\n trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)\n self.features.add_module(\"transition%d\" % (i + 1), trans)\n num_features = num_features // 2\n\n # Final batch norm\n self.features.add_module(\"norm5\", nn.BatchNorm2d(num_features))\n\n # Linear layer\n self.classifier = nn.Linear(num_features, num_classes)\n\n # Official init from torch repo.\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x: Tensor) -> Tensor:\n features = self.features(x)\n out = F.relu(features, inplace=True)\n out = F.adaptive_avg_pool2d(out, (1, 1))\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n return out\n"
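The default arguments above reproduce the DenseNet-121 configuration; a brief smoke test (a sketch, assuming nothing beyond the class itself)::

    import torch

    model = DenseNet(growth_rate=32, block_config=(6, 12, 24, 16),
                     num_init_features=64, num_classes=1000)
    x = torch.randn(1, 3, 224, 224)
    print(model(x).shape)  # torch.Size([1, 1000])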
] | [
[
"torch.cat",
"torch.nn.functional.dropout",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.functional.relu",
"torch.utils.checkpoint.checkpoint",
"torch.jit.is_scripting",
"torch.nn.BatchNorm2d",
"torch.flatten",
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gladcolor/seamseg | [
"9e6c7e2828f32b311a7b0c16b279ac194e8aaf94"
] | [
"seamseg/utils/coco_ap.py"
] | [
"import json\nimport tempfile\nimport time\nfrom collections import defaultdict\nfrom os import path, remove\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom PIL import Image\nfrom pycocotools.coco import COCO as _COCO\nfrom pycocotools.cocoeval import COCOeval\nfrom pycocotools.mask import encode as mask_encode\n\nfrom .bbx import invert_roi_bbx, extract_boxes\nfrom .parallel import PackedSequence\nfrom .roi_sampling import roi_sampling\n\n\ndef process_prediction(bbx_pred, cls_pred, obj_pred, msk_pred, img_size, idx, original_size):\n # Move everything to CPU\n bbx_pred, cls_pred, obj_pred = (t.cpu() for t in (bbx_pred, cls_pred, obj_pred))\n msk_pred = msk_pred.cpu() if msk_pred is not None else None\n\n if msk_pred is not None:\n if isinstance(msk_pred, torch.Tensor):\n # ROI-stile prediction\n bbx_inv = invert_roi_bbx(bbx_pred, list(msk_pred.shape[-2:]), list(img_size))\n bbx_idx = torch.arange(0, msk_pred.size(0), dtype=torch.long)\n msk_pred = roi_sampling(msk_pred.unsqueeze(1).sigmoid(), bbx_inv, bbx_idx, list(img_size), padding=\"zero\")\n msk_pred = msk_pred.squeeze(1) > 0.5\n elif isinstance(msk_pred, PackedSequence):\n # Seeds-style prediction\n msk_pred.data = msk_pred.data > 0.5\n msk_pred_exp = msk_pred.data.new_zeros(len(msk_pred), img_size[0], img_size[1])\n\n for it, (msk_pred_i, bbx_pred_i) in enumerate(zip(msk_pred, bbx_pred)):\n i, j = int(bbx_pred_i[0].item()), int(bbx_pred_i[1].item())\n msk_pred_exp[it, i:i + msk_pred_i.size(0), j:j + msk_pred_i.size(1)] = msk_pred_i\n\n msk_pred = msk_pred_exp\n\n # Convert bbx and redo clamping\n bbx_pred[:, [0, 2]] = (bbx_pred[:, [0, 2]] / img_size[0] * original_size[0]).clamp(min=0, max=original_size[0])\n bbx_pred[:, [1, 3]] = (bbx_pred[:, [1, 3]] / img_size[1] * original_size[1]).clamp(min=0, max=original_size[1])\n bbx_pred_size = bbx_pred[:, 2:] - bbx_pred[:, :2]\n\n outs = []\n for i, (bbx_pred_i, bbx_pred_size_i, cls_pred_i, obj_pred_i) in \\\n enumerate(zip(bbx_pred, bbx_pred_size, cls_pred, obj_pred)):\n out = dict(image_id=idx, category_id=int(cls_pred_i.item()), score=float(obj_pred_i.item()))\n\n out[\"bbox\"] = [\n float(bbx_pred_i[1].item()),\n float(bbx_pred_i[0].item()),\n float(bbx_pred_size_i[1].item()),\n float(bbx_pred_size_i[0].item()),\n ]\n\n # Expand and convert mask if present\n if msk_pred is not None:\n segmentation = Image.fromarray(msk_pred[i].numpy()).resize(original_size[::-1], Image.NEAREST)\n\n out[\"segmentation\"] = mask_encode(np.asfortranarray(np.array(segmentation)))\n out[\"segmentation\"][\"counts\"] = str(out[\"segmentation\"][\"counts\"], \"utf-8\")\n\n outs.append(out)\n\n return outs\n\n\ndef process_panoptic_prediction(panoptic_pred, num_stuff, idx, img_size, original_size):\n # Extract panoptic prediction\n msk_pred, cat_pred, obj_pred, iscrowd_pred = panoptic_pred\n\n bbx_pred = extract_boxes(msk_pred, cat_pred.numel())\n\n # Convert bbx and redo clamping\n bbx_pred[:, [0, 2]] = (bbx_pred[:, [0, 2]] / img_size[0] * original_size[0]).clamp(min=0, max=original_size[0])\n bbx_pred[:, [1, 3]] = (bbx_pred[:, [1, 3]] / img_size[1] * original_size[1]).clamp(min=0, max=original_size[1])\n bbx_pred_size = bbx_pred[:, 2:] - bbx_pred[:, :2]\n\n outs = []\n for i, (obj_i, cat_i, bbx_i, iscrowd_i, bbx_size_i) in enumerate(zip(\n obj_pred, cat_pred, bbx_pred, iscrowd_pred, bbx_pred_size)):\n if iscrowd_i.item() == 1 or cat_i.item() < num_stuff or cat_i.item() == 255:\n continue\n out = dict(image_id=idx, category_id=int(cat_i.item()), score=float(obj_i.item()))\n\n 
out[\"bbox\"] = [\n float(bbx_i[1].item()),\n float(bbx_i[0].item()),\n float(bbx_size_i[1].item()),\n float(bbx_size_i[0].item()),\n ]\n\n segmentation = msk_pred == i\n segmentation = Image.fromarray(segmentation.numpy()).resize(original_size[::-1], Image.NEAREST)\n out[\"segmentation\"] = mask_encode(np.asfortranarray(np.array(segmentation)))\n out[\"segmentation\"][\"counts\"] = str(out[\"segmentation\"][\"counts\"], \"utf-8\")\n\n outs.append(out)\n\n return outs\n\n\ndef summarize(predictions, annotations_file, img_list, mask=False):\n msk_map = 0\n with tempfile.NamedTemporaryFile(\"w\") as fid:\n json.dump(predictions, fid)\n fid.flush()\n\n # Detection\n gt = COCO(annotations_file, img_list)\n pred = gt.loadRes(fid.name)\n pred_eval = COCOeval(gt, pred, \"bbox\")\n pred_eval.evaluate()\n pred_eval.accumulate()\n pred_eval.summarize()\n det_map = pred_eval.stats[0]\n\n if mask:\n pred_eval = COCOeval(gt, pred, \"segm\")\n pred_eval.evaluate()\n pred_eval.accumulate()\n pred_eval.summarize()\n msk_map = pred_eval.stats[0]\n\n return det_map, msk_map\n\n\ndef summarize_mp(predictions, annotations_file, img_list, log_dir, mask=False):\n # Write partial results to file (all workers)\n rank = dist.get_rank()\n with open(path.join(log_dir, \"coco_ap_{:02d}.json\".format(rank)), \"w\") as fid:\n json.dump(predictions, fid)\n with open(path.join(log_dir, \"img_list_{:02d}.json\".format(rank)), \"w\") as fid:\n json.dump(img_list, fid)\n\n dist.barrier()\n\n # Merge results from all workers and run evaluation (only rank 0)\n if rank == 0:\n predictions = []\n img_list = []\n\n for i in range(dist.get_world_size()):\n coco_ap_file = path.join(log_dir, \"coco_ap_{:02d}.json\".format(i))\n with open(coco_ap_file) as fid:\n predictions += json.load(fid)\n remove(coco_ap_file)\n\n img_list_file = path.join(log_dir, \"img_list_{:02d}.json\".format(i))\n with open(img_list_file) as fid:\n img_list += json.load(fid)\n remove(img_list_file)\n\n det_map, msk_map = summarize(predictions, annotations_file, img_list, mask)\n else:\n det_map, msk_map = 0, 0\n\n dist.barrier()\n\n return det_map, msk_map\n\n\nclass COCO(_COCO):\n \"\"\"Modified COCO class that loads only a subset of\"\"\"\n\n def __init__(self, annotation_file, img_list):\n # load dataset\n self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()\n self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(annotation_file, 'r'))\n assert type(dataset) == dict, 'annotation file format {} not supported'.format(type(dataset))\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n\n # Clean-up dataset, removing all images and annotations that are not in img_list\n img_list = set(img_list)\n dataset[\"images\"] = [img for img in dataset[\"images\"] if img[\"id\"] in img_list]\n dataset[\"annotations\"] = [ann for ann in dataset[\"annotations\"] if ann[\"image_id\"] in img_list]\n\n self.dataset = dataset\n self.createIndex()\n"
] | [
[
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"numpy.array",
"torch.distributed.barrier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GillesVandewiele/pyShapelets | [
"d7e91150c17bf0f5fed55dc36d0c4d2d447e80c9",
"a48b0c1019b787d94fb54c5c45c72044fa8bedbe"
] | [
"pyshapelets/lts_smaller_shap_dicts.py",
"pyshapelets/mstamp/mstamp_stamp.py"
] | [
"import time\nfrom collections import Counter, defaultdict\nimport warnings; warnings.filterwarnings('ignore')\nimport glob\nimport re\nimport ast\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom algorithms import ShapeletTransformer\nfrom extractors.extractor import MultiGeneticExtractor\nfrom data.load_all_datasets import load_data_train_test\n\nfrom sklearn.metrics import accuracy_score, log_loss\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV\n\nfrom tslearn.shapelets import ShapeletModel\n\n\ndef parse_shapelets(shapelets):\n shapelets = shapelets.replace(']', '],')[:-2]\n shapelets = re.sub(r'\\s+', ', ', shapelets)\n shapelets = re.sub(r',+', ',', shapelets)\n shapelets = shapelets.replace('],[', '], [')\n shapelets = shapelets.replace('[,', '[')\n shapelets = '[' + shapelets + ']'\n shapelets = re.sub(r',\\s+]', ']', shapelets)\n return ast.literal_eval(shapelets)\n\ndef fit_rf(X_distances_train, y_train, X_distances_test, y_test, out_path):\n rf = GridSearchCV(RandomForestClassifier(), {'n_estimators': [10, 25, 50, 100, 500], 'max_depth': [None, 3, 7, 15]})\n rf.fit(X_distances_train, y_train)\n \n hard_preds = rf.predict(X_distances_test)\n proba_preds = rf.predict_proba(X_distances_test)\n\n print(\"[RF] Accuracy = {}\".format(accuracy_score(y_test, hard_preds)))\n print(\"[RF] Logloss = {}\".format(log_loss(y_test, proba_preds)))\n\n hard_preds = pd.DataFrame(hard_preds, columns=['prediction'])\n proba_preds = pd.DataFrame(proba_preds, columns=['proba_{}'.format(x) for x in set(list(y_train) + list(y_test))])\n\n hard_preds.to_csv(out_path.split('.')[0]+'_rf_hard.csv')\n proba_preds.to_csv(out_path.split('.')[0]+'_rf_proba.csv')\n\ndef fit_lr(X_distances_train, y_train, X_distances_test, y_test, out_path):\n lr = GridSearchCV(LogisticRegression(), {'penalty': ['l1', 'l2'], 'C': [0.001, 0.01, 0.1, 1.0, 10.0]})\n lr.fit(X_distances_train, y_train)\n \n hard_preds = lr.predict(X_distances_test)\n proba_preds = lr.predict_proba(X_distances_test)\n\n print(\"[LR] Accuracy = {}\".format(accuracy_score(y_test, hard_preds)))\n print(\"[LR] Logloss = {}\".format(log_loss(y_test, proba_preds)))\n\n hard_preds = pd.DataFrame(hard_preds, columns=['prediction'])\n proba_preds = pd.DataFrame(proba_preds, columns=['proba_{}'.format(x) for x in set(list(y_train) + list(y_test))])\n\n hard_preds.to_csv(out_path.split('.')[0]+'_lr_hard.csv')\n proba_preds.to_csv(out_path.split('.')[0]+'_lr_proba.csv')\n\ndef fit_svm(X_distances_train, y_train, X_distances_test, y_test, out_path):\n svc = GridSearchCV(SVC(kernel='linear', probability=True), {'C': [0.001, 0.01, 0.1, 1.0, 10.0]})\n svc.fit(X_distances_train, y_train)\n \n hard_preds = svc.predict(X_distances_test)\n proba_preds = svc.predict_proba(X_distances_test)\n\n print(\"[SVM] Accuracy = {}\".format(accuracy_score(y_test, hard_preds)))\n print(\"[SVM] Logloss = {}\".format(log_loss(y_test, proba_preds)))\n\n hard_preds = pd.DataFrame(hard_preds, columns=['prediction'])\n proba_preds = pd.DataFrame(proba_preds, columns=['proba_{}'.format(x) for x in set(list(y_train) + list(y_test))])\n\n hard_preds.to_csv(out_path.split('.')[0]+'_svm_hard.csv')\n proba_preds.to_csv(out_path.split('.')[0]+'_svm_proba.csv')\n\ndef fit_lts(X_train, y_train, X_test, y_test, shap_dict, reg, max_it, shap_out_path, pred_out_path, timing_out_path):\n # Fit LTS model, print metrics on test-set, 
write away predictions and shapelets\n clf = ShapeletModel(n_shapelets_per_size=shap_dict, \n max_iter=max_it, verbose_level=0, batch_size=1,\n optimizer='sgd', weight_regularizer=reg)\n\n start = time.time()\n clf.fit(\n np.reshape(\n X_train, \n (X_train.shape[0], X_train.shape[1], 1)\n ), \n y_train\n )\n learning_time = time.time() - start\n\n print('Learning shapelets took {}s'.format(learning_time))\n\n with open(shap_out_path, 'w+') as ofp:\n for shap in clf.shapelets_:\n ofp.write(str(np.reshape(shap, (-1))) + '\\n')\n\n with open(timing_out_path, 'w+') as ofp:\n ofp.write(str(learning_time))\n\n X_distances_train = clf.transform(X_train)\n X_distances_test = clf.transform(X_test)\n\n print('Max distance value = {}'.format(np.max(X_distances_train)))\n\n fit_rf(X_distances_train, y_train, X_distances_test, y_test, pred_out_path)\n fit_lr(X_distances_train, y_train, X_distances_test, y_test, pred_out_path)\n fit_svm(X_distances_train, y_train, X_distances_test, y_test, pred_out_path)\n\nhyper_parameters_lts = {\n\t'Adiac': \t\t\t\t\t[0.3, 0.2, 3, 0.01, 10000],\n\t'Beef': \t\t\t\t\t[0.15, 0.125, 3, 0.01, 10000],\n\t'BeetleFly': \t\t\t\t[0.15, 0.125, 1, 0.01, 5000],\n\t'BirdChicken': \t\t\t\t[0.3, 0.075, 1, 0.1, 10000],\n\t'ChlorineConcentration': [0.3, 0.2, 3, 0.01, 10000],\n\t'Coffee': \t\t\t\t\t[0.05, 0.075, 2, 0.01, 5000],\n\t'DiatomSizeReduction': \t\t[0.3, 0.175, 2, 0.01, 10000],\n\t'ECGFiveDays': \t\t\t\t[0.05, 0.125, 2, 0.01, 10000],\n\t'FaceFour': \t\t\t\t[0.3, 0.175, 3, 1.0, 5000],\n\t'GunPoint': \t\t\t\t[0.15, 0.2, 3, 0.1, 10000],\n\t'ItalyPowerDemand':\t\t\t[0.3, 0.2, 3, 0.01, 5000],\n\t'Lightning7': \t\t\t\t[0.05, 0.075, 3, 1, 5000],\n\t'MedicalImages': \t\t\t[0.3, 0.2, 2, 1, 10000],\n\t'MoteStrain': \t\t\t\t[0.3, 0.2, 3, 1, 10000],\n\t#NOT AVAILABLE#'Otoliths': \t\t\t\t[0.15, 0.125, 3, 0.01, 2000],\n\t'SonyAIBORobotSurface1': \t[0.3, 0.125, 2, 0.01, 10000],\n\t'SonyAIBORobotSurface2': \t[0.3, 0.125, 2, 0.01, 10000],\n\t'Symbols': \t\t\t\t\t[0.05, 0.175, 1, 0.1, 5000],\n\t'SyntheticControl': \t\t[0.15, 0.125, 3, 0.01, 5000],\n\t'Trace': \t\t\t\t\t[0.15, 0.125, 2, 0.1, 10000],\n\t'TwoLeadECG': \t\t\t\t[0.3, 0.075, 1, 0.1, 10000]\n}\n\ndatasets = [\n 'Adiac',\n 'Beef',\n 'BeetleFly',\n 'BirdChicken',\n 'ChlorineConcentration',\n 'Coffee',\n 'ECGFiveDays',\n 'FaceFour',\n 'GunPoint',\n 'ItalyPowerDemand',\n 'Lightning7',\n 'MedicalImages',\n 'MoteStrain',\n 'SonyAIBORobotSurface1',\n 'SonyAIBORobotSurface2',\n 'Symbols',\n 'SyntheticControl',\n 'Trace',\n 'TwoLeadECG',\n 'DiatomSizeReduction'\n]\n\nlearning_sizes = defaultdict(list)\ngenetic_sizes = defaultdict(list)\n\nmetadata = sorted(load_data_train_test(), key=lambda x: x['train']['n_samples']**2*x['train']['n_features']**3)\n\nfor dataset in metadata:\n\n train_df = pd.read_csv(dataset['train']['data_path'])\n test_df = pd.read_csv(dataset['test']['data_path'])\n X_train = train_df.drop('target', axis=1).values\n y_train = train_df['target']\n X_test = test_df.drop('target', axis=1).values\n y_test = test_df['target']\n\n map_dict = {}\n for j, c in enumerate(np.unique(y_train)):\n map_dict[c] = j\n y_train = y_train.map(map_dict) \n y_test = y_test.map(map_dict)\n\n y_train = y_train.values\n y_test = y_test.values\n\n nr_shap, l, r, reg, max_it = hyper_parameters_lts[dataset['train']['name']]\n \n files = glob.glob('results/lts_vs_genetic/{}_genetic_shapelets*.txt'.format(dataset['train']['name']))\n if len(files):\n\t sizes = []\n\t for f in files:\n\t shaps = parse_shapelets(open(f, 'r').read())\n\t 
genetic_sizes[dataset['train']['name']].append(len(shaps))\n\t for s in shaps:\n\t \tsizes.append(len(s))\n\t \n\t shap_dict_cntr = Counter(np.random.choice(sizes, size=int(np.mean(genetic_sizes[dataset['train']['name']]))))\n\t shap_dict = {}\n\t for c in shap_dict_cntr:\n\t \tshap_dict[int(c)] = int(shap_dict_cntr[c])\n\n\t fit_lts(X_train, y_train, X_test, y_test, dict(shap_dict), reg, max_it,\n\t 'results/lts_smaller/{}_learned_shapelets_{}.txt'.format(dataset['train']['name'], int(time.time())), \n\t 'results/lts_smaller/{}_learned_shapelets_predictions_{}.csv'.format(dataset['train']['name'], int(time.time())), \n\t 'results/lts_smaller/{}_learned_runtime_{}.csv'.format(dataset['train']['name'], int(time.time()))\n\t )",
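fit_lts hands shap_dict to tslearn's ShapeletModel as n_shapelets_per_size, a mapping from shapelet length to the number of shapelets of that length. A small illustration of the Counter-to-dict conversion used above (the sampled sizes are made up)::

    from collections import Counter

    sizes = [12, 12, 12, 30, 30, 45]   # sampled shapelet lengths
    shap_dict = {int(k): int(v) for k, v in Counter(sizes).items()}
    print(shap_dict)                   # {12: 3, 30: 2, 45: 1}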
"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Michael Yeh\n\"\"\"\n\nfrom __future__ import print_function\nimport time\nimport numpy as np\n\n\n_EPS = 1e-14\n\n\ndef mstamp(seq, sub_len, return_dimension=False):\n \"\"\" multidimensional matrix profile with mSTAMP (stamp based)\n\n Parameters\n ----------\n seq : numpy matrix, shape (n_dim, seq_len)\n input sequence\n sub_len : int\n subsequence length\n return_dimension : bool\n if True, also return the matrix profile dimension. It takses O(d^2 n)\n to store and O(d^2 n^2) to compute. (default is False)\n\n Returns\n -------\n matrix_profile : numpy matrix, shape (n_dim, sub_num)\n matrix profile\n profile_index : numpy matrix, shape (n_dim, sub_num)\n matrix profile index\n profile_dimension : list, optional, shape (n_dim)\n matrix profile dimension, this is only returned when return_dimension\n is True\n\n Notes\n -----\n C.-C. M. Yeh, N. Kavantzas, and E. Keogh, \"Matrix Profile VI: Meaningful\n Multidimensional Motif Discovery,\" IEEE ICDM 2017.\n https://sites.google.com/view/mstamp/\n http://www.cs.ucr.edu/~eamonn/MatrixProfile.html\n \"\"\"\n if sub_len < 4:\n raise RuntimeError('Subsequence length (sub_len) must be at least 4')\n exc_zone = sub_len // 2\n seq = np.array(seq, dtype=float, copy=True)\n\n if seq.ndim == 1:\n seq = np.expand_dims(seq, axis=0)\n\n seq_len = seq.shape[1]\n sub_num = seq.shape[1] - sub_len + 1\n n_dim = seq.shape[0]\n skip_loc = np.zeros(sub_num, dtype=bool)\n for i in range(sub_num):\n if not np.all(np.isfinite(seq[:, i:i + sub_len])):\n skip_loc[i] = True\n seq[~np.isfinite(seq)] = 0\n\n matrix_profile = np.empty((n_dim, sub_num))\n matrix_profile[:] = np.inf\n profile_index = -np.ones((n_dim, sub_num), dtype=int)\n seq_freq = np.empty((n_dim, seq_len * 2), dtype=np.complex128)\n seq_mu = np.empty((n_dim, sub_num))\n seq_sig = np.empty((n_dim, sub_num))\n if return_dimension:\n profile_dimension = []\n for i in range(n_dim):\n profile_dimension.append(np.empty((i + 1, sub_num), dtype=int))\n for i in range(n_dim):\n seq_freq[i, :], seq_mu[i, :], seq_sig[i, :] = \\\n _mass_pre(seq[i, :], sub_len)\n\n dist_profile = np.empty((n_dim, sub_num))\n que_sig = np.empty(n_dim)\n tic = time.time()\n for i in range(sub_num):\n cur_prog = (i + 1) / sub_num\n time_left = ((time.time() - tic) / (i + 1)) * (sub_num - i - 1)\n print('\\rProgress [{0:<50s}] {1:5.1f}% {2:8.1f} sec'\n .format('#' * int(cur_prog * 50),\n cur_prog * 100, time_left), end=\"\")\n for j in range(n_dim):\n que = seq[j, i:i + sub_len]\n dist_profile[j, :], que_sig = _mass(\n seq_freq[j, :], que, seq_len, sub_len,\n seq_mu[j, :], seq_sig[j, :])\n\n if skip_loc[i] or np.any(que_sig < _EPS):\n continue\n\n exc_zone_st = max(0, i - exc_zone)\n exc_zone_ed = min(sub_num, i + exc_zone)\n dist_profile[:, exc_zone_st:exc_zone_ed] = np.inf\n dist_profile[:, skip_loc] = np.inf\n dist_profile[seq_sig < _EPS] = np.inf\n\n dist_profile_dim = np.argsort(dist_profile, axis=0)\n dist_profile_sort = np.sort(dist_profile, axis=0)\n dist_profile_cumsum = np.zeros(sub_num)\n for j in range(n_dim):\n dist_profile_cumsum += dist_profile_sort[j, :]\n dist_profile_mean = dist_profile_cumsum / (j + 1)\n update_pos = dist_profile_mean < matrix_profile[j, :]\n profile_index[j, update_pos] = i\n matrix_profile[j, update_pos] = dist_profile_mean[update_pos]\n if return_dimension:\n profile_dimension[j][:, update_pos] = \\\n dist_profile_dim[:j + 1, update_pos]\n\n matrix_profile = np.sqrt(matrix_profile)\n if return_dimension:\n return matrix_profile, profile_index, 
profile_dimension\n else:\n return matrix_profile, profile_index,\n\n\ndef _mass_pre(seq, sub_len):\n \"\"\" pre-computation for iterative call to MASS\n\n Parameters\n ----------\n seq : numpy array\n input sequence\n sub_len : int\n subsequence length\n\n Returns\n -------\n seq_freq : numpy array\n sequence in frequency domain\n seq_mu : numpy array\n each subsequence's mu (mean)\n seq_sig : numpy array\n each subsequence's sigma (standard deviation)\n\n Notes\n -----\n This function is modified from the code provided in the following URL\n http://www.cs.unm.edu/~mueen/FastestSimilaritySearch.html\n \"\"\"\n seq_len = len(seq)\n seq_pad = np.zeros(seq_len * 2)\n seq_pad[0:seq_len] = seq\n seq_freq = np.fft.fft(seq_pad)\n seq_cum = np.cumsum(seq_pad)\n seq_sq_cum = np.cumsum(np.square(seq_pad))\n seq_sum = (seq_cum[sub_len - 1:seq_len] -\n np.concatenate(([0], seq_cum[0:seq_len - sub_len])))\n seq_sq_sum = (seq_sq_cum[sub_len - 1:seq_len] -\n np.concatenate(([0], seq_sq_cum[0:seq_len - sub_len])))\n seq_mu = seq_sum / sub_len\n seq_sig_sq = seq_sq_sum / sub_len - np.square(seq_mu)\n seq_sig = np.sqrt(seq_sig_sq)\n return seq_freq, seq_mu, seq_sig\n\n\ndef _mass(seq_freq, que, seq_len, sub_len, seq_mu, seq_sig):\n \"\"\" iterative call of MASS\n\n Parameters\n ----------\n seq_freq : numpy array\n sequence in frequency domain\n que : numpy array\n query\n seq_len : int\n sequence length\n sub_len : int\n subsequence length\n seq_mu : numpy array\n each subsequence's mu (mean)\n seq_sig : numpy array\n each subsequence's sigma (standard deviation)\n\n Returns\n -------\n dist_profile : numpy array\n distance profile\n que_sig : float64\n query's sigma (standard deviation)\n\n Notes\n -----\n This function is modified from the code provided in the following URL\n http://www.cs.unm.edu/~mueen/FastestSimilaritySearch.html\n \"\"\"\n que = que[::-1]\n que_pad = np.zeros(seq_len * 2)\n que_pad[0:sub_len] = que\n que_freq = np.fft.fft(que_pad)\n product_freq = seq_freq * que_freq\n product = np.fft.ifft(product_freq)\n product = np.real(product)\n\n que_sum = np.sum(que)\n que_sq_sum = np.sum(np.square(que))\n que_mu = que_sum / sub_len\n que_sig_sq = que_sq_sum / sub_len - que_mu**2\n if que_sig_sq < _EPS:\n que_sig_sq = _EPS\n que_sig = np.sqrt(que_sig_sq)\n\n dist_profile = (2 * (sub_len - (product[sub_len - 1:seq_len] -\n sub_len * seq_mu * que_mu) /\n (seq_sig * que_sig)))\n return dist_profile, que_sig\n"
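A short usage example for mstamp() above; the random-walk input is illustrative, and sub_len must be at least 4::

    import numpy as np

    data = np.cumsum(np.random.randn(2, 500), axis=1)  # 2-dimensional series
    matrix_profile, profile_index = mstamp(data, sub_len=30)
    # Row k is the (k+1)-dimensional profile; its minimum marks the best motif
    # that spans k+1 of the dimensions.
    motif_loc = int(matrix_profile[1, :].argmin())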
] | [
[
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"numpy.unique",
"numpy.reshape",
"pandas.DataFrame",
"numpy.max",
"sklearn.metrics.log_loss",
"numpy.mean",
"sklearn.svm.SVC",
"sklearn.metrics.accuracy_score"
],
[
"numpy.square",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.fft.fft",
"numpy.isfinite",
"numpy.cumsum",
"numpy.sort",
"numpy.ones",
"numpy.concatenate",
"numpy.fft.ifft",
"numpy.real",
"numpy.any",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ngiambla/nnflex | [
"7c8bf46218ea70c6dad1efedf9e2069e41c4c3fa"
] | [
"operators/clip.py"
] | [
"''' clip.py:\n\nImplement's the clip ONNX node as a flexnode (for use with any accelerator)\n\n'''\nimport uuid\n\nimport numpy as np\n\nfrom operators.flexnode import FlexNode\nfrom core.defines import Operator\nfrom core.messaging import Message\n \nclass Clip(FlexNode):\n\n def __init__(self, onnx_node, inputs, outputs):\n FlexNode.__init__(self, onnx_node, inputs, outputs)\n self._min = -3.402823466e+38\n self._max = 3.402823466e+38\n\n if len(inputs) != 1 and len(inputs) != 3:\n raise ValueError(\"Clip can only have 1 or 3 inputs.\")\n\n self._input = inputs[0]\n\n if len(inputs) == 3:\n self._min = inputs[1]\n self._max = inputs[2] \n\n def map(self, memory_mapper):\n pass\n\n def unmap(self, memory_mapper):\n pass\n\n def _inputs2mem(self, memory_xfer_engine):\n pass\n\n def _mem2output(self, memory_xfer_engine):\n pass\n\n def compile(self, source, destinations):\n\n tile_commands = list()\n\n # Here, we are NOT generating tile_commands, (although, this is not difficult.)\n np.copyto(self._outputs[0], np.clip(self._input, self._min, self._max))\n\n return tile_commands\n"
] | [
[
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
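
For reference, once Clip's optional min/max inputs are resolved (defaulting to the float32 extremes hard-coded in the constructor above), the node reduces to one numpy call. A hedged stand-alone sketch:

import numpy as np

FLT_MAX = 3.402823466e+38                 # default bounds from Clip.__init__ above

def clip(x, lo=-FLT_MAX, hi=FLT_MAX):
    # Same semantics as Clip.compile: clamp elementwise into [lo, hi].
    return np.clip(x, lo, hi)

print(clip(np.array([-1e39, 0.5, 1e39])))  # out-of-range values clamp to +/-FLT_MAX
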
aroig/nnutil2 | [
"1fc77df351d4eee1166688e25a94287a5cfa27c4"
] | [
"nnutil2/layers/segment.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# nnutil2 - Tensorflow utilities for training neural networks\n# Copyright (c) 2019, Abdó Roig-Maranges <[email protected]>\n#\n# This file is part of 'nnutil2'.\n#\n# This file may be modified and distributed under the terms of the 3-clause BSD\n# license. See the LICENSE file for details.\n\nfrom typing import List\n\nimport tensorflow as tf\n\nfrom ..util import kwargs_for\nfrom .layer import Layer\n\nclass Segment(Layer):\n \"\"\"A sequential collection of layers\"\"\"\n def __init__(self, layers: List[Layer] = [], activation=None, **kwargs):\n super(Segment, self).__init__(**kwargs)\n\n self._segment_layers = layers\n self._segment_activation = tf.keras.activations.get(activation)\n self._segment_states = []\n\n def get_config(self):\n config = {\n 'layers': [ly.get_config() for ly in self._layers],\n 'activation': self._segment_activation\n }\n\n base_config = super(Segment, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def call(self, inputs, **kwargs):\n x = inputs\n self._segment_states.append(x)\n\n for l in self._segment_layers:\n layer_kwargs = kwargs_for(kwargs, l.call)\n x = l(x, **layer_kwargs)\n self._segment_states.append(x)\n\n if self._segment_activation is not None:\n x = self._segment_activation(x)\n self._segment_states.append(x)\n\n return x\n\n def compute_output_shape(self, input_shape):\n shape = input_shape\n for l in self._segment_layers:\n shape = l.compute_output_shape(shape)\n return shape\n\n @property\n def flat_layers(self):\n layers = []\n\n def add_layers(ly):\n if isinstance(ly, Segment):\n for ly2 in ly.layers:\n add_layers(ly2)\n else:\n layers.append(ly)\n\n add_layers(self)\n return layers\n\n @property\n def layers(self):\n return self._segment_layers\n\n @property\n def states(self):\n return self._segment_states\n"
] | [
[
"tensorflow.keras.activations.get"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
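
The Segment layer above chains a layer list and caches intermediate states. A rough usage sketch with the stock tf.keras.Sequential standing in for Segment (assumes TF 2.x; the layer sizes are arbitrary):

import tensorflow as tf

# Two stacked layers, applied in order, which is what Segment.call does
# (minus the cached per-layer states).
seg = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu"),
    tf.keras.layers.Dense(4),
])
x = tf.zeros((2, 8))           # batch of 2, feature dim 8
print(seg(x).shape)            # (2, 4): shapes chain as in compute_output_shape
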
briancylui/ALOCC-CVPR2018 | [
"78b6a1e8f3fcde8a46a88294926074a65ff0726a"
] | [
"train.py"
] | [
"import os\nimport numpy as np\nfrom models import ALOCC_Model\nfrom utils import pp, visualize, to_json, show_all_variables\nimport tensorflow as tf\n\nflags = tf.app.flags\nflags.DEFINE_integer(\"epoch\", 40, \"Epoch to train [25]\")\nflags.DEFINE_float(\"learning_rate\", 0.002, \"Learning rate of for adam [0.0002]\")\nflags.DEFINE_float(\"beta1\", 0.5, \"Momentum term of adam [0.5]\")\nflags.DEFINE_integer(\"attention_label\", 1, \"Conditioned label that growth attention of training label [1]\")\nflags.DEFINE_float(\"r_alpha\", 0.2, \"Refinement parameter [0.2]\")\nflags.DEFINE_integer(\"train_size\", np.inf, \"The size of train images [np.inf]\")\nflags.DEFINE_integer(\"batch_size\",128, \"The size of batch images [64]\")\nflags.DEFINE_integer(\"input_height\", 45, \"The size of image to use. [45]\")\nflags.DEFINE_integer(\"input_width\", None, \"The size of image to use. If None, same value as input_height [None]\")\nflags.DEFINE_integer(\"output_height\", 45, \"The size of the output images to produce [45]\")\nflags.DEFINE_integer(\"output_width\", None, \"The size of the output images to produce. If None, same value as output_height [None]\")\nflags.DEFINE_string(\"dataset\", \"UCSD\", \"The name of dataset [UCSD, mnist]\")\nflags.DEFINE_string(\"dataset_address\", \"./dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Train\", \"The path of dataset\")\nflags.DEFINE_string(\"input_fname_pattern\", \"*\", \"Glob pattern of filename of input images [*]\")\nflags.DEFINE_string(\"checkpoint_dir\", \"checkpoint\", \"Directory name to save the checkpoints [checkpoint]\")\nflags.DEFINE_string(\"log_dir\", \"log\", \"Directory name to save the log [log]\")\nflags.DEFINE_string(\"sample_dir\", \"samples\", \"Directory name to save the image samples [samples]\")\nflags.DEFINE_boolean(\"train\", True, \"True for training, False for testing [False]\")\nFLAGS = flags.FLAGS\n\n\ndef check_some_assertions():\n \"\"\"\n to check some assertions in inputs and also check sth else.\n \"\"\"\n if FLAGS.input_width is None:\n FLAGS.input_width = FLAGS.input_height\n if FLAGS.output_width is None:\n FLAGS.output_width = FLAGS.output_height\n\n if not os.path.exists(FLAGS.checkpoint_dir):\n os.makedirs(FLAGS.checkpoint_dir)\n if not os.path.exists(FLAGS.log_dir):\n os.makedirs(FLAGS.log_dir)\n if not os.path.exists(FLAGS.sample_dir):\n os.makedirs(FLAGS.sample_dir)\n\ndef main(_):\n \"\"\"\n The main function for training steps \n \"\"\"\n pp.pprint(flags.FLAGS.__flags)\n n_per_itr_print_results = 100\n kb_work_on_patch = True\n\n # ---------------------------------------------------------------------------------------------\n # ---------------------------------------------------------------------------------------------\n # Manual Switchs ------------------------------------------------------------------------------\n # ---------------------------------------------------------------------------------------------\n # DATASET PARAMETER : UCSD\n #FLAGS.dataset = 'UCSD'\n #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Train'\n\n nd_input_frame_size = (240, 360)\n nd_slice_size = (45, 45)\n n_stride = 25\n n_fetch_data = 600\n # ---------------------------------------------------------------------------------------------\n # # DATASET PARAMETER : MNIST\n # FLAGS.dataset = 'mnist'\n # FLAGS.dataset_address = './dataset/mnist'\n # nd_input_frame_size = (28, 28)\n # nd_slice_size = (28, 28)\n\n FLAGS.train = True\n\n FLAGS.input_width = nd_slice_size[0]\n FLAGS.input_height = nd_slice_size[1]\n 
FLAGS.output_width = nd_slice_size[0]\n FLAGS.output_height = nd_slice_size[1]\n\n FLAGS.sample_dir = 'export/'+FLAGS.dataset +'_%d.%d'%(nd_slice_size[0],nd_slice_size[1])\n FLAGS.input_fname_pattern = '*'\n\n check_some_assertions()\n\n # manual handling of GPU\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)\n run_config = tf.ConfigProto(gpu_options=gpu_options)\n run_config.gpu_options.allow_growth=True\n\n with tf.Session(config=run_config) as sess:\n tmp_model = ALOCC_Model(\n sess,\n input_width=FLAGS.input_width,\n input_height=FLAGS.input_height,\n output_width=FLAGS.output_width,\n output_height=FLAGS.output_height,\n batch_size=FLAGS.batch_size,\n sample_num=FLAGS.batch_size,\n attention_label=FLAGS.attention_label,\n r_alpha=FLAGS.r_alpha,\n dataset_name=FLAGS.dataset,\n dataset_address=FLAGS.dataset_address,\n input_fname_pattern=FLAGS.input_fname_pattern,\n checkpoint_dir=FLAGS.checkpoint_dir,\n is_training = FLAGS.train,\n log_dir=FLAGS.log_dir,\n sample_dir=FLAGS.sample_dir,\n nd_patch_size=nd_slice_size,\n n_stride=n_stride,\n n_per_itr_print_results=n_per_itr_print_results,\n kb_work_on_patch=kb_work_on_patch,\n nd_input_frame_size = nd_input_frame_size,\n n_fetch_data=n_fetch_data)\n\n #show_all_variables()\n\n if FLAGS.train:\n print('Program is on Train Mode')\n tmp_model.train(FLAGS)\n else:\n if not tmp_model.load(FLAGS.checkpoint_dir)[0]:\n print('Program is on Test Mode')\n raise Exception(\"[!] Train a model first, then run test mode from file test.py\")\n\nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"tensorflow.Session",
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
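
The GPU handling in train.py above is standard TF1 session plumbing. A minimal sketch of just that fragment (requires tensorflow 1.x; 0.4 is the memory fraction hard-coded above):

import tensorflow as tf

# Cap this process at 40% of GPU memory, then let it grow lazily within the cap.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
config = tf.ConfigProto(gpu_options=gpu_options)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
    print(sess.run(tf.constant(1)))        # sanity check: session is usable
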
grahamwhiteuk/neutralizing-bias | [
"a6ef764046fcc68ac0daa612c160ec23a79d3e73",
"a6ef764046fcc68ac0daa612c160ec23a79d3e73",
"a6ef764046fcc68ac0daa612c160ec23a79d3e73"
] | [
"src/tagging/train.py",
"src/shared/beam.py",
"src/tagging/model.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\ntrain bert \n\npython tagging/train.py --train ../../data/v6/corpus.wordbiased.tag.train --test ../../data/v6/corpus.wordbiased.tag.test --working_dir TEST --train_batch_size 3 --test_batch_size 10 --hidden_size 32 --debug_skip\n\"\"\"\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom pytorch_pretrained_bert.optimization import BertAdam\n\nfrom collections import defaultdict\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler\nfrom tqdm import tqdm\nimport torch\nimport torch.nn as nn\nimport pickle\nimport sys\nimport os\nimport numpy as np\nfrom pytorch_pretrained_bert.modeling import BertForTokenClassification\nfrom torch.nn import CrossEntropyLoss\nfrom tensorboardX import SummaryWriter\nimport argparse\nimport sklearn.metrics as metrics\n\nimport model as tagging_model\nimport utils as tagging_utils\n\nimport sys; sys.path.append('.')\nfrom shared.data import get_dataloader\nfrom shared.args import ARGS\nfrom shared.constants import CUDA\n\n\n\n\nif not os.path.exists(ARGS.working_dir):\n os.makedirs(ARGS.working_dir)\n\nwith open(ARGS.working_dir + '/command.sh', 'w') as f:\n f.write('python' + ' '.join(sys.argv) + '\\n')\n\n\n\n\n# # # # # # # # ## # # # ## # # DATA # # # # # # # # ## # # # ## # #\n\n\n\nprint('LOADING DATA...')\ntokenizer = BertTokenizer.from_pretrained(ARGS.bert_model, cache_dir=ARGS.working_dir + '/cache')\ntok2id = tokenizer.vocab\ntok2id['<del>'] = len(tok2id)\n\ntrain_dataloader, num_train_examples = get_dataloader(\n ARGS.train, \n tok2id, ARGS.train_batch_size, \n ARGS.working_dir + '/train_data.pkl', \n categories_path=ARGS.categories_file)\neval_dataloader, num_eval_examples = get_dataloader(\n ARGS.test,\n tok2id, ARGS.test_batch_size, ARGS.working_dir + '/test_data.pkl',\n test=True, categories_path=ARGS.categories_file)\n\n# # # # # # # # ## # # # ## # # MODEL # # # # # # # # ## # # # ## # #\n\n\nprint('BUILDING MODEL...')\nif ARGS.tagger_from_debiaser:\n model = tagging_model.TaggerFromDebiaser(\n cls_num_labels=ARGS.num_categories, tok_num_labels=ARGS.num_tok_labels,\n tok2id=tok2id)\nelif ARGS.extra_features_top:\n model = tagging_model.BertForMultitaskWithFeaturesOnTop.from_pretrained(\n ARGS.bert_model,\n cls_num_labels=ARGS.num_categories,\n tok_num_labels=ARGS.num_tok_labels,\n cache_dir=ARGS.working_dir + '/cache',\n tok2id=tok2id)\nelif ARGS.extra_features_bottom:\n model = tagging_model.BertForMultitaskWithFeaturesOnBottom.from_pretrained(\n ARGS.bert_model,\n cls_num_labels=ARGS.num_categories,\n tok_num_labels=ARGS.num_tok_labels,\n cache_dir=ARGS.working_dir + '/cache',\n tok2id=tok2id)\nelse:\n model = tagging_model.BertForMultitask.from_pretrained(\n ARGS.bert_model,\n cls_num_labels=ARGS.num_categories,\n tok_num_labels=ARGS.num_tok_labels,\n cache_dir=ARGS.working_dir + '/cache',\n tok2id=tok2id)\nif CUDA:\n model = model.cuda()\n\nprint('PREPPING RUN...')\n\n# # # # # # # # ## # # # ## # # OPTIMIZER, LOSS # # # # # # # # ## # # # ## # #\n\n\noptimizer = tagging_utils.build_optimizer(\n model, int((num_train_examples * ARGS.epochs) / ARGS.train_batch_size),\n ARGS.learning_rate)\n\nloss_fn = tagging_utils.build_loss_fn()\n\n# # # # # # # # ## # # # ## # # TRAIN # # # # # # # # ## # # # ## # #\n\nwriter = SummaryWriter(ARGS.working_dir)\n\n\nprint('INITIAL EVAL...')\nmodel.eval()\nresults = tagging_utils.run_inference(model, eval_dataloader, loss_fn, tokenizer)\nwriter.add_scalar('eval/tok_loss', np.mean(results['tok_loss']), 0)\nwriter.add_scalar('eval/tok_acc', 
np.mean(results['labeling_hits']), 0)\n\nprint('TRAINING...')\nmodel.train()\nfor epoch in range(ARGS.epochs):\n print('STARTING EPOCH ', epoch)\n losses = tagging_utils.train_for_epoch(model, train_dataloader, loss_fn, optimizer)\n writer.add_scalar('train/loss', np.mean(losses), epoch + 1)\n\n # eval\n print('EVAL...')\n model.eval()\n results = tagging_utils.run_inference(model, eval_dataloader, loss_fn, tokenizer)\n writer.add_scalar('eval/tok_loss', np.mean(results['tok_loss']), epoch + 1)\n writer.add_scalar('eval/tok_acc', np.mean(results['labeling_hits']), epoch + 1)\n\n model.train()\n\n print('SAVING...')\n torch.save(model.state_dict(), ARGS.working_dir + '/model_%d.ckpt' % epoch) \n \n",
"import torch\n\n\"\"\"Beam search implementation in PyTorch.\"\"\"\n# Takes care of beams, back pointers, and scores.\n# Adapted from OpenNMT\nclass Beam(object):\n \"\"\"Ordered beam of candidate outputs.\"\"\"\n\n def __init__(self, size, tok2id, cuda=False):\n \"\"\"Initialize params.\"\"\"\n self.size = size\n self.done = False\n self.pad = tok2id['[PAD]']\n self.bos = tok2id['行']\n self.eos = tok2id['止']\n self.tt = torch.cuda if cuda else torch\n\n # The score for each translation on the beam.\n self.scores = self.tt.FloatTensor(size).zero_()\n\n # The backpointers at each time-step.\n self.prevKs = []\n\n # The outputs at each time-step. [time, beam]\n self.nextYs = [self.tt.LongTensor(size).fill_(self.pad)]\n self.nextYs[0][0] = self.bos # TODO CHANGED THIS\n\n # The attentions (matrix) for each time.\n self.attn = []\n\n # Get the outputs for the current timestep.\n def get_current_state(self):\n \"\"\"Get state of beam.\"\"\"\n return self.nextYs[-1]\n\n # Get the backpointers for the current timestep.\n def get_current_origin(self):\n \"\"\"Get the backpointer to the beam at this step.\"\"\"\n return self.prevKs[-1]\n\n # Given prob over words for every last beam `wordLk` and attention\n # `attnOut`: Compute and update the beam search.\n #\n # Parameters:\n #\n # * `wordLk`- probs of advancing from the last step (K x words)\n # * `attnOut`- attention at the last step\n #\n # Returns: True if beam search is complete.\n\n def advance(self, workd_lk):\n \"\"\"Advance the beam.\"\"\"\n num_words = workd_lk.size(1)\n\n # Sum the previous scores.\n if len(self.prevKs) > 0:\n beam_lk = workd_lk + self.scores.unsqueeze(1).expand_as(workd_lk)\n else:\n beam_lk = workd_lk[0]\n\n flat_beam_lk = beam_lk.view(-1)\n\n bestScores, bestScoresId = flat_beam_lk.topk(self.size, 0, True, True)\n self.scores = bestScores\n\n # bestScoresId is flattened beam x word array, so calculate which\n # word and beam each score came from\n prev_k = bestScoresId / num_words\n self.prevKs.append(prev_k)\n self.nextYs.append(bestScoresId - prev_k * num_words)\n\n # End condition is when top-of-beam is EOS.\n if self.nextYs[-1][0] == self.eos:\n self.done = True\n\n return self.done\n\n def sort_best(self):\n \"\"\"Sort the beam.\"\"\"\n return torch.sort(self.scores, 0, True)\n\n # Get the score of the best in the beam.\n def get_best(self):\n \"\"\"Get the most likely candidate.\"\"\"\n scores, ids = self.sort_best()\n return scores[1], ids[1]\n\n # Walk back to construct the full hypothesis.\n #\n # Parameters.\n #\n # * `k` - the position in the beam to construct.\n #\n # Returns.\n #\n # 1. The hypothesis\n # 2. The attention at each time step.\n def get_hyp(self, k):\n \"\"\"Get hypotheses.\"\"\"\n hyp = []\n # -2 to include start tok\n for j in range(len(self.prevKs) - 1, -2, -1):\n hyp.append(self.nextYs[j + 1][k])\n k = self.prevKs[j][k]\n\n return hyp[::-1]\n ",
"from pytorch_pretrained_bert.modeling import PreTrainedBertModel, BertModel, BertSelfAttention\nimport pytorch_pretrained_bert.modeling as modeling\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport copy\n\nimport sys; sys.path.append('.')\nimport sys; sys.path.append('tagging/') # so that the joint model can see this filter\nimport features\nfrom shared.args import ARGS\nfrom shared.constants import CUDA\nimport seq2seq.model as seq2seq_model\n\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\ndef identity(x):\n return x\n\nclass BertForMultitask(PreTrainedBertModel):\n\n def __init__(self, config, cls_num_labels=2, tok_num_labels=2, tok2id=None):\n super(BertForMultitask, self).__init__(config)\n self.bert = BertModel(config)\n\n self.cls_dropout = nn.Dropout(config.hidden_dropout_prob)\n self.cls_classifier = nn.Linear(config.hidden_size, cls_num_labels)\n \n self.tok_dropout = nn.Dropout(config.hidden_dropout_prob)\n self.tok_classifier = nn.Linear(config.hidden_size, tok_num_labels)\n \n self.apply(self.init_bert_weights)\n\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, \n labels=None, rel_ids=None, pos_ids=None, categories=None, pre_len=None):\n global ARGS\n sequence_output, pooled_output = self.bert(\n input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n\n cls_logits = self.cls_classifier(pooled_output)\n cls_logits = self.cls_dropout(cls_logits)\n\n # NOTE -- dropout is after proj, which is non-standard\n tok_logits = self.tok_classifier(sequence_output)\n tok_logits = self.tok_dropout(tok_logits)\n\n return cls_logits, tok_logits\n\n\n\n\nclass ConcatCombine(nn.Module):\n def __init__(self, hidden_size, feature_size, out_size, layers,\n dropout_prob, small=False, pre_enrich=False, activation=False,\n include_categories=False, category_emb=False,\n add_category_emb=False):\n super(ConcatCombine, self).__init__()\n\n self.include_categories = include_categories\n self.add_category_emb = add_category_emb\n if include_categories:\n if category_emb and not add_category_emb:\n feature_size *= 2\n elif not category_emb:\n feature_size += 43\n\n if layers == 1:\n self.out = nn.Sequential(\n nn.Linear(hidden_size + feature_size, out_size),\n nn.Dropout(dropout_prob))\n elif layers == 2:\n waist_size = min(hidden_size, feature_size) if small else max(hidden_size, feature_size)\n if activation:\n self.out = nn.Sequential(\n nn.Linear(hidden_size + feature_size, waist_size),\n nn.Dropout(dropout_prob),\n nn.ReLU(),\n nn.Linear(waist_size, out_size),\n nn.Dropout(dropout_prob))\n else:\n self.out = nn.Sequential(\n nn.Linear(hidden_size + feature_size, waist_size),\n nn.Dropout(dropout_prob),\n nn.Linear(waist_size, out_size),\n nn.Dropout(dropout_prob))\n if pre_enrich:\n if activation:\n self.enricher = nn.Sequential(\n nn.Linear(feature_size, feature_size),\n nn.ReLU())\n else:\n self.enricher = nn.Linear(feature_size, feature_size)\n else:\n self.enricher = None\n # manually set cuda because module doesn't see these combiners for bottom \n if CUDA:\n self.out = self.out.cuda()\n if self.enricher: \n self.enricher = self.enricher.cuda()\n \n def forward(self, hidden, features, categories=None):\n if self.include_categories:\n categories = 
categories.unsqueeze(1)\n categories = categories.repeat(1, features.shape[1], 1)\n if self.add_category_emb:\n features = features + categories\n else:\n features = torch.cat((features, categories), -1)\n\n if self.enricher is not None:\n features = self.enricher(features)\n\n return self.out(torch.cat((hidden, features), dim=-1))\n\n\nclass AddCombine(nn.Module):\n def __init__(self, hidden_dim, feat_dim, layers, dropout_prob, small=False,\n out_dim=-1, pre_enrich=False, include_categories=False,\n category_emb=False, add_category_emb=False):\n super(AddCombine, self).__init__()\n\n self.include_categories = include_categories\n if include_categories:\n feat_dim += 43\n\n if layers == 1:\n self.expand = nn.Sequential(\n nn.Linear(feat_dim, hidden_dim),\n nn.Dropout(dropout_prob))\n else:\n waist_size = min(feat_dim, hidden_dim) if small else max(feat_dim, hidden_dim)\n self.expand = nn.Sequential(\n nn.Linear(feat_dim, waist_size),\n nn.Dropout(dropout_prob),\n nn.Linear(waist_size, hidden_dim),\n nn.Dropout(dropout_prob))\n \n if out_dim > 0:\n self.out = nn.Linear(hidden_dim, out_dim)\n else:\n self.out = None\n\n if pre_enrich:\n self.enricher = nn.Linear(feature_size, feature_size) \n else:\n self.enricher = None\n\n # manually set cuda because module doesn't see these combiners for bottom \n if CUDA:\n self.expand = self.expand.cuda()\n if out_dim > 0:\n self.out = self.out.cuda()\n if self.enricher is not None:\n self.enricher = self.enricher.cuda()\n\n def forward(self, hidden, feat, categories=None):\n if self.include_categories:\n categories = categories.unsqueeze(1)\n categories = categories.repeat(1, features.shape[1], 1)\n if self.add_category_emb:\n features = features + categories\n else:\n features = torch.cat((features, categories), -1)\n\n if self.enricher is not None:\n feat = self.enricher(feat)\n \n combined = self.expand(feat) + hidden\n \n if self.out is not None:\n return self.out(combined)\n\n return combined\n\n\nclass BertForMultitaskWithFeaturesOnTop(PreTrainedBertModel):\n \"\"\" stick the features on top of the model \"\"\"\n def __init__(self, config, cls_num_labels=2, tok_num_labels=2, tok2id=None):\n super(BertForMultitaskWithFeaturesOnTop, self).__init__(config)\n global ARGS\n \n self.bert = BertModel(config)\n \n self.featurizer = features.Featurizer(\n tok2id, lexicon_feature_bits=ARGS.lexicon_feature_bits) \n # TODO -- don't hardcode this...\n nfeats = 90 if ARGS.lexicon_feature_bits == 1 else 118\n\n if ARGS.extra_features_method == 'concat':\n self.tok_classifier = ConcatCombine(\n config.hidden_size, nfeats, tok_num_labels, \n ARGS.combiner_layers, config.hidden_dropout_prob,\n ARGS.small_waist, pre_enrich=ARGS.pre_enrich,\n activation=ARGS.activation_hidden,\n include_categories=ARGS.concat_categories,\n category_emb=ARGS.category_emb,\n add_category_emb=ARGS.add_category_emb)\n else:\n self.tok_classifier = AddCombine(\n config.hidden_size, nfeats, ARGS.combiner_layers,\n config.hidden_dropout_prob, ARGS.small_waist,\n out_dim=tok_num_labels, pre_enrich=ARGS.pre_enrich,\n include_categories=ARGS.concat_categories,\n category_emb=ARGS.category_emb,\n add_category_emb=ARGS.add_category_emb)\n\n self.cls_dropout = nn.Dropout(config.hidden_dropout_prob)\n self.cls_classifier = nn.Linear(config.hidden_size, cls_num_labels)\n\n self.category_emb = ARGS.category_emb\n if ARGS.category_emb:\n self.category_embeddings = nn.Embedding(43, nfeats)\n\n self.apply(self.init_bert_weights)\n\n\n def forward(self, input_ids, token_type_ids=None, 
attention_mask=None, \n labels=None, rel_ids=None, pos_ids=None, categories=None, pre_len=None):\n global ARGS\n global CUDA\n\n features = self.featurizer.featurize_batch(\n input_ids.detach().cpu().numpy(), \n rel_ids.detach().cpu().numpy(), \n pos_ids.detach().cpu().numpy(), \n padded_len=input_ids.shape[1])\n features = torch.tensor(features, dtype=torch.float)\n if CUDA:\n features = features.cuda()\n\n sequence_output, pooled_output = self.bert(\n input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n\n pooled_output = self.cls_dropout(pooled_output)\n cls_logits = self.cls_classifier(pooled_output)\n\n if ARGS.category_emb:\n categories = self.category_embeddings(\n categories.max(-1)[1].type(\n 'torch.cuda.LongTensor' if CUDA else 'torch.LongTensor'))\n\n tok_logits = self.tok_classifier(sequence_output, features, categories)\n\n return cls_logits, tok_logits\n\n\nclass TaggerFromDebiaser(nn.Module):\n def __init__(self, cls_num_labels=2, tok_num_labels=2, tok2id=None):\n super(TaggerFromDebiaser, self).__init__()\n\n global ARGS\n global CUDA\n\n if ARGS.pointer_generator:\n self.debias_model = seq2seq_model.PointerSeq2Seq(\n vocab_size=len(tok2id), hidden_size=ARGS.hidden_size,\n emb_dim=768, dropout=0.2, tok2id=tok2id)\n else:\n self.debias_model = seq2seq_model.Seq2Seq(\n vocab_size=len(tok2id), hidden_size=ARGS.hidden_size,\n emb_dim=768, dropout=0.2, tok2id=tok2id)\n\n assert ARGS.debias_checkpoint\n print('LOADING DEBIASER FROM ' + ARGS.debias_checkpoint)\n self.debias_model.load_state_dict(torch.load(ARGS.debias_checkpoint))\n print('...DONE')\n\n self.cls_classifier = nn.Sequential(\n nn.Linear(ARGS.hidden_size, ARGS.hidden_size),\n nn.Dropout(0.1),\n nn.ReLU(),\n nn.Linear(ARGS.hidden_size, cls_num_labels),\n nn.Dropout(0.1))\n\n self.tok_classifier = nn.Sequential(\n nn.Linear(ARGS.hidden_size, ARGS.hidden_size),\n nn.Dropout(0.1),\n nn.ReLU(),\n nn.Linear(ARGS.hidden_size, tok_num_labels),\n nn.Dropout(0.1))\n\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None,\n labels=None, rel_ids=None, pos_ids=None, categories=None, pre_len=None):\n\n pre_mask = 1.0-attention_mask\n\n # src_outputs is [batch_size, sequence_length, hidden_size].\n src_outputs, h_t, _ = self.debias_model.run_encoder(\n input_ids, pre_len, pre_mask)\n\n cls_logits = self.cls_classifier(h_t)\n tok_logits = self.tok_classifier(src_outputs)\n\n return cls_logits, tok_logits\n"
] | [
[
"numpy.mean"
],
[
"torch.sort"
],
[
"torch.nn.Dropout",
"torch.cat",
"torch.load",
"torch.nn.Embedding",
"torch.tensor",
"torch.nn.Linear",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
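
Beam.get_hyp in src/shared/beam.py above rebuilds a hypothesis by walking the stored back pointers. The same walk with plain Python lists in place of tensors (toy values; token 7 stands in for BOS):

prevKs = [[0, 0], [1, 0]]            # back pointer per beam slot, per step
nextYs = [[7, 7], [3, 4], [5, 6]]    # token emitted per beam slot, per step

def get_hyp(k):
    hyp = []
    # Stop at -2 so the start token in nextYs[0] is emitted too,
    # mirroring the "-2 to include start tok" comment in Beam.get_hyp.
    for j in range(len(prevKs) - 1, -2, -1):
        hyp.append(nextYs[j + 1][k])
        k = prevKs[j][k]
    return hyp[::-1]

print(get_hyp(0))                    # [7, 4, 5]
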
philippe-heitzmann/python-apps | [
"1cc6e5e9b9ac81c81a3d4f0e420ff488fe6b2f0a",
"1cc6e5e9b9ac81c81a3d4f0e420ff488fe6b2f0a"
] | [
"sagemaker-dash/tutorials/app15.py",
"sagemaker-dash/tutorials/app2.py"
] | [
"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.express as px\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ndf = pd.read_csv('https://plotly.github.io/datasets/country_indicators.csv')\n\navailable_indicators = df['Indicator Name'].unique()\n\napp.layout = html.Div([\n html.Div([\n html.Div([\n dcc.Dropdown(id='crossfilter-xaxis-column',\n options=[{\n 'label': i,\n 'value': i\n } for i in available_indicators],\n value='Fertility rate, total (births per woman)'),\n dcc.RadioItems(id='crossfilter-xaxis-type',\n options=[{\n 'label': i,\n 'value': i\n } for i in ['Linear', 'Log']],\n value='Linear',\n labelStyle={\n 'display': 'inline-block',\n 'marginTop': '5px'\n })\n ],\n style={\n 'width': '49%',\n 'isplay': 'inline-block'\n }),\n html.Div([\n dcc.Dropdown(id='crossfilter-yaxis-column',\n options=[{\n 'label': i,\n 'value': i\n } for i in available_indicators],\n value='Life expectancy at birth, total (years)'),\n dcc.RadioItems(id='crossfilter-yaxis-type',\n options=[{\n 'label': i,\n 'value': i\n } for i in ['Linear', 'Log']],\n value='Linear',\n labelStyle={\n 'display': 'inline-block',\n 'marginTop': '5px'\n })\n ],\n style={\n 'width': '49%',\n 'float': 'right',\n 'display': 'inline-block'\n })\n ],\n style={'padding': '10px 5px'}),\n html.Div([\n dcc.Graph(id='crossfilter-indicator-scatter',\n hoverData={'points': [{\n 'customdata': 'Japan'\n }]})\n ],\n style={\n 'width': '49%',\n 'display': 'inline-block',\n 'padding': '0 20'\n }),\n html.Div([\n dcc.Graph(id='x-time-series'),\n dcc.Graph(id='y-time-series'),\n ],\n style={\n 'display': 'inline-block',\n 'width': '49%'\n }),\n html.Div(dcc.Slider(\n id='crossfilter-year--slider',\n min=df['Year'].min(),\n max=df['Year'].max(),\n value=df['Year'].max(),\n marks={str(year): str(year)\n for year in df['Year'].unique()},\n step=None),\n style={\n 'width': '49%',\n 'padding': '0px 20px 20px 20px'\n })\n])\n\n\[email protected](\n dash.dependencies.Output('crossfilter-indicator-scatter', 'figure'), [\n dash.dependencies.Input('crossfilter-xaxis-column', 'value'),\n dash.dependencies.Input('crossfilter-yaxis-column', 'value'),\n dash.dependencies.Input('crossfilter-xaxis-type', 'value'),\n dash.dependencies.Input('crossfilter-yaxis-type', 'value'),\n dash.dependencies.Input('crossfilter-year--slider', 'value')\n ])\ndef update_graph(xaxis_column_name, yaxis_column_name, xaxis_type, yaxis_type,\n year_value):\n dff = df[df['Year'] == year_value]\n\n fig = px.scatter(\n x=dff[dff['Indicator Name'] == xaxis_column_name]['Value'],\n y=dff[dff['Indicator Name'] == yaxis_column_name]['Value'],\n hover_name=dff[dff['Indicator Name'] ==\n yaxis_column_name]['Country Name'])\n\n fig.update_traces(customdata=dff[dff['Indicator Name'] ==\n yaxis_column_name]['Country Name'])\n\n fig.update_xaxes(title=xaxis_column_name,\n type='linear' if xaxis_type == 'Linear' else 'log')\n\n fig.update_yaxes(title=yaxis_column_name,\n type='linear' if yaxis_type == 'Linear' else 'log')\n\n fig.update_layout(margin={\n 'l': 40,\n 'b': 40,\n 't': 10,\n 'r': 0\n },\n hovermode='closest')\n\n return fig\n\n\ndef create_time_series(dff, axis_type, title):\n\n fig = px.scatter(dff, x='Year', y='Value')\n\n fig.update_traces(mode='lines+markers')\n\n fig.update_xaxes(showgrid=False)\n\n fig.update_yaxes(type='linear' if axis_type == 'Linear' else 'log')\n\n fig.add_annotation(x=0,\n y=0.85,\n 
xanchor='left',\n yanchor='bottom',\n xref='paper',\n yref='paper',\n showarrow=False,\n align='left',\n text=title)\n\n fig.update_layout(height=225, margin={'l': 20, 'b': 30, 'r': 10, 't': 10})\n\n return fig\n\n\[email protected](dash.dependencies.Output('x-time-series', 'figure'), [\n dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),\n dash.dependencies.Input('crossfilter-xaxis-column', 'value'),\n dash.dependencies.Input('crossfilter-xaxis-type', 'value')\n])\ndef update_y_timeseries(hoverData, xaxis_column_name, axis_type):\n country_name = hoverData['points'][0]['customdata']\n dff = df[df['Country Name'] == country_name]\n dff = dff[dff['Indicator Name'] == xaxis_column_name]\n title = '<b>{}</b><br>{}'.format(country_name, xaxis_column_name)\n return create_time_series(dff, axis_type, title)\n\n\[email protected](dash.dependencies.Output('y-time-series', 'figure'), [\n dash.dependencies.Input('crossfilter-indicator-scatter', 'hoverData'),\n dash.dependencies.Input('crossfilter-yaxis-column', 'value'),\n dash.dependencies.Input('crossfilter-yaxis-type', 'value')\n])\ndef update_x_timeseries(hoverData, yaxis_column_name, axis_type):\n dff = df[df['Country Name'] == hoverData['points'][0]['customdata']]\n dff = dff[dff['Indicator Name'] == yaxis_column_name]\n return create_time_series(dff, axis_type, yaxis_column_name)\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n",
"# -*- coding: utf-8 -*-\n\n# Run this app with `python app.py` and\n# visit http://127.0.0.1:8050/ in your web browser.\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\nimport pandas as pd\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ncolors = {'background': '#111111', 'text': '#7FDBFF'}\n\n# assume you have a \"long-form\" data frame\n# see https://plotly.com/python/px-arguments/ for more options\ndf = pd.DataFrame({\n \"Fruit\": [\"Apples\", \"Oranges\", \"Bananas\", \"Apples\", \"Oranges\", \"Bananas\"],\n \"Amount\": [4, 1, 2, 2, 4, 5],\n \"City\": [\"SF\", \"SF\", \"SF\", \"Montreal\", \"Montreal\", \"Montreal\"]\n})\n\nfig = px.bar(df, x=\"Fruit\", y=\"Amount\", color=\"City\", barmode=\"group\")\n\nfig.update_layout(plot_bgcolor=colors['background'],\n paper_bgcolor=colors['background'],\n font_color=colors['text'])\n\napp.layout = html.Div(\n style={'backgroundColor': colors['background']},\n children=[\n html.H1(children='Hello Dash',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n html.Div(children='Dash: A web application framework for Python.',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n dcc.Graph(id='example-graph-2', figure=fig)\n ])\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n"
] | [
[
"pandas.read_csv"
],
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
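
Both Dash apps above share one pattern: a component layout plus callbacks wired by component IDs. A minimal sketch of that pattern (assumes dash 2.x single-package imports; the apps above use the older dash_core_components/dash_html_components style, and the IDs here are hypothetical):

from dash import Dash, Input, Output, dcc, html

app = Dash(__name__)
app.layout = html.Div([
    dcc.Input(id="name", value="world"),
    html.Div(id="greeting"),
])

@app.callback(Output("greeting", "children"), Input("name", "value"))
def update_greeting(name):
    # Re-renders the div whenever the input value changes.
    return "Hello, {}!".format(name)

if __name__ == "__main__":
    app.run_server(debug=True)
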
hunterluepke/Learn-Python-for-Stats-and-Econ | [
"d580a8e27ba937fc8401ac6d0714b6488ac8bbb6",
"d580a8e27ba937fc8401ac6d0714b6488ac8bbb6",
"d580a8e27ba937fc8401ac6d0714b6488ac8bbb6"
] | [
"Projects/Sugarscape/Model.py",
"Textbook/Chapter 8/regressionOld.py",
"Textbook/Chapter 5/twoDimensionalListAndNumpyAray.py"
] | [
"import numpy as np \nimport pandas as pd\nfrom scipy.stats.mstats import gmean\nimport random\nimport math\nfrom randomdict import RandomDict\n# from chest import *\nimport shelve\nfrom Patch import *\nfrom AgentBranch import *\nimport gc\nfrom memory_profiler import memory_usage\n#Model.py\nclass Model():\n def __init__(self, gui, num_agents, mutate, genetic, live_visual, agent_attributes,\n model_attributes):\n if live_visual:\n self.GUI = gui\n self.live_visual = live_visual\n self.name = gui.name\n self.run = gui.run\n self.initial_population = num_agents\n self.mutate = mutate\n self.genetic = genetic\n self.agent_attributes = agent_attributes\n self.model_attributes = model_attributes\n self.attributes = agent_attributes + model_attributes\n # attributes that are not copied during mutation or herding\n self.drop_attr = [\"col\", \"row\", \"dx\", \"dy\", \"id\", \"wealth\", \"top_wealth\",\n \"sugar\", \"water\",\"target\", \"not_target\",\n \"exchange_target\", \"not_exchange_target\", \"parent\", \"image\"]\n # if self.GUI.live_visual:\n # self.drop_attr.append(\"image\")\n if self.mutate:\n self.max_mutate_rate = 0.5 if mutate else 0 #.5\n if self.genetic:\n self.cross_over_rate = .5\n ############ set model parameters ############\n self.total_agents_created = 0 \n self.goods = [\"sugar\", \"water\"]\n self.goods_params = {good:{\"min\":5,\n \"max\":25} for good in self.goods}\n \n self.max_init_demand_vals = {\"price\":{\"min\": 1/2,\n \"max\": 2},\n \"quantity\":{\"min\":10,\n \"max\":25}}\n self.consumption_rate = {\"sugar\":.5,\n \"water\":.5}\n self.primary_breeds = [\"basic\", \"switcher\", \"arbitrageur\"]\n self.secondary_breeds = [\"herder\"]\n \n self.breeds = self.primary_breeds + self.secondary_breeds\n # all agents start as basic, only mutation can create other agents\n basic = 1\n self.breed_probabilities = {\"basic\":basic, # if you are not a basic, you are a switcher\n \"herder\":0,\n \"arbitrageur\":0}\n self.max_vision = 1\n # record price of every transaction\n # then take average at end of period\n self.transaction_prices = []\n self.average_price = np.nan\n self.total_exchanges = 0\n ############ import map and build nav_dict ############\n # hash table that identifies possible moves relative to agent position \n self.nav_dict = {\n v:{\n i:{\n j: True for j in range(-v, v + 1) if 0 < (i ** 2 + j ** 2) <= (v ** 2)}\n for i in range(-v, v + 1)}\n for v in range(1, self.max_vision + 1)}\n #sugarMap.shape calls the a tuple with dimensions\n #of the dataframe\n self.sugarMap = pd.read_csv('sugar-map.txt', header = None, sep = ' ')\n # add 1 to each max_Val\n for key in self.sugarMap:\n self.sugarMap[key] = self.sugarMap[key].add(1)\n self.rows, self.cols = self.sugarMap.shape\n \n ############ Initialization ############ \n self.initializePatches()\n self.initializeAgents()\n self.data_dict = shelve.open(\"shelves\\\\masterShelve\", writeback = True)\n for attribute in self.attributes:\n self.data_dict[attribute] = shelve.open(\"shelves\\\\subshelve-\"+attribute, writeback = True) \n \n def initializePatches(self):\n #Instantiate Patches\n #Create a dictionary to hold the patches, organize as grid. 
\n #We first fill these with zeros as placeh holders\n self.patch_dict = {row:{col:0}\n for row in range(self.rows) for col in range(self.cols)}\n for row in range(self.rows):\n for col in range(self.cols):\n # replace zeros with actual Patch objects\n good = \"sugar\" if row + col < self.cols else \"water\"\n self.patch_dict[row][col] = Patch(self, row , col, \n self.sugarMap[row][col], good)\n # use RandomDict - O(n) time complexity - for choosing random empty patch\n self.empty_patches = RandomDict({\n (row,col):self.patch_dict[row][col]\n for row in range(self.rows) for col in range(self.cols)})\n \n def initializeAgents(self):\n # agents stored in a dict by ID\n self.agent_dict = {} #if self.live_visual else Chest(path = data_aggregator.folder) #shelve.open(\"agent_dict\") \n # dead agents will be removed from agent_dict\n for i in range(self.initial_population):\n self.total_agents_created += 1\n ID = self.total_agents_created\n row, col = self.chooseRandomEmptyPatch() \n self.agent_dict[ID] = Agent(self, row, col, ID)\n self.patch_dict[row][col].agent = self.agent_dict[ID]\n self.population = self.total_agents_created\n# def recordAgentLocationInDict(self, agent):\n# patchIndex = self.convert2dTo1d(agent.row, agent.col)\n# self.agentLocationDict[patchIndex] = agent\n\n def chooseRandomEmptyPatch(self):\n row, col = self.empty_patches.random_key() \n del self.empty_patches[row, col]\n\n return row, col\n\n def runModel(self, periods):\n def updateModelVariables():\n self.population = len(agent_list)\n self.average_price = gmean(self.transaction_prices)\n self.transaction_prices = []\n \n for period in range(1, periods + 1):\n self.growPatches()\n agent_list = list(self.agent_dict.values())\n random.shuffle(agent_list)\n for agent in agent_list:\n agent.move()\n agent.harvest()\n agent.trade()\n agent.consume()\n agent.checkAlive()\n agent.reproduce()\n agent.updateParams()\n \n # data_aggregator.collectData(self, self.name, \n # self.run, period)\n updateModelVariables()\n self.collectData(str(period))\n \n if self.live_visual:\n if period % self.GUI.every_t_frames == 0:\n print(\"period\", period, \"population\", self.population, sep = \"\\t\")\n self.GUI.parent.title(\"Sugarscape: \" + str(period))\n self.GUI.updatePatches()\n self.GUI.moveAgents()\n self.GUI.canvas.update()\n\n if period == periods:\n mem_usage = memory_usage(-1, interval=1)#, timeout=1)\n print(period, \"end memory usage before sync//collect:\", mem_usage[0], sep = \"\\t\")\n self.data_dict.sync()\n gc.collect()\n mem_usage = memory_usage(-1, interval=1)#, timeout=1)\n print(period, \"end memory usage after sync//collect:\", mem_usage[0], sep = \"\\t\")\n\n def growPatches(self):\n for i in self.patch_dict:\n for patch in self.patch_dict[i].values():\n if patch.Q < patch.maxQ:\n patch.Q += 1\n\n\n def collectData(self, period):\n \n def collectAgentAttributes():\n temp_dict={}\n for attribute in self.agent_attributes:\n temp_dict[attribute] = []\n for ID, agent in self.agent_dict.items():\n for attribute in self.agent_attributes:\n temp_dict[attribute].append(getattr(agent, attribute)) \n \n for attribute, val in temp_dict.items():\n self.data_dict[attribute][period] = np.mean(val)\n\n def collectModelAttributes():\n for attribute in self.model_attributes:\n self.data_dict[attribute][period] = getattr(self, attribute)\n \n collectAgentAttributes()\n collectModelAttributes()\n",
"#econFreedomRegression.py\nimport pandas as pd\nimport copy\nimport numpy as np\nfrom scipy.stats import t\nimport matplotlib.pyplot as plt\nfrom stats import Stats\nimport sys\n\nclass Regression:\n def __init__(self):\n self.stats = Stats()\n self.all_regressions = {}\n self.indicator_names = []\n \n def regress(self, reg_name, data, y_name, X_names, min_val = 0,\n max_val = None, constant = True):\n self.reg_name = reg_name\n self.all_regressions[self.reg_name] = {}\n self.min_val = min_val\n if max_val != None:\n self.max_val = max_val\n else:\n self.max_val = len(data)\n \n # self.data is a copy of data, avoid changing original dataframe\n self.y_name = y_name\n self.X_names = copy.copy(X_names)\n self.data = copy.copy(data)\n if constant:\n self.add_constant()\n self.build_matrices()\n self.estimate_betas_and_yhat()\n self.calculate_regression_stats()\n self.X_names = self.X_names \n self.save_reg_stats()\n \n def panel_regression(self, reg_name, data, y_name, X_names, min_val = 0,\n max_val = None, entity = False, time = False, \n constant = True, ):\n if (entity and time) or (not entity and not time): \n print(\"Choose time OR entity for panel regression. Cannot choose\\\n both or neither option.\")\n sys.exit()\n #identify which index column holds dates, which holds entities\n for i in range(len(data.index.levels)):\n if isinstance(data.index.levels[i], pd.DatetimeIndex):\n date_level = i\n date_index_name = data.index.names[date_level]\n else:\n entity_level = i\n entity_index_name = data.index.names[entity_level]\n #save name of selected index \n index_name = entity_index_name if entity else date_index_name \n #reduce list to unique elements and sort\n self.indicator_names = list(set(data.index.get_level_values(index_name)))\n self.indicator_names= sorted(self.indicator_names)\n self.indicator_names.pop()\n #\n for indicator in self.indicator_names:\n self.create_indicator_variable(data, indicator, index_name,\n [indicator])\n X_and_indicator_names = X_names + self.indicator_names\n \n self.regress(reg_name = reg_name, data = data, y_name = y_name, \n X_names = X_and_indicator_names, min_val = min_val, \n max_val = max_val, constant = constant)\n self.data = self.data[self.X_names]\n self.estimates = self.estimates.ix[self.X_names]\n \n def create_indicator_variable(self, data, indicator_name, index_name, \n target_index_list):\n # Prepare column with name of indicator variable\n data[indicator_name] = 0\n # for each index whose name matches an entry in target_index_list\n # a value of 1 will be recorded\n for index in target_index_list:\n data[indicator_name].loc[(data.index.get_level_values(\\\n index_name)== index)] = 1\n\n def add_constant(self):\n self.data[\"Constant\"] = 1\n self.X_names.append(\"Constant\")\n \n def build_matrices(self):\n # Transform dataframes to matrices\n self.y = np.matrix(self.data[self.y_name]\\\n [self.min_val:self.max_val])\n # create a k x n nested list containg vectors for each xi\n self.X = self.data[self.X_names].values\n # create X Array\n self.X = np.matrix(self.X)\n self.X_transpose = np.matrix(self.X.getT())\n \n # (X'X)^-1\n X_transp_X = np.matmul(self.X_transpose, self.X)\n self.X_transp_X_inv = X_transp_X.getI()\n #X'y\n self.X_transp_y = np.matmul(self.X_transpose, self.y)\n \n def estimate_betas_and_yhat(self):\n # betas = (X'X)^-1 * X'y\n self.betas = np.matmul(self.X_transp_X_inv, self.X_transp_y)\n # y-hat = X * betas\n self.y_hat = np.matmul(self.X, self.betas)\n # Create a column that holds y_hat values\n 
self.data[self.y_name[0] + \" estimator\"] = [i.item(0) for i in self.y_hat]\n # create a table that holds the estimated coefficient\n # this will aslo be used to store SEs, t-stats, and p-values\n self.estimates = pd.DataFrame(self.betas, index = self.X_names,\n columns = [\"Coefficient\"])\n # identify y variable in index\n self.estimates.index.name =\"y = \" + self.y_name[0]\n\n def remove_indicator_names(self):\n #save X_names without indicator variables in case of panel regression\n for name in self.indicator_names:\n try:\n self.X_names.remove(name)\n except:\n print(name, \"not in list\")\n\n \n def calculate_regression_stats(self):\n self.sum_square_stats()\n self.calculate_degrees_of_freedom()\n self.calculate_estimator_variance()\n self.calculate_covariance_matrix()\n # done using indicator variables\n self.remove_indicator_names()\n self.calculate_t_p_error_stats()\n self.calculate_MSE()\n self.calculate_rsquared()\n self.caculate_adjusted_rsquared()\n self.calculate_fstat()\n self.build_stats_DF()\n self.save_reg_stats()\n def sum_square_stats(self):\n ssr_list = []\n sse_list = []\n sst_list = []\n mean_y = self.stats.mean(self.y).item(0)\n for i in range(len(self.y)):\n yhat_i = self.y_hat[i]\n y_i =self.y[i]\n ssr_list.append((yhat_i - mean_y) ** 2)\n sse_list.append((y_i - yhat_i) ** 2)\n sst_list.append((y_i - mean_y) ** 2)\n self.ssr = self.stats.total(ssr_list).item(0)\n self.sst = self.stats.total(sst_list).item(0)\n self.sse = self.stats.total(sse_list).item(0)\n \n def calculate_degrees_of_freedom(self):\n # Degrees of freedom compares the number of observations to the number\n # of variables ued to form prediction\n self.lost_degrees_of_freedom = len(self.estimates)\n # DoF = num_obs - num_X_variables\n self.degrees_of_freedom = (self.max_val + 1 - self.min_val) \\\n - self.lost_degrees_of_freedom\n \n def calculate_estimator_variance(self):\n # estimator variance is the sse normalized by the degrees of freedom\n # thus, there is a tradeoff between estimator variance and degrees of\n # freedom\n self.estimator_variance = self.sse / self.degrees_of_freedom\n \n def calculate_covariance_matrix(self):\n # Covariance matrix will be used to estimate standard errors for each \n # coefficient\n # est_var * (X'X)^-1 is the covariance matrix\n self.cov_matrix = copy.copy(self.X_transp_X_inv)\n if self.estimator_variance != None:\n self.cov_matrix = float(self.estimator_variance) * self.cov_matrix\n self.cov_matrix = pd.DataFrame(self.cov_matrix,\n columns = self.X_names,\n index = self.X_names)\n \n def calculate_t_p_error_stats(self):\n est = [\"SE\", \"t-stats\", \"p-value\", \"p-rating\"]\n rating_dict = {.001: \"***\",\n .01: \"**\",\n .05: \"*\"}\n results = self.estimates\n for name in est:\n results[name] = np.nan\n for var in self.X_names:\n if name == \"SE\":\n # SE of coefficient is found in the diagonal of cov_matrix\n results.ix[var][name] = \\\n self.cov_matrix[var][var] ** (1/2)\n if name == \"t-stats\":\n # tstat = Coef / SE\n results.ix[var][name] = \\\n results[\"Coefficient\"][var] / results[\"SE\"][var]\n if name == \"p-value\":\n # p-values is estimatd from location within a \n # distribution implied by the t-stat\n results.ix[var][name] = round(t.sf(\\\n np.abs(results.ix[var][\"t-stats\"]),\n self.degrees_of_freedom + 1) * 2, 5)\n if name == \"p-rating\":\n for val in rating_dict:\n if results.ix[var][\"p-value\"] < val:\n results[name][var] = rating_dict[val]\n break \n results[name][var]= \"\"\n \n def calculate_MSE(self):\n self.mse = 
self.estimator_variance ** (1/2)\n \n def calculate_rsquared (self):\n self.r_sq = self.ssr / self.sst\n \n def caculate_adjusted_rsquared(self):\n n = len(self.y_hat)\n k = len(self.X_names) if \"Constant\" not in self.X_names \\\n else len(self.X_names) - 1\n self.adjusted_r_sq = (self.ssr/(n - k)) / (self.sst / (n - 1))\n \n def calculate_fstat(self):\n self.f_stat = ((self.sst - self.sse) / (self.lost_degrees_of_freedom \\\n -1)) / self.estimator_variance\n\n def calculate_generalized_fstat(self, restricted_reg_name, \n unrestricted_reg_name):\n r_reg = self.all_regressions[restricted_reg_name]\n r_sse = r_reg[\"SSE\"]\n r_k = len(r_reg[\"X_names\"]) \n u_reg = self.all_regressions[unrestricted_reg_name]\n u_sse = u_reg[\"SSE\"]\n u_k = len(u_reg[\"X_names\"]) \n if (len(r_reg[\"data\"]) == len(u_reg[\"data\"])): n = len(r_reg[\"data\"]) \n lost_dof = (u_k - r_k)\n fstat = ((r_sse - u_sse) / lost_dof) / (u_sse / n - u_k)\n return fstat\n \n def build_stats_DF(self):\n self.stats_dict = {\"r**2\":[self.r_sq],\n \"adjusted r**2\":[self.adjusted_r_sq],\n \"f-stat\":[self.f_stat],\n \"Est Var\":[self.estimator_variance],\n \"MSE\":[self.mse],\n \"SSE\":[self.sse],\n \"SSR\":[self.ssr],\n \"SST\":[self.sst]\n }\n self.stats_DF = pd.DataFrame(self.stats_dict)\n self.stats_DF = self.stats_DF.rename(index={0:\"Estimation Statistics\"})\n self.stats_DF = self.stats_DF.T \n \n def save_reg_stats(self):\n reg_name = copy.copy(self.reg_name)\n self.all_regressions[reg_name] = \\\n {key:self.stats_dict[key][0] for key in self.stats_dict}\n self.all_regressions[reg_name][\"estimates\"] = \\\n copy.copy(self.estimates.ix[self.X_names])\n self.all_regressions[reg_name][\"cov_matrix\"] = \\\n copy.copy(self.cov_matrix)\n self.all_regressions[reg_name][\"degrees_of_freedom\"] = \\\n copy.copy(self.degrees_of_freedom)\n self.all_regressions[reg_name][\"lost_degrees_of_freedom\"]=\\\n copy.copy(self.lost_degrees_of_freedom)\n self.all_regressions[reg_name][\"estimator_variance\"] =\\\n copy.copy(self.estimator_variance)\n self.all_regressions[reg_name][\"MSE\"] = copy.copy(self.mse)\n self.all_regressions[reg_name][\"data\"] = \\\n copy.copy(self.data[self.X_names])\n self.all_regressions[reg_name][\"y_name\"] = copy.copy(self.y_name)\n self.all_regressions[reg_name][\"X_names\"] = \\\n copy.copy(self.X_names)\n self.all_regressions[reg_name][\"data\"]\\\n [self.y_name[0] + \" estimator\"] = self.y_hat\n \n \n def plot_scatter_with_estimator(self, data, x_vars, y, figsize = (12,8), \n fontsize = 19, s = 10, y_label1 = \"Estimate\",\n y_label2 = \"Observation\", estimate_color = \"r\",\n legend_loc = \"upper left\", bbox = (0, 1.17)):\n # set default font size\n plt.rcParams.update({'font.size': fontsize})\n # use a for loop to call each exogenous variable\n for x in x_vars:\n # prepare a figure that will plot predictor. 
\n #We will use ax to specify that the plots are in the same figure\n fig, ax = plt.subplots(figsize = figsize)\n # labels will be in a legend\n # y_label1 = \"Estimate\"\n # y_label2 = \"Observation\"\n # plot the estimated value\n data.plot.scatter(x = x, y = y[0] + \" estimator\", ax = ax, \n c = estimate_color, s = s, label = y_label1, \n legend = False)\n # erase the y-axis label to sho that \"estimator is not present\n # the y-label will reappear when the observations are plotted\n plt.ylabel(\"\")\n data.plot.scatter(x = x, y = y, ax = ax, s = s, label = y_label2,\n legend = False)\n # call the legend, place atop the image on the left\n # bbox_to_anchor used to specify exact placement of label\n plt.legend(loc = legend_loc, labels = [y_label1, y_label2],\n bbox_to_anchor = bbox)\n # remove lines marking units on the axis\n ax.xaxis.set_ticks_position('none')\n ax.yaxis.set_ticks_position('none')\n plt.show()\n plt.close()\n",
"#twoDimensionalListAndNumpyArray.py\nimport numpy as np\ntwo_dim_list = [[1,2,3,4],[2,3,4,5]]\nprint(two_dim_list)\ntwo_dim_array = np.array(two_dim_list)\nprint(two_dim_array)\n\nfor i in range(len(two_dim_list)):\n print(two_dim_list[i])\n print(two_dim_array[i])\n for j in range(len(two_dim_list[i])):\n print(two_dim_list[i][j])\n print(two_dim_array[i][j]) "
] | [
[
"scipy.stats.mstats.gmean",
"pandas.read_csv",
"numpy.mean"
],
[
"numpy.matrix",
"matplotlib.pyplot.legend",
"numpy.abs",
"numpy.matmul",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
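
Regression.estimate_betas_and_yhat in regressionOld.py above implements the normal equations, betas = (X'X)^-1 X'y. The same estimator in a few numpy lines on synthetic data (all names and values here are illustrative):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=50)
X = np.column_stack([x, np.ones(50)])         # regressor plus constant column
y = 2.0 * x + 1.0 + rng.normal(scale=0.1, size=50)

betas = np.linalg.inv(X.T @ X) @ (X.T @ y)    # (X'X)^-1 X'y, as in the class
y_hat = X @ betas                             # fitted values, the "estimator" column
print(betas)                                  # approximately [2.0, 1.0]
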
eashdown/onnx-mlir | [
"2662d5530a01ddb11056ae7958118e82487a9eb8"
] | [
"utils/gen_onnx_mlir.py"
] | [
"#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict, OrderedDict\nfrom io import StringIO\nimport io\nimport os\nimport sys\nimport datetime\nimport argparse\n\nimport numpy as np # type: ignore\n\nfrom onnx import defs, FunctionProto, helper, OperatorStatus\nfrom onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN\nfrom onnx.backend.test.case import collect_snippets\nfrom onnx.backend.sample.ops import collect_sample_implementations\nfrom typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple\n\nimport pprint\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dry-run-onnx-ops\",\n help=\"Output ONNXOps.td.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nparser.add_argument(\"--dry-run-op-build-table\",\n help=\"Output OpBuildTable.inc content to stdout.\",\n action=\"store_true\",\n default=False)\nparser.add_argument(\"--check-operation-version\",\n help=\"check whether the imported onnx package has new operation or \"\n \" newer version of operation compared with version stored in version_dicts\",\n action=\"store_true\",\n default=False)\n\nargs = parser.parse_args()\n\ncheck_operation_version = args.check_operation_version\n\n\n# Record the version of each operation that is treated as the current version.\n# To check whether the onnx package being used has newer version operation,\n# run this script with --check-operation-version flag.\n# Update this dictionary when a newer version is implemented\n# TODO: how to keep the old version\nversion_dict = {'Abs': 13,\n 'Acos': 7,\n 'Acosh': 9,\n 'Adagrad': 1,\n 'Adam': 1,\n 'Add': 13,\n 'And': 7,\n 'ArgMax': 13,\n 'ArgMin': 13,\n 'ArrayFeatureExtractor': 1,\n 'Asin': 7,\n 'Asinh': 9,\n 'Atan': 7,\n 'Atanh': 9,\n 'AveragePool': 11,\n 'BatchNormalization': 9,\n 'Binarizer': 1,\n 'BitShift': 11,\n 'Cast': 13,\n 'CastMap': 1,\n 'CategoryMapper': 1,\n 'Ceil': 13,\n 'Celu': 12,\n 'Clip': 13,\n 'Compress': 11,\n 'Concat': 13,\n 'ConcatFromSequence': 11,\n 'Constant': 13,\n 'ConstantOfShape': 9,\n 'Conv': 11,\n 'ConvInteger': 10,\n 'ConvTranspose': 11,\n 'Cos': 7,\n 'Cosh': 9,\n 'CumSum': 11,\n 'DepthToSpace': 13,\n 'DequantizeLinear': 13,\n 'Det': 11,\n 'DictVectorizer': 1,\n 'Div': 13,\n 'Dropout': 13,\n 'DynamicQuantizeLinear': 11,\n 'Einsum': 12,\n 'Elu': 6,\n 'Equal': 13,\n 'Erf': 13,\n 'Exp': 13,\n 'Expand': 13,\n 'EyeLike': 9,\n 'FeatureVectorizer': 1,\n 'Flatten': 13,\n 'Floor': 13,\n 'GRU': 7,\n 'Gather': 13,\n 'GatherElements': 13,\n 'GatherND': 13,\n 'Gemm': 13,\n 'GlobalAveragePool': 1,\n 'GlobalLpPool': 2,\n 'GlobalMaxPool': 1,\n 'Gradient': 1,\n 'Greater': 13,\n 'GreaterOrEqual': 12,\n 'HardSigmoid': 6,\n 'Hardmax': 13,\n 'Identity': 13,\n 'If': 13,\n 'Imputer': 1,\n 'InstanceNormalization': 6,\n 'IsInf': 10,\n 'IsNaN': 13,\n 'LRN': 13,\n 'LSTM': 7,\n 'LabelEncoder': 2,\n 'LeakyRelu': 6,\n 'Less': 13,\n 'LessOrEqual': 12,\n 'LinearClassifier': 1,\n 'LinearRegressor': 1,\n 'Log': 13,\n 'LogSoftmax': 13,\n 'Loop': 13,\n 'LpNormalization': 1,\n 'LpPool': 11,\n 'MatMul': 13,\n 'MatMulInteger': 10,\n 'Max': 13,\n 'MaxPool': 12,\n 'MaxRoiPool': 1,\n 'MaxUnpool': 11,\n 'Mean': 13,\n 'MeanVarianceNormalization': 13,\n 'Min': 13,\n 'Mod': 13,\n 'Momentum': 1,\n 'Mul': 13,\n 'Multinomial': 7,\n 'Neg': 13,\n 'NegativeLogLikelihoodLoss': 13,\n 'NonMaxSuppression': 11,\n 'NonZero': 13,\n 'Normalizer': 1,\n 'Not': 1,\n 'OneHot': 11,\n 
'OneHotEncoder': 1,\n 'Or': 7,\n 'PRelu': 9,\n 'Pad': 13,\n 'Pow': 13,\n 'QLinearConv': 10,\n 'QLinearMatMul': 10,\n 'QuantizeLinear': 13,\n 'RNN': 7,\n 'RandomNormal': 1,\n 'RandomNormalLike': 1,\n 'RandomUniform': 1,\n 'RandomUniformLike': 1,\n 'Range': 11,\n 'Reciprocal': 13,\n 'ReduceL1': 13,\n 'ReduceL2': 13,\n 'ReduceLogSum': 13,\n 'ReduceLogSumExp': 13,\n 'ReduceMax': 13,\n 'ReduceMean': 13,\n 'ReduceMin': 13,\n 'ReduceProd': 13,\n 'ReduceSum': 13,\n 'ReduceSumSquare': 13,\n 'Relu': 13,\n 'Reshape': 13,\n 'Resize': 13,\n 'ReverseSequence': 10,\n 'RoiAlign': 10,\n 'Round': 11,\n 'SVMClassifier': 1,\n 'SVMRegressor': 1,\n 'Scaler': 1,\n 'Scan': 11,\n 'Scatter': 11,\n 'ScatterElements': 13,\n 'ScatterND': 13,\n 'Selu': 6,\n 'SequenceAt': 11,\n 'SequenceConstruct': 11,\n 'SequenceEmpty': 11,\n 'SequenceErase': 11,\n 'SequenceInsert': 11,\n 'SequenceLength': 11,\n 'Shape': 13,\n 'Shrink': 9,\n 'Sigmoid': 13,\n 'Sign': 13,\n 'Sin': 7,\n 'Sinh': 9,\n 'Size': 13,\n 'Slice': 13,\n 'Softmax': 13,\n 'SoftmaxCrossEntropyLoss': 13,\n 'Softplus': 1,\n 'Softsign': 1,\n 'SpaceToDepth': 13,\n #'Split': 13,\n 'Split': 11,\n 'SplitToSequence': 11,\n 'Sqrt': 13,\n #'Squeeze': 13,\n 'Squeeze': 11,\n 'StringNormalizer': 10,\n 'Sub': 13,\n 'Sum': 13,\n 'Tan': 7,\n 'Tanh': 13,\n 'TfIdfVectorizer': 9,\n 'ThresholdedRelu': 10,\n 'Tile': 13,\n 'TopK': 11,\n 'Transpose': 13,\n 'TreeEnsembleClassifier': 1,\n 'TreeEnsembleRegressor': 1,\n 'Unique': 11,\n #'Unsqueeze': 13,\n 'Unsqueeze': 11,\n 'Upsample': 10,\n 'Where': 9,\n 'Xor': 7,\n 'ZipMap': 1}\n# Manual specification of attribute defaults.\nspecial_attr_defaults = dict([\n # (\"AveragePool.kernel_shape\", ('ints', '{}')),\n # (\"MaxPool.kernel_shape\", ('ints', '{}')),\n # (\"Cast.to\", ('int', '0')),\n # (\"Concat.axis\", ('int', '0')),\n # (\"Conv.group\", ('int', '1')),\n # (\"Unsqueeze.axes\", ('ints', '{}')),\n # (\"RNN.activation_alpha\", ('floats', '{}')),\n # (\"RNN.activation_beta\", ('floats', '{}')),\n])\n# Manual specification of attribute type.\nspecial_attr_types = dict([(\"Cast.to\", 'type')])\n\n# Special operation importing handlers.\nspecial_op_handler = dict([\n (\"BatchNormalization\", \"ImportNodeBatchNormalization\"),\n (\"Dropout\", \"ImportNodeDropout\"),\n (\"Cast\", \"ImportNodeCast\"),\n (\"MaxPool\", \"ImportNodeMaxPool\"),\n (\"Pad\", \"ImportNodePad\"),\n (\"Slice\", \"ImportNodeSlice\"),\n #(\"Transpose\", \"ImportNodeTranspose\")\n])\n\n# Operations supporting shape inference.\nOpsWithShapeInference=[\n 'Abs',\n 'Add',\n 'And',\n 'Atan',\n 'AveragePool',\n 'Cast',\n 'Concat',\n 'Constant',\n 'ConstantOfShape',\n 'Conv',\n 'ConvInteger',\n 'ConvTranspose',\n 'Cos',\n 'Cosh',\n 'DequantizeLinear',\n 'Div',\n 'Dropout',\n 'DynamicQuantizeLinear',\n 'Elu',\n 'Erf',\n 'Exp',\n 'Expand',\n 'Flatten',\n 'GRU',\n 'Gather',\n 'Gemm',\n 'GlobalAveragePool',\n 'GlobalLpPool',\n 'GlobalMaxPool',\n 'HardSigmoid',\n 'Identity',\n 'LSTM',\n 'LeakyRelu',\n 'Less',\n 'Log',\n 'MatMul',\n 'Max',\n 'Min',\n 'Mul',\n 'Neg',\n 'OneHotEncoder',\n 'Or',\n 'Pad',\n 'Pow',\n 'PRelu',\n 'QLinearConv',\n 'QuantizeLinear',\n 'QLinearMatMul',\n 'RNN',\n 'Reciprocal',\n 'ReduceMax',\n 'ReduceMean',\n 'ReduceMin',\n 'ReduceProd',\n 'ReduceSum',\n 'Relu',\n 'Reshape',\n 'Scaler',\n 'Selu',\n 'Shape',\n 'Sigmoid',\n 'Sign',\n 'Sin',\n 'Sinh',\n 'Size',\n 'Slice',\n 'Softmax',\n 'Softplus',\n 'Softsign',\n 'Split',\n 'Sqrt',\n 'Squeeze',\n 'Sub',\n 'Sum',\n 'Tan',\n 'Tanh',\n 'Tile',\n 'Transpose',\n 'Unsqueeze',\n 'Xor',\n 'Loop',\n]\n\n# Operations 
supporting canonicalization.\nOpsWithCanonicalizer = ['Add', 'Constant', 'Identity', 'Gemm', 'Cast', 'Transpose',\n 'Dropout', 'Shape', 'Size', 'GlobalAveragePool',\n 'GlobalMaxPool', 'Squeeze', 'Unsqueeze']\n\nOpsWithHelpers = {\n \"Loop\": \"\"\"\n mlir::Operation::result_range v_final();\n mlir::Operation::result_range scan_outputs();\n \"\"\",\n \"Scan\": \"\"\"\n mlir::Operation::operand_range v_initial();\n mlir::Operation::result_range v_final();\n mlir::Operation::operand_range scan_inputs();\n mlir::Operation::result_range scan_outputs();\n \"\"\"\n}\n# Interface for special handling of type inference\n# The common code are put into get_type_inference_func\nOpsWithResultTypeInference = {\n \"Constant\":\n '''if (auto attr = valueAttr()) {\n resultTypes.push_back(attr.getType());\n } else if (auto attr = sparse_valueAttr()) {\n resultTypes.push_back(attr.getType());\n }''',\n \"Cast\":\n '''auto builder = mlir::OpBuilder(getContext());\n resultTypes.push_back(mlir::UnrankedTensorType::get(to()));''',\n \"ConstantOfShape\":\n '''if (auto attr = valueAttr()) {\n resultTypes.push_back(mlir::UnrankedTensorType::get(\n attr.getType().cast<ShapedType>().getElementType()));\n } else {\n resultTypes.push_back(mlir::UnrankedTensorType::get(\n FloatType::getF32(getContext())));\n }'''\n}\n\n# Add an Op in this list if the Op needs result type deduction which is required\n# when writing declarative rewriting rules. Deduced type is always\n# an UnrankedTensorType whose element type is the same as the first operand's\n# element type.\n#\n# Currenlty, there are only two build methods generated:\n# - one with operands and attributes having a separate parameter, and\n# - one with operands and attributes having aggregated parameters.\ncustom_builder_unranked_ops_list = ['Abs', 'Exp', 'ReduceSum', 'ReduceSumSquare',\n 'Pad', 'Sqrt', 'Neg', 'Unsqueeze', 'Softmax',\n 'ReduceMax', 'ReduceLogSum', 'Squeeze',\n 'Identity', 'Split']\n# Custom builder op list for operations with broadcast; we can deduce the right\n# output type, no need to leave it undef as in the above list.\n# Ops must have two operands, not one, not three... 
And there shall be two.\n# TODO: handle variadic ops omitted here: Max, Min, Min, Sum.\ncustom_builder_broadcast_ops_list = ['Add', 'And', 'Div', 'Equal', 'Greater',\n 'Less', 'Mul', 'Or', 'Pow', 'Sub', 'Xor']\n# union of both\ncustom_builder_ops_list = custom_builder_unranked_ops_list + custom_builder_broadcast_ops_list\n\n#a dictionary to add any special definition for an operation\ncustom_definition_misc = dict([ ('Constant',\n ''' let builders = [\n OpBuilder<(ins \"Attribute\":$sparse_value, \"Attribute\":$value), [{\n if (value) {\n auto tensorType = value.getType();\n build($_builder, $_state, tensorType, sparse_value, value,\n FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());\n } else {\n auto tensorType = sparse_value.getType();\n build($_builder, $_state, tensorType, sparse_value, value,\n FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());\n }\n }]>\n ];'''),\n ('Cast',\n ''' let builders = [\n OpBuilder<(ins \"Value\":$input, \"TypeAttr\":$to), [{\n auto resultType = mlir::UnrankedTensorType::get(to.getValue());\n build($_builder, $_state, resultType, input, to);\n }] >\n ];'''\n )])\n\nonnx_types = (\n 'bool', 'int8', 'int16', 'int32', 'int64', 'unkown', 'float16',\n 'float', 'double', 'complex64', 'complex128', 'string'\n)\ntblgen_types = ('AnyI1', 'AnyI8', 'AnyI16', 'AnyI32', 'AnyI64', 'BF16', 'F16', 'F32', 'F64',\n 'Complex<F32>', 'Complex<F64>', 'StringType'\n)\n\nMAX_NUM_TYPES=20\n\ndef should_render_domain(domain): # type: (Text) -> bool\n return True\n\n\ndef display_attr_type(v): # type: (OpSchema.AttrType) -> Text\n assert isinstance(v, OpSchema.AttrType)\n s = Text(v)\n s = s[s.rfind('.') + 1:].lower()\n if s[-1] == 's':\n s = 'list of ' + s\n return s\n\n\ndef get_unique_output_name(schema, name):\n for input in schema.inputs:\n if input.name == name:\n return 'out_' + name\n return name\n\n\ndef onnx_attr_type_to_mlir_attr_type(t):\n onnx_attr_type = Text(t)\n onnx_attr_type = onnx_attr_type[onnx_attr_type.rfind('.') + 1:].lower()\n\n if onnx_attr_type == 'int':\n mlir_attr_type = 'SI64Attr'\n elif onnx_attr_type == 'float':\n mlir_attr_type = 'F32Attr'\n elif onnx_attr_type == 'ints':\n mlir_attr_type = 'I64ArrayAttr'\n elif onnx_attr_type == 'floats':\n mlir_attr_type = 'F32ArrayAttr'\n elif onnx_attr_type == \"string\":\n mlir_attr_type = 'StrAttr'\n elif onnx_attr_type == \"strings\":\n mlir_attr_type = 'StrArrayAttr'\n elif onnx_attr_type == 'type':\n mlir_attr_type = 'TypeAttr'\n else:\n mlir_attr_type = 'AnyAttr'\n #TODO: tensor and sparse tensor\n return mlir_attr_type\n\n\n#TODO: any better way to do this.\ndef tblgen_attr_type_to_cpp_type(t):\n if 'I64Attr' in t:\n cpp_type = 'IntegerAttr'\n elif 'F32Attr' in t:\n cpp_type = 'FloatAttr'\n elif 'I64ArrayAttr' in t or 'F32ArrayAttr' in t:\n cpp_type = 'ArrayAttr'\n elif 'StrAttr' in t:\n cpp_type = 'StringAttr'\n elif 'strings' in t:\n cpp_type = 'ArrayAttr'\n else:\n cpp_type = 'Attribute'\n return cpp_type\n\n\ndef tblgen_operand_type_to_cpp_type(op_type):\n if op_type.startswith('Variadic'):\n mytype = 'ValueRange'\n else:\n mytype = 'Value'\n return mytype\n\n\ndef np_type_to_tblgen_attr_type(tstr):\n index = -1\n for i in range(len(onnx_types)):\n if onnx_types[i] in tstr:\n index = i\n break\n if index == -1:\n return None\n else:\n return tblgen_types[i]\n\ndef get_tblgen_type_index(type_str):\n return tblgen_types.index(type_str)\n\n#the possible data structures are tensor, map and seq(tensor())\ndef 
get_data_structure_element(allowed_type_str):\n structure_list = ['tensor', 'seq', 'map']\n for structure in structure_list:\n if allowed_type_str.startswith(structure) :\n element = allowed_type_str.replace(\n structure+'(', '', 1).replace(')', '', 1)\n return (structure, element)\n return (None, None)\n\ndef get_allowed_elem_types(schema, input):\n #allowed_types_str = None\n # return allowed_types_str\n # TODO: enable type constraints.\n if input.typeStr :\n tstr = input.typeStr\n structure, element = get_data_structure_element(tstr);\n # In case the type is directly specified\n if structure and element :\n t = np_type_to_tblgen_attr_type(element)\n if t == None :\n return allowed_structure, None\n else :\n return structure, [t]\n else :\n return None\n if schema.type_constraints:\n for type_constraint in schema.type_constraints:\n if type_constraint.type_param_str != tstr :\n continue\n allowed_type_list=[]\n allowedTypes = type_constraint.allowed_type_strs\n allowed_structure = None\n for allowedType in allowedTypes:\n structure, element = get_data_structure_element(allowedType);\n if structure == None or element == None:\n return None, None\n\n if allowed_structure != None and allowed_structure != structure :\n return None, None\n allowed_structure = structure\n t = np_type_to_tblgen_attr_type(element)\n if t == None :\n return allowed_structure, None\n if not t in allowed_type_list :\n allowed_tyoe_list = allowed_type_list.append(t)\n\n return allowed_structure,allowed_type_list\n\n return None, None\n\n\ndef inc_indent(indent=None):\n return \"\" if indent is None else indent + ' ' * 2\n\n\ndef dec_indent(indent):\n return indent[:-2]\n\n\ndef join_args(args):\n return \", \".join(args)\n\ndef get_operands_or_results(schema, type_str_dict, is_input):\n value_list = schema.inputs if is_input else schema.outputs\n if not value_list:\n return OrderedDict()\n\n def any_type_of(types):\n assert isinstance(types, list)\n if len(types) == 1:\n return types[0]\n else:\n return \"AnyTypeOf<[{}]>\".format(\", \".join(types))\n\n name_to_types = OrderedDict()\n for i, value in enumerate(value_list):\n types = get_onnx_mlir_types(schema, type_str_dict, value)\n\n '''\n structure, elem_types = get_allowed_elem_types(schema, type_str_dict, value)\n\n if structure == 'tensor' :\n if elem_types is None:\n types = [\"AnyMemRef\", \"AnyTensor\"]\n else:\n elem_types_str = ','.join(elem_types)\n types = [\"TensorOf<[{}]>\", \"MemRefOf<[{}]>\"]\n types = list(map(lambda x: x.format(elem_types_str), types))\n elif structure == 'seq' :\n # Seq is not supported yet.\n # Use of TensorOf<[AnyTensor]> as a placeholder for tablegen.\n # When the Operation is used, warning/error will be generated at runtime.\n if elem_types is None:\n types = [\"AnyMemRef\", \"TensorOf<[AnyTensor]>\"]\n else:\n elem_types_str = ','.join(elem_types)\n types = [\"TensorOf<[TensorOf<[{}]>]>\", \"MemRefOf<[{}]>\"]\n types = list(map(lambda x: x.format(elem_types_str), types))\n elif structure == 'map' :\n # Map is not supported yet.\n # Use of TupleOf as a placeholder for tablegen.\n # When the Operation is used, warning/error will be generated at runtime.\n if elem_types is None:\n types = [\"AnyMemRef\", \"TupleOf<[AnyTensor]>\"]\n else:\n elem_types_str = ','.join(elem_types)\n types = [\"TupleOf<[TensorOf<[{}]>]>\", \"MemRefOf<[{}]>\"]\n types = list(map(lambda x: x.format(elem_types_str), types))\n else:\n types = [\"AnyMemRef\", \"AnyTensor\"]\n '''\n\n if OpSchema.FormalParameterOption.Optional == value.option:\n 
types.append(\"NoneType\")\n elif OpSchema.FormalParameterOption.Variadic == value.option:\n if value.isHomogeneous:\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n else:\n #TODO handle(variadic, heterogeneous) \"\n types = [\"Variadic<{}>\".format(any_type_of(types))]\n sys.stderr.write(\"warning: (variadic, heterogeneous) for \" + schema.name +\n ' ' + value.name + \"\\n\")\n\n # Since output name can coincide with that of an input, we explicitly\n # append a suffix \"_out\" to such names for disambiguation.\n if is_input:\n value_name = value.name\n else:\n value_name = get_unique_output_name(schema, value.name)\n\n name_to_types[value_name] = any_type_of(types)\n return name_to_types\n\n\ndef get_attrs(schema):\n def get_attr_type_optional(attr_type):\n return 'OptionalAttr<{}>'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type))\n\n def get_attr_type_with_default(attr_type, attr_default):\n return 'DefaultValuedAttr<{}, \"{}\">'.format(\n onnx_attr_type_to_mlir_attr_type(attr_type), attr_default)\n\n if not schema.attributes:\n return OrderedDict()\n\n name_to_type = OrderedDict()\n for _, attr in sorted(schema.attributes.items()):\n if attr.type == OpSchema.AttrType.GRAPH:\n continue\n\n qualified_attr_name = \"{}.{}\".format(schema.name, attr.name)\n if qualified_attr_name in special_attr_defaults:\n name_to_type[attr.name] = get_attr_type_with_default(\n *special_attr_defaults[qualified_attr_name])\n if qualified_attr_name in special_attr_types:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n special_attr_types[qualified_attr_name])\n # option holds either required or default value\n elif attr.required:\n name_to_type[attr.name] = onnx_attr_type_to_mlir_attr_type(\n attr.type)\n elif attr.default_value.name:\n\n def format_value(value): # type: (Any) -> Text\n if isinstance(value, float):\n formatted = str(np.round(value, 5))\n # use default formatting, unless too long.\n if (len(formatted) > 10):\n formatted = str(\"({:e})\".format(value))\n return formatted\n elif isinstance(\n value,\n (bytes, bytearray)) and sys.version_info[0] == 3:\n return str(value.decode('utf-8'))\n return str(value)\n\n default_value = helper.get_attribute_value(attr.default_value)\n if isinstance(default_value, list):\n default_value = [format_value(val) for val in default_value]\n default_value_str = '{}'.format(default_value)\n default_value_str = default_value_str.replace('[', '{', 1)\n default_value_str = default_value_str.replace(']', '}', 1)\n if Text(attr.type) == \"AttrType.STRINGS\":\n default_value_str = default_value_str.replace(\"'\", '\\\\\"')\n else:\n default_value_str = default_value_str.replace(\"'\", '')\n else:\n default_value = format_value(default_value)\n default_value_str = default_value\n\n name_to_type[attr.name] = get_attr_type_with_default(\n attr.type, default_value_str)\n else:\n name_to_type[attr.name] = get_attr_type_optional(attr.type)\n return name_to_type\n\ndef get_numberof_list(mylist):\n expected_num = len(mylist)\n for element in mylist :\n if OpSchema.FormalParameterOption.Variadic == element.option:\n expected_num = -1\n return expected_num\n\ndef get_output_type_mapping(schema):\n mapping=[]\n for output in schema.outputs :\n #if only one type is allowed, just set that\n structure, allowed_elem_types = get_allowed_elem_types(schema, output)\n if allowed_elem_types != None and len(allowed_elem_types) == 1 :\n mapping.append(str(get_tblgen_type_index(allowed_elem_types[0])))\n continue\n\n #map the type string\n if output.typeStr :\n 
tstr = output.typeStr\n found = False\n for i, input in enumerate(schema.inputs):\n if input.typeStr and input.typeStr == tstr:\n mapping.append(str(i+MAX_NUM_TYPES))\n found = True\n break\n if found:\n continue\n\n #unknown output type\n mapping.append(str(-1))\n\n return mapping\n\ndef get_numberof_inout(s, indent, schema):\n expected_num_operands = get_numberof_list(schema.inputs)\n indent = inc_indent(indent)\n s += indent + \"static int getNumberOfOperands() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(expected_num_operands)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n expected_num_results = get_numberof_list(schema.outputs)\n s += indent + \"static int getNumberOfResults() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(expected_num_results)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n s += indent + \"static std::vector<int> getTypeMap() {\\n\"\n mapping = get_output_type_mapping(schema)\n indent = inc_indent(indent)\n s += indent + \"return {\" + \",\".join(mapping) + \"};\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n return s\n\n\ndef get_promotable_const_operands_func(s, indent, const_operands_name_to_idx):\n cpp_name_to_idx_literal = \"{\" + \", \".join([\n \"{{\\\"{}\\\", {}}}\".format(*name_to_idx)\n for name_to_idx in const_operands_name_to_idx\n ]) + \"}\"\n\n #s += indent + \"let extraClassDeclaration = [{\\n\"\n indent = inc_indent(indent)\n s += indent + \"std::map<std::string, size_t> promotableConstOperands() {\\n\"\n indent = inc_indent(indent)\n s += indent + \"return {};\\n\".format(cpp_name_to_idx_literal)\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n #indent = dec_indent(indent)\n #s += indent + \"}];\\n\"\n\n return s\n\ndef get_type_inference_func(s, indent, type_inference_code):\n indent = inc_indent(indent)\n\n s += indent + \"std::vector<mlir::Type> resultTypeInference() {\" + \"\\n\"\n indent = inc_indent(indent)\n s += indent + \"std::vector<mlir::Type> resultTypes;\" + \"\\n\"\n\n s += indent + type_inference_code + '\\n'\n\n s += indent + \"return resultTypes;\" + \"\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\" + \"\\n\"\n\n indent = dec_indent(indent)\n return s\n\ndef parse_type_str(allowedType):\n # AnyI may be used for uint because the onnx_mlir is not generating uint output\n # This will be fixed later and UI will be replace AnyI\n onnx_to_mlir_type_dict = { '(': '<[',\n ')': ']>',\n 'tensor' : 'TensorOf',\n 'seq' : 'SeqOf',\n 'map' : 'TupleOf',\n 'bool': 'I1',\n #'uint8' : 'AnyI8',\n #uint16' : 'AnyI16',\n #uint32' : 'AnyI32',\n #uint64' : 'AnyI64',\n 'uint8' : 'UI8',\n 'uint16' : 'UI16',\n 'uint32' : 'UI32',\n 'uint64' : 'UI64',\n 'int8' : 'I8',\n 'int16' : 'I16',\n 'int32' : 'I32',\n 'int64' : 'I64',\n 'float16' : 'F16',\n 'bfloat16' : 'BF16',\n 'float' : 'F32',\n 'double' : 'F64',\n 'unkown' : 'BF16',\n 'complex64' : 'Complex<F32>',\n 'complex128' : 'Complex<F64>',\n 'string' : 'StringType'}\n\n # Apply substitutions in decreasing order of key-length, so that float16 is replaced\n # before float, and uint16 is replaced before int16, etc.\n mapping = list(onnx_to_mlir_type_dict.items())\n mapping.sort(key=lambda pair:len(pair[0]), reverse=True)\n for key, item in mapping:\n allowedType = allowedType.replace(key, item)\n return allowedType\n\ndef parse_a_type_constraint(constraint):\n allowedTypes = constraint.allowed_type_strs\n mlirTypes = []\n for allowedType in allowedTypes:\n mlirType = 
parse_type_str(allowedType)\n mlirTypes.append(mlirType)\n # Remove redundant and sort.\n # However onnx keeps a consitently meaningful order\n # There is no redundancy as long as each onnx type is mapped uniquely\n # mlirTypes = sorted(list(set(mlirTypes)))\n\n # MemRef is always needed\n mlirTypes.append(\"AnyMemRef\")\n return mlirTypes\n\ndef parse_type_constraints(schema):\n type_str_dict = dict()\n for type_constraint in schema.type_constraints:\n type_str_dict[type_constraint.type_param_str] = parse_a_type_constraint(type_constraint)\n return type_str_dict\n\ndef get_onnx_mlir_types(schema, type_str_dict, input):\n if input.typeStr :\n if not input.typeStr in type_str_dict :\n # some arguments use type description directly\n # instead of constraint\n return [parse_type_str(input.typeStr), \"AnyMemRef\"]\n else :\n return type_str_dict[input.typeStr]\n else :\n print('No typeStr ', schema.name)\n return []\n\ndef gen_op_def(schema):\n indent = inc_indent()\n s = 'def ONNX{0}Op:ONNX_Op<\"{0}\",\\n'.format(schema.name)\n\n regions = OrderedDict()\n for _, attr in sorted(schema.attributes.items()):\n if attr.type == OpSchema.AttrType.GRAPH:\n if attr.required:\n regions[attr.name] = \"SizedRegion<1>\"\n else:\n regions[attr.name] = \"AnyRegion\"\n\n # Generate decl for op traits.\n traits = [\"NoSideEffect\"]\n # OpsWithShapeInference:\n # Now the ShapeInference traits are added to all operation\n # Dummy implementations are added to ONNXOps.cpp\n # Error will be report if these operations are encountered at runtime\n traits.append(\"DeclareOpInterfaceMethods<ShapeInferenceOpInterface>\")\n if schema.name in OpsWithResultTypeInference.keys():\n traits.append(\"OpInterface<\\\"ResultTypeInferenceOpInterface\\\">\")\n if len(regions):\n traits.append(\"OpInterface<\\\"HasOnnxSubgraphOpInterface\\\">\")\n s += inc_indent(indent) + '[{}]> {{\\n'.format(join_args(traits))\n\n # Generate decl for canonicalizer.\n indent = inc_indent(indent)\n if schema.name in OpsWithCanonicalizer:\n s += indent + 'let hasCanonicalizer = 1;\\n'\n\n # Generate decl for summary.\n s += indent + 'let summary = \"ONNX {} operation\";\\n'.format(schema.name)\n\n # Generate description.\n s += indent + 'let description = [{\\n'\n if schema.doc:\n lines = schema.doc.lstrip().splitlines()\n for line in lines:\n escaped_line = line.replace('\"', '\\\\\"')\\\n .replace('}]', '\\\\}\\\\]')\n s += indent + '\"{}\"\\n'.format(escaped_line)\n s += indent + '}];\\n'\n\n # handle the type constraint for input and output\n # parse type constraint into onnx-mlir type string list\n type_str_dict = parse_type_constraints(schema)\n\n # Generate ins (consisting of operands and attributes).\n ins = get_operands_or_results(schema, type_str_dict, is_input=True)\n ins.update(get_attrs(schema))\n\n ins_strs = [\"{1}:${0}\".format(*i) for i in ins.items()]\n s += indent + 'let arguments = (ins {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(ins_strs))\n\n # Generate outs (operation results).\n outs = get_operands_or_results(schema, type_str_dict, is_input=False)\n outs_strs = [\"{1}:${0}\".format(*i) for i in outs.items()]\n s += indent + 'let results = (outs {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(outs_strs))\n\n regions_strs = [\"{1}:${0}\".format(*i) for i in regions.items()]\n\n if len(regions):\n s += indent + 'let regions = (region {});\\n'.format(\n (',\\n' + inc_indent(indent)).join(regions_strs))\n\n # custom_builder_broadcast_ops_list\n\n # add custom builders\n # use element type of the first operand to 
construct an UnrankedTensorType for the output.\n if schema.name in custom_builder_ops_list:\n if len(ins) == 0:\n raise RuntimeWarning(\n \"warning: not generate custom build methods for \" +\n schema.name + \" since it does not have operands.\")\n else:\n s += indent + 'let builders = [\\n'\n # Custom builders with operands and attributes having a separate parameter.\n # E.g. OpBuilder<(ins \"Value\":$X, \"Value\":$Y, \"Attribute\":$A), [{}]>\n indent = inc_indent(indent)\n s += indent + 'OpBuilder<(ins '\n operands_dict = get_operands_or_results(schema, type_str_dict, is_input=True)\n attrs_dict = get_attrs(schema)\n s += ', '.join('\"{}\":${}'.format(tblgen_operand_type_to_cpp_type(ty),\n name) for name, ty in operands_dict.items())\n if operands_dict and attrs_dict:\n s += ', '\n s += ', '.join('\"{}\":${}'.format(tblgen_attr_type_to_cpp_type(ty),\n name) for name, ty in attrs_dict.items())\n s += '), [{\\n'\n indent = inc_indent(indent)\n\n # Get output type from first operand's type.\n first_operand_name = list(ins.items())[0][0]\n build_type_name = ''\n if schema.name in custom_builder_broadcast_ops_list:\n second_operand_name = list(ins.items())[1][0]\n s += indent + 'auto lhsTy = {}.getType();\\n'. \\\n format(first_operand_name)\n s += indent + 'auto rhsTy = {}.getType();\\n'. \\\n format(second_operand_name)\n s += indent + 'auto elementType = getBroadcastedRankedType(lhsTy, rhsTy);\\n'\n s += indent + 'auto shapedType = elementType.dyn_cast_or_null<ShapedType>();\\n';\n s += indent + 'if (!shapedType || !shapedType.hasStaticShape()) {\\n';\n s += indent + indent + 'elementType = {}'.format(first_operand_name) + \\\n '.getType().cast<ShapedType>().getElementType();\\n';\n s += indent + indent + 'elementType = UnrankedTensorType::get(elementType);\\n'\n s += indent + '}\\n';\n build_type_name = 'elementType'\n else:\n s += indent + 'auto elementType = {}'.format(first_operand_name) + \\\n '.getType().cast<ShapedType>().getElementType();\\n'\n build_type_name = 'UnrankedTensorType::get(elementType)'\n s += indent + 'build($_builder, $_state, {}'.format(build_type_name)\n for name, _ in ins.items():\n s += ', ' + name\n s += ');\\n'\n indent = dec_indent(indent)\n s += indent + '}]>,\\n'\n\n # Custom builders with all operands and attributes having aggregate parameters.\n # E.g. OpBuilder<(ins \"ValueRange operands,\n # ArrayRef<NamedAttribute> attributes\", [{}]>'\n s += indent + 'OpBuilder<(ins ' + \\\n '\"ValueRange\":$operands, \"ArrayRef<NamedAttribute>\":$attributes), [{\\n'\n indent = inc_indent(indent)\n if schema.name in custom_builder_broadcast_ops_list:\n s += indent + 'auto lhsTy = operands[0].getType();\\n'\n s += indent + 'auto rhsTy = operands[1].getType();\\n'\n s += indent + 'auto elementType = getBroadcastedRankedType(lhsTy, rhsTy);\\n'\n s += indent + 'auto shapedType = elementType.dyn_cast_or_null<ShapedType>();\\n';\n s += indent + 'if (!shapedType || !shapedType.hasStaticShape()) {\\n';\n s += indent + indent + 'elementType = operands[0]' + \\\n '.getType().cast<ShapedType>().getElementType();\\n';\n s += indent + indent + 'elementType = UnrankedTensorType::get(elementType);\\n'\n s += indent + '}\\n';\n else:\n s += indent + 'auto elementType = operands[0].getType().' 
+ \\\n 'cast<ShapedType>().getElementType();\\n'\n s += indent + 'std::vector<mlir::Type> outputTypes;\\n'\n s += indent + 'outputTypes.emplace_back({});\\n'.format(build_type_name)\n s += indent + 'build($_builder, $_state, outputTypes, operands, attributes);\\n'\n indent = dec_indent(indent)\n s += indent + '}]>'\n\n s += '\\n' + indent + '];\\n'\n\n # Generate extracClassDeclaration.\n s += indent + \"let extraClassDeclaration = [{\\n\"\n #indent = inc_indent(indent)\n\n # Generate input/output number.\n s = get_numberof_inout(s, indent, schema)\n\n if schema.name in OpsWithResultTypeInference:\n s = get_type_inference_func(\n s, indent, OpsWithResultTypeInference[schema.name])\n\n if schema.name in OpsWithHelpers:\n s += OpsWithHelpers[schema.name]\n\n if len(regions):\n s += indent + \"int64_t getSubgraphRegionIdx(const std::string& name) {\\n\"\n indent = inc_indent(indent)\n for idx, region_name in enumerate(regions.keys()):\n s += indent + \"if (name == \\\"{}\\\") return {};\\n\".format(region_name, idx)\n s += indent + \"llvm_unreachable(\\\"region with the specified name does not exist\\\");\\n\"\n indent = dec_indent(indent)\n s += indent + \"}\\n\"\n\n s += indent + '}];\\n'\n\n if ( schema.name in custom_definition_misc) :\n s += custom_definition_misc[schema.name] + '\\n'\n\n s += '}\\n\\n'\n return s\n\n\n\"\"\"\nspecial cases:\n* Split: attr split default value: sizeof(output1) namely 1\n* Conv: attr dilations default value is {num_dim of first input - 2, 1}\n* Conv: attr kernel_shape type is ints\n* Transpose: attr perm default value is {} empty int list\n\"\"\"\n\n\ndef gen_op_importer(schema, file):\n indent = inc_indent()\n s = indent + 'import_handler_map_[\"' + schema.name +'\"] = \\n '\n\n expected_num_operands = len(schema.inputs)\n expected_num_results = len(schema.outputs)\n for input in schema.inputs:\n if OpSchema.FormalParameterOption.Variadic == input.option:\n expected_num_operands = -1\n for output in schema.outputs:\n if OpSchema.FormalParameterOption.Variadic == output.option:\n expected_num_results = -1\n\n handler_func = special_op_handler.get(\n schema.name, \"buildOperation<mlir::ONNX{}Op>\".format(schema.name))\n\n # Special handlers currently require expected num operands/results to be specified.\n # TODO: remove special handlers.\n args = [\"node\"]\n \"\"\"\n if expected_num_operands != -1 or expected_num_results != -1 or \"buildOperation\" not in handler_func:\n args.append(\n \"/* expected_num_operands = */ {}\".format(expected_num_operands))\n args.append(\n '/* expected_num_results = */ {}'.format(expected_num_results))\n \"\"\"\n s += inc_indent(indent) + '&onnx_mlir::detail::FrontendGenImpl::'\n s += handler_func+';\\n'\n\n file.write(s)\n\n\ndef build_operator_schemas():\n # domain -> support level -> name -> [schema]\n index = defaultdict(lambda: defaultdict(lambda: defaultdict(\n list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]\n for schema in defs.get_all_schemas_with_history():\n index[schema.domain][int(\n schema.support_level)][schema.name].append(schema)\n\n # Preprocess the Operator Schemas\n # [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]\n operator_schemas = list(\n ) # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]\n exsting_ops = set() # type: Set[Text]\n for domain, _supportmap in sorted(index.items()):\n if not should_render_domain(domain):\n continue\n processed_supportmap = list()\n for _support, _namemap in 
sorted(_supportmap.items()):\n processed_namemap = list()\n for n, unsorted_versions in sorted(_namemap.items()):\n versions = sorted(unsorted_versions,\n key=lambda s: s.since_version)\n schema = versions[-1]\n if schema.name in exsting_ops:\n continue\n\n if check_operation_version :\n # Generate operation of the latest version of your onnx.\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n\n # Add checks against version_dict\n if schema.name not in version_dict :\n print(\"Check-operation-version: Operation {} is new with version {}\"\n .format(schema.name, schema.since_version))\n elif schema.since_version > version_dict[schema.name]:\n print(\"Check-operation-version: Operation {}\"\n .format(schema.name)+\n \" has a newer version {} over old version {}\"\n .format(schema.since_version, version_dict[schema.name]))\n else:\n # Generate operation according to the version in version_dict.\n if schema.name not in version_dict :\n continue\n found = False\n for schema in reversed(versions):\n # Check the version number against the version_dict\n if schema.since_version == version_dict[schema.name]:\n exsting_ops.add(schema.name)\n processed_namemap.append((n, schema, versions))\n found = True\n break\n if not found:\n print(\"Your onnx installation may be too old. \"\n \"The desired version for operation {} is not found.\".format(\n schema.name))\n sys.exit()\n processed_supportmap.append((_support, processed_namemap))\n operator_schemas.append((domain, processed_supportmap))\n return operator_schemas\n\n\ndef main(args): # type: (Type[Args]) -> None\n curr_utc_time = datetime.datetime.now(\n datetime.timezone.utc).strftime(\"%m/%d/%Y, %H:%M:%S\")\n autogen_warning = (\n '//********************************************************\\n'\n '// Do not modify this file directly.\\n'\n '// This file is automatically generated via script.\\n'\n '// Details can be found in docs/ImportONNXDefs.md .\\n'\n '//********************************************************\\n\\n')\n autogen_warning = autogen_warning.format(curr_utc_time)\n\n op_def = args.op_def\n op_def.write(autogen_warning)\n\n op_importer = args.op_importer\n op_importer.write(autogen_warning)\n\n version_dict = dict()\n for domain, supportmap in build_operator_schemas():\n for _, namemap in supportmap:\n for op_type, schema, versions in namemap:\n if check_operation_version:\n version_dict[schema.name] = schema.since_version\n else:\n gen_op_importer(schema, op_importer)\n r = gen_op_def(schema)\n op_def.write(r)\n if check_operation_version :\n pprint.pprint(version_dict)\n\nif __name__ == '__main__':\n curr_dir = os.path.dirname(os.path.realpath(__file__))\n\n class Args(object):\n if args.dry_run_onnx_ops:\n op_def = StringIO()\n else:\n op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc')\n op_def = io.open(op_def_file_path, 'w', newline='')\n\n if args.dry_run_op_build_table:\n op_importer = StringIO()\n else:\n op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc')\n op_importer = io.open(op_importer_file_path, 'w', newline='')\n main(Args)\n\n if args.dry_run_onnx_ops:\n sys.stdout.write(Args.op_def.getvalue())\n if args.dry_run_op_build_table:\n sys.stdout.write(Args.op_importer.getvalue())\n"
] | [
[
"numpy.round"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
t2hk/scdv_glove_elasticsearch | [
"41cd336decf1e14e77439caaa26f64edf28ce42b"
] | [
"get_similar_words_triples.py"
] | [
"from gensim.models import KeyedVectors\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.font_manager as fm\nimport pandas as pd\n\nglove_vector_file = \"vectors.txt\"\ngensim_glove_vector_file = \"gensim_glove_vectors.txt\"\ntop_k = 10\n\nwords_triple_file = 'similarity_words.ttl'\n\n# GloVeの単語ベクトルファイルを読み込み、単語数とベクトルサイズを付与した処理用のファイルを作成する。\nvectors = pd.read_csv(glove_vector_file, delimiter=' ', index_col=0, header=None)\n\nvocab_count = vectors.shape[0] # 単語数\nnum_features = vectors.shape[1] # 次元数\n\nprint(\"単語数:{} 次元数:{}\".format(vocab_count, num_features))\n\nglove_vectors = KeyedVectors.load_word2vec_format(gensim_glove_vector_file, binary=False)\nwords = list(glove_vectors.vocab.keys())\n\nsim_words_list = []\n\nwith open(words_triple_file, 'w') as f:\n for word in words:\n sim_words = glove_vectors.most_similar(word, [], top_k)\n \n for sim_word in sim_words:\n triple = '\"{}\" owl:equivalentClass \"{}\"'.format(word, sim_word[0])\n \n sim_words_list.append(triple)\n f.writelines(triple + '\\n')\n\n\nlen(sim_words_list)\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
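The get_similar_words_triples.py row above has to prepend a word2vec-style "<vocab_count> <num_features>" header before KeyedVectors.load_word2vec_format can read raw GloVe output. gensim (< 4.0, matching the script's use of .vocab) ships a converter that performs the same step; a minimal sketch, where the file names follow the script above and "king" is a hypothetical query word assumed to be in the vocabulary:

from gensim.scripts.glove2word2vec import glove2word2vec
from gensim.models import KeyedVectors

# Counts tokens/dimensions and writes the word2vec header for us.
glove2word2vec("vectors.txt", "gensim_glove_vectors.txt")

vectors = KeyedVectors.load_word2vec_format("gensim_glove_vectors.txt", binary=False)
print(vectors.most_similar("king", topn=3))  # hypothetical query word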
microsetta/microsetta-admin | [
"1ba6787c0315a74d50cafd722dbbe044d507c07f"
] | [
"microsetta_admin/server.py"
] | [
"import jwt\nfrom flask import render_template, Flask, request, session, send_file\nimport secrets\nfrom datetime import datetime\nimport io\n\nfrom jwt import PyJWTError\nfrom werkzeug.exceptions import BadRequest\nfrom werkzeug.utils import redirect\nimport pandas as pd\n\nfrom microsetta_admin import metadata_util, upload_util\nfrom microsetta_admin.config_manager import SERVER_CONFIG\nfrom microsetta_admin._api import APIRequest\nimport importlib.resources as pkg_resources\n\nTOKEN_KEY_NAME = 'token'\nSEND_EMAIL_CHECKBOX_DEFAULT_NAME = 'send_email'\n\nPUB_KEY = pkg_resources.read_text(\n 'microsetta_admin',\n \"authrocket.pubkey\")\n\nDUMMY_SELECT_TEXT = '-------'\n\nRECEIVED_TYPE_DROPDOWN = \\\n [DUMMY_SELECT_TEXT, \"Blood (skin prick)\", \"Saliva\", \"Stool\",\n \"Sample Type Unclear (Swabs Included)\"]\n\nVALID_STATUS = \"sample-is-valid\"\nNO_SOURCE_STATUS = \"no-associated-source\"\nNO_ACCOUNT_STATUS = \"no-registered-account\"\nNO_COLLECTION_INFO_STATUS = \"no-collection-info\"\nINCONSISTENT_SAMPLE_STATUS = \"sample-has-inconsistencies\"\nUNKNOWN_VALIDITY_STATUS = \"received-unknown-validity\"\n\nSTATUS_OPTIONS = [DUMMY_SELECT_TEXT, VALID_STATUS, NO_SOURCE_STATUS,\n NO_ACCOUNT_STATUS, NO_COLLECTION_INFO_STATUS,\n INCONSISTENT_SAMPLE_STATUS, UNKNOWN_VALIDITY_STATUS]\n\nAPI_PROJECTS_URL = '/api/admin/projects'\n\n\ndef handle_pyjwt(pyjwt_error):\n # PyJWTError (Aka, anything wrong with token) will force user to log out\n # and log in again\n return redirect('/logout')\n\n\ndef parse_jwt(token):\n \"\"\"\n Raises\n ------\n jwt.PyJWTError\n If the token is invalid\n \"\"\"\n decoded = jwt.decode(token, PUB_KEY, algorithms=['RS256'], verify=True)\n return decoded\n\n\ndef build_login_variables():\n # Anything that renders sitebase.html must pass down these variables to\n # jinja2\n token_info = None\n if TOKEN_KEY_NAME in session:\n # If user leaves the page open, the token can expire before the\n # session, so if our token goes back we need to force them to login\n # again.\n token_info = parse_jwt(session[TOKEN_KEY_NAME])\n\n vars = {\n 'endpoint': SERVER_CONFIG[\"endpoint\"],\n 'ui_endpoint': SERVER_CONFIG[\"ui_endpoint\"],\n 'authrocket_url': SERVER_CONFIG[\"authrocket_url\"]\n }\n if token_info is not None:\n vars['email'] = token_info['email']\n return vars\n\n\ndef build_app():\n # Create the application instance\n app = Flask(__name__)\n\n flask_secret = SERVER_CONFIG[\"FLASK_SECRET_KEY\"]\n if flask_secret is None:\n print(\"WARNING: FLASK_SECRET_KEY must be set to run with gUnicorn\")\n flask_secret = secrets.token_urlsafe(16)\n app.secret_key = flask_secret\n app.config['SESSION_TYPE'] = 'memcached'\n app.config['SESSION_COOKIE_NAME'] = 'session-microsetta-admin'\n\n # Set mapping from exception type to response code\n app.register_error_handler(PyJWTError, handle_pyjwt)\n\n return app\n\n\napp = build_app()\n\n\[email protected]_processor\ndef utility_processor():\n def format_timestamp(timestamp_str):\n if not timestamp_str:\n return \"None\"\n datetime_obj = datetime.fromisoformat(timestamp_str)\n return datetime_obj.strftime(\"%Y %B %d %H:%M:%S\")\n return dict(format_timestamp=format_timestamp)\n\n\[email protected]('/')\ndef home():\n return render_template('sitebase.html', **build_login_variables())\n\n\[email protected]('/search', methods=['GET'])\ndef search():\n return _search()\n\n\[email protected]('/search/sample', methods=['GET', 'POST'])\ndef search_sample():\n return _search('samples')\n\n\[email protected]('/search/kit', methods=['GET', 'POST'])\ndef 
search_kit():\n return _search('kit')\n\n\[email protected]('/search/email', methods=['GET', 'POST'])\ndef search_email():\n return _search('account')\n\n\ndef _search(resource=None):\n if request.method == 'GET':\n return render_template('search.html', **build_login_variables())\n elif request.method == 'POST':\n query = request.form['search_%s' % resource]\n\n status, result = APIRequest.get(\n '/api/admin/search/%s/%s' % (resource, query))\n\n if status == 404:\n result = {'error_message': \"Query not found\"}\n return render_template('search_result.html',\n **build_login_variables(),\n result=result), 200\n elif status == 200:\n return render_template('search_result.html',\n **build_login_variables(),\n resource=resource,\n result=result), 200\n else:\n return result\n\n\ndef _translate_nones(a_dict, do_none_to_str):\n # Note: this ISN'T a deep copy. This function is NOT set up\n # for recursing through a multi-layer dictionary\n result = a_dict.copy()\n for k, v in result.items():\n if do_none_to_str and v is None:\n result[k] = \"\"\n elif not do_none_to_str and v == '':\n result[k] = None\n return result\n\n\ndef _get_projects(include_stats, is_active):\n projects_uri = API_PROJECTS_URL + f\"?include_stats={include_stats}\"\n if is_active is not None:\n projects_uri += f\"&is_active={is_active}\"\n status, projects_output = APIRequest.get(projects_uri)\n\n if status >= 400:\n result = {'error_message': f\"Unable to load project list: \"\n f\"{projects_uri}\"}\n else:\n cleaned_projects = [_translate_nones(x, True) for x in\n projects_output]\n # if we're not using full project stats, sort\n # alphabetically by project name\n if not include_stats:\n cleaned_projects = sorted(cleaned_projects,\n key=lambda k: k['project_name'])\n result = {'projects': cleaned_projects}\n\n return status, result\n\n\[email protected]('/manage_projects', methods=['GET', 'POST'])\ndef manage_projects():\n result = None\n is_active = request.args.get('is_active', None)\n if request.method == 'POST':\n model = {x: request.form[x] for x in request.form}\n project_id = model.pop('project_id')\n model['is_microsetta'] = model.get('is_microsetta', '') == 'true'\n model['bank_samples'] = model.get('bank_samples', '') == 'true'\n model = _translate_nones(model, False)\n\n if project_id.isdigit():\n # update (put) an existing project\n action = \"update\"\n status, api_output = APIRequest.put(\n '{}/{}'.format(API_PROJECTS_URL, project_id),\n json=model)\n else:\n # create (post) a new project\n action = \"create\"\n status, api_output = APIRequest.post(\n API_PROJECTS_URL, json=model)\n\n # if api post or put failed\n if status >= 400:\n result = {'error_message': f'Unable to {action} project.'}\n # end if post\n\n # if the above work (if any) didn't produce an error message, return\n # the projects list\n if result is None:\n _, result = _get_projects(include_stats=True, is_active=is_active)\n\n return render_template('manage_projects.html',\n **build_login_variables(),\n result=result), 200\n\n\[email protected]('/email_stats', methods=['GET', 'POST'])\ndef email_stats():\n _, result = _get_projects(include_stats=False, is_active=True)\n projects = result.get('projects')\n\n if request.method == 'GET':\n project = request.args.get('project', None)\n email = request.args.get('email')\n if email is None:\n # They want to search for emails, show them the search dialog\n return render_template(\"email_stats_pulldown.html\",\n **build_login_variables(),\n resource=None,\n search_error=None,\n projects=projects)\n 
emails = [email, ]\n elif request.method == 'POST':\n project = request.form.get('project', None)\n emails, upload_err = upload_util.parse_request_csv_col(\n request,\n 'file',\n 'email'\n )\n if upload_err is not None:\n return render_template('email_stats_pulldown.html',\n **build_login_variables(),\n resource=None,\n search_error=[{'error': upload_err}],\n projects=projects)\n else:\n raise BadRequest()\n\n if project == \"\":\n project = None\n\n # de-duplicate\n emails = list({e.lower() for e in emails})\n\n status, result = APIRequest.post(\n '/api/admin/account_email_summary',\n json={\n \"emails\": emails,\n \"project\": project\n })\n\n if status != 200:\n return render_template('email_stats_pulldown.html',\n search_error=[{'error': result}],\n resource=None,\n **build_login_variables(),\n projects=projects)\n\n # At a minimum, our table will display these columns.\n # We may show additional info depending on what comes back from the request\n base_data_template = {\n 'email': 'XXX',\n 'summary': 'XXX',\n 'account_id': 'XXX',\n 'creation_time': 'XXX',\n 'kit_name': 'XXX',\n 'project': 'XXX',\n 'unclaimed-samples-in-kit': 0,\n 'never-scanned': 0,\n 'sample-is-valid': 0,\n 'no-associated-source': 0,\n 'no-registered-account': 0,\n 'no-collection-info': 0,\n 'sample-has-inconsistencies': 0,\n 'received-unknown-validity': 0\n }\n\n df = pd.DataFrame([base_data_template] + result)\n df = df.drop(0) # remove the template row\n numeric_cols = [\n \"unclaimed-samples-in-kit\", \"never-scanned\", \"sample-is-valid\",\n \"no-associated-source\", \"no-registered-account\", \"no-collection-info\",\n \"sample-has-inconsistencies\", \"received-unknown-validity\"\n ]\n df[numeric_cols] = df[numeric_cols].apply(pd.to_numeric)\n df[numeric_cols] = df[numeric_cols].fillna(0)\n\n def urlify_account_id(id_):\n if pd.isnull(id_):\n return \"No associated account\"\n else:\n ui_endpoint = SERVER_CONFIG['ui_endpoint']\n account_url = f\"{ui_endpoint}/accounts/{id_}\"\n return f'<a target=\"_blank\" href=\"{account_url}\">{id_}</a>'\n\n # see https://stackoverflow.com/questions/20035518/insert-a-link-inside-a-pandas-table # noqa\n df['account_id'] = df[\"account_id\"].apply(urlify_account_id)\n return render_template(\"email_stats_pulldown.html\",\n search_error=None,\n resource=df,\n **build_login_variables(),\n projects=projects)\n\n\[email protected]('/per_sample_summary', methods=['GET', 'POST'])\ndef per_sample_summary():\n # get a list of all projects in the system\n _, result = _get_projects(include_stats=False, is_active=True)\n projects = result.get('projects')\n\n # filter out any projects that don't belong to Microsetta\n projects = [x for x in projects if x['is_microsetta'] is True]\n\n # build a list of dictionaries with just the project id and the project\n # name.\n projects = [{'project_name': x['project_name'],\n 'project_id': x['project_id']} for x in projects]\n\n # determine if user wants sample ids stripped\n strip_sampleid = request.form.get('strip_sampleid', 'off')\n strip_sampleid = strip_sampleid.lower() == 'on'\n\n if request.method == 'GET':\n # If user arrived via GET then they are either here w/out\n # querying and they simply need the default webpage, or they are\n # querying with either a list of barcodes, or with a project id.\n\n # look for both parameters to determine which state we are in.\n sample_barcode = request.args.get('sample_barcode')\n project_id = request.args.get('project_id')\n\n if sample_barcode is None and project_id is None:\n # user just wants the 
default page.\n return render_template('per_sample_summary.html',\n resource=None,\n projects=projects,\n **build_login_variables())\n\n if project_id is not None:\n # user wants to get summaries on all samples in a project.\n payload = {'project_id': project_id}\n status, result = APIRequest.post('/api/admin/account_barcode_summa'\n 'ry?strip_sampleid=False',\n json=payload)\n\n if status == 200:\n if result['partial_result'] is True:\n unprocessed_barcodes = result['unprocessed_barcodes']\n else:\n unprocessed_barcodes = None\n\n resource = pd.DataFrame(result['samples'])\n order = ['sampleid', 'project', 'account-email',\n 'source-email', 'source-type', 'site-sampled',\n 'sample-status', 'sample-received', 'ffq-taken',\n 'ffq-complete', 'vioscreen_username']\n order.extend(sorted(set(resource.columns) - set(order)))\n resource = resource[order]\n if unprocessed_barcodes:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n error_message=\"Too many barcodes. S\"\n \"erver processed only\"\n \" the first 1000.\",\n **build_login_variables())\n else:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n **build_login_variables())\n\n else:\n return render_template('per_sample_summary.html',\n resource=None,\n projects=projects,\n error_message=result,\n **build_login_variables())\n\n # if we are here then the user is querying using barcodes and we\n # simply need to set up the query below to perform.\n sample_barcodes = [sample_barcode, ]\n else:\n # assume POST, since there are only two methods defined in route.\n # if we are here, it is because the user is querying using an uploaded\n # file containing sample names.\n sample_barcodes, err = upload_util.parse_request_csv_col(request,\n 'file',\n 'sample_name')\n if err is not None:\n # there was an error. abort early.\n return render_template('per_sample_summary.html',\n resource=None,\n projects=projects,\n **build_login_variables(),\n search_error=[{'error': err}])\n\n # perform the main query.\n payload = {'sample_barcodes': sample_barcodes}\n status, result = APIRequest.post('/api/admin/account_barcode_summary?stri'\n 'p_sampleid=%s' % str(strip_sampleid),\n json=payload)\n\n if status == 200:\n if result['partial_result'] is True:\n unprocessed_barcodes = result['unprocessed_barcodes']\n else:\n unprocessed_barcodes = None\n resource = pd.DataFrame(result['samples'])\n order = ['sampleid', 'project', 'account-email', 'source-email',\n 'source-type', 'site-sampled', 'sample-status',\n 'sample-received', 'ffq-taken', 'ffq-complete',\n 'vioscreen_username']\n order.extend(sorted(set(resource.columns) - set(order)))\n resource = resource[order]\n\n if unprocessed_barcodes:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n error_message=\"Too many barcodes. 
S\"\n \"erver processed only\"\n \" the first 1000.\",\n **build_login_variables())\n else:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n **build_login_variables())\n else:\n return render_template('per_sample_summary.html',\n resource=None,\n projects=projects,\n error_message=result,\n **build_login_variables())\n\n\ndef _get_by_sample_barcode(sample_barcodes, strip_sampleid, projects):\n payload = {'sample_barcodes': sample_barcodes}\n status, result = APIRequest.post('/api/admin/account_barcode_summary?'\n 'strip_sampleid=%s' % str(strip_sampleid),\n json=payload)\n if status == 200:\n if result['partial_result'] is True:\n unprocessed_barcodes = result['unprocessed_barcodes']\n else:\n unprocessed_barcodes = None\n\n resource = pd.DataFrame(result['samples'])\n order = ['sampleid', 'project', 'account-email', 'source-email',\n 'source-type', 'site-sampled', 'sample-status',\n 'sample-received', 'ffq-taken', 'ffq-complete',\n 'vioscreen_username']\n order.extend(sorted(set(resource.columns) - set(order)))\n resource = resource[order]\n\n if unprocessed_barcodes:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n error_message=\"Too many barcodes. S\"\n \"erver processed only\"\n \" the first 1000.\",\n **build_login_variables())\n else:\n return render_template('per_sample_summary.html',\n resource=resource,\n projects=projects,\n **build_login_variables())\n else:\n return render_template('per_sample_summary.html',\n resource=None,\n projects=projects,\n error_message=result,\n **build_login_variables())\n\n\[email protected]('/create_kits', methods=['GET', 'POST'])\ndef new_kits():\n _, result = _get_projects(include_stats=False, is_active=True)\n projects = result.get('projects')\n\n if request.method == 'GET':\n return render_template('create_kits.html',\n error_message=result.get('error_message'),\n projects=projects,\n **build_login_variables())\n\n elif request.method == 'POST':\n num_kits = int(request.form['num_kits'])\n num_samples = int(request.form['num_samples'])\n prefix = request.form['prefix']\n selected_project_ids = request.form.getlist('project_ids')\n payload = {'number_of_kits': num_kits,\n 'number_of_samples': num_samples,\n 'project_ids': selected_project_ids}\n if prefix:\n payload['kit_id_prefix'] = prefix\n\n status, result = APIRequest.post(\n '/api/admin/create/kits',\n json=payload)\n\n if status != 201:\n return render_template('create_kits.html',\n error_message='Failed to create kits',\n projects=projects,\n **build_login_variables())\n\n # StringIO/BytesIO based off https://stackoverflow.com/a/45111660\n buf = io.StringIO()\n payload = io.BytesIO()\n\n # explicitly expand out the barcode detail\n kits = pd.DataFrame(result['created'])\n for i in range(num_samples):\n kits['barcode_%d' % (i+1)] = [r['sample_barcodes'][i]\n for _, r in kits.iterrows()]\n kits.drop(columns='sample_barcodes', inplace=True)\n\n kits.to_csv(buf, sep=',', index=False, header=True)\n payload.write(buf.getvalue().encode('utf-8'))\n payload.seek(0)\n buf.close()\n\n stamp = datetime.now().strftime('%d%b%Y-%H%M')\n fname = f'kits-{stamp}.csv'\n\n return send_file(payload, as_attachment=True,\n attachment_filename=fname,\n mimetype='text/csv')\n\n\ndef _check_sample_status(extended_barcode_info):\n warning = None\n in_microsetta_project = any(\n [x['is_microsetta'] for x in extended_barcode_info['projects_info']])\n\n # one warning to rule them all; check in order of precendence\n if not 
in_microsetta_project:\n warning = UNKNOWN_VALIDITY_STATUS\n elif extended_barcode_info['account'] is None:\n warning = NO_ACCOUNT_STATUS\n elif extended_barcode_info['source'] is None:\n warning = NO_SOURCE_STATUS\n # collection datetime is used as the bellwether for the whole\n # set of sample collection info because it is relevant to all\n # kinds of samples (whereas previously used field, sample site, is not\n # filled when environmental samples are returned).\n elif extended_barcode_info['sample'].get('datetime_collected') is None:\n warning = NO_COLLECTION_INFO_STATUS\n\n return warning\n\n\n# Set up handlers for the cases,\n# GET to view the page,\n# POST to update info for a barcode -AND (possibly)-\n# email end user about the change in sample status,\ndef _scan_get(sample_barcode, update_error):\n # If there is no sample_barcode in the GET\n # they still need to enter one in the box, so show empty page\n if sample_barcode is None:\n return render_template('scan.html', **build_login_variables())\n\n # Assuming there is a sample barcode, grab that sample's information\n status, result = APIRequest.get(\n '/api/admin/search/samples/%s' % sample_barcode)\n\n # If we successfully grab it, show the page to the user\n if status == 200:\n # Process result in python because its easier than jinja2.\n status_warning = _check_sample_status(result)\n\n # check the latest scan to find the default sample_status for form\n latest_status = DUMMY_SELECT_TEXT\n if result['latest_scan']:\n latest_status = result['latest_scan']['sample_status']\n\n account = result.get('account')\n events = []\n if account:\n event_status, event_result = APIRequest.get(\n '/api/admin/events/accounts/%s' % account['id']\n )\n if event_status != 200:\n raise Exception(\"Couldn't pull event history\")\n\n events = event_result\n\n return render_template(\n 'scan.html',\n **build_login_variables(),\n barcode_info=result[\"barcode_info\"],\n projects_info=result['projects_info'],\n scans_info=result['scans_info'],\n latest_status=latest_status,\n dummy_status=DUMMY_SELECT_TEXT,\n status_options=STATUS_OPTIONS,\n send_email=session.get(SEND_EMAIL_CHECKBOX_DEFAULT_NAME, True),\n sample_info=result['sample'],\n extended_info=result,\n status_warning=status_warning,\n update_error=update_error,\n received_type_dropdown=RECEIVED_TYPE_DROPDOWN,\n source=result['source'],\n events=events\n )\n elif status == 401:\n # If we fail due to unauthorized, need the user to log in again\n return redirect('/logout')\n elif status == 404:\n # If we fail due to not found, need to tell the user to pick a diff\n # barcode\n return render_template(\n 'scan.html',\n **build_login_variables(),\n search_error=\"Barcode %s Not Found\" % sample_barcode,\n update_error=update_error,\n received_type_dropdown=RECEIVED_TYPE_DROPDOWN\n )\n else:\n raise BadRequest()\n\n\ndef _scan_post_update_info(sample_barcode,\n technician_notes,\n sample_status,\n action,\n issue_type,\n template,\n received_type,\n recorded_type):\n\n ###\n # Bugfix Part 1 for duplicate emails being sent. Theory is that client is\n # out of sync due to hitting back button after a scan has changed\n # state.\n # Can't test if client is up to date without ETags, so for right now,\n # we just validate whether or not they should send an email, duplicating\n # the client log. 
(This can still break with multiple admin clients,\n # but that is unlikely at the moment.)\n latest_status = None\n # TODO: Replace this with ETags!\n status, result = APIRequest.get(\n '/api/admin/search/samples/%s' % sample_barcode)\n\n if result['latest_scan']:\n latest_status = result['latest_scan']['sample_status']\n ###\n\n # Do the actual update\n status, response = APIRequest.post(\n '/api/admin/scan/%s' % sample_barcode,\n json={\n \"sample_status\": sample_status,\n \"technician_notes\": technician_notes\n }\n )\n\n # if the update failed, keep track of the error so it can be displayed\n if status != 201:\n update_error = response\n return _scan_get(sample_barcode, update_error)\n else:\n update_error = None\n\n # If we're not supposed to send an email, go back to GET\n if action != \"send_email\":\n return _scan_get(sample_barcode, update_error)\n\n ###\n # Bugfix Part 2 for duplicate emails being sent.\n if sample_status == latest_status:\n # This is what we'll hit if javascript thinks it's updating status\n # but is out of sync with the database.\n update_error = \"Ignoring Send Email, sample_status would \" \\\n \"not have been updated (Displayed page was out of \" \\\n \"sync)\"\n return _scan_get(sample_barcode, update_error)\n ###\n\n # This is what we'll hit if there are no email templates to send for\n # the new sample status (or if we screw up javascript side :D )\n if template is None:\n update_error = \"Cannot Send Email: No Issue Type Specified \" \\\n \"(or no issue types available)\"\n return _scan_get(sample_barcode, update_error)\n\n # Otherwise, send out an email to the end user\n status, response = APIRequest.post(\n '/api/admin/email',\n json={\n \"issue_type\": issue_type,\n \"template\": template,\n \"template_args\": {\n \"sample_barcode\": sample_barcode,\n \"recorded_type\": recorded_type,\n \"received_type\": received_type\n }\n }\n )\n\n # if the email failed to send, keep track of the error\n # so it can be displayed\n if status != 200:\n update_error = response\n else:\n update_error = None\n\n return _scan_get(sample_barcode, update_error)\n\n\[email protected]('/scan', methods=['GET', 'POST'])\ndef scan():\n # Now that the handlers are set up, parse the request to determine what\n # to do.\n\n # If its a get, grab the sample_barcode from the query string rather than\n # form parameters\n if request.method == 'GET':\n sample_barcode = request.args.get('sample_barcode')\n return _scan_get(sample_barcode, None)\n\n # If its a post, make the changes, then refresh the page\n if request.method == 'POST':\n # Without some extra ajax, we can't persist the send_email checkbox\n # until they actually post the form\n send_email = request.form.get('send_email', False)\n session[SEND_EMAIL_CHECKBOX_DEFAULT_NAME] = send_email\n\n sample_barcode = request.form['sample_barcode']\n technician_notes = request.form['technician_notes']\n sample_status = request.form['sample_status']\n\n action = request.form.get('action')\n issue_type = request.form.get('issue_type')\n template = request.form.get('template')\n received_type = request.form.get('received_type')\n recorded_type = request.form.get('recorded_type')\n\n return _scan_post_update_info(sample_barcode,\n technician_notes,\n sample_status,\n action,\n issue_type,\n template,\n received_type,\n recorded_type)\n\n\[email protected]('/metadata_pulldown', methods=['GET', 'POST'])\ndef metadata_pulldown():\n allow_missing = request.form.get('allow_missing_samples', False)\n\n if request.method == 'GET':\n 
sample_barcode = request.args.get('sample_barcode')\n # If there is no sample_barcode in the GET\n # they still need to enter one in the box, so show empty page\n if sample_barcode is None:\n return render_template('metadata_pulldown.html',\n **build_login_variables())\n sample_barcodes = [sample_barcode]\n elif request.method == 'POST':\n sample_barcodes, upload_err = upload_util.parse_request_csv_col(\n request,\n 'file',\n 'sample_name'\n )\n if upload_err is not None:\n return render_template('metadata_pulldown.html',\n **build_login_variables(),\n search_error=[{'error': upload_err}])\n else:\n raise BadRequest()\n\n df, errors = metadata_util.retrieve_metadata(sample_barcodes)\n\n # Strangely, these api requests are returning an html error page rather\n # than a machine parseable json error response object with message.\n # This is almost certainly due to error handling for the cohosted minimal\n # client. In future, we should just pass down whatever the api says here.\n if len(errors) == 0 or allow_missing:\n df = metadata_util.drop_private_columns(df)\n\n # TODO: Streaming direct from pandas is a pain. Need to search for\n # better ways to iterate and chunk this file as we generate it\n strstream = io.StringIO()\n df.to_csv(strstream, sep='\\t', index=True, header=True)\n\n # TODO: utf-8 or utf-16 encoding??\n bytestream = io.BytesIO()\n bytestream.write(strstream.getvalue().encode('utf-8'))\n bytestream.seek(0)\n\n strstream.close()\n return send_file(bytestream,\n mimetype=\"text/tab-separated-values\",\n as_attachment=True,\n attachment_filename=\"metadata_pulldown.tsv\",\n add_etags=False,\n cache_timeout=None,\n conditional=False,\n last_modified=None,\n )\n else:\n\n return render_template('metadata_pulldown.html',\n **build_login_variables(),\n info={'barcodes': sample_barcodes},\n search_error=errors)\n\n\[email protected]('/submit_daklapack_order', methods=['GET'])\ndef submit_daklapack_order():\n error_msg_key = \"error_message\"\n\n def return_error(msg):\n return render_template('submit_daklapack_order.html',\n **build_login_variables(),\n error_message=msg)\n\n status, dak_articles_output = APIRequest.get(\n '/api/admin/daklapack_articles')\n if status >= 400:\n return return_error(\"Unable to load daklapack articles list.\")\n\n status, projects_output = _get_projects(include_stats=False,\n is_active=True)\n if status >= 400:\n return return_error(projects_output[error_msg_key])\n\n return render_template('submit_daklapack_order.html',\n **build_login_variables(),\n error_message=None,\n dummy_status=DUMMY_SELECT_TEXT,\n dak_articles=dak_articles_output,\n contact_phone_number=SERVER_CONFIG[\n \"order_contact_phone\"],\n projects=projects_output['projects'])\n\n\[email protected]('/submit_daklapack_order', methods=['POST'])\ndef post_submit_daklapack_order():\n def return_error(msg):\n return render_template('submit_daklapack_order.html',\n **build_login_variables(),\n error_message=msg)\n\n error_message = success_submissions = failure_submissions = headers = None\n expected_headers = [\"firstName\", \"lastName\", \"address1\", \"insertion\",\n \"address2\", \"postalCode\", \"city\", \"state\",\n \"country\", \"countryCode\"]\n\n # get required fields; cast where expected by api\n phone_number = request.form['contact_phone_number']\n project_ids_list = list(map(int, request.form.getlist('projects')))\n dak_article_code = request.form['dak_article_code']\n article_quantity = int(request.form['quantity'])\n file = request.files['addresses_file']\n\n # get optional 
fields or defaults\n planned_send_str = request.form.get('planned_send_date')\n planned_send_date = planned_send_str if planned_send_str else None\n\n description = request.form.get('description')\n fedex_ref_1 = request.form.get('fedex_ref_1')\n fedex_ref_2 = request.form.get('fedex_ref_2')\n fedex_ref_3 = request.form.get('fedex_ref_3')\n\n try:\n # NB: import everything as a string so that zip codes beginning with\n # zero (e.g., 06710) don't get silently cast to numbers\n if file.filename.endswith('xls'):\n addresses_df = pd.read_excel(file, dtype=str)\n elif file.filename.endswith('xlsx'):\n addresses_df = pd.read_excel(file, engine='openpyxl', dtype=str)\n else:\n raise ValueError(f\"Unrecognized extension on putative excel \"\n f\"filename: {file.filename}\")\n\n headers = list(addresses_df.columns)\n except Exception as e: # noqa\n return return_error('Could not parse addresses file')\n\n if headers != expected_headers:\n return return_error(f\"Received column names {headers} do \"\n f\"not match expected column names\"\n f\" {expected_headers}\")\n\n # add (same) contact phone number to every address\n addresses_df['phone'] = phone_number\n\n addresses_df = addresses_df.fillna(\"\")\n temp_dict = addresses_df.to_dict(orient='index')\n addresses_list = [temp_dict[n] for n in range(len(temp_dict))]\n\n status, post_output = APIRequest.post(\n '/api/admin/daklapack_orders',\n json={\n \"project_ids\": project_ids_list,\n \"article_code\": dak_article_code,\n \"quantity\": article_quantity,\n \"addresses\": addresses_list,\n \"planned_send_date\": planned_send_date,\n \"description\": description,\n \"fedex_ref_1\": fedex_ref_1,\n \"fedex_ref_2\": fedex_ref_2,\n \"fedex_ref_3\": fedex_ref_3\n }\n )\n\n # if the post failed, keep track of the error so it can be displayed\n if status != 200:\n error_message = post_output\n else:\n order_submissions = post_output[\"order_submissions\"]\n success_submissions = [x for x in order_submissions if\n x[\"order_success\"]]\n failure_submissions = [x for x in order_submissions if not\n x[\"order_success\"]]\n\n return render_template('submit_daklapack_order.html',\n **build_login_variables(),\n error_message=error_message,\n success_submissions=success_submissions,\n failure_submissions=failure_submissions)\n\n\[email protected]('/authrocket_callback')\ndef authrocket_callback():\n token = request.args.get('token')\n session[TOKEN_KEY_NAME] = token\n return redirect(\"/\")\n\n\[email protected]('/logout')\ndef logout():\n if TOKEN_KEY_NAME in session:\n del session[TOKEN_KEY_NAME]\n return redirect(\"/\")\n\n\n# If we're running in stand alone mode, run the application\nif __name__ == '__main__':\n if SERVER_CONFIG[\"ssl_cert_path\"] and SERVER_CONFIG[\"ssl_key_path\"]:\n ssl_context = (\n SERVER_CONFIG[\"ssl_cert_path\"], SERVER_CONFIG[\"ssl_key_path\"]\n )\n else:\n ssl_context = None\n\n app.run(\n port=SERVER_CONFIG['port'],\n debug=SERVER_CONFIG['debug'],\n ssl_context=ssl_context\n )\n"
] | [
[
"pandas.isnull",
"pandas.read_excel",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
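A note on the address-upload handler in the code cell above: it reads the spreadsheet with dtype=str so that postal codes such as 06710 keep their leading zero instead of being silently cast to integers. Below is a minimal sketch of the difference, using pd.read_csv on an in-memory buffer rather than pd.read_excel so it runs without an .xlsx file; the column name mirrors the handler's expected headers and the values are made up.

import io
import pandas as pd

csv_text = "postalCode\n06710\n90210\n"

# Default type inference parses the column as integers and drops the leading zero.
df_default = pd.read_csv(io.StringIO(csv_text))
print(df_default["postalCode"].tolist())  # [6710, 90210]

# dtype=str keeps every cell verbatim, which is what the handler relies on.
df_str = pd.read_csv(io.StringIO(csv_text), dtype=str)
print(df_str["postalCode"].tolist())  # ['06710', '90210']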
JasonWayne/deep-learning-snippets | [
"7c64e065752fcbb902494d757a41140f42facf05"
] | [
"frameworks/tensorflow/print_tensor_in_ckpt.py"
] | [
"'''\ncommon usage: \n 1. put this script in ckpt folder\n 2. python print_tensor_in_ckpt.py > tensors.txt\n'''\n# ref: https://stackoverflow.com/questions/38218174/how-do-i-find-the-variable-names-and-values-that-are-saved-in-a-checkpoint\nimport tensorflow as tf\nfrom tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file\n\n\nlatest_ckp = tf.train.latest_checkpoint('./')\nprint_tensors_in_checkpoint_file(latest_ckp, all_tensors=True, tensor_name='')\n"
] | [
[
"tensorflow.train.latest_checkpoint",
"tensorflow.python.tools.inspect_checkpoint.print_tensors_in_checkpoint_file"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
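The script above dumps both names and values in one shot. When only the variable names and shapes are needed (for a quick architecture audit, say), the public checkpoint-reader API is a lighter alternative. A hedged sketch, assuming it likewise runs inside the checkpoint folder and that the installed TF version exposes tf.train.list_variables / tf.train.load_checkpoint (present in the 1.x line listed above):

import tensorflow as tf

ckpt = tf.train.latest_checkpoint('./')
if ckpt is not None:
    # list_variables returns (name, shape) pairs without loading the values.
    for name, shape in tf.train.list_variables(ckpt):
        print(name, shape)
    # load_checkpoint returns a reader for pulling individual tensors on demand,
    # e.g. reader.get_tensor(name) for any name printed above.
    reader = tf.train.load_checkpoint(ckpt)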
TangZhenchaoTZC/Keras-mask-detection | [
"325679d06a12a90b2552ed7d447298a23e3b9d57"
] | [
"fasterRCNNtrain/loss_and_gen.py"
] | [
"\"\"\"fasterRCNN训练的损失函数与数据生成器\"\"\"\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom keras import backend as K\nimport keras\nimport tensorflow as tf\nimport numpy as np\nfrom random import shuffle\nimport random\nfrom PIL import Image\nfrom keras.objectives import categorical_crossentropy\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\n\nimport sys\nsys.path.append(\"..\")\nfrom net import RPN as RPN\n\n\ndef rand(a=0, b=1):\n return np.random.rand() * (b - a) + a\n\n\ndef cls_loss(ratio=3):\n def _cls_loss(y_true, y_pred):\n # y_true [batch_size, num_anchor, num_classes+1]\n # y_pred [batch_size, num_anchor, num_classes]\n labels = y_true\n anchor_state = y_true[:, :, -1] # -1 是需要忽略的, 0 是背景, 1 是存在目标\n classification = y_pred\n\n # 找出存在目标的先验框\n indices_for_object = tf.where(keras.backend.equal(anchor_state, 1))\n labels_for_object = tf.gather_nd(labels, indices_for_object)\n classification_for_object = tf.gather_nd(classification, indices_for_object)\n\n cls_loss_for_object = keras.backend.binary_crossentropy(labels_for_object, classification_for_object)\n\n # 找出实际上为背景的先验框\n indices_for_back = tf.where(keras.backend.equal(anchor_state, 0))\n labels_for_back = tf.gather_nd(labels, indices_for_back)\n classification_for_back = tf.gather_nd(classification, indices_for_back)\n\n # 计算每一个先验框应该有的权重\n cls_loss_for_back = keras.backend.binary_crossentropy(labels_for_back, classification_for_back)\n\n # 标准化,实际上是正样本的数量\n normalizer_pos = tf.where(keras.backend.equal(anchor_state, 1))\n normalizer_pos = keras.backend.cast(keras.backend.shape(normalizer_pos)[0], keras.backend.floatx())\n normalizer_pos = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer_pos)\n\n normalizer_neg = tf.where(keras.backend.equal(anchor_state, 0))\n normalizer_neg = keras.backend.cast(keras.backend.shape(normalizer_neg)[0], keras.backend.floatx())\n normalizer_neg = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer_neg)\n\n # 将所获得的loss除上正样本的数量\n cls_loss_for_object = keras.backend.sum(cls_loss_for_object) / normalizer_pos\n cls_loss_for_back = ratio * keras.backend.sum(cls_loss_for_back) / normalizer_neg\n\n # 总的loss\n loss = cls_loss_for_object + cls_loss_for_back\n\n return loss\n\n return _cls_loss\n\n\ndef smooth_l1(sigma=1.0):\n sigma_squared = sigma ** 2\n\n def _smooth_l1(y_true, y_pred):\n # y_true [batch_size, num_anchor, 4+1]\n # y_pred [batch_size, num_anchor, 4]\n regression = y_pred\n regression_target = y_true[:, :, :-1]\n anchor_state = y_true[:, :, -1]\n\n # 找到正样本\n indices = tf.where(keras.backend.equal(anchor_state, 1))\n regression = tf.gather_nd(regression, indices)\n regression_target = tf.gather_nd(regression_target, indices)\n\n # 计算 smooth L1 loss\n # f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma\n # |x| - 0.5 / sigma / sigma otherwise\n regression_diff = regression - regression_target\n regression_diff = keras.backend.abs(regression_diff)\n regression_loss = tf.where(\n keras.backend.less(regression_diff, 1.0 / sigma_squared),\n 0.5 * sigma_squared * keras.backend.pow(regression_diff, 2),\n regression_diff - 0.5 / sigma_squared\n )\n\n normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])\n normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())\n loss = keras.backend.sum(regression_loss) / normalizer\n\n return loss\n\n return _smooth_l1\n\n\ndef class_loss_regr(num_classes):\n epsilon = 1e-4\n\n def class_loss_regr_fixed_num(y_true, y_pred):\n x = y_true[:, :, 4 * num_classes:] - y_pred\n 
x_abs = K.abs(x)\n x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')\n loss = 4 * K.sum(\n y_true[:, :, :4 * num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(\n epsilon + y_true[:, :, :4 * num_classes])\n return loss\n\n return class_loss_regr_fixed_num\n\n\ndef class_loss_cls(y_true, y_pred):\n return K.mean(categorical_crossentropy(y_true[0, :, :], y_pred[0, :, :]))\n\n\ndef get_new_img_size(width, height, img_min_side=600):\n if width <= height:\n f = float(img_min_side) / width\n resized_height = int(f * height)\n resized_width = int(img_min_side)\n else:\n f = float(img_min_side) / height\n resized_width = int(f * width)\n resized_height = int(img_min_side)\n\n return resized_width, resized_height\n\n\ndef get_img_output_length(width, height):\n def get_output_length(input_length):\n # input_length += 6\n filter_sizes = [7, 3, 1, 1]\n padding = [3, 1, 0, 0]\n stride = 2\n for i in range(4):\n # input_length = (input_length - filter_size + stride) // stride\n input_length = (input_length + 2 * padding[i] - filter_sizes[i]) // stride + 1\n return input_length\n\n return get_output_length(width), get_output_length(height)\n\n\nclass Generator(object):\n def __init__(self, bbox_util, train_lines, num_classes, solid, solid_shape=[600, 600]):\n self.bbox_util = bbox_util\n self.train_lines = train_lines\n self.train_batches = len(train_lines)\n self.num_classes = num_classes\n self.solid = solid\n # 用于固定训练图片的大小(600,600)\n self.solid_shape = solid_shape\n\n def get_random_data(self, annotation_line, jitter=.3, hue=.1, sat=1.5, val=1.5):\n \"\"\"数据增强,提高模型鲁棒性\"\"\"\n line = annotation_line.split()\n image = Image.open(line[0])\n iw, ih = image.size\n\n # 如果solid=True,训练的图片大小会强制resize\n if self.solid:\n w, h = self.solid_shape\n else:\n w, h = get_new_img_size(iw, ih)\n box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])\n\n # resize image\n new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(1 - jitter, 1 + jitter)\n scale = rand(.25, 2)\n if new_ar < 1:\n nh = int(scale * h)\n nw = int(nh * new_ar)\n else:\n nw = int(scale * w)\n nh = int(nw / new_ar)\n image = image.resize((nw, nh), Image.BICUBIC)\n\n # place image\n dx = int(rand(0, w - nw))\n dy = int(rand(0, h - nh))\n new_image = Image.new('RGB', (w, h), (128, 128, 128))\n new_image.paste(image, (dx, dy))\n image = new_image\n\n # flip image or not\n flip = rand() < .5\n if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)\n\n # distort image\n hue = rand(-hue, hue)\n sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)\n val = rand(1, val) if rand() < .5 else 1 / rand(1, val)\n x = rgb_to_hsv(np.array(image) / 255.)\n x[..., 0] += hue\n x[..., 0][x[..., 0] > 1] -= 1\n x[..., 0][x[..., 0] < 0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x > 1] = 1\n x[x < 0] = 0\n image_data = hsv_to_rgb(x) * 255 # numpy array, 0 to 1\n\n # correct boxes\n box_data = np.zeros((len(box), 5))\n if len(box) > 0:\n np.random.shuffle(box)\n box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx\n box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy\n if flip: box[:, [0, 2]] = w - box[:, [2, 0]]\n box[:, 0:2][box[:, 0:2] < 0] = 0\n box[:, 2][box[:, 2] > w] = w\n box[:, 3][box[:, 3] > h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w > 1, box_h > 1)] # discard invalid box\n box_data = np.zeros((len(box), 5))\n box_data[:len(box)] = box\n if len(box) == 0:\n return image_data, []\n\n if (box_data[:, :4] > 0).any():\n return image_data, box_data\n 
else:\n return image_data, []\n\n def generate(self):\n \"\"\"数据生成器\"\"\"\n while True:\n # 打乱2007_train.txt\n shuffle(self.train_lines)\n lines = self.train_lines\n for annotation_line in lines:\n # 对每一行即没一张图片进行数据增强:改变光照,对比度等,使图片变得多样,从而提高模型鲁棒性\n # img为数据增强后的图片,y为目标的信息\n img, y = self.get_random_data(annotation_line)\n height, width, _ = np.shape(img)\n\n # 没有目标就跳过\n if len(y) == 0:\n continue\n # 将目标信息归一化\n boxes = np.array(y[:, :4], dtype=np.float32)\n boxes[:, 0] = boxes[:, 0] / width\n boxes[:, 1] = boxes[:, 1] / height\n boxes[:, 2] = boxes[:, 2] / width\n boxes[:, 3] = boxes[:, 3] / height\n\n box_heights = boxes[:, 3] - boxes[:, 1]\n box_widths = boxes[:, 2] - boxes[:, 0]\n # 如果遇到标记错误为负数的情况,应跳过\n if (box_heights <= 0).any() or (box_widths <= 0).any():\n continue\n\n y[:, :4] = boxes[:, :4]\n\n # 获得先验框 38*38*9个\n anchors = RPN.create_anchor(get_img_output_length(width, height), width, height)\n\n # 计算真实框对应的先验框,返回正样本:可以对应到真实框的先验框,负样本:背景\n assignment = self.bbox_util.assign_boxes(y, anchors)\n\n # 训练一般随机选择128个正样本,128个负样本\n num_regions = 256\n\n classification = assignment[:, 4]\n regression = assignment[:, :]\n\n mask_pos = classification[:] > 0\n num_pos = len(classification[mask_pos])\n # 如果正样本数量大于128,就忽略多余的正样本\n if num_pos > num_regions / 2:\n val_locs = random.sample(range(num_pos), int(num_pos - num_regions / 2))\n classification[mask_pos][val_locs] = -1\n regression[mask_pos][val_locs, -1] = -1\n\n mask_neg = classification[:] == 0\n num_neg = len(classification[mask_neg])\n # 如果负样本过多,也进行忽略,这么做是为了平衡正负样本的数量\n if len(classification[mask_neg]) + num_pos > num_regions:\n val_locs = random.sample(range(num_neg), int(num_neg - num_pos))\n classification[mask_neg][val_locs] = -1\n\n classification = np.reshape(classification, [-1, 1])\n regression = np.reshape(regression, [-1, 5])\n\n tmp_inp = np.array(img)\n tmp_targets = [np.expand_dims(np.array(classification, dtype=np.float32), 0),\n np.expand_dims(np.array(regression, dtype=np.float32), 0)]\n\n # 1.对图片进行预处理 2.返回训练使用的预测信息 3.返回真实框\n yield preprocess_input(np.expand_dims(tmp_inp, 0)), tmp_targets, np.expand_dims(y, 0)"
] | [
[
"numpy.expand_dims",
"tensorflow.gather_nd",
"numpy.logical_and",
"matplotlib.colors.hsv_to_rgb",
"numpy.reshape",
"numpy.random.shuffle",
"numpy.shape",
"numpy.random.rand",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
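The _smooth_l1 closure in the cell above encodes the standard piecewise loss in Keras backend ops. Below is a minimal numpy rewrite of the same formula, handy for checking the branch point at |x| = 1/sigma^2 by hand; the function name and the test values are illustrative only.

import numpy as np

def smooth_l1_np(diff, sigma=1.0):
    # f(x) = 0.5 * (sigma * x)^2   if |x| < 1 / sigma^2
    #        |x| - 0.5 / sigma^2   otherwise
    s2 = sigma ** 2
    a = np.abs(diff)
    return np.where(a < 1.0 / s2, 0.5 * s2 * diff ** 2, a - 0.5 / s2)

print(smooth_l1_np(np.array([-2.0, -0.5, 0.0, 0.5, 2.0])))
# [1.5, 0.125, 0.0, 0.125, 1.5] -- quadratic near zero, linear in the tails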
caelanhadley/NNFSIP | [
"da048af5ded549db7464b206b255104900b40ab8"
] | [
"models/intro/vertical.py"
] | [
"import matplotlib.pyplot as plt\nimport nnfs\nfrom nnfs.datasets import vertical_data\n\nnnfs.init()\n\nX, y = vertical_data(samples=100, classes=3)\n\nplt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap='brg')\nplt.show()\n\nimport numpy as np\nimport nnfs\nimport matplotlib.pyplot as plt\n\nnnfs.init()\n\nclass Layer_Dense:\n def __init__(self, n_inputs, n_neurons):\n self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)\n self.biases = np.zeros((1,n_neurons))\n def forward(self, inputs):\n self.output = np.dot(inputs, self.weights) + self.biases\n\nclass Activation_ReLU:\n # Forward Pass\n def forward(self, inputs):\n self.output = np.maximum(0,inputs)\n\nclass Activation_Softmax:\n def forward(self, inputs):\n exp_values= np.exp(inputs - np.max(inputs, axis=1, keepdims=True))\n normalized = exp_values / np.sum(exp_values, axis=1, keepdims=True)\n self.output = normalized\n\nclass Loss:\n # Calculates the data and regularization losses\n # given model output and ground truth values\n def calculate(self, output, y):\n # Calculate sample losses\n sample_losses = self.forward(output, y)\n # Calculate mean loss\n data_loss = np.mean(sample_losses)\n # Return loss\n return data_loss\n\n\nclass Loss_CatagoricalCrossEntropy(Loss):\n def forward(self, y_pred, y_true):\n # Number of Samples\n samples = len(y_pred)\n # Clip Data to prevent div by 0\n # Clip Both sides to not drag the mean torwards any value\n y_pred_clipped = np.clip(y_pred, 1e-7, 1-1e-7)\n\n # Probabilities for target values -\n # Only if categorical labels\n if len(y_true.shape) == 1:\n correct_confidences = y_pred_clipped[range(samples), y_true]\n # Mask Values - only for one-hot encoded labels\n elif len(y_true.shape) == 2:\n correct_confidences = np.sum(y_pred_clipped * y_true, axis=1)\n \n negative_log_likelyhoods = -np.log(correct_confidences)\n return negative_log_likelyhoods\n\n# Model\ndense1 = Layer_Dense(2,3)\nactivation1 = Activation_ReLU()\ndense2 = Layer_Dense(3, 3)\nactivation2 = Activation_Softmax()\nloss_function = Loss_CatagoricalCrossEntropy()\n\n# Helper variables\nlowest_loss = 9999999 # some initial value\nbest_dense1_weights = dense1.weights.copy()\nbest_dense1_biases = dense1.biases.copy()\nbest_dense2_weights = dense2.weights.copy()\nbest_dense2_biases = dense2.biases.copy()\n\n\nfor iteration in range(10000):\n# Generate a new set of weights for iteration\n dense1.weights += 0.05 * np.random.randn(2, 3)\n dense1.biases += 0.05 * np.random.randn(1, 3)\n dense2.weights += 0.05 * np.random.randn(3, 3)\n dense2.biases += 0.05 * np.random.randn(1, 3)\n # Perform a forward pass of the training data through this layer\n dense1.forward(X)\n activation1.forward(dense1.output)\n dense2.forward(activation1.output)\n activation2.forward(dense2.output)\n # Perform a forward pass through activation function\n # it takes the output of second dense layer here and returns loss\n loss = loss_function.calculate(activation2.output, y)\n # Calculate accuracy from output of activation2 and targets\n # calculate values along first axis\n predictions = np.argmax(activation2.output, axis=1)\n accuracy = np.mean(predictions==y)\n # If loss is smaller - print and save weights and biases aside\n if loss < lowest_loss:\n print('New set of weights found, iteration:', iteration,\n 'loss:', loss, 'acc:', accuracy)\n best_dense1_weights = dense1.weights.copy()\n best_dense1_biases = dense1.biases.copy()\n best_dense2_weights = dense2.weights.copy()\n best_dense2_biases = dense2.biases.copy()\n lowest_loss = loss\n # Revert weights and 
biases\n else:\n dense1.weights = best_dense1_weights.copy()\n dense1.biases = best_dense1_biases.copy()\n dense2.weights = best_dense2_weights.copy()\n dense2.biases = best_dense2_biases.copy()"
] | [
[
"numpy.dot",
"numpy.log",
"numpy.maximum",
"matplotlib.pyplot.scatter",
"numpy.clip",
"numpy.max",
"numpy.argmax",
"numpy.mean",
"numpy.random.randn",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
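Activation_Softmax in the cell above subtracts the per-row max before exponentiating. The shift cancels algebraically, since exp(z - m) / sum(exp(z - m)) = exp(z) / sum(exp(z)), but it bounds the largest exponent at 0 so exp() cannot overflow. A small numpy sketch of why the shift matters; the logit values are illustrative only.

import numpy as np

def softmax(z):
    # Subtracting the row max leaves the probabilities unchanged but keeps
    # exp() from overflowing for large logits.
    e = np.exp(z - np.max(z, axis=1, keepdims=True))
    return e / np.sum(e, axis=1, keepdims=True)

z = np.array([[1000.0, 1001.0, 1002.0]])
print(softmax(z))                    # [[0.0900 0.2447 0.6652]]
print(np.exp(z) / np.exp(z).sum())   # without the shift: overflow warning, nan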
tudorcebere/jax | [
"b1d0f87648f73b06091ea3929a52b5d572391088",
"24c02148f27345b35555b0898bfc1d694a68c19a"
] | [
"jax/experimental/jax2tf/tests/primitives_test.py",
"jax/interpreters/sharded_jit.py"
] | [
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for JAX primitive coverage.\"\"\"\n\nimport unittest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nfrom functools import partial\n\nimport jax\nfrom jax import dtypes\nfrom jax import lax\nfrom jax import numpy as jnp\nfrom jax import test_util as jtu\nfrom jax.config import config\nfrom jax.experimental import jax2tf\nfrom jax.experimental.jax2tf.tests import tf_test_util\nfrom jax.interpreters import xla\n\nimport numpy as np\nimport tensorflow as tf # type: ignore[import]\n\nconfig.parse_flags_with_absl()\n\n# Import after parsing flags\nfrom jax.experimental.jax2tf.tests import primitive_harness\n\nREDUCE = (\n jnp.all,\n jnp.any,\n jnp.max,\n jnp.min,\n jnp.prod,\n jnp.sum,\n)\n\nINDEX = (\n jax.ops.index_add,\n jax.ops.index_max,\n jax.ops.index_min,\n jax.ops.index_mul,\n jax.ops.index_update,\n)\n\n\nclass JaxPrimitiveTest(tf_test_util.JaxToTfTestCase):\n\n def test_primitive_coverage(self):\n \"\"\"Fail if there are JAX primitives that are not implemented.\"\"\"\n # Harvest primitives from XLA translation tables\n all_primitives = (set(xla.translations)\n | set(xla.backend_specific_translations['cpu'])\n | set(xla.backend_specific_translations['gpu'])\n | set(xla.backend_specific_translations['tpu'])\n | set(xla.initial_style_translations)\n | set(xla.parallel_translations))\n\n tf_impl = set(jax.experimental.jax2tf.jax2tf.tf_impl)\n tf_not_yet_impl = set(jax.experimental.jax2tf.jax2tf.tf_not_yet_impl)\n\n all_primitives = tuple(sorted(all_primitives, key=str))\n for p in all_primitives:\n # TODO: remove tie_in once omnistaging is on by default\n if p.name == \"axis_index\" or p.name == \"tie_in\":\n continue\n if p in tf_not_yet_impl:\n self.assertNotIn(p, tf_impl) # Should not be in both tf_impl and tf_not_yet_impl\n else:\n self.assertIn(p, tf_impl)\n\n @parameterized.named_parameters(\n dict(testcase_name=f\"_{f_jax.__name__}\",\n f_jax=f_jax)\n for f_jax in [jnp.add, jnp.subtract, jnp.multiply, jnp.divide,\n jnp.less, jnp.less_equal, jnp.equal, jnp.greater,\n jnp.greater_equal, jnp.not_equal, jnp.maximum,\n jnp.minimum])\n def test_type_promotion(self, f_jax=jnp.add):\n # We only test a few types here, as tensorflow does not support many\n # types like uint* or bool in binary ops.\n types = [dtypes.bfloat16, np.int32, np.int64, np.float32]\n for x_dtype in types:\n for y_dtype in types:\n x = np.array([1, 2], dtype=x_dtype)\n y = np.array([3, 4], dtype=y_dtype)\n self.ConvertAndCompare(f_jax, x, y)\n\n def test_concat(self):\n values = [np.array([1, 2], dtype=np.float32),\n np.array([1, 2], dtype=np.int32),\n np.array([1, 2], dtype=np.int8)]\n f_jax = jax.jit(lambda x: jnp.concatenate(x, axis=0))\n self.ConvertAndCompare(f_jax, values)\n\n @primitive_harness.parameterized(primitive_harness.lax_pad)\n def test_pad(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n 
@primitive_harness.parameterized(primitive_harness.lax_top_k)\n def test_top_k(self, harness: primitive_harness.Harness):\n if (harness.params[\"k\"] > harness.params[\"shape\"][-1] or\n harness.params[\"k\"] < 0):\n with self.assertRaisesRegex(ValueError, \"k argument to top_k must be\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n elif harness.params[\"dtype\"] in jtu.dtypes.complex:\n # TODO(necula): fix top_k complex bug on TPU\n if jtu.device_under_test() == \"tpu\":\n raise unittest.SkipTest(\"top_k complex on TPU raises different error\")\n with self.assertRaisesRegex(RuntimeError, \"Unimplemented: complex comparison\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n # TODO: TF and JAX sort [inf, nan] differently.\n elif harness.name.startswith(\"nan_\"):\n raise unittest.SkipTest(\"inconsistent [nan, inf] sorting\")\n else:\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_sort)\n def test_sort(self, harness: primitive_harness.Harness):\n if (jtu.device_under_test() == \"gpu\" and\n len(harness.arg_descriptors) == 4 and\n not harness.params[\"is_stable\"]):\n # TODO: fix the TF GPU test\n raise unittest.SkipTest(\"GPU tests are running TF on CPU\")\n if jtu.device_under_test() == \"tpu\" and harness.params[\"dtype\"] in jtu.dtypes.complex:\n raise unittest.SkipTest(\"JAX sort is not implemented on TPU for complex\")\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_fft)\n @jtu.skip_on_flag(\"jax_skip_slow_tests\", True)\n def test_fft(self, harness: primitive_harness.Harness):\n if len(harness.params[\"fft_lengths\"]) > 3:\n with self.assertRaisesRegex(RuntimeError, \"FFT only supports ranks 1-3\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n elif (jtu.device_under_test() == \"tpu\" and\n len(harness.params[\"fft_lengths\"]) > 1):\n # TODO(b/140351181): FFT is mostly unimplemented on TPU, even for JAX\n with self.assertRaisesRegex(RuntimeError,\n \"only 1D FFT is currently supported.\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n else:\n tol = None\n if jtu.device_under_test() == \"gpu\":\n if harness.params[\"dtype\"] in jtu.dtypes.boolean:\n tol = 0.01\n else:\n tol = 1e-3\n self.ConvertAndCompare(harness.dyn_fun,\n *harness.dyn_args_maker(self.rng()),\n atol=tol, rtol=tol)\n\n @primitive_harness.parameterized(primitive_harness.lax_linalg_qr)\n def test_qr(self, harness: primitive_harness.Harness):\n # See jax.lib.lapack.geqrf for the list of compatible types\n\n dtype = harness.params[\"dtype\"]\n dut = jtu.device_under_test()\n # These cases are not implemented in JAX\n if dtype in (jtu.dtypes.all_integer + [jnp.bfloat16]):\n unimplemented_jax = True\n elif dtype is np.complex64 and dut == \"tpu\":\n unimplemented_jax = True\n elif dtype is np.float16 and dut in (\"cpu\", \"gpu\"):\n unimplemented_jax = True\n else:\n unimplemented_jax = False\n\n if unimplemented_jax:\n raise unittest.SkipTest(f\"QR not implemented in JAX for {dtype} on {dut}\")\n\n # TODO: see https://github.com/google/jax/pull/3775#issuecomment-659407824.\n # - for now, the performance of the HLO QR implementation called when\n # compiling with TF is expected to have worse performance than the\n # custom calls made in JAX.\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),\n atol=1e-5, rtol=1e-5)\n\n 
@primitive_harness.parameterized(primitive_harness.lax_linalg_svd)\n @jtu.skip_on_flag(\"jax_skip_slow_tests\", True)\n def test_svd(self, harness: primitive_harness.Harness):\n if harness.params[\"dtype\"] in [np.float16, dtypes.bfloat16]:\n if jtu.device_under_test() != \"tpu\":\n # Does not work in JAX\n with self.assertRaisesRegex(NotImplementedError, \"Unsupported dtype\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n return\n\n if harness.params[\"dtype\"] in [np.complex64, np.complex128]:\n if jtu.device_under_test() == \"tpu\":\n # TODO: on JAX on TPU there is no SVD implementation for complex\n with self.assertRaisesRegex(RuntimeError,\n \"Binary op compare with different element types\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n return\n\n def _custom_assert(r_jax, r_tf, atol=1e-6, rtol=1e-6):\n def _reconstruct_operand(result, is_tf: bool):\n # Reconstructing operand as documented in numpy.linalg.svd (see\n # https://numpy.org/doc/stable/reference/generated/numpy.linalg.svd.html)\n s, u, v = result\n if is_tf:\n s = s.numpy()\n u = u.numpy()\n v = v.numpy()\n U = u[..., :s.shape[-1]]\n V = v[..., :s.shape[-1], :]\n S = s[..., None, :]\n return jnp.matmul(U * S, V), s.shape, u.shape, v.shape\n\n if harness.params[\"compute_uv\"]:\n r_jax_reconstructed = _reconstruct_operand(r_jax, False)\n r_tf_reconstructed = _reconstruct_operand(r_tf, True)\n self.assertAllClose(r_jax_reconstructed, r_tf_reconstructed,\n atol=atol, rtol=rtol)\n else:\n self.assertAllClose(r_jax, r_tf, atol=atol, rtol=rtol)\n\n tol = 1e-4\n custom_assert = partial(_custom_assert, atol=tol, rtol=tol)\n\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),\n atol=tol, rtol=tol,\n custom_assert=custom_assert,\n always_custom_assert=True)\n\n @primitive_harness.parameterized(primitive_harness.lax_select_and_gather_add)\n @jtu.ignore_warning(category=UserWarning,\n message=\"Using reduced precision for gradient.*\")\n def test_select_and_gather_add(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_reduce_window)\n def test_reduce_window(self, harness: primitive_harness.Harness):\n dtype = harness.params['dtype']\n\n if (jtu.device_under_test() == 'tpu' and dtype is np.complex64):\n raise unittest.SkipTest(\n 'TODO: JAX reduce_window on TPU does not handle complex64'\n )\n\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_unary_elementwise)\n def test_unary_elementwise(self, harness: primitive_harness.Harness):\n dtype = harness.params[\"dtype\"]\n lax_name = harness.params[\"lax_name\"]\n arg, = harness.dyn_args_maker(self.rng())\n custom_assert = None\n if lax_name == \"digamma\":\n # TODO(necula): fix bug with digamma/(f32|f16) on TPU\n if dtype in [np.float16, np.float32] and jtu.device_under_test() == \"tpu\":\n raise unittest.SkipTest(\"TODO: fix bug: nan vs not-nan\")\n\n # In the bfloat16 case, TF and lax both return NaN in undefined cases.\n if not dtype is dtypes.bfloat16:\n # digamma is not defined at 0 and -1\n def custom_assert(result_jax, result_tf):\n # lax.digamma returns NaN and tf.math.digamma returns inf\n special_cases = (arg == 0.) 
| (arg == -1.)\n nr_special_cases = np.count_nonzero(special_cases)\n self.assertAllClose(np.full((nr_special_cases,), dtype(np.nan)),\n result_jax[special_cases])\n self.assertAllClose(np.full((nr_special_cases,), dtype(np.inf)),\n result_tf[special_cases])\n # non-special cases are equal\n self.assertAllClose(result_jax[~ special_cases],\n result_tf[~ special_cases])\n if lax_name == \"erf_inv\":\n # TODO(necula): fix erf_inv bug on TPU\n if jtu.device_under_test() == \"tpu\":\n raise unittest.SkipTest(\"erf_inv bug on TPU: nan vs non-nan\")\n # TODO: investigate: in the (b)float16 cases, TF and lax both return the\n # same result in undefined cases.\n if not dtype in [np.float16, dtypes.bfloat16]:\n # erf_inv is not defined for arg <= -1 or arg >= 1\n def custom_assert(result_jax, result_tf): # noqa: F811\n # for arg < -1 or arg > 1\n # lax.erf_inv returns NaN; tf.math.erf_inv return +/- inf\n special_cases = (arg < -1.) | (arg > 1.)\n nr_special_cases = np.count_nonzero(special_cases)\n self.assertAllClose(np.full((nr_special_cases,), dtype(np.nan),\n dtype=dtype),\n result_jax[special_cases])\n signs = np.where(arg[special_cases] < 0., -1., 1.)\n self.assertAllClose(np.full((nr_special_cases,),\n signs * dtype(np.inf), dtype=dtype),\n result_tf[special_cases])\n # non-special cases are equal\n self.assertAllClose(result_jax[~ special_cases],\n result_tf[~ special_cases])\n atol = None\n if jtu.device_under_test() == \"gpu\":\n # TODO(necula): revisit once we fix the GPU tests\n atol = 1e-3\n self.ConvertAndCompare(harness.dyn_fun, arg, custom_assert=custom_assert,\n atol=atol)\n\n @primitive_harness.parameterized(primitive_harness.lax_bitwise_not)\n def test_bitwise_not(self, harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_population_count)\n def test_population_count(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_add_mul)\n def test_add_mul(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_min_max)\n def test_min_max(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_binary_elementwise)\n def test_binary_elementwise(self, harness):\n tol = None\n lax_name, dtype = harness.params[\"lax_name\"], harness.params[\"dtype\"]\n if lax_name in (\"igamma\", \"igammac\"):\n # TODO(necula): fix bug with igamma/f16\n if dtype in [np.float16, dtypes.bfloat16]:\n raise unittest.SkipTest(\"TODO: igamma(c) unsupported with (b)float16 in JAX\")\n # TODO(necula): fix bug with igamma/f32 on TPU\n if dtype is np.float32 and jtu.device_under_test() == \"tpu\":\n raise unittest.SkipTest(\"TODO: fix bug: nan vs not-nan\")\n arg1, arg2 = harness.dyn_args_maker(self.rng())\n custom_assert = None\n if lax_name == \"igamma\":\n # igamma is not defined when the first argument is <=0\n def custom_assert(result_jax, result_tf):\n # lax.igamma returns NaN when arg1 == arg2 == 0; tf.math.igamma returns 0\n special_cases = (arg1 == 0.) 
& (arg2 == 0.)\n nr_special_cases = np.count_nonzero(special_cases)\n self.assertAllClose(np.full((nr_special_cases,), np.nan, dtype=dtype),\n result_jax[special_cases])\n self.assertAllClose(np.full((nr_special_cases,), 0., dtype=dtype),\n result_tf[special_cases])\n # non-special cases are equal\n self.assertAllClose(result_jax[~ special_cases],\n result_tf[~ special_cases])\n if lax_name == \"igammac\":\n # On GPU, tolerance also needs to be adjusted in compiled mode\n if dtype == np.float64 and jtu.device_under_test() == 'gpu':\n tol = 1e-14\n # igammac is not defined when the first argument is <=0\n def custom_assert(result_jax, result_tf): # noqa: F811\n # lax.igammac returns 1. when arg1 <= 0; tf.math.igammac returns NaN\n special_cases = (arg1 <= 0.) | (arg2 <= 0)\n nr_special_cases = np.count_nonzero(special_cases)\n self.assertAllClose(np.full((nr_special_cases,), 1., dtype=dtype),\n result_jax[special_cases])\n self.assertAllClose(np.full((nr_special_cases,), np.nan, dtype=dtype),\n result_tf[special_cases])\n # On CPU, tolerance only needs to be adjusted in eager & graph modes\n tol = None\n if dtype == np.float64:\n tol = 1e-14\n\n # non-special cases are equal\n self.assertAllClose(result_jax[~ special_cases],\n result_tf[~ special_cases], atol=tol, rtol=tol)\n self.ConvertAndCompare(harness.dyn_fun, arg1, arg2,\n custom_assert=custom_assert, atol=tol, rtol=tol)\n\n @primitive_harness.parameterized(primitive_harness.lax_binary_elementwise_logical)\n def test_binary_elementwise_logical(self, harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n\n @primitive_harness.parameterized(primitive_harness.lax_betainc)\n def test_betainc(self, harness: primitive_harness.Harness):\n dtype = harness.params[\"dtype\"]\n # TODO: https://www.tensorflow.org/api_docs/python/tf/math/betainc only\n # supports float32/64 tests.\n # TODO(bchetioui): investigate why the test actually fails in JAX.\n if dtype in [np.float16, dtypes.bfloat16]:\n raise unittest.SkipTest(\"(b)float16 not implemented in TF\")\n\n tol = None\n if dtype is np.float64:\n tol = 1e-14\n\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),\n atol=tol, rtol=tol)\n\n # TODO(necula): combine tests that are identical except for the harness\n # wait until we get more experience with using harnesses.\n @primitive_harness.parameterized(primitive_harness.lax_shift_left)\n def test_shift_left(self, harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_shift_right_logical)\n def test_shift_right_logical(self, harness):\n if jtu.device_under_test() == \"tpu\" and harness.params[\"dtype\"] in [np.int8, np.int16]:\n raise unittest.SkipTest(\"TODO: silent error for negative inputs\")\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_shift_right_arithmetic)\n def test_shift_right_arithmetic(self, harness):\n if jtu.device_under_test() == \"tpu\" and harness.params[\"dtype\"] in [np.uint8, np.uint16]:\n raise unittest.SkipTest(\"TODO: silent error for negative inputs\")\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_slice)\n def test_slice(self, harness):\n # JAX.slice rejects negative indices; check, and skip jax2tf\n if any(si < 0 or si >= sh or li < 0 or li > sh\n for sh, si, li in 
zip(harness.params[\"shape\"],\n harness.params[\"start_indices\"],\n harness.params[\"limit_indices\"])):\n with self.assertRaisesRegex(TypeError, \"\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n else:\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_dynamic_slice)\n def test_dynamic_slice(self, harness):\n # JAX.dynamic_slice rejects slice sizes too big; check this, and skip jax2tf\n args = harness.dyn_args_maker(self.rng())\n if any(li - si < 0 or li - si >= sh\n for sh, si, li in zip(harness.params[\"shape\"],\n harness.params[\"start_indices\"],\n harness.params[\"limit_indices\"])):\n with self.assertRaisesRegex(TypeError, \"\"):\n harness.dyn_fun(*args)\n return\n\n self.ConvertAndCompare(harness.dyn_fun, *args)\n\n @primitive_harness.parameterized(primitive_harness.lax_dynamic_update_slice)\n def test_dynamic_update_slice(self, harness):\n # JAX.dynamic_update_slice rejects update slices too big; check, and skip jax2tf\n if any(ush > sh\n for sh, ush in zip(harness.params[\"shape\"],\n harness.params[\"update_shape\"])):\n with self.assertRaisesRegex(TypeError, \"\"):\n harness.dyn_fun(*harness.dyn_args_maker(self.rng()))\n else:\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_squeeze)\n def test_squeeze(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_conv_general_dilated)\n def test_conv_general_dilated(self, harness: primitive_harness.Harness):\n if jtu.device_under_test() == \"gpu\":\n raise unittest.SkipTest(\"TODO: test failures on GPU\")\n tol = None\n # TODO(bchetioui): significant discrepancies in some float16 cases.\n if harness.params[\"dtype\"] is np.float16:\n tol = 1.\n # TODO(bchetioui): slight occasional discrepancy in float32 cases.\n elif harness.params[\"dtype\"] is np.float32:\n tol = 1e-5\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),\n atol=tol, rtol=tol)\n\n @primitive_harness.parameterized(primitive_harness.lax_gather)\n def test_gather(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n @primitive_harness.parameterized(primitive_harness.lax_scatter)\n def test_scatter(self, harness: primitive_harness.Harness):\n f_name = harness.params['f_lax'].__name__\n dtype = harness.params['dtype']\n\n if jtu.device_under_test() == 'tpu':\n if dtype is np.complex64 and f_name in ['scatter_min', 'scatter_max']:\n raise unittest.SkipTest(f\"TODO: complex {f_name} on TPU fails in JAX\")\n\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n def test_boolean_gather(self):\n values = np.array([[True, True], [False, True], [False, False]],\n dtype=np.bool_)\n indices = np.array([0, 1], dtype=np.int32)\n for axis in [0, 1]:\n f_jax = jax.jit(lambda v, i: jnp.take(v, i, axis=axis)) # pylint: disable=cell-var-from-loop\n self.ConvertAndCompare(f_jax, values, indices)\n\n def test_gather_rank_change(self):\n params = jnp.array([[1.0, 1.5, 2.0], [2.0, 2.5, 3.0], [3.0, 3.5, 4.0]])\n indices = jnp.array([[1, 1, 2], [0, 1, 0]])\n f_jax = jax.jit(lambda i: params[i])\n self.ConvertAndCompare(f_jax, indices)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_{f_jax.__name__}\",\n f_jax=f_jax)\n for 
f_jax in REDUCE))\n def test_reduce_ops_with_numerical_input(self, f_jax):\n values = np.array([1, 2, 3], dtype=np.float32)\n self.ConvertAndCompare(f_jax, values)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_{f_jax.__name__}\",\n f_jax=f_jax)\n for f_jax in (jnp.cumsum, jnp.cumprod)))\n def test_cumulated_ops(self, f_jax):\n values = np.array([1, 2, 3], dtype=np.float32)\n self.ConvertAndCompare(f_jax, values)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_{op.__name__}\",\n op=op)\n for op in INDEX))\n def test_scatter_static(self, op):\n values = np.ones((5, 6), dtype=np.float32)\n update = np.float32(6.)\n f_jax = jax.jit(lambda v, u: op(v, jax.ops.index[::2, 3:], u))\n self.ConvertAndCompare(f_jax, values, update)\n\n @parameterized.named_parameters(jtu.cases_from_list(\n dict(testcase_name=f\"_{f_jax.__name__}\",\n f_jax=f_jax)\n for f_jax in REDUCE))\n def test_reduce_ops_with_boolean_input(self, f_jax):\n values = np.array([True, False, True], dtype=np.bool_)\n self.ConvertAndCompare(f_jax, values)\n\n @primitive_harness.parameterized(primitive_harness.random_gamma)\n def test_random_gamma(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()),\n rtol=1e-5)\n\n @primitive_harness.parameterized(primitive_harness.random_split)\n def test_random_split(self, harness: primitive_harness.Harness):\n self.ConvertAndCompare(harness.dyn_fun, *harness.dyn_args_maker(self.rng()))\n\n def test_zeros_like(self):\n v = np.float32(2.)\n f_jax = jax.ad_util.zeros_like_jaxval\n self.ConvertAndCompare(f_jax, v)\n\n def test_stop_gradient(self):\n f = jax2tf.convert(lax.stop_gradient)\n self.assertEqual(f(tf.ones([])), 1.)\n\n # test_bfloat16_constant checks that https://github.com/google/jax/issues/3942 is\n # fixed\n def test_bfloat16_constant(self):\n def jax_fn_scalar(x):\n x = x.astype(jnp.bfloat16)\n x *= 2.\n return x\n\n def jax_fn_array(x):\n x = x.astype(jnp.bfloat16)\n x *= np.array([1.5, 2.5, 3.5], jnp.bfloat16)\n return x\n\n tf_fn_scalar = jax2tf.convert(jax_fn_scalar)\n self.assertAllClose(tf_fn_scalar(1.375).numpy(), jnp.bfloat16(2.750))\n\n tf_fn_array = jax2tf.convert(jax_fn_array)\n self.assertAllClose(tf_fn_array(np.array([3, 4, 5])),\n np.array([4.5, 10, 17.5], jnp.bfloat16))\n\nif __name__ == \"__main__\":\n absltest.main(testLoader=jtu.JaxTestLoader())\n",
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\nfrom typing import Callable, Optional, Tuple\n\nimport numpy as np\n\nfrom .. import core\nfrom . import ad\nfrom . import partial_eval as pe\n# TODO(skye): separate pmap into it's own module?\nfrom . import pxla\nfrom . import xla\nfrom .. import linear_util as lu\nfrom ..lib import xla_bridge as xb\nfrom ..lib import xla_client as xc\nfrom ..api_util import flatten_axes, flatten_fun, wraps\nfrom ..tree_util import tree_flatten, tree_unflatten\nfrom ..util import extend_name_stack, wrap_name, safe_zip\nfrom ..config import config\n\nxops = xc._xla.ops\n\n\ndef _map(f, *xs):\n return tuple(map(f, *xs))\n\n\nclass ResultToPopulate: pass\nresult_to_populate = ResultToPopulate()\n\n\ndef _avals_to_results_handler(nrep, npart, partitions, out_avals):\n nouts = len(out_avals)\n handlers = [_aval_to_result_handler(npart, parts, out_aval)\n for parts, out_aval in safe_zip(partitions, out_avals)]\n\n def handler(out_bufs):\n assert nrep * npart == len(out_bufs)\n buffers = [[result_to_populate] * nrep * npart for _ in range(nouts)]\n for r, tuple_buf in enumerate(out_bufs):\n for i, buf in enumerate(tuple_buf):\n buffers[i][r] = buf\n assert not any(buf is result_to_populate for bufs in buffers\n for buf in bufs)\n return [h(bufs) for h, bufs in zip(handlers, buffers)]\n\n return handler\n\ndef _aval_to_result_handler(npart, parts, aval):\n if aval is not core.abstract_unit:\n spec = pxla.partitioned_sharding_spec(npart, parts, aval)\n indices = pxla.spec_to_indices(aval.shape, spec)\n else:\n spec = indices = None\n return pxla.aval_to_result_handler(spec, indices, aval)\n\n\[email protected]\ndef _sharded_callable(\n fun: lu.WrappedFun, num_partitions: Optional[int],\n in_parts: Tuple[pxla.PartitionsOrReplicated, ...],\n out_parts_thunk: Callable[[], Tuple[pxla.PartitionsOrReplicated, ...]],\n name: str, *abstract_args):\n nrep = 1\n\n if config.omnistaging_enabled:\n jaxpr, out_avals, consts = pe.trace_to_jaxpr_final(fun, abstract_args)\n else:\n in_pvals = [pe.PartialVal.unknown(aval) for aval in abstract_args]\n jaxpr, out_pvals, consts = pe.trace_to_jaxpr(fun, in_pvals, # type: ignore\n instantiate=False, bottom=True) # type: ignore\n\n # TODO(skye): add tests for equationless jaxpr cases\n if not jaxpr.eqns and all(outvar.aval is core.abstract_unit\n for outvar in jaxpr.outvars):\n return lambda *_: [\n const if pv is None else core.unit for pv, const in out_pvals\n ]\n\n if xb.get_backend().platform != \"tpu\":\n # TODO(skye): fall back to regular jit?\n raise ValueError(\"sharded_jit only works on TPU!\")\n\n num_partitions = pxla.reconcile_num_partitions(jaxpr, num_partitions)\n assert num_partitions is not None\n if num_partitions > xb.local_device_count():\n raise ValueError(\n f\"sharded_jit computation requires {num_partitions} devices, \"\n f\"but only {xb.local_device_count()} devices are available.\")\n\n out_parts = out_parts_thunk()\n\n c = 
xb.make_computation_builder(\"spjit_{}\".format(fun.__name__))\n xla_consts = _map(partial(xb.constant, c), consts)\n xla_args = _xla_sharded_args(c, abstract_args, in_parts)\n axis_env = xla.AxisEnv(nrep, (), (), None)\n out_nodes = xla.jaxpr_subcomp(\n c, jaxpr, None, axis_env, xla_consts,\n extend_name_stack(wrap_name(name, \"sharded_jit\")), *xla_args)\n out_tuple = xb.with_sharding(c, out_parts, xops.Tuple, c, out_nodes)\n built = c.Build(out_tuple)\n\n devices = xb.local_devices()[:num_partitions]\n device_assignment = np.array([[d.id for d in devices]])\n device_assignment = np.reshape(device_assignment, (-1, num_partitions))\n # device_assignment = None # TODO(skye): replace with default device assignment?\n\n compiled = xla.backend_compile(\n xb.get_backend(), built,\n xb.get_compile_options(nrep, num_partitions, device_assignment))\n\n input_specs = [\n pxla.partitioned_sharding_spec(num_partitions, parts, aval)\n for parts, aval in zip(in_parts, abstract_args)]\n input_indices = [pxla.spec_to_indices(aval.shape, spec)\n if spec is not None else None\n for aval, spec in zip(abstract_args, input_specs)]\n\n handle_args = partial(pxla.shard_args, compiled.local_devices(),\n input_indices)\n if config.omnistaging_enabled:\n handle_outs = _avals_to_results_handler(nrep, num_partitions, out_parts, # type: ignore\n out_avals)\n else:\n handle_outs = _pvals_to_results_handler(nrep, num_partitions, out_parts, # type: ignore\n out_pvals)\n return partial(_execute_spatially_partitioned, compiled, handle_args,\n handle_outs)\n\n\ndef _sharded_jit_translation_rule(c, axis_env, in_nodes, name_stack,\n in_parts, out_parts_thunk, num_partitions,\n backend, name, call_jaxpr):\n subc = xc.XlaBuilder(f\"sharded_jit_{name}\")\n\n # We assume any extra leading in_nodes are constants and replicate them.\n num_extra_nodes = len(in_nodes) - len(in_parts)\n assert num_extra_nodes >= 0\n in_parts = (None,) * num_extra_nodes + in_parts\n\n args = []\n for i, (n, sharding) in enumerate(safe_zip(in_nodes, in_parts)):\n # We use xb.set_sharding instead of xb.with_sharding because inlined calls\n # shouldn't have shardings set directly on the inputs or outputs.\n arg = xb.parameter(subc, i, c.GetShape(n))\n args.append(xb.set_sharding(subc, arg, sharding))\n\n out_nodes = xla.jaxpr_subcomp(\n subc, call_jaxpr, backend, axis_env, (),\n extend_name_stack(name_stack, wrap_name(name, \"sharded_jit\")), *args)\n out_parts = out_parts_thunk()\n assert len(out_parts) == len(out_nodes)\n out_nodes = [xb.set_sharding(subc, out, sharding)\n for out, sharding in safe_zip(out_nodes, out_parts)]\n\n subc = subc.build(xops.Tuple(subc, out_nodes))\n return xops.Call(c, subc, list(in_nodes))\n\n\ndef _execute_spatially_partitioned(compiled, in_handler, out_handler, *args):\n input_bufs = in_handler(args)\n out_bufs = compiled.execute_on_local_devices(list(input_bufs))\n return out_handler(out_bufs)\n\n\ndef _xla_sharded_args(c, avals, in_parts):\n xla_args = []\n for i, (sharding, aval) in enumerate(safe_zip(in_parts, avals)):\n param = xb.with_sharding(c, sharding, xb.parameter, c, i,\n xla.aval_to_xla_shape(aval))\n xla_args.append(param)\n return xla_args\n\n\ndef _sharded_call_impl(fun, *args, num_partitions, in_parts, out_parts_thunk,\n name):\n compiled_fun = _sharded_callable(fun, num_partitions, in_parts,\n out_parts_thunk, name,\n *map(xla.abstractify, args))\n return compiled_fun(*args)\n\n\nsharded_call_p = core.CallPrimitive(\"sharded_call\")\nsharded_call = 
sharded_call_p.bind\nsharded_call_p.def_impl(_sharded_call_impl)\nxla.call_translations[sharded_call_p] = _sharded_jit_translation_rule\n\n\nclass PartitionSpec(tuple):\n \"\"\"Tuple of integer specifying how a value should be partitioned.\n\n Each integer corresponds to how many ways a dimension is partitioned. We\n create a separate class for this so JAX's pytree utilities can distinguish it\n from a tuple that should be treated as a pytree.\n \"\"\"\n def __new__(cls, *partitions):\n return tuple.__new__(PartitionSpec, partitions)\n\n def __repr__(self):\n return \"PartitionSpec%s\" % tuple.__repr__(self)\n\n\ndef sharded_jit(fun: Callable, in_parts, out_parts, num_partitions: int = None):\n \"\"\"Like ``jit``, but partitions ``fun`` across multiple devices.\n\n WARNING: this feature is still under active development! It may not work well,\n and may change without warning!\n\n `sharded_jit` sets up ``fun`` for just-in-time compilation with XLA, but\n unlike ``jit``, the compiled function will run across multiple devices\n (e.g. multiple GPUs or multiple TPU cores). This is achieved by spatially\n partitioning the data that flows through the computation, so each operation is\n run across all devices and each device runs only a shard of the full\n data. (Some data can optionally be replicated, which is sometimes more\n efficient for small arrays when combined with larger spatially-partitioned\n arrays.) Communication between devices is automatically inserted as necessary.\n\n ``sharded_jit`` can be useful if the jitted version of ``fun`` would not fit\n in a single device's memory, or to speed up ``fun`` by running each operation\n in parallel across multiple devices.\n\n Note: ``sharded_jit`` is currently available on TPU only!\n\n Args:\n fun: Function to be jitted.\n in_parts: The input partitions, i.e. how each argument to ``fun`` should be\n partitioned or replicated. This should be a PartitionSpec indicating into\n how many partitions each dimension should be sharded, None indicating\n replication, or (nested) standard Python containers thereof. For example,\n ``in_parts=PartitionSpec(2,1)`` means all arguments should be partitioned\n over two devices across the first dimension;\n ``in_parts=(PartitionSpec(2,2), PartitionSpec(4,1), None)`` means the\n first argument should be partitioned over four devices by splitting the\n first two dimensions in half, the second argument should be partitioned\n over the four devices across the first dimension, and the third argument\n is replicated across the four devices. All PartitionSpecs in a given\n ``sharded_jit`` call must correspond to the same total number of\n partitions, i.e. the product of all PartitionSpecs must be equal.\n out_parts: The output partitions, i.e. how each output of ``fun`` should be\n partitioned or replicated. This follows the same convention as\n ``in_parts``.\n num_partitions: Optional. If set, explicitly specifies the number of devices\n ``fun`` should partitioned across (rather than inferring it from\n ``in_parts``, ``out_parts``, and/or any ``with_sharding_constraint``\n calls). 
Setting this should usually be unnecessary, but can be used to\n maintain device persistence across multiple sharded_jit calls when some of\n those calls only involve replicated values.\n\n Returns:\n A version of ``fun`` that will be distributed across multiple devices.\n \"\"\"\n if num_partitions is not None:\n num_parts = num_partitions\n else:\n num_parts = pxla.get_num_partitions(in_parts, out_parts)\n\n @wraps(fun)\n def wrapped(*args, **kwargs):\n if kwargs:\n raise NotImplementedError(\"sharded_jit over kwargs not yet supported\")\n f = lu.wrap_init(fun)\n args_flat, in_tree = tree_flatten((args, kwargs))\n in_parts_flat = tuple(flatten_axes(\"sharded_jit in_parts\",\n in_tree.children()[0], in_parts))\n flat_fun, out_tree = flatten_fun(f, in_tree)\n # TODO(skye): having a function-typed param in a primitive seems dicey, is\n # there a better way?\n out_parts_thunk = lambda: tuple(flatten_axes(\"sharded_jit out_parts\",\n out_tree(), out_parts))\n out = sharded_call(\n flat_fun,\n *args_flat,\n num_partitions=num_parts,\n in_parts=in_parts_flat,\n out_parts_thunk=out_parts_thunk,\n name=flat_fun.__name__)\n return tree_unflatten(out_tree(), out)\n\n return wrapped\n\n\ndef _sharding_constraint_impl(x, partitions):\n # TODO(skye): can we also prevent this from being called in other\n # non-sharded_jit contexts? (e.g. pmap, control flow)\n raise NotImplementedError(\n \"with_sharding_constraint() should only be called inside sharded_jit()\")\n\ndef _sharding_constraint_translation_rule(c, x_node, partitions):\n return xb.set_sharding(c, x_node, partitions)\n\nsharding_constraint_p = core.Primitive(\"sharding_constraint\")\nsharding_constraint_p.def_impl(_sharding_constraint_impl)\nsharding_constraint_p.def_abstract_eval(lambda x, partitions: x)\nad.deflinear(sharding_constraint_p,\n lambda ct, partitions: (with_sharding_constraint(ct, partitions),))\nxla.translations[sharding_constraint_p] = _sharding_constraint_translation_rule\n\ndef with_sharding_constraint(x, partitions: Optional[PartitionSpec]):\n \"\"\"Identity-like function that specifies how ``x`` should be sharded.\n\n WARNING: this feature is still under active development! It may not work well,\n and may change without warning!\n\n This should only be called inside a function transformed by ``sharded_jit``.\n It constrains how the function is sharded: regardless of any other specified\n partitions, the compiler will make sure that ``x`` is sharded according to\n ``partitions``. Note that a ``with_sharding_constraint`` call doesn't\n necessarily correspond to a reshard, since the compiler is free to achieve\n this sharding as long as the constraint is met, e.g. it might insert a reshard\n earlier in the computation. Another way to think of this is that the\n ``with_sharding_constraint`` call may flow \"up\" the function to preceding\n operations as well as \"down\" to subsequent ones.\n\n ``partitions`` must correspond to the same number of total partitions dictated\n by the outer ``sharded_jit`` and any other ``with_sharding_constraint`` calls.\n In the case where only replication has been specified, any ``partitions`` are\n valid.\n\n Example usage:\n @partial(sharded_jit, in_parts=None, out_parts=None, num_partitions=2)\n def f(x):\n y = x + 1\n y = with_sharding_constraint(y, PartitionSpec(2,1))\n return y * 2\n\n In this example, the inputs and outputs of ``f`` will be replicated, but the\n inner value of ``y`` will be partitioned in half. 
``f`` will run on two\n devices due to the with_sharding_constraint call.\n\n Args:\n x: Array value\n partitions: PartitionSpec indicating how ``x`` should be partitioned, or\n None for replication.\n\n Returns:\n A new version of ``x`` with the specified sharding applied.\n \"\"\"\n return sharding_constraint_p.bind(x, partitions=partitions)\n\n\[email protected]_omnistaging_disabler\ndef omnistaging_disabler() -> None:\n global _pvals_to_results_handler, _pval_to_result_handler\n\n def _pvals_to_results_handler(nrep, npart, partitions, out_pvals):\n nouts = len(out_pvals)\n handlers = [_pval_to_result_handler(npart, parts, out_pval)\n for parts, out_pval in safe_zip(partitions, out_pvals)] # type: ignore\n\n def handler(out_bufs):\n assert nrep * npart == len(out_bufs)\n buffers = [[result_to_populate] * nrep * npart for _ in range(nouts)]\n for r, tuple_buf in enumerate(out_bufs):\n for i, buf in enumerate(tuple_buf):\n buffers[i][r] = buf\n assert not any(buf is result_to_populate for bufs in buffers\n for buf in bufs)\n return [h(bufs) for h, bufs in zip(handlers, buffers)]\n\n return handler\n\n def _pval_to_result_handler(npart, parts, pval):\n pv, const = pval\n if pv is None:\n raise NotImplementedError # TODO(skye): handle constant outputs\n else:\n if pv is not core.abstract_unit:\n spec = pxla.partitioned_sharding_spec(npart, parts, pv)\n indices = pxla.spec_to_indices(pv.shape, spec)\n else:\n spec = indices = None\n return pxla.aval_to_result_handler(spec, indices, pv)\n"
] | [
[
"tensorflow.ones",
"numpy.ones",
"numpy.full",
"numpy.count_nonzero",
"numpy.float32",
"numpy.array",
"numpy.where"
],
[
"numpy.reshape",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
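The ``apis`` column above only names the calls each file exercises. A self-contained sketch touching the numpy entries (``tensorflow.ones`` is the analogous ``tf.ones``, omitted here to keep the example free of a TensorFlow dependency):

    import numpy as np

    a = np.array([1.0, 0.0, 2.0], dtype=np.float32)       # numpy.array, numpy.float32
    mask = np.where(a > 0, np.ones(3), np.full(3, -1.0))  # numpy.where / ones / full
    print(np.count_nonzero(a))                            # numpy.count_nonzero -> 2
    print(np.reshape(mask, (3, 1)))                       # numpy.reshape -> column vector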
sorhus/tensorflow | [
"99de1826646c8d354259187fc9c2330b794c1ac4",
"99de1826646c8d354259187fc9c2330b794c1ac4",
"99de1826646c8d354259187fc9c2330b794c1ac4",
"acb1ef68f5aea3b6f7f1e14db588b74134719b5e",
"5b2a293b1d43c450ed9e08c1ae5cf6e50daa849d",
"acb1ef68f5aea3b6f7f1e14db588b74134719b5e",
"99de1826646c8d354259187fc9c2330b794c1ac4"
] | [
"tensorflow/python/eager/ops_test.py",
"tensorflow/python/training/saver_test.py",
"tensorflow/python/ops/summary_ops.py",
"tensorflow/contrib/py2tf/pyct/static_analysis/live_values.py",
"tensorflow/contrib/distributions/python/ops/relaxed_onehot_categorical.py",
"tensorflow/contrib/eager/python/examples/spinn/spinn_test.py",
"tensorflow/python/keras/_impl/keras/estimator.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for operations in eager execution.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import execute\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.layers import core\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import sparse_ops\n\n\nclass OpsTest(test_util.TensorFlowTestCase):\n\n def testExecuteBasic(self):\n three = constant_op.constant(3)\n five = constant_op.constant(5)\n product = three * five\n self.assertAllEqual(15, product)\n\n def testMatMulGPU(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n three = constant_op.constant([[3.]]).gpu()\n five = constant_op.constant([[5.]]).gpu()\n product = math_ops.matmul(three, five)\n self.assertEqual([[15.0]], product.numpy())\n\n def testExecuteStringAttr(self):\n three = constant_op.constant(3.0)\n checked_three = array_ops.check_numerics(three,\n message='just checking')\n self.assertEqual([[3]], checked_three.numpy())\n\n def testExecuteFloatAttr(self):\n three = constant_op.constant(3.0)\n almost_three = constant_op.constant(2.8)\n almost_equal = math_ops.approximate_equal(\n three, almost_three, tolerance=0.3)\n self.assertTrue(almost_equal)\n\n def testExecuteIntAttr(self):\n three = constant_op.constant(3)\n four = constant_op.constant(4)\n total = math_ops.add_n([three, four])\n self.assertAllEqual(7, total)\n\n def testExecuteBoolAttr(self):\n three = constant_op.constant([[3]])\n five = constant_op.constant([[5]])\n product = math_ops.matmul(three, five, transpose_a=True)\n self.assertAllEqual([[15]], product)\n\n def testExecuteOneListOutput(self):\n split_dim = constant_op.constant(1)\n value = constant_op.constant([[0, 1, 2], [3, 4, 5]])\n x1, x2, x3 = array_ops.split(value, 3, axis=split_dim)\n self.assertAllEqual([[0], [3]], x1)\n self.assertAllEqual([[1], [4]], x2)\n self.assertAllEqual([[2], [5]], x3)\n\n def testGraphMode(self):\n graph = ops.Graph()\n with graph.as_default(), context.graph_mode():\n array_ops.placeholder(dtypes.int32)\n self.assertEqual(1, len(graph.get_operations()))\n\n # See comments on handling of int32 tensors on GPU in\n # EagerTensor.__init__.\n def 
testInt32CPUDefault(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n with context.device('/gpu:0'):\n r = constant_op.constant(1) + constant_op.constant(2)\n self.assertAllEqual(r, 3)\n\n def testExecuteListOutputLen1(self):\n split_dim = constant_op.constant(1)\n value = constant_op.constant([[0, 1, 2], [3, 4, 5]])\n result = array_ops.split(value, 1, axis=split_dim)\n self.assertTrue(isinstance(result, list))\n self.assertEqual(1, len(result))\n self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])\n\n def testExecuteListOutputLen0(self):\n empty = constant_op.constant([], dtype=dtypes.int32)\n result = array_ops.unstack(empty, 0)\n self.assertTrue(isinstance(result, list))\n self.assertEqual(0, len(result))\n\n def testExecuteMultipleNonListOutput(self):\n x = constant_op.constant([1, 2, 3, 4, 5, 6])\n y = constant_op.constant([1, 3, 5])\n result = array_ops.listdiff(x, y)\n out, idx = result\n self.assertTrue(out is result.out)\n self.assertTrue(idx is result.idx)\n self.assertAllEqual([2, 4, 6], out)\n self.assertAllEqual([1, 3, 5], idx)\n\n def testExecuteMultipleListOutput(self):\n split_dim = constant_op.constant(1, dtype=dtypes.int64)\n indices = constant_op.constant([[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]],\n dtype=dtypes.int64)\n values = constant_op.constant([2, 3, 5, 7, 11])\n shape = constant_op.constant([2, 7], dtype=dtypes.int64)\n result = sparse_ops.gen_sparse_ops.sparse_split(\n split_dim,\n indices,\n values,\n shape,\n num_split=2)\n output_indices, output_values, output_shape = result\n self.assertEqual(2, len(output_indices))\n self.assertEqual(2, len(output_values))\n self.assertEqual(2, len(output_shape))\n self.assertEqual(output_indices, result.output_indices)\n self.assertEqual(output_values, result.output_values)\n self.assertEqual(output_shape, result.output_shape)\n self.assertAllEqual([[0, 2], [1, 0], [1, 1]], output_indices[0])\n self.assertAllEqual([[0, 0], [0, 1]], output_indices[1])\n self.assertAllEqual([2, 7, 11], output_values[0])\n self.assertAllEqual([3, 5], output_values[1])\n self.assertAllEqual([2, 4], output_shape[0])\n self.assertAllEqual([2, 3], output_shape[1])\n\n # TODO(josh11b): Test an op that has multiple outputs, some but not\n # all of which are lists. 
Examples: barrier_take_many (currently\n # unsupported since it uses a type list) or sdca_optimizer (I don't\n # have an example of legal inputs & outputs).\n\n def testComposition(self):\n x = constant_op.constant(1, dtype=dtypes.int32)\n three_x = x + x + x\n self.assertEquals(dtypes.int32, three_x.dtype)\n self.assertAllEqual(3, three_x)\n\n def testOperatorOverrides(self):\n # TODO(henrytan): test with negative number.\n a = constant_op.constant([1])\n b = constant_op.constant([2])\n\n self.assertAllEqual((-a), [-1])\n self.assertAllEqual(abs(b), [2])\n\n self.assertAllEqual((a + b), [3])\n self.assertAllEqual((a - b), [-1])\n self.assertAllEqual((a * b), [2])\n self.assertAllEqual((a * a), [1])\n\n self.assertAllEqual((a**b), [1])\n self.assertAllEqual((a / b), [1 / 2])\n self.assertAllEqual((a / a), [1])\n self.assertAllEqual((a % b), [1])\n\n self.assertAllEqual((a < b), [True])\n self.assertAllEqual((a <= b), [True])\n self.assertAllEqual((a > b), [False])\n self.assertAllEqual((a >= b), [False])\n self.assertAllEqual((a == b), False)\n self.assertAllEqual((a != b), True)\n\n self.assertAllEqual(1, a[constant_op.constant(0)])\n\n def test_basic_slice(self):\n npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)\n t = constant_op.constant(npt)\n\n self.assertAllEqual(npt[:, :, :], t[:, :, :])\n self.assertAllEqual(npt[::, ::, ::], t[::, ::, ::])\n self.assertAllEqual(npt[::1, ::1, ::1], t[::1, ::1, ::1])\n self.assertAllEqual(npt[::1, ::5, ::2], t[::1, ::5, ::2])\n self.assertAllEqual(npt[::-1, :, :], t[::-1, :, :])\n self.assertAllEqual(npt[:, ::-1, :], t[:, ::-1, :])\n self.assertAllEqual(npt[:, :, ::-1], t[:, :, ::-1])\n self.assertAllEqual(npt[-2::-1, :, ::1], t[-2::-1, :, ::1])\n self.assertAllEqual(npt[-2::-1, :, ::2], t[-2::-1, :, ::2])\n\n def testDegenerateSlices(self):\n npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)\n t = constant_op.constant(npt)\n # degenerate by offering a forward interval with a negative stride\n self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])\n # degenerate with a reverse interval with a positive stride\n self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])\n # empty interval in every dimension\n self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])\n\n def testEllipsis(self):\n npt = np.array(\n [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]])\n t = constant_op.constant(npt)\n\n self.assertAllEqual(npt[0:], t[0:])\n # implicit ellipsis\n self.assertAllEqual(npt[0:, ...], t[0:, ...])\n # ellipsis alone\n self.assertAllEqual(npt[...], t[...])\n # ellipsis at end\n self.assertAllEqual(npt[0:1, ...], t[0:1, ...])\n # ellipsis at begin\n self.assertAllEqual(npt[..., 0:1], t[..., 0:1])\n # ellipsis at middle\n self.assertAllEqual(npt[0:1, ..., 0:1], t[0:1, ..., 0:1])\n\n def testShrink(self):\n npt = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]],\n [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]])\n t = constant_op.constant(npt)\n self.assertAllEqual(npt[:, :, :, :, 3], t[:, :, :, :, 3])\n self.assertAllEqual(npt[..., 3], t[..., 3])\n self.assertAllEqual(npt[:, 0], t[:, 0])\n self.assertAllEqual(npt[:, :, 0], t[:, :, 0])\n\n def testOpWithInputsOnDifferentDevices(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n\n # The GPU kernel for the Reshape op requires that the\n # shape input be on CPU.\n value = constant_op.constant([1., 2.]).gpu()\n shape = constant_op.constant([2, 1])\n reshaped = array_ops.reshape(value, shape)\n self.assertAllEqual([[1], 
[2]], reshaped.cpu())\n\n def testInt64(self):\n # Fill requires the first input to be an int32 tensor.\n self.assertAllEqual(\n [1.0, 1.0],\n array_ops.fill(constant_op.constant([2], dtype=dtypes.int64),\n constant_op.constant(1)))\n\n def testOutputOnHostMemory(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n # The Shape op kernel on GPU places the output in host memory.\n value = constant_op.constant([1.]).gpu()\n shape = array_ops.shape(value)\n self.assertEqual([1], shape.numpy())\n\n def testSilentCopy(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n # Temporarily replace the context\n # pylint: disable=protected-access\n del context._context\n try:\n context._context = context.Context(\n device_policy=context.DEVICE_PLACEMENT_SILENT)\n cpu_tensor = constant_op.constant(1.0)\n gpu_tensor = cpu_tensor.gpu()\n self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0)\n finally:\n del context._context\n context._context = context.Context()\n # pylint: enable=protected-access\n\n def testSoftPlacement(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n # Temporarily replace the context\n # pylint: disable=protected-access\n del context._context\n try:\n context._context = context.Context(\n device_policy=context.DEVICE_PLACEMENT_SILENT,\n config=config_pb2.ConfigProto(allow_soft_placement=True))\n cpu_tensor = constant_op.constant(1.0)\n result = cpu_tensor + cpu_tensor\n self.assertEqual(result.device,\n '/job:localhost/replica:0/task:0/device:GPU:0')\n finally:\n del context._context\n context._context = context.Context()\n # pylint: enable=protected-access\n\n def testRandomUniform(self):\n scalar_shape = constant_op.constant([], dtype=dtypes.int32)\n\n x = random_ops.random_uniform(scalar_shape)\n self.assertEquals(0, x.shape.ndims)\n self.assertEquals(dtypes.float32, x.dtype)\n\n x = random_ops.random_uniform(\n scalar_shape, minval=constant_op.constant(5.),\n maxval=constant_op.constant(6.))\n self.assertLess(x, 6)\n self.assertGreaterEqual(x, 5)\n\n def testArgsToMatchingEagerDefault(self):\n # Uses default\n ctx = context.context()\n t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int32)\n self.assertEquals(t, dtypes.int32)\n self.assertEquals(r[0].dtype, dtypes.int32)\n t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int64)\n self.assertEquals(t, dtypes.int64)\n self.assertEquals(r[0].dtype, dtypes.int64)\n # Doesn't use default\n t, r = execute.args_to_matching_eager(\n [['string', 'arg']], ctx, dtypes.int32)\n self.assertEquals(t, dtypes.string)\n self.assertEquals(r[0].dtype, dtypes.string)\n\n def testFlattenLayer(self):\n flatten_layer = core.Flatten()\n x = constant_op.constant([[[-10, -20], [-30, -40]], [[10, 20], [30, 40]]])\n y = flatten_layer(x)\n self.assertAllEqual([[-10, -20, -30, -40], [10, 20, 30, 40]], y)\n\n def testIdentity(self):\n self.assertAllEqual(2, array_ops.identity(2))\n\n def testIdentityOnVariable(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n with context.device('/gpu:0'):\n v = resource_variable_ops.ResourceVariable(True)\n self.assertAllEqual(True, array_ops.identity(v))\n\n def testIncompatibleSetShape(self):\n x = constant_op.constant(1)\n with self.assertRaises(ValueError):\n x.set_shape((1, 2))\n\n def testCompatibleSetShape(self):\n x = constant_op.constant([[1, 2]])\n x.set_shape(tensor_shape.TensorShape([None, 2]))\n self.assertEqual(x.get_shape(), (1, 2))\n\n def 
testCastScalarToPrimitiveTypes(self):\n x = constant_op.constant(1.3)\n self.assertIsInstance(int(x), int)\n self.assertEqual(int(x), 1)\n self.assertIsInstance(float(x), float)\n self.assertAllClose(float(x), 1.3)\n\n def testCastNonScalarToPrimitiveTypesFails(self):\n x = constant_op.constant([1.3, 2])\n with self.assertRaises(TypeError):\n int(x)\n with self.assertRaises(TypeError):\n float(x)\n\n def testFormatString(self):\n x = constant_op.constant(3.1415)\n self.assertEqual('3.14', '{:.2f}'.format(x))\n\n def testNoOpIsNone(self):\n self.assertTrue(control_flow_ops.no_op() is None)\n\n\nif __name__ == '__main__':\n test.main()\n",
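The ``ops_test.py`` cell above drives eager execution through TensorFlow's internal modules (``constant_op``, ``math_ops``, ``array_ops``). The same behaviors are reachable through the public API; a minimal sketch against ``tf.*`` in TF 2.x, where eager execution is the default:

    import tensorflow as tf

    # Ops run immediately and return concrete values (cf. testExecuteBasic).
    product = tf.constant(3) * tf.constant(5)
    assert int(product) == 15

    # tf.split returns a Python list of tensors (cf. testExecuteOneListOutput).
    x1, x2, x3 = tf.split(tf.constant([[0, 1, 2], [3, 4, 5]]), 3, axis=1)
    assert x1.numpy().tolist() == [[0], [3]]

    # Scalar tensors cast to native Python types (cf. testCastScalarToPrimitiveTypes).
    x = tf.constant(1.3)
    assert isinstance(int(x), int) and abs(float(x) - 1.3) < 1e-6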
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Tests for tensorflow.python.training.saver.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport math\nimport os\nimport random\nimport shutil\nimport tempfile\nimport time\n\nimport numpy as np\nimport six\n\nfrom google.protobuf.any_pb2 import Any\nfrom google.protobuf import text_format\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import meta_graph_pb2\nfrom tensorflow.core.protobuf import queue_runner_pb2\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.client import session\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import graph_io\nfrom tensorflow.python.framework import meta_graph\nfrom tensorflow.python.framework import ops as ops_lib\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import adam\nfrom tensorflow.python.training import checkpointable\nfrom tensorflow.python.training import gradient_descent\nfrom tensorflow.python.training import queue_runner_impl\nfrom tensorflow.python.training import saver as saver_module\nfrom tensorflow.python.training import saver_test_utils\nfrom tensorflow.python.training.checkpoint_state_pb2 import CheckpointState\nfrom tensorflow.python.util import compat\n\n\n@test_util.with_c_api\nclass SaverTest(test.TestCase):\n\n def basicSaveRestore(self, variable_op):\n save_path = os.path.join(self.get_temp_dir(), \"basic_save_restore\")\n\n with self.test_session(graph=ops_lib.Graph()) as sess:\n # Build a graph with 2 parameter nodes, and Save and\n # Restore 
nodes for them.\n v0 = variable_op(10.0, name=\"v0\")\n v1 = variable_op(20.0, name=\"v1\")\n v2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n v2_init = v2.insert(\"k1\", 30.0)\n\n # Initialize all variables\n if context.in_graph_mode():\n self.evaluate([variables.global_variables_initializer(), v2_init])\n\n # Check that the parameter nodes have been initialized.\n self.assertEqual(10.0, self.evaluate(v0))\n self.assertEqual(20.0, self.evaluate(v1))\n self.assertEqual(b\"k1\", self.evaluate(v2.keys()))\n self.assertEqual(30.0, self.evaluate(v2.values()))\n\n # Save the initialized values in the file at \"save_path\"\n save = saver_module.Saver(\n {\n \"v0\": v0,\n \"v1\": v1,\n \"v2\": v2.saveable\n }, restore_sequentially=True)\n val = save.save(sess, save_path)\n self.assertTrue(isinstance(val, six.string_types))\n self.assertEqual(save_path, val)\n\n # Start a second session. In that session the parameter nodes\n # have not been initialized either.\n with self.test_session(graph=ops_lib.Graph()) as sess:\n v0 = variable_op(-1.0, name=\"v0\")\n v1 = variable_op(-1.0, name=\"v1\")\n v2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n\n # Assert that the variables are not initialized.\n if context.in_graph_mode():\n self.assertEqual(\n len(variables.report_uninitialized_variables().eval()), 2)\n self.assertEqual(0, len(v2.keys().eval()))\n self.assertEqual(0, len(v2.values().eval()))\n # Restore the saved values in the parameter nodes.\n save = saver_module.Saver({\"v0\": v0, \"v1\": v1, \"v2\": v2.saveable})\n save.restore(sess, save_path)\n # Check that the parameter nodes have been restored.\n self.assertEqual(10.0, self.evaluate(v0))\n self.assertEqual(20.0, self.evaluate(v1))\n self.assertEqual(b\"k1\", self.evaluate(v2.keys()))\n self.assertEqual(30.0, self.evaluate(v2.values()))\n\n # Build another graph with 2 nodes, initialized\n # differently, and a Restore node for them.\n with self.test_session(graph=ops_lib.Graph()) as sess:\n v0_2 = variable_op(1000.0, name=\"v0\")\n v1_2 = variable_op(2000.0, name=\"v1\")\n v2_2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n v2_init = v2_2.insert(\"k1000\", 3000.0)\n\n # Check that the parameter nodes have been initialized.\n if context.in_graph_mode():\n init_all_op = [variables.global_variables_initializer(), v2_init]\n self.evaluate(init_all_op)\n # TODO(xpan): Why _mutable_hash_table_v2 doesn't create empty\n # table as it claims in eager mode?\n self.assertEqual(b\"k1000\", self.evaluate(v2_2.keys()))\n self.assertEqual(3000.0, self.evaluate(v2_2.values()))\n self.assertEqual(1000.0, self.evaluate(v0_2))\n self.assertEqual(2000.0, self.evaluate(v1_2))\n\n # Restore the values saved earlier in the parameter nodes.\n save2 = saver_module.Saver({\"v0\": v0_2, \"v1\": v1_2, \"v2\": v2_2.saveable})\n save2.restore(sess, save_path)\n # Check that the parameter nodes have been restored.\n self.assertEqual(10.0, self.evaluate(v0_2))\n self.assertEqual(20.0, self.evaluate(v1_2))\n self.assertEqual(b\"k1\", self.evaluate(v2_2.keys()))\n self.assertEqual(30.0, self.evaluate(v2_2.values()))\n\n def testBasic(self):\n self.basicSaveRestore(variables.Variable)\n\n @test_util.run_in_graph_and_eager_modes()\n def testResourceBasic(self):\n self.basicSaveRestore(resource_variable_ops.ResourceVariable)\n\n def testResourceVariableReadOpsAddedDeterministically(self):\n graph_defs = []\n num_graphs = 10\n for _ in range(num_graphs):\n with ops_lib.Graph().as_default() as g:\n for i in range(20):\n resource_variable_ops.ResourceVariable(i, 
name=\"var%s\" % i)\n saver_module.Saver()\n graph_defs.append(g.as_graph_def())\n for i in range(num_graphs - 1):\n self.assertEqual(graph_defs[i], graph_defs[i + 1])\n\n def testEagerBasic(self):\n with context.eager_mode():\n ckpt_prefix = os.path.join(self.get_temp_dir(), \"ckpt\")\n\n v1 = resource_variable_ops.ResourceVariable(3.14, name=\"v1\")\n v2 = resource_variable_ops.ResourceVariable([1, 2], name=\"v2\")\n save = saver_module.Saver([v1, v2])\n save.save(None, ckpt_prefix)\n\n v1.assign(0.0)\n v2.assign([0, 0])\n self.assertNear(0.0, self.evaluate(v1), 1e-5)\n self.assertAllEqual([0, 0], self.evaluate(v2))\n\n save.restore(None, ckpt_prefix)\n self.assertNear(3.14, self.evaluate(v1), 1e-5)\n self.assertAllEqual([1, 2], self.evaluate(v2))\n\n def testEagerGraphCompatibility(self):\n # Save from graph mode and restore from eager mode.\n graph_ckpt_prefix = os.path.join(self.get_temp_dir(), \"graph_ckpt\")\n with context.graph_mode():\n with self.test_session(graph=ops_lib.Graph()) as sess:\n # Create a graph model and save the checkpoint.\n w1 = resource_variable_ops.ResourceVariable(1.0, name=\"w1\")\n w2 = resource_variable_ops.ResourceVariable(2.0, name=\"w2\")\n graph_saver = saver_module.Saver([w1, w2])\n sess.run(variables.global_variables_initializer())\n graph_saver.save(sess, graph_ckpt_prefix)\n\n with context.eager_mode():\n ops_lib._default_graph_stack.reset() # pylint: disable=protected-access\n ops_lib.reset_default_graph()\n\n w1 = resource_variable_ops.ResourceVariable(0.0, name=\"w1\")\n w2 = resource_variable_ops.ResourceVariable(0.0, name=\"w2\")\n\n graph_saver = saver_module.Saver([w1, w2])\n graph_saver.restore(None, graph_ckpt_prefix)\n\n self.assertAllEqual(self.evaluate(w1), 1.0)\n self.assertAllEqual(self.evaluate(w2), 2.0)\n\n # Save from eager mode and restore from graph mode.\n eager_ckpt_prefix = os.path.join(self.get_temp_dir(), \"eager_ckpt\")\n with context.eager_mode():\n ops_lib._default_graph_stack.reset() # pylint: disable=protected-access\n ops_lib.reset_default_graph()\n\n w3 = resource_variable_ops.ResourceVariable(3.0, name=\"w3\")\n w4 = resource_variable_ops.ResourceVariable(4.0, name=\"w4\")\n\n graph_saver = saver_module.Saver([w3, w4])\n graph_saver.save(None, eager_ckpt_prefix)\n\n with context.graph_mode():\n with self.test_session(graph=ops_lib.Graph()) as sess:\n w3 = resource_variable_ops.ResourceVariable(0.0, name=\"w3\")\n w4 = resource_variable_ops.ResourceVariable(0.0, name=\"w4\")\n graph_saver = saver_module.Saver([w3, w4])\n sess.run(variables.global_variables_initializer())\n graph_saver.restore(sess, eager_ckpt_prefix)\n self.assertAllEqual(w3.eval(), 3.0)\n self.assertAllEqual(w4.eval(), 4.0)\n\n @test_util.run_in_graph_and_eager_modes()\n def testResourceSaveRestoreCachingDevice(self):\n save_path = os.path.join(self.get_temp_dir(), \"resource_cache\")\n with self.test_session(graph=ops_lib.Graph()) as sess:\n v = resource_variable_ops.ResourceVariable([1], caching_device=\"/cpu:0\",\n name=\"v\")\n if context.in_graph_mode():\n self.evaluate(variables.global_variables_initializer())\n else:\n sess = None\n save = saver_module.Saver([v])\n save.save(sess, save_path)\n\n save2 = saver_module.Saver([v])\n save2.restore(sess, save_path)\n self.assertEquals(self.evaluate(v), [1])\n\n def testNoAdditionalOpsAddedBySaverForResourceVariablesOutsideSaveScope(self):\n with ops_lib.Graph().as_default() as g:\n v = resource_variable_ops.ResourceVariable(1.0, name=\"v\")\n with ops_lib.name_scope(\"saver1\"):\n 
saver_module.Saver()\n with ops_lib.name_scope(\"saver2\"):\n saver_module.Saver({\"name\": v})\n ops_in_saver1_scope_but_not_save_scope = [\n op for op in g.get_operations()\n if (op.name.startswith(\"saver1/\") and\n not op.name.startswith(\"saver1/save/\"))]\n self.assertEqual(ops_in_saver1_scope_but_not_save_scope, [])\n ops_in_saver2_scope_but_not_save_scope = [\n op for op in g.get_operations()\n if (op.name.startswith(\"saver2/\") and\n not op.name.startswith(\"saver2/save/\"))]\n self.assertEqual(ops_in_saver2_scope_but_not_save_scope, [])\n\n def testSaveCopyRestoreWithSaveRelativePaths(self):\n \"\"\"Save, copy checkpoint dir and restore from copied dir.\n\n This only works for save_relative_paths=True.\n \"\"\"\n save_dir1 = os.path.join(self.get_temp_dir(), \"save_dir1\")\n os.mkdir(save_dir1)\n save_path1 = os.path.join(save_dir1, \"save_copy_restore\")\n\n # Build a graph with 2 parameter nodes, and Save and\n # Restore nodes for them.\n v0 = variables.Variable(10.0, name=\"v0\")\n v1 = variables.Variable(20.0, name=\"v1\")\n v2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n v2_init = v2.insert(\"k1\", 30.0)\n save = saver_module.Saver(\n var_list={\n \"v0\": v0,\n \"v1\": v1,\n \"v2\": v2.saveable},\n restore_sequentially=True,\n save_relative_paths=True)\n init_all_op = [variables.global_variables_initializer(), v2_init]\n\n with self.test_session() as sess:\n # Initialize all variables\n sess.run(init_all_op)\n\n # Check that the parameter nodes have been initialized.\n self.assertEqual(10.0, v0.eval())\n self.assertEqual(20.0, v1.eval())\n self.assertEqual(b\"k1\", v2.keys().eval())\n self.assertEqual(30.0, v2.values().eval())\n\n # Save the initialized values in the file at \"save_path\"\n val = save.save(sess, save_path1)\n self.assertTrue(isinstance(val, six.string_types))\n self.assertEqual(save_path1, val)\n\n self.assertEqual(saver_module.latest_checkpoint(save_dir1), save_path1)\n save_dir2 = os.path.join(self.get_temp_dir(), \"save_dir2\")\n os.renames(save_dir1, save_dir2)\n save_path2 = os.path.join(save_dir2, \"save_copy_restore\")\n self.assertEqual(saver_module.latest_checkpoint(save_dir2), save_path2)\n\n # Start a second session. 
In that session the parameter nodes\n # have not been initialized either.\n with self.test_session() as sess:\n v0 = variables.Variable(-1.0, name=\"v0\")\n v1 = variables.Variable(-1.0, name=\"v1\")\n v2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n save = saver_module.Saver({\"v0\": v0, \"v1\": v1, \"v2\": v2.saveable})\n\n # Assert that the variables are not initialized.\n self.assertEqual(\n len(variables.report_uninitialized_variables().eval()), 2)\n self.assertEqual(0, len(v2.keys().eval()))\n self.assertEqual(0, len(v2.values().eval()))\n\n # Restore the saved values in the parameter nodes.\n save.restore(sess, save_path2)\n # Check that the parameter nodes have been restored.\n self.assertEqual(10.0, v0.eval())\n self.assertEqual(20.0, v1.eval())\n self.assertEqual(b\"k1\", v2.keys().eval())\n self.assertEqual(30.0, v2.values().eval())\n\n def testFilenameTensor(self):\n v0 = variables.Variable(0, name=\"v0\")\n filename = b\"somerandomfilename\"\n save = saver_module.Saver({\"v0\": v0}, filename=filename)\n with self.test_session() as sess:\n tensor = sess.graph.get_tensor_by_name(\n save.saver_def.filename_tensor_name)\n self.assertEqual(sess.run(tensor), filename)\n\n def testInvalidPath(self):\n v0 = variables.Variable(0, name=\"v0\")\n for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):\n with self.test_session() as sess:\n save = saver_module.Saver({\"v0\": v0}, write_version=ver)\n with self.assertRaisesRegexp(errors.NotFoundError,\n \"Failed to find any matching files for\"):\n save.restore(sess, \"invalid path\")\n\n def testInt64(self):\n save_path = os.path.join(self.get_temp_dir(), \"int64\")\n\n with self.test_session() as sess:\n # Build a graph with 1 node, and save and restore for them.\n v = variables.Variable(np.int64(15), name=\"v\")\n save = saver_module.Saver({\"v\": v}, restore_sequentially=True)\n variables.global_variables_initializer().run()\n\n # Save the initialized values in the file at \"save_path\"\n val = save.save(sess, save_path)\n self.assertTrue(isinstance(val, six.string_types))\n self.assertEqual(save_path, val)\n\n with self.test_session() as sess:\n v = variables.Variable(np.int64(-1), name=\"v\")\n save = saver_module.Saver({\"v\": v})\n\n with self.assertRaisesWithPredicateMatch(\n errors_impl.OpError, lambda e: \"uninitialized value v\" in e.message):\n sess.run(v)\n\n # Restore the saved values in the parameter nodes.\n save.restore(sess, save_path)\n # Check that the parameter nodes have been restored.\n self.assertEqual(np.int64(15), v.eval())\n\n def testSomeErrors(self):\n with ops_lib.Graph().as_default():\n v0 = variables.Variable([10.0], name=\"v0\")\n v1 = variables.Variable([20.0], name=\"v1\")\n v2 = variables.Variable([20.0], name=\"v2\")\n v2._set_save_slice_info(\n variables.Variable.SaveSliceInfo(\"v1\", [1], [0], [1]))\n\n # By default the name used for \"v2\" will be \"v1\" and raise an error.\n with self.assertRaisesRegexp(ValueError, \"same name: v1\"):\n saver_module.Saver([v0, v1, v2])\n\n # The names are different and will work.\n saver_module.Saver({\"vee1\": v1, \"other\": [v2]})\n\n # Partitioned variables also cause name conflicts.\n p_v1 = variable_scope.get_variable(\n \"p_v1\",\n shape=[4, 5],\n partitioner=partitioned_variables.fixed_size_partitioner(\n num_shards=2))\n p_v2 = variable_scope.get_variable(\n \"p_v2\",\n shape=[4, 5],\n partitioner=partitioned_variables.fixed_size_partitioner(\n num_shards=2))\n p_v2._name = \"p_v1\"\n with self.assertRaisesRegexp(ValueError, \"same name: p_v1\"):\n 
saver_module.Saver([p_v1, p_v2])\n\n def testSameName(self):\n with ops_lib.Graph().as_default():\n v0 = variables.Variable([10.0], name=\"v0\")\n v2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n\n # Saving one variable under two names raises an error.\n with self.assertRaisesRegexp(\n ValueError, \"The same saveable will be restored with two names: v0\"):\n saver_module.Saver({\"v0\": v0, \"v0too\": v0})\n\n # Ditto for custom saveables.\n with self.assertRaisesRegexp(\n ValueError, \"The same saveable will be restored with two names: v2\"):\n saver_module.Saver({\"v2\": v2.saveable, \"v2too\": v2.saveable})\n\n # Verify non-duplicate names work.\n saver_module.Saver({\"v0\": v0, \"v2\": v2.saveable})\n\n def testBasicsWithListOfVariables(self):\n save_path = os.path.join(self.get_temp_dir(), \"basics_with_list\")\n\n with self.test_session(graph=ops_lib.Graph()) as sess:\n # Build a graph with 2 parameter nodes, and Save and\n # Restore nodes for them.\n v0 = variables.Variable(10.0, name=\"v0\")\n v1 = variables.Variable(20.0, name=\"v1\")\n v2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n v2_init = v2.insert(\"k1\", 30.0)\n save = saver_module.Saver([v0, v1, v2.saveable])\n variables.global_variables_initializer().run()\n v2_init.run()\n\n # Check that the parameter nodes have been initialized.\n self.assertEqual(10.0, v0.eval())\n self.assertEqual(20.0, v1.eval())\n self.assertEqual(b\"k1\", v2.keys().eval())\n self.assertEqual(30.0, v2.values().eval())\n\n # Save the initialized values in the file at \"save_path\"\n val = save.save(sess, save_path)\n self.assertTrue(isinstance(val, six.string_types))\n self.assertEqual(save_path, val)\n\n # Start a second session. In that session the variables\n # have not been initialized either.\n with self.test_session(graph=ops_lib.Graph()) as sess:\n v0 = variables.Variable(-1.0, name=\"v0\")\n v1 = variables.Variable(-1.0, name=\"v1\")\n v2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n save = saver_module.Saver([v0, v1, v2.saveable])\n\n with self.assertRaisesWithPredicateMatch(\n errors_impl.OpError, lambda e: \"uninitialized value v0\" in e.message):\n sess.run(v0)\n with self.assertRaisesWithPredicateMatch(\n errors_impl.OpError, lambda e: \"uninitialized value v1\" in e.message):\n sess.run(v1)\n self.assertEqual(0, len(v2.keys().eval()))\n self.assertEqual(0, len(v2.values().eval()))\n\n # Restore the saved values in the parameter nodes.\n save.restore(sess, save_path)\n # Check that the parameter nodes have been restored.\n self.assertEqual(10.0, v0.eval())\n self.assertEqual(20.0, v1.eval())\n self.assertEqual(b\"k1\", v2.keys().eval())\n self.assertEqual(30.0, v2.values().eval())\n\n # Build another graph with 2 nodes, initialized\n # differently, and a Restore node for them.\n with self.test_session(graph=ops_lib.Graph()) as sess:\n v0_2 = variables.Variable(1000.0, name=\"v0\")\n v1_2 = variables.Variable(2000.0, name=\"v1\")\n v2_2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n save2 = saver_module.Saver([v0_2, v1_2, v2_2.saveable])\n v2_2.insert(\"k1000\", 3000.0).run()\n variables.global_variables_initializer().run()\n\n # Check that the parameter nodes have been initialized.\n self.assertEqual(1000.0, v0_2.eval())\n self.assertEqual(2000.0, v1_2.eval())\n self.assertEqual(b\"k1000\", v2_2.keys().eval())\n self.assertEqual(3000.0, v2_2.values().eval())\n # Restore the values saved earlier in the parameter nodes.\n save2.restore(sess, save_path)\n # Check that the parameter nodes have been restored.\n 
self.assertEqual(10.0, v0_2.eval())\n self.assertEqual(20.0, v1_2.eval())\n self.assertEqual(b\"k1\", v2_2.keys().eval())\n self.assertEqual(30.0, v2_2.values().eval())\n\n def _SaveAndLoad(self, var_name, var_value, other_value, save_path):\n with self.test_session(graph=ops_lib.Graph()) as sess:\n var = resource_variable_ops.ResourceVariable(var_value, name=var_name)\n save = saver_module.Saver({var_name: var})\n if context.in_graph_mode():\n self.evaluate(var.initializer)\n val = save.save(sess, save_path)\n self.assertEqual(save_path, val)\n with self.test_session(graph=ops_lib.Graph()) as sess:\n var = resource_variable_ops.ResourceVariable(other_value, name=var_name)\n save = saver_module.Saver({var_name: var})\n save.restore(sess, save_path)\n self.assertAllClose(var_value, self.evaluate(var))\n\n def testCacheRereadsFile(self):\n save_path = os.path.join(self.get_temp_dir(), \"cache_rereads\")\n # Save and reload one Variable named \"var0\".\n self._SaveAndLoad(\"var0\", 0.0, 1.0, save_path)\n # Save and reload one Variable named \"var1\" in the same file.\n # The cached readers should know to re-read the file.\n self._SaveAndLoad(\"var1\", 1.1, 2.2, save_path)\n\n def testAllowEmpty(self):\n save_path = os.path.join(self.get_temp_dir(), \"allow_empty\")\n with self.test_session() as sess:\n _ = constant_op.constant(1)\n save = saver_module.Saver(allow_empty=True)\n val = save.save(sess, save_path)\n self.assertIsNone(val)\n with self.test_session() as sess:\n save = saver_module.Saver(allow_empty=True)\n save.restore(sess, save_path)\n\n def testGPU(self):\n if not test.is_gpu_available():\n return\n save_path = os.path.join(self.get_temp_dir(), \"gpu\")\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n with sess.graph.device(test.gpu_device_name()):\n v0_1 = variables.Variable(123.45)\n save = saver_module.Saver({\"v0\": v0_1})\n variables.global_variables_initializer().run()\n save.save(sess, save_path)\n\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n with sess.graph.device(test.gpu_device_name()):\n v0_2 = variables.Variable(543.21)\n save = saver_module.Saver({\"v0\": v0_2})\n variables.global_variables_initializer().run()\n\n def testSharedServerOnGPU(self):\n if not test.is_gpu_available():\n return\n save_path = os.path.join(self.get_temp_dir(), \"gpu\")\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n with sess.graph.device(test.gpu_device_name()):\n v0_1 = variables.Variable(123.45)\n save = saver_module.Saver({\"v0\": v0_1}, sharded=True, allow_empty=True)\n variables.global_variables_initializer().run()\n save.save(sess, save_path)\n\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n with sess.graph.device(test.gpu_device_name()):\n v0_2 = variables.Variable(543.21)\n save = saver_module.Saver({\"v0\": v0_2}, sharded=True, allow_empty=True)\n variables.global_variables_initializer().run()\n\n def testVariables(self):\n save_path = os.path.join(self.get_temp_dir(), \"variables\")\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n one = variables.Variable(1.0)\n twos = variables.Variable([2.0, 2.0, 2.0])\n v2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n init = variables.global_variables_initializer()\n save = saver_module.Saver()\n init.run()\n v2.insert(\"k1\", 3.0).run()\n save.save(sess, save_path)\n\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n one = variables.Variable(0.0)\n twos = variables.Variable([0.0, 0.0, 0.0])\n v2 = saver_test_utils.CheckpointedOp(name=\"v2\")\n # Saver 
with no arg, defaults to 'all variables'.\n save = saver_module.Saver()\n save.restore(sess, save_path)\n self.assertAllClose(1.0, one.eval())\n self.assertAllClose([2.0, 2.0, 2.0], twos.eval())\n self.assertEqual(b\"k1\", v2.keys().eval())\n self.assertEqual(3.0, v2.values().eval())\n\n def testVarListShouldBeEmptyInDeferredBuild(self):\n with ops_lib.Graph().as_default():\n v = variables.Variable(1.0)\n with self.assertRaisesRegexp(ValueError, \"defer_build\"):\n saver_module.Saver([v], defer_build=True)\n\n def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):\n save_path = os.path.join(self.get_temp_dir(), \"error_deferred_build\")\n with ops_lib.Graph().as_default(), session.Session() as sess:\n variables.Variable(1.0)\n saver = saver_module.Saver(defer_build=True)\n with self.assertRaisesRegexp(RuntimeError, \"build\"):\n saver.save(sess, save_path)\n\n def testDeferredBuild(self):\n save_path = os.path.join(self.get_temp_dir(), \"deferred_build\")\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n one = variables.Variable(1.0)\n save = saver_module.Saver(defer_build=True)\n # if build is not deferred, saver cannot save the `twos`.\n twos = variables.Variable([2.0, 2.0, 2.0])\n init = variables.global_variables_initializer()\n save.build()\n init.run()\n save.save(sess, save_path)\n\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n one = variables.Variable(0.0)\n twos = variables.Variable([0.0, 0.0, 0.0])\n # Saver with no arg, defaults to 'all variables'.\n save = saver_module.Saver()\n save.restore(sess, save_path)\n self.assertAllClose(1.0, one.eval())\n self.assertAllClose([2.0, 2.0, 2.0], twos.eval())\n\n def testReshape(self):\n save_path = os.path.join(self.get_temp_dir(), \"variables_reshape\")\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n var = variables.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n init = variables.global_variables_initializer()\n save = saver_module.Saver()\n init.run()\n save.save(sess, save_path)\n\n # Error when restoring with default reshape=False\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])\n save = saver_module.Saver()\n with self.assertRaisesRegexp(\n errors_impl.InvalidArgumentError,\n \"Assign requires shapes of both tensors to match.\"):\n save.restore(sess, save_path)\n\n # Restored to new shape with reshape=True\n with session.Session(\"\", graph=ops_lib.Graph()) as sess:\n var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])\n save = saver_module.Saver(reshape=True)\n save.restore(sess, save_path)\n self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], var.eval())\n\n @test_util.run_in_graph_and_eager_modes()\n def testSaveWithGlobalStep(self, pad_step_number=False):\n save_path = os.path.join(self.get_temp_dir(), \"ckpt_with_global_step\")\n global_step_int = 5\n # Save and reload one Variable named \"var0\".\n self._SaveAndLoad(\"var0\", 0.0, 1.0, save_path)\n for use_tensor in [True, False]:\n with self.test_session(graph=ops_lib.Graph()):\n var = resource_variable_ops.ResourceVariable(1.0, name=\"var0\")\n save = saver_module.Saver(\n {\n var._shared_name: var\n }, pad_step_number=pad_step_number)\n if context.in_graph_mode():\n self.evaluate(var.initializer)\n sess = ops_lib.get_default_session()\n else:\n sess = None\n if use_tensor:\n global_step = constant_op.constant(global_step_int)\n val = save.save(sess, save_path, global_step=global_step)\n else:\n val = save.save(sess, save_path, 
global_step=global_step_int)\n if pad_step_number:\n expected_save_path = \"%s-%s\" % (save_path,\n \"{:08d}\".format(global_step_int))\n else:\n expected_save_path = \"%s-%d\" % (save_path, global_step_int)\n self.assertEqual(expected_save_path, val)\n\n def testSaveWithGlobalStepWithPadding(self):\n self.testSaveWithGlobalStep(pad_step_number=True)\n\n def testSaveToNonexistingPath(self):\n file_io.write_string_to_file(\n os.path.join(self.get_temp_dir(), \"actually_a_file\"), \"\")\n paths = [\n os.path.join(self.get_temp_dir(), \"nonexisting_dir/path\"),\n os.path.join(self.get_temp_dir(), \"other_nonexisting_dir/path1/path2\"),\n os.path.join(self.get_temp_dir(), \"actually_a_file/path\"),\n ]\n\n for save_path in paths:\n # Build a graph with 2 parameter nodes, and Save and\n # Restore nodes for them.\n v0 = variables.Variable(10.0, name=\"v0\")\n v1 = variables.Variable(20.0, name=\"v1\")\n save = saver_module.Saver({\"v0\": v0, \"v1\": v1}, restore_sequentially=True)\n init_all_op = variables.global_variables_initializer()\n\n # In the case where the parent directory doesn't exist, whether or not the\n # save succeeds or fails is implementation dependent. Therefore we allow\n # both cases.\n try:\n with self.test_session() as sess:\n # Initialize all variables\n sess.run(init_all_op)\n\n # Check that the parameter nodes have been initialized.\n self.assertEqual(10.0, v0.eval())\n self.assertEqual(20.0, v1.eval())\n\n # Save the graph.\n save.save(sess, save_path)\n\n with self.test_session() as sess:\n # Restore the saved values in the parameter nodes.\n save.restore(sess, save_path)\n # Check that the parameter nodes have been restored.\n self.assertEqual(10.0, v0.eval())\n self.assertEqual(20.0, v1.eval())\n except ValueError as exc:\n error_msg_template = \"Parent directory of {} doesn't exist, can't save.\"\n self.assertEqual(error_msg_template.format(save_path), str(exc))\n\n def testSaveToURI(self):\n # ParseURI functions don't work on Windows yet.\n # TODO(jhseu): Remove this check when it works.\n if os.name == \"nt\":\n self.skipTest(\"Local URI support doesn't work on Windows\")\n save_path = \"file://\" + os.path.join(self.get_temp_dir(), \"uri\")\n\n # Build a graph with 2 parameter nodes, and Save and\n # Restore nodes for them.\n v0 = variables.Variable(10.0, name=\"v0\")\n v1 = variables.Variable(20.0, name=\"v1\")\n save = saver_module.Saver({\"v0\": v0, \"v1\": v1}, restore_sequentially=True)\n init_all_op = variables.global_variables_initializer()\n\n with self.test_session() as sess:\n # Initialize all variables\n sess.run(init_all_op)\n\n # Check that the parameter nodes have been initialized.\n self.assertEqual(10.0, v0.eval())\n self.assertEqual(20.0, v1.eval())\n save.save(sess, save_path)\n\n\n@test_util.with_c_api\nclass SaveRestoreShardedTest(test.TestCase):\n\n _WRITE_VERSION = saver_pb2.SaverDef.V1\n\n def _get_test_dir(self, dirname):\n test_dir = os.path.join(self.get_temp_dir(), dirname)\n gfile.MakeDirs(test_dir)\n return test_dir\n\n def testBasics(self):\n save_path = os.path.join(self.get_temp_dir(), \"sharded_basics\")\n\n # Build a graph with 2 parameter nodes on different devices.\n with session.Session(\n target=\"\",\n config=config_pb2.ConfigProto(device_count={\"CPU\": 2})) as sess:\n with sess.graph.device(\"/cpu:0\"):\n v0 = variables.Variable(10, name=\"v0\")\n t0 = saver_test_utils.CheckpointedOp(name=\"t0\")\n with sess.graph.device(\"/cpu:1\"):\n v1 = variables.Variable(20, name=\"v1\")\n t1 = 
saver_test_utils.CheckpointedOp(name=\"t1\")\n save = saver_module.Saver(\n {\n \"v0\": v0,\n \"v1\": v1,\n \"t0\": t0.saveable,\n \"t1\": t1.saveable\n },\n write_version=self._WRITE_VERSION,\n sharded=True)\n variables.global_variables_initializer().run()\n t0.insert(\"k1\", 30.0).run()\n t1.insert(\"k2\", 40.0).run()\n val = save.save(sess, save_path)\n if save._write_version is saver_pb2.SaverDef.V1:\n self.assertEqual(save_path + \"-?????-of-00002\", val)\n else:\n self.assertEqual(save_path, val)\n meta_graph_filename = save._MetaGraphFilename(val)\n self.assertEqual(save_path + \".meta\", meta_graph_filename)\n\n if save._write_version is saver_pb2.SaverDef.V1:\n # Restore different ops from shard 0 of the saved files.\n with session.Session(\n target=\"\",\n config=config_pb2.ConfigProto(device_count={\"CPU\": 2})) as sess:\n with sess.graph.device(\"/cpu:0\"):\n v0 = variables.Variable(111, name=\"v0\")\n t0 = saver_test_utils.CheckpointedOp(name=\"t0\")\n save = saver_module.Saver(\n {\n \"v0\": v0,\n \"t0\": t0.saveable\n },\n write_version=self._WRITE_VERSION,\n sharded=True)\n variables.global_variables_initializer().run()\n t0.insert(\"k11\", 33.0).run()\n self.assertEqual(111, v0.eval())\n self.assertEqual(b\"k11\", t0.keys().eval())\n self.assertEqual(33.0, t0.values().eval())\n save.restore(sess, save_path + \"-00000-of-00002\")\n self.assertEqual(10, v0.eval())\n self.assertEqual(b\"k1\", t0.keys().eval())\n self.assertEqual(30.0, t0.values().eval())\n\n # Restore different ops from shard 1 of the saved files.\n with session.Session(\n target=\"\",\n config=config_pb2.ConfigProto(device_count={\"CPU\": 2})) as sess:\n with sess.graph.device(\"/cpu:0\"):\n v1 = variables.Variable(222)\n t1 = saver_test_utils.CheckpointedOp(name=\"t1\")\n save = saver_module.Saver(\n {\n \"v1\": v1,\n \"t1\": t1.saveable\n },\n write_version=self._WRITE_VERSION,\n sharded=True)\n variables.global_variables_initializer().run()\n t1.insert(\"k22\", 44.0).run()\n self.assertEqual(222, v1.eval())\n self.assertEqual(b\"k22\", t1.keys().eval())\n self.assertEqual(44.0, t1.values().eval())\n save.restore(sess, save_path + \"-00001-of-00002\")\n self.assertEqual(20, v1.eval())\n self.assertEqual(b\"k2\", t1.keys().eval())\n self.assertEqual(40.0, t1.values().eval())\n\n # Now try a restore with the sharded filename.\n with session.Session(\n target=\"\",\n config=config_pb2.ConfigProto(device_count={\"CPU\": 2})) as sess:\n with sess.graph.device(\"/cpu:0\"):\n v0 = variables.Variable(111, name=\"v0\")\n t0 = saver_test_utils.CheckpointedOp(name=\"t0\")\n with sess.graph.device(\"/cpu:1\"):\n v1 = variables.Variable(222, name=\"v1\")\n t1 = saver_test_utils.CheckpointedOp(name=\"t1\")\n save = saver_module.Saver(\n {\n \"v0\": v0,\n \"v1\": v1,\n \"t0\": t0.saveable,\n \"t1\": t1.saveable\n },\n write_version=self._WRITE_VERSION,\n sharded=True)\n variables.global_variables_initializer().run()\n t0.insert(\"k11\", 33.0).run()\n t1.insert(\"k22\", 44.0).run()\n self.assertEqual(111, v0.eval())\n self.assertEqual(222, v1.eval())\n self.assertEqual(b\"k11\", t0.keys().eval())\n self.assertEqual(33.0, t0.values().eval())\n self.assertEqual(b\"k22\", t1.keys().eval())\n self.assertEqual(44.0, t1.values().eval())\n save_path = os.path.join(self.get_temp_dir(), \"sharded_basics\")\n if save._write_version is saver_pb2.SaverDef.V1:\n save.restore(sess, save_path + \"-?????-of-?????\")\n else:\n save.restore(sess, save_path)\n self.assertEqual(10, v0.eval())\n self.assertEqual(20, v1.eval())\n 
self.assertEqual(b\"k1\", t0.keys().eval())\n self.assertEqual(30.0, t0.values().eval())\n self.assertEqual(b\"k2\", t1.keys().eval())\n self.assertEqual(40.0, t1.values().eval())\n\n if save._write_version is saver_pb2.SaverDef.V1:\n self.assertEqual(\n saver_module.latest_checkpoint(self.get_temp_dir()),\n os.path.join(self.get_temp_dir(), \"sharded_basics-?????-of-00002\"))\n else:\n self.assertEqual(\n saver_module.latest_checkpoint(self.get_temp_dir()),\n os.path.join(self.get_temp_dir(), \"sharded_basics\"))\n\n def testSaverDef(self):\n with self.test_session():\n v0 = variables.Variable(123, name=\"v0\")\n save = saver_module.Saver({\"v0\": v0}, sharded=True)\n sd = save.as_saver_def()\n self.assertTrue(sd.sharded)\n\n def _testPartitionedVariables(self, use_resource):\n var_full_shape = [10, 3]\n # Allows save/restore mechanism to work w/ different slicings.\n var_name = \"my_var\"\n saved_dir = self._get_test_dir(\"partitioned_variables\")\n saved_path = os.path.join(saved_dir, \"ckpt\")\n\n call_saver_with_dict = False # updated by test loop below\n\n def _save(slices=None, partitioner=None):\n with self.test_session(graph=ops_lib.Graph()) as sess:\n # Calls .eval() to return the ndarray that makes up the full variable.\n rnd = random_ops.random_uniform(var_full_shape).eval()\n\n if slices:\n assert not partitioner\n # TODO(apassos): make create_partitioned_variables take use_resource\n # option to make this test passable without creating a named\n # variable_scope.\n vs = partitioned_variables.create_partitioned_variables(\n var_full_shape, slices, rnd, name=var_name)\n elif partitioner:\n vs = [\n variable_scope.get_variable(\n var_name,\n shape=var_full_shape,\n initializer=rnd,\n partitioner=partitioner,\n use_resource=use_resource)\n ]\n else:\n if use_resource:\n vs = [resource_variable_ops.ResourceVariable(rnd, name=var_name)]\n else:\n vs = [variables.Variable(rnd, name=var_name)]\n\n variables.global_variables_initializer().run()\n if call_saver_with_dict:\n saver = saver_module.Saver({var_name: (vs if slices else vs[0])})\n else:\n saver = saver_module.Saver(vs)\n actual_path = saver.save(sess, saved_path)\n self.assertEqual(saved_path, actual_path)\n\n return rnd\n\n def _restore(slices=None, partitioner=None):\n with self.test_session(graph=ops_lib.Graph()) as sess:\n if slices:\n assert not partitioner\n new_vs = partitioned_variables.create_partitioned_variables(\n var_full_shape,\n slices,\n array_ops.zeros(var_full_shape), # != original contents.\n name=var_name)\n elif partitioner:\n new_vs = [\n variable_scope.get_variable(\n var_name,\n shape=var_full_shape,\n initializer=array_ops.zeros(var_full_shape),\n partitioner=partitioner)\n ]\n else:\n new_vs = [\n variables.Variable(\n array_ops.zeros(\n shape=var_full_shape), # != original contents.\n name=var_name)\n ]\n\n variables.global_variables_initializer().run()\n if call_saver_with_dict:\n saver = saver_module.Saver({\n var_name: (new_vs if slices else new_vs[0])\n })\n else:\n saver = saver_module.Saver(new_vs)\n saver.restore(sess, saved_path)\n\n if partitioner:\n return new_vs[0].as_tensor().eval()\n elif slices and slices[0] != 1:\n return array_ops.concat(new_vs, 0).eval()\n elif slices and slices[1] != 1:\n return array_ops.concat(new_vs, 1).eval()\n else: # Non-sliced.\n return new_vs[0].eval()\n\n for call_saver_with_dict in {False, True}:\n # Save PartitionedVariable and restore into full variable.\n saved_full = _save(\n partitioner=partitioned_variables.fixed_size_partitioner(\n num_shards=2))\n 
restored_full = _restore()\n self.assertAllEqual(saved_full, restored_full)\n\n # Saves 10 horizontal parts of a partitioned variable.\n # Restores into a full variable, non-sliced.\n saved_full = _save(slices=[10, 1])\n restored_full = _restore()\n self.assertAllEqual(saved_full, restored_full)\n\n # Restores into a different number/orientation of slices.\n restored_full = _restore(slices=[2, 1]) # 2 horizon parts.\n self.assertAllEqual(saved_full, restored_full)\n restored_full = _restore(slices=[1, 3]) # 3 vertical parts.\n self.assertAllEqual(saved_full, restored_full)\n\n # Restores into a PartitionedVariable\n restored_full = _restore(\n partitioner=partitioned_variables.fixed_size_partitioner(\n num_shards=2))\n self.assertAllEqual(saved_full, restored_full)\n\n # Now, saves a full variable and restores in slices.\n saved_full = _save()\n restored_full = _restore(slices=[1, 3])\n self.assertAllEqual(saved_full, restored_full)\n\n def testPartitionedVariable(self):\n self._testPartitionedVariables(use_resource=False)\n\n def testPartitionedResourceVariable(self):\n self._testPartitionedVariables(use_resource=True)\n\n\n@test_util.with_c_api\nclass SaveRestoreShardedTestV2(SaveRestoreShardedTest):\n _WRITE_VERSION = saver_pb2.SaverDef.V2\n\n\n@test_util.with_c_api\nclass MaxToKeepTest(test.TestCase):\n\n def _get_test_dir(self, dirname):\n test_dir = os.path.join(self.get_temp_dir(), dirname)\n gfile.MakeDirs(test_dir)\n return test_dir\n\n def assertCheckpointState(self, model_checkpoint_path,\n all_model_checkpoint_paths, save_dir):\n checkpoint_state = saver_module.get_checkpoint_state(save_dir)\n self.assertEqual(checkpoint_state.model_checkpoint_path,\n model_checkpoint_path)\n self.assertEqual(checkpoint_state.all_model_checkpoint_paths,\n all_model_checkpoint_paths)\n\n def testNonSharded(self):\n save_dir = self._get_test_dir(\"max_to_keep_non_sharded\")\n\n with self.test_session() as sess:\n v = variables.Variable(10.0, name=\"v\")\n save = saver_module.Saver({\"v\": v}, max_to_keep=2)\n variables.global_variables_initializer().run()\n self.assertEqual([], save.last_checkpoints)\n\n s1 = save.save(sess, os.path.join(save_dir, \"s1\"))\n self.assertEqual([s1], save.last_checkpoints)\n self.assertTrue(saver_module.checkpoint_exists(s1))\n self.assertCheckpointState(\n model_checkpoint_path=s1,\n all_model_checkpoint_paths=[s1],\n save_dir=save_dir)\n\n s2 = save.save(sess, os.path.join(save_dir, \"s2\"))\n self.assertEqual([s1, s2], save.last_checkpoints)\n self.assertTrue(saver_module.checkpoint_exists(s1))\n self.assertTrue(saver_module.checkpoint_exists(s2))\n self.assertCheckpointState(\n model_checkpoint_path=s2,\n all_model_checkpoint_paths=[s1, s2],\n save_dir=save_dir)\n\n s3 = save.save(sess, os.path.join(save_dir, \"s3\"))\n self.assertEqual([s2, s3], save.last_checkpoints)\n self.assertFalse(saver_module.checkpoint_exists(s1))\n self.assertTrue(saver_module.checkpoint_exists(s2))\n self.assertTrue(saver_module.checkpoint_exists(s3))\n self.assertCheckpointState(\n model_checkpoint_path=s3,\n all_model_checkpoint_paths=[s2, s3],\n save_dir=save_dir)\n\n # Create a second helper, identical to the first.\n save2 = saver_module.Saver(saver_def=save.as_saver_def())\n save2.set_last_checkpoints(save.last_checkpoints)\n\n # Create a third helper, with the same configuration but no knowledge of\n # previous checkpoints.\n save3 = saver_module.Saver(saver_def=save.as_saver_def())\n\n # Exercise the first helper.\n\n # Adding s2 again (old s2 is removed first, then new s2 
appended)\n s2 = save.save(sess, os.path.join(save_dir, \"s2\"))\n self.assertEqual([s3, s2], save.last_checkpoints)\n self.assertFalse(saver_module.checkpoint_exists(s1))\n self.assertFalse(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))\n self.assertTrue(saver_module.checkpoint_exists(s3))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))\n self.assertTrue(saver_module.checkpoint_exists(s2))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))\n self.assertCheckpointState(\n model_checkpoint_path=s2,\n all_model_checkpoint_paths=[s3, s2],\n save_dir=save_dir)\n\n # Adding s1 (s3 should now be deleted as oldest in list)\n s1 = save.save(sess, os.path.join(save_dir, \"s1\"))\n self.assertEqual([s2, s1], save.last_checkpoints)\n self.assertFalse(saver_module.checkpoint_exists(s3))\n self.assertFalse(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))\n self.assertTrue(saver_module.checkpoint_exists(s2))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))\n self.assertTrue(saver_module.checkpoint_exists(s1))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))\n self.assertCheckpointState(\n model_checkpoint_path=s1,\n all_model_checkpoint_paths=[s2, s1],\n save_dir=save_dir)\n\n # Exercise the second helper.\n\n # Adding s2 again (old s2 is removed first, then new s2 appended)\n s2 = save2.save(sess, os.path.join(save_dir, \"s2\"))\n self.assertEqual([s3, s2], save2.last_checkpoints)\n # Created by the first helper.\n self.assertTrue(saver_module.checkpoint_exists(s1))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))\n # Deleted by the first helper.\n self.assertFalse(saver_module.checkpoint_exists(s3))\n self.assertFalse(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))\n self.assertTrue(saver_module.checkpoint_exists(s2))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))\n self.assertCheckpointState(\n model_checkpoint_path=s2,\n all_model_checkpoint_paths=[s3, s2],\n save_dir=save_dir)\n\n # Adding s1 (s3 should now be deleted as oldest in list)\n s1 = save2.save(sess, os.path.join(save_dir, \"s1\"))\n self.assertEqual([s2, s1], save2.last_checkpoints)\n self.assertFalse(saver_module.checkpoint_exists(s3))\n self.assertFalse(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))\n self.assertTrue(saver_module.checkpoint_exists(s2))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))\n self.assertTrue(saver_module.checkpoint_exists(s1))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))\n self.assertCheckpointState(\n model_checkpoint_path=s1,\n all_model_checkpoint_paths=[s2, s1],\n save_dir=save_dir)\n\n # Exercise the third helper.\n\n # Adding s2 again (but helper is unaware of previous s2)\n s2 = save3.save(sess, os.path.join(save_dir, \"s2\"))\n self.assertEqual([s2], save3.last_checkpoints)\n # Created by the first helper.\n self.assertTrue(saver_module.checkpoint_exists(s1))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))\n # Deleted by the first helper.\n self.assertFalse(saver_module.checkpoint_exists(s3))\n self.assertFalse(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))\n self.assertTrue(saver_module.checkpoint_exists(s2))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))\n # Even though the file for s1 
exists, this saver isn't aware of it, which\n # is why it doesn't end up in the checkpoint state.\n self.assertCheckpointState(\n model_checkpoint_path=s2,\n all_model_checkpoint_paths=[s2],\n save_dir=save_dir)\n\n # Adding s1 (s3 should not be deleted because the helper is unaware of it)\n s1 = save3.save(sess, os.path.join(save_dir, \"s1\"))\n self.assertEqual([s2, s1], save3.last_checkpoints)\n self.assertFalse(saver_module.checkpoint_exists(s3))\n self.assertFalse(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s3)))\n self.assertTrue(saver_module.checkpoint_exists(s2))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s2)))\n self.assertTrue(saver_module.checkpoint_exists(s1))\n self.assertTrue(\n saver_module.checkpoint_exists(save._MetaGraphFilename(s1)))\n self.assertCheckpointState(\n model_checkpoint_path=s1,\n all_model_checkpoint_paths=[s2, s1],\n save_dir=save_dir)\n\n def testSharded(self):\n save_dir = self._get_test_dir(\"max_to_keep_sharded\")\n\n with session.Session(\n target=\"\",\n config=config_pb2.ConfigProto(device_count={\"CPU\": 2})) as sess:\n with sess.graph.device(\"/cpu:0\"):\n v0 = variables.Variable(111, name=\"v0\")\n with sess.graph.device(\"/cpu:1\"):\n v1 = variables.Variable(222, name=\"v1\")\n save = saver_module.Saver(\n {\n \"v0\": v0,\n \"v1\": v1\n }, sharded=True, max_to_keep=2)\n variables.global_variables_initializer().run()\n self.assertEqual([], save.last_checkpoints)\n\n s1 = save.save(sess, os.path.join(save_dir, \"s1\"))\n self.assertEqual([s1], save.last_checkpoints)\n if save._write_version is saver_pb2.SaverDef.V1:\n self.assertEqual(2, len(gfile.Glob(s1)))\n else:\n self.assertEqual(4, len(gfile.Glob(s1 + \"*\")))\n\n self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))\n\n s2 = save.save(sess, os.path.join(save_dir, \"s2\"))\n self.assertEqual([s1, s2], save.last_checkpoints)\n if save._write_version is saver_pb2.SaverDef.V1:\n self.assertEqual(2, len(gfile.Glob(s1)))\n else:\n self.assertEqual(4, len(gfile.Glob(s1 + \"*\")))\n self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))\n if save._write_version is saver_pb2.SaverDef.V1:\n self.assertEqual(2, len(gfile.Glob(s2)))\n else:\n self.assertEqual(4, len(gfile.Glob(s2 + \"*\")))\n self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))\n\n s3 = save.save(sess, os.path.join(save_dir, \"s3\"))\n self.assertEqual([s2, s3], save.last_checkpoints)\n self.assertEqual(0, len(gfile.Glob(s1 + \"*\")))\n self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))\n if save._write_version is saver_pb2.SaverDef.V1:\n self.assertEqual(2, len(gfile.Glob(s2)))\n else:\n self.assertEqual(4, len(gfile.Glob(s2 + \"*\")))\n self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))\n if save._write_version is saver_pb2.SaverDef.V1:\n self.assertEqual(2, len(gfile.Glob(s3)))\n else:\n self.assertEqual(4, len(gfile.Glob(s3 + \"*\")))\n self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))\n\n def testNoMaxToKeep(self):\n save_dir = self._get_test_dir(\"no_max_to_keep\")\n save_dir2 = self._get_test_dir(\"max_to_keep_0\")\n\n with self.test_session() as sess:\n v = variables.Variable(10.0, name=\"v\")\n variables.global_variables_initializer().run()\n\n # Test max_to_keep being None.\n save = saver_module.Saver({\"v\": v}, max_to_keep=None)\n self.assertEqual([], save.last_checkpoints)\n s1 = save.save(sess, os.path.join(save_dir, \"s1\"))\n self.assertEqual([], save.last_checkpoints)\n self.assertTrue(saver_module.checkpoint_exists(s1))
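\n # Note: with max_to_keep=None (and likewise 0) the Saver does no checkpoint\n # bookkeeping at all: `last_checkpoints` stays empty and nothing is ever\n # garbage-collected, so both s1 and s2 stay on disk after every save().\n s2 = 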
save.save(sess, os.path.join(save_dir, \"s2\"))\n self.assertEqual([], save.last_checkpoints)\n self.assertTrue(saver_module.checkpoint_exists(s2))\n\n # Test max_to_keep being 0.\n save2 = saver_module.Saver({\"v\": v}, max_to_keep=0)\n self.assertEqual([], save2.last_checkpoints)\n s1 = save2.save(sess, os.path.join(save_dir2, \"s1\"))\n self.assertEqual([], save2.last_checkpoints)\n self.assertTrue(saver_module.checkpoint_exists(s1))\n s2 = save2.save(sess, os.path.join(save_dir2, \"s2\"))\n self.assertEqual([], save2.last_checkpoints)\n self.assertTrue(saver_module.checkpoint_exists(s2))\n\n def testNoMetaGraph(self):\n save_dir = self._get_test_dir(\"no_meta_graph\")\n\n with self.test_session() as sess:\n v = variables.Variable(10.0, name=\"v\")\n save = saver_module.Saver({\"v\": v})\n variables.global_variables_initializer().run()\n\n s1 = save.save(sess, os.path.join(save_dir, \"s1\"), write_meta_graph=False)\n self.assertTrue(saver_module.checkpoint_exists(s1))\n self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))\n\n\n@test_util.with_c_api\nclass KeepCheckpointEveryNHoursTest(test.TestCase):\n\n def _get_test_dir(self, dirname):\n test_dir = os.path.join(self.get_temp_dir(), dirname)\n gfile.MakeDirs(test_dir)\n return test_dir\n\n @test.mock.patch.object(saver_module, \"time\")\n def testNonSharded(self, mock_time):\n save_dir = self._get_test_dir(\"keep_checkpoint_every_n_hours\")\n\n with self.test_session() as sess:\n v = variables.Variable([10.0], name=\"v\")\n # Run the initializer NOW to avoid the 0.5s overhead of the first Run()\n # call, which throws the test timing off in fastbuild mode.\n variables.global_variables_initializer().run()\n # Create a saver that will keep the last 2 checkpoints plus one every 0.7\n # seconds.\n start_time = time.time()\n mock_time.time.return_value = start_time\n save = saver_module.Saver(\n {\n \"v\": v\n }, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600)\n self.assertEqual([], save.last_checkpoints)\n\n # Wait until 1 second has elapsed so s1 will be old enough to keep.\n # sleep may return early, don't trust it.\n mock_time.time.return_value = start_time + 1.0\n s1 = save.save(sess, os.path.join(save_dir, \"s1\"))\n self.assertEqual([s1], save.last_checkpoints)\n\n s2 = save.save(sess, os.path.join(save_dir, \"s2\"))\n self.assertEqual([s1, s2], save.last_checkpoints)\n\n # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save()\n # would normally delete s1, because max_to_keep is 2. However, s1 is\n # older than 0.7s so we must keep it.\n s3 = save.save(sess, os.path.join(save_dir, \"s3\"))\n self.assertEqual([s2, s3], save.last_checkpoints)\n\n # s1 should still be here; we are not checking now to reduce time\n # variance in the test.\n\n
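 # For intuition (hypothetical numbers, not the ones used by this test): a\n # Saver(..., max_to_keep=2, keep_checkpoint_every_n_hours=1.0) keeps the two\n # most recent checkpoints and additionally keeps roughly one checkpoint per\n # hour forever, exempt from max_to_keep deletion.\n\n # We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next\n # call to Save() will delete s2, because max_to_keep is 2, and because\n # we already kept the old s1. 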
s2 is very close in time to s1 so it gets\n # deleted.\n s4 = save.save(sess, os.path.join(save_dir, \"s4\"))\n self.assertEqual([s3, s4], save.last_checkpoints)\n\n # Check that s1 is still here, but s2 is gone.\n self.assertTrue(saver_module.checkpoint_exists(s1))\n self.assertFalse(saver_module.checkpoint_exists(s2))\n self.assertTrue(saver_module.checkpoint_exists(s3))\n self.assertTrue(saver_module.checkpoint_exists(s4))\n\n\n@test_util.with_c_api\nclass SaveRestoreWithVariableNameMap(test.TestCase):\n\n def _testNonReshape(self, variable_op):\n save_path = os.path.join(self.get_temp_dir(), \"non_reshape\")\n\n with self.test_session(graph=ops_lib.Graph()) as sess:\n # Build a graph with 2 parameter nodes, and Save and\n # Restore nodes for them.\n v0 = variable_op(10.0, name=\"v0\")\n v1 = variable_op(20.0, name=\"v1\")\n save = saver_module.Saver({\"save_prefix/v0\": v0, \"save_prefix/v1\": v1})\n self.evaluate(variables.global_variables_initializer())\n\n # Check that the parameter nodes have been initialized.\n self.assertEqual(10.0, self.evaluate(v0))\n self.assertEqual(20.0, self.evaluate(v1))\n\n # Save the initialized values in the file at \"save_path\"\n # Use a variable name map to set the saved tensor names\n val = save.save(sess, save_path)\n self.assertTrue(isinstance(val, six.string_types))\n self.assertEqual(save_path, val)\n\n # Verify that the original names are not in the Saved file\n save = saver_module.Saver({\"v0\": v0, \"v1\": v1})\n with self.assertRaisesOpError(\"not found in checkpoint\"):\n save.restore(sess, save_path)\n\n # Verify that the mapped names are present in the Saved file and can be\n # Restored using remapped names.\n with self.test_session(graph=ops_lib.Graph()) as sess:\n v0 = variable_op(-1.0, name=\"v0\")\n v1 = variable_op(-1.0, name=\"v1\")\n\n if context.in_graph_mode():\n with self.assertRaisesOpError(\"uninitialized\"):\n self.evaluate(v0)\n with self.assertRaisesOpError(\"uninitialized\"):\n self.evaluate(v1)\n\n save = saver_module.Saver({\"save_prefix/v0\": v0, \"save_prefix/v1\": v1})\n save.restore(sess, save_path)\n\n # Check that the parameter nodes have been restored.\n if context.in_graph_mode():\n self.assertEqual(10.0, self.evaluate(v0))\n self.assertEqual(20.0, self.evaluate(v1))\n\n # Add a prefix to the node names in the current graph and Restore using\n # remapped names.\n with self.test_session(graph=ops_lib.Graph()) as sess:\n v0 = variable_op(-1.0, name=\"restore_prefix/v0\")\n v1 = variable_op(-1.0, name=\"restore_prefix/v1\")\n\n if context.in_graph_mode():\n with self.assertRaisesOpError(\"uninitialized\"):\n self.evaluate(v0)\n with self.assertRaisesOpError(\"uninitialized\"):\n self.evaluate(v1)\n\n # Restore the saved values in the parameter nodes.\n save = saver_module.Saver({\"save_prefix/v0\": v0, \"save_prefix/v1\": v1})\n save.restore(sess, save_path)\n\n # Check that the parameter nodes have been restored.\n self.assertEqual(10.0, self.evaluate(v0))\n self.assertEqual(20.0, self.evaluate(v1))\n\n @test_util.run_in_graph_and_eager_modes()\n def testNonReshapeResourceVariable(self):\n self._testNonReshape(resource_variable_ops.ResourceVariable)\n\n def testNonReshapeVariable(self):\n self._testNonReshape(variables.Variable)\n\n\n@test_util.with_c_api\nclass LatestCheckpointWithRelativePaths(test.TestCase):\n\n @staticmethod\n @contextlib.contextmanager\n def tempWorkingDir(temppath):\n cwd = os.getcwd()\n os.chdir(temppath)\n try:\n yield\n finally:\n os.chdir(cwd)\n\n @staticmethod\n 
@contextlib.contextmanager\n def tempDir():\n tempdir = tempfile.mkdtemp()\n try:\n yield tempdir\n finally:\n shutil.rmtree(tempdir)\n\n def testNameCollision(self):\n # Make sure we have a clean directory to work in.\n with self.tempDir() as tempdir:\n # Jump to that directory until this test is done.\n with self.tempWorkingDir(tempdir):\n # Save training snapshots to a relative path.\n traindir = \"train/\"\n os.mkdir(traindir)\n # Collides with the default name of the checkpoint state file.\n filepath = os.path.join(traindir, \"checkpoint\")\n\n with self.test_session() as sess:\n unused_a = variables.Variable(0.0) # So that Saver saves something.\n variables.global_variables_initializer().run()\n\n # Should fail.\n saver = saver_module.Saver(sharded=False)\n with self.assertRaisesRegexp(ValueError, \"collides with\"):\n saver.save(sess, filepath)\n\n # Succeeds: the file will be named \"checkpoint-<step>\".\n saver.save(sess, filepath, global_step=1)\n self.assertIsNotNone(saver_module.latest_checkpoint(traindir))\n\n # Succeeds: the file will be named \"checkpoint-<i>-of-<n>\".\n saver = saver_module.Saver(sharded=True)\n saver.save(sess, filepath)\n self.assertIsNotNone(saver_module.latest_checkpoint(traindir))\n\n # Succeeds: the file will be named \"checkpoint-<step>-<i>-of-<n>\".\n saver = saver_module.Saver(sharded=True)\n saver.save(sess, filepath, global_step=1)\n self.assertIsNotNone(saver_module.latest_checkpoint(traindir))\n\n def testRelativePath(self):\n # Make sure we have a clean directory to work in.\n with self.tempDir() as tempdir:\n\n # Jump to that directory until this test is done.\n with self.tempWorkingDir(tempdir):\n\n # Save training snapshots to a relative path.\n traindir = \"train/\"\n os.mkdir(traindir)\n\n filename = \"snapshot\"\n filepath = os.path.join(traindir, filename)\n\n with self.test_session() as sess:\n # Build a simple graph.\n v0 = variables.Variable(0.0)\n inc = v0.assign_add(1.0)\n\n save = saver_module.Saver({\"v0\": v0})\n\n # Record a short training history.\n variables.global_variables_initializer().run()\n save.save(sess, filepath, global_step=0)\n inc.eval()\n save.save(sess, filepath, global_step=1)\n inc.eval()\n save.save(sess, filepath, global_step=2)\n\n with self.test_session() as sess:\n # Build a new graph with different initialization.\n v0 = variables.Variable(-1.0)\n\n # Create a new saver.\n save = saver_module.Saver({\"v0\": v0})\n variables.global_variables_initializer().run()\n\n # Get the most recent checkpoint name from the training history file.\n name = saver_module.latest_checkpoint(traindir)\n self.assertIsNotNone(name)\n\n # Restore \"v0\" from that checkpoint.\n save.restore(sess, name)\n self.assertEqual(v0.eval(), 2.0)\n\n\n@test_util.with_c_api\nclass CheckpointStateTest(test.TestCase):\n\n def _get_test_dir(self, dirname):\n test_dir = os.path.join(self.get_temp_dir(), dirname)\n gfile.MakeDirs(test_dir)\n return test_dir\n\n def testAbsPath(self):\n save_dir = self._get_test_dir(\"abs_paths\")\n abs_path = os.path.join(save_dir, \"model-0\")\n ckpt = saver_module.generate_checkpoint_state_proto(save_dir, abs_path)\n self.assertEqual(ckpt.model_checkpoint_path, abs_path)\n self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))\n self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)\n self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)\n\n def testRelPath(self):\n train_dir = \"train\"\n model = os.path.join(train_dir, \"model-0\")\n # model_checkpoint_path should have no \"train\" 
directory part.\n new_rel_path = \"model-0\"\n ckpt = saver_module.generate_checkpoint_state_proto(train_dir, model)\n self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)\n self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)\n self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)\n\n def testAllModelCheckpointPaths(self):\n save_dir = self._get_test_dir(\"all_models_test\")\n abs_path = os.path.join(save_dir, \"model-0\")\n for paths in [None, [], [\"model-2\"]]:\n ckpt = saver_module.generate_checkpoint_state_proto(\n save_dir, abs_path, all_model_checkpoint_paths=paths)\n self.assertEqual(ckpt.model_checkpoint_path, abs_path)\n self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))\n self.assertEqual(\n len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)\n self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)\n\n def testUpdateCheckpointState(self):\n save_dir = self._get_test_dir(\"update_checkpoint_state\")\n os.chdir(save_dir)\n # Make a temporary train directory.\n train_dir = \"train\"\n os.mkdir(train_dir)\n abs_path = os.path.join(save_dir, \"model-0\")\n rel_path = os.path.join(\"train\", \"model-2\")\n saver_module.update_checkpoint_state(\n train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path])\n ckpt = saver_module.get_checkpoint_state(train_dir)\n self.assertEqual(ckpt.model_checkpoint_path, rel_path)\n self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)\n self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)\n self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)\n\n def testUpdateCheckpointStateSaveRelativePaths(self):\n save_dir = self._get_test_dir(\"update_checkpoint_state\")\n os.chdir(save_dir)\n abs_path2 = os.path.join(save_dir, \"model-2\")\n rel_path2 = \"model-2\"\n abs_path0 = os.path.join(save_dir, \"model-0\")\n rel_path0 = \"model-0\"\n saver_module._update_checkpoint_state( # pylint: disable=protected-access\n save_dir=save_dir,\n model_checkpoint_path=abs_path2,\n all_model_checkpoint_paths=[rel_path0, abs_path2],\n save_relative_paths=True)\n\n # File should contain relative paths.\n file_content = file_io.read_file_to_string(\n os.path.join(save_dir, \"checkpoint\"))\n ckpt = CheckpointState()\n text_format.Merge(file_content, ckpt)\n self.assertEqual(ckpt.model_checkpoint_path, rel_path2)\n self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)\n self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path2)\n self.assertEqual(ckpt.all_model_checkpoint_paths[0], rel_path0)\n\n # get_checkpoint_state should return absolute paths.\n ckpt = saver_module.get_checkpoint_state(save_dir)\n self.assertEqual(ckpt.model_checkpoint_path, abs_path2)\n self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)\n self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path2)\n self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path0)\n\n def testCheckPointStateFailsWhenIncomplete(self):\n save_dir = self._get_test_dir(\"checkpoint_state_fails_when_incomplete\")\n os.chdir(save_dir)\n ckpt_path = os.path.join(save_dir, \"checkpoint\")\n ckpt_file = open(ckpt_path, \"w\")\n ckpt_file.write(\"\")\n ckpt_file.close()\n with self.assertRaises(ValueError):\n saver_module.get_checkpoint_state(save_dir)\n\n def testCheckPointCompletesRelativePaths(self):\n save_dir = self._get_test_dir(\"checkpoint_completes_relative_paths\")\n os.chdir(save_dir)\n ckpt_path = os.path.join(save_dir, \"checkpoint\")\n ckpt_file = open(ckpt_path, \"w\")\n ckpt_file.write(\"\"\"\n 
model_checkpoint_path: \"./model.ckpt-687529\"\n all_model_checkpoint_paths: \"./model.ckpt-687500\"\n all_model_checkpoint_paths: \"./model.ckpt-687529\"\n \"\"\")\n ckpt_file.close()\n ckpt = saver_module.get_checkpoint_state(save_dir)\n self.assertEqual(ckpt.model_checkpoint_path,\n os.path.join(save_dir, \"./model.ckpt-687529\"))\n self.assertEqual(ckpt.all_model_checkpoint_paths[0],\n os.path.join(save_dir, \"./model.ckpt-687500\"))\n self.assertEqual(ckpt.all_model_checkpoint_paths[1],\n os.path.join(save_dir, \"./model.ckpt-687529\"))\n\n\n@test_util.with_c_api\nclass MetaGraphTest(test.TestCase):\n\n def _get_test_dir(self, dirname):\n test_dir = os.path.join(self.get_temp_dir(), dirname)\n gfile.MakeDirs(test_dir)\n return test_dir\n\n def testAddCollectionDef(self):\n test_dir = self._get_test_dir(\"good_collection\")\n filename = os.path.join(test_dir, \"metafile\")\n with self.test_session():\n # Creates a graph.\n v0 = variables.Variable(1.0, name=\"v0\")\n control_flow_ops.cond(\n math_ops.less(v0, 10), lambda: math_ops.add(v0, 1),\n lambda: math_ops.subtract(v0, 1))\n control_flow_ops.while_loop(lambda i: math_ops.less(i, 10),\n lambda i: math_ops.add(i, 1), [v0])\n var = variables.Variable(constant_op.constant(0, dtype=dtypes.int64))\n count_up_to = var.count_up_to(3)\n input_queue = data_flow_ops.FIFOQueue(\n 30, dtypes.float32, shared_name=\"collection_queue\")\n qr = queue_runner_impl.QueueRunner(input_queue, [count_up_to])\n variables.global_variables_initializer()\n # Creates a saver.\n save = saver_module.Saver({\"v0\": v0})\n # Adds a set of collections.\n ops_lib.add_to_collection(\"int_collection\", 3)\n ops_lib.add_to_collection(\"float_collection\", 3.5)\n ops_lib.add_to_collection(\"string_collection\", \"hello\")\n ops_lib.add_to_collection(\"variable_collection\", v0)\n # Add QueueRunners.\n queue_runner_impl.add_queue_runner(qr)\n # Adds user_defined proto in three formats: string, bytes and Any.\n queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name=\"test_queue\")\n ops_lib.add_to_collection(\"user_defined_string_collection\",\n str(queue_runner))\n ops_lib.add_to_collection(\"user_defined_bytes_collection\",\n queue_runner.SerializeToString())\n any_buf = Any()\n any_buf.Pack(queue_runner)\n ops_lib.add_to_collection(\"user_defined_any_collection\", any_buf)\n\n # Generates MetaGraphDef.\n meta_graph_def = save.export_meta_graph(filename)\n self.assertTrue(meta_graph_def.HasField(\"saver_def\"))\n self.assertTrue(meta_graph_def.HasField(\"graph_def\"))\n self.assertTrue(meta_graph_def.HasField(\"meta_info_def\"))\n self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_version, \"\")\n self.assertNotEqual(meta_graph_def.meta_info_def.tensorflow_git_version,\n \"\")\n collection_def = meta_graph_def.collection_def\n self.assertEqual(len(collection_def), 12)\n\n with ops_lib.Graph().as_default():\n # Restores from MetaGraphDef.\n new_saver = saver_module.import_meta_graph(filename)\n # Generates a new MetaGraphDef.\n new_meta_graph_def = new_saver.export_meta_graph()\n # It should be the same as the original.\n\n test_util.assert_meta_graph_protos_equal(\n self, meta_graph_def, new_meta_graph_def)\n\n def testAddCollectionDefFails(self):\n with self.test_session():\n # Creates a graph.\n v0 = variables.Variable(10.0, name=\"v0\")\n # Creates a saver.\n save = saver_module.Saver({\"v0\": v0})\n # Generates MetaGraphDef.\n meta_graph_def = meta_graph_pb2.MetaGraphDef()\n\n # Verifies that collection with unsupported key will not be added.\n 
ops_lib.add_to_collection(save, 3)\n save._add_collection_def(meta_graph_def, save)\n self.assertEqual(len(meta_graph_def.collection_def), 0)\n\n # Verifies that collection where item type does not match expected\n # type will not be added.\n ops_lib.add_to_collection(\"int_collection\", 3)\n ops_lib.add_to_collection(\"int_collection\", 3.5)\n save._add_collection_def(meta_graph_def, \"int_collection\")\n self.assertEqual(len(meta_graph_def.collection_def), 0)\n\n def _testMultiSaverCollectionSave(self, test_dir):\n filename = os.path.join(test_dir, \"metafile\")\n saver0_ckpt = os.path.join(test_dir, \"saver0.ckpt\")\n saver1_ckpt = os.path.join(test_dir, \"saver1.ckpt\")\n with self.test_session(graph=ops_lib.Graph()) as sess:\n # Creates a graph.\n v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=\"v0\")\n v1 = variables.Variable(11.0, name=\"v1\")\n # Creates 2 savers.\n saver0 = saver_module.Saver({\"v0\": v0}, name=\"saver0\")\n saver1 = saver_module.Saver({\"v1\": v1}, name=\"saver1\")\n ops_lib.add_to_collection(\"savers\", saver0)\n ops_lib.add_to_collection(\"savers\", saver1)\n variables.global_variables_initializer().run()\n # Saves to different checkpoints.\n saver0.save(sess, saver0_ckpt)\n saver1.save(sess, saver1_ckpt)\n # Generates MetaGraphDef.\n meta_graph_def = saver_module.export_meta_graph(filename)\n meta_graph_def0 = saver0.export_meta_graph()\n meta_graph_def1 = saver1.export_meta_graph()\n\n # Verifies that there is no saver_def in meta_graph_def.\n self.assertFalse(meta_graph_def.HasField(\"saver_def\"))\n # Verifies that there is saver_def in meta_graph_def0 and 1.\n self.assertTrue(meta_graph_def0.HasField(\"saver_def\"))\n self.assertTrue(meta_graph_def1.HasField(\"saver_def\"))\n\n # Verifies SAVERS is saved as bytes_list for meta_graph_def.\n collection_def = meta_graph_def.collection_def[\"savers\"]\n kind = collection_def.WhichOneof(\"kind\")\n self.assertEqual(kind, \"bytes_list\")\n # Verifies that there are 2 entries in SAVERS collection.\n savers = getattr(collection_def, kind)\n self.assertEqual(2, len(savers.value))\n\n # Verifies SAVERS collection is saved as bytes_list for meta_graph_def0.\n collection_def = meta_graph_def0.collection_def[\"savers\"]\n kind = collection_def.WhichOneof(\"kind\")\n self.assertEqual(kind, \"bytes_list\")\n # Verifies that there are 2 entries in SAVERS collection.\n savers = getattr(collection_def, kind)\n self.assertEqual(2, len(savers.value))\n\n def _testMultiSaverCollectionRestore(self, test_dir):\n filename = os.path.join(test_dir, \"metafile\")\n saver0_ckpt = os.path.join(test_dir, \"saver0.ckpt\")\n saver1_ckpt = os.path.join(test_dir, \"saver1.ckpt\")\n with self.test_session(graph=ops_lib.Graph()) as sess:\n # Imports from meta_graph.\n saver_module.import_meta_graph(filename)\n # Retrieves SAVERS collection. Verifies there are 2 entries.\n savers = ops_lib.get_collection(\"savers\")\n self.assertEqual(2, len(savers))\n # Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.\n new_saver0 = savers[0]\n new_saver0.restore(sess, saver0_ckpt)\n v0 = sess.graph.get_tensor_by_name(\"v0:0\")\n v1 = sess.graph.get_tensor_by_name(\"v1:0\")\n self.assertAllEqual([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], v0.eval())\n self.assertEqual([3, 2], v0.get_shape())\n self.assertEqual([], v1.get_shape())\n with self.assertRaisesWithPredicateMatch(\n errors_impl.OpError, lambda e: \"uninitialized value v1\" in e.message):\n sess.run(v1)\n # Retrieves saver1. 
Verifies that new_saver1 can restore v1.\n new_saver1 = savers[1]\n new_saver1.restore(sess, saver1_ckpt)\n v1 = sess.graph.get_tensor_by_name(\"v1:0\")\n self.assertEqual(11.0, v1.eval())\n\n def testMultiSaverCollection(self):\n test_dir = self._get_test_dir(\"saver_collection\")\n self._testMultiSaverCollectionSave(test_dir)\n self._testMultiSaverCollectionRestore(test_dir)\n\n def testClearExtraneousSavers(self):\n test_dir = self._get_test_dir(\"clear_extraneous_savers\")\n filename = os.path.join(test_dir, \"metafile\")\n saver0_ckpt = os.path.join(test_dir, \"saver0.ckpt\")\n saver1_ckpt = os.path.join(test_dir, \"saver1.ckpt\")\n with self.test_session(graph=ops_lib.Graph()) as sess:\n # Creates a graph.\n v0 = variables.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=\"v0\")\n v1 = variables.Variable(11.0, name=\"v1\")\n\n # Creates 2 savers.\n saver0 = saver_module.Saver({\"v0\": v0}, name=\"saver0\")\n saver1 = saver_module.Saver({\"v1\": v1}, name=\"saver1\")\n ops_lib.add_to_collection(\"savers\", saver0)\n ops_lib.add_to_collection(\"savers\", saver1)\n variables.global_variables_initializer().run()\n\n # Saves to different checkpoints.\n saver0.save(sess, saver0_ckpt)\n saver1.save(sess, saver1_ckpt)\n\n # Generates MetaGraphDef.\n meta_graph_def = saver_module.export_meta_graph(filename)\n meta_graph_def0 = saver0.export_meta_graph()\n meta_graph_def1 = saver1.export_meta_graph(clear_extraneous_savers=True)\n\n # Verifies that there is no saver_def in meta_graph_def.\n self.assertFalse(meta_graph_def.HasField(\"saver_def\"))\n # Verifies that there is saver_def in meta_graph_def0 and 1.\n self.assertTrue(meta_graph_def0.HasField(\"saver_def\"))\n self.assertTrue(meta_graph_def1.HasField(\"saver_def\"))\n\n # Verifies SAVERS is saved as bytes_list for meta_graph_def.\n collection_def = meta_graph_def.collection_def[\"savers\"]\n kind = collection_def.WhichOneof(\"kind\")\n self.assertEqual(kind, \"bytes_list\")\n\n # Verifies that there are 2 entries in SAVERS collection.\n savers = getattr(collection_def, kind)\n self.assertEqual(2, len(savers.value))\n\n # Verifies SAVERS collection is saved as bytes_list for meta_graph_def1.\n collection_def = meta_graph_def1.collection_def[\"savers\"]\n kind = collection_def.WhichOneof(\"kind\")\n self.assertEqual(kind, \"bytes_list\")\n\n # Verifies that there is 1 entry in SAVERS collection.\n savers = getattr(collection_def, kind)\n self.assertEqual(1, len(savers.value))\n\n # Verifies that saver0 graph nodes are omitted from the saver1 export\n self.assertEqual(29, len(meta_graph_def0.graph_def.node))\n self.assertEqual(19, len(meta_graph_def1.graph_def.node))\n\n def testBinaryAndTextFormat(self):\n test_dir = self._get_test_dir(\"binary_and_text\")\n filename = os.path.join(test_dir, \"metafile\")\n with self.test_session(graph=ops_lib.Graph()):\n # Creates a graph.\n variables.Variable(10.0, name=\"v0\")\n # Exports the graph as binary format.\n saver_module.export_meta_graph(filename, as_text=False)\n with self.test_session(graph=ops_lib.Graph()):\n # Imports the binary format graph.\n saver = saver_module.import_meta_graph(filename)\n self.assertIsNotNone(saver)\n # Exports the graph as text format.\n saver.export_meta_graph(filename, as_text=True)\n with self.test_session(graph=ops_lib.Graph()):\n # Imports the text format graph.\n saver_module.import_meta_graph(filename)\n # Writes wrong contents to the file.\n graph_io.write_graph(saver.as_saver_def(),\n os.path.dirname(filename),\n os.path.basename(filename))\n with 
self.test_session(graph=ops_lib.Graph()):\n # Import should fail.\n with self.assertRaisesWithPredicateMatch(IOError,\n lambda e: \"Cannot parse file\"):\n saver_module.import_meta_graph(filename)\n # Deletes the file\n gfile.Remove(filename)\n with self.assertRaisesWithPredicateMatch(IOError,\n lambda e: \"does not exist\"):\n saver_module.import_meta_graph(filename)\n\n def testSliceVariable(self):\n test_dir = self._get_test_dir(\"slice_saver\")\n filename = os.path.join(test_dir, \"metafile\")\n with self.test_session():\n v1 = variables.Variable([20.0], name=\"v1\")\n v2 = variables.Variable([20.0], name=\"v2\")\n v2._set_save_slice_info(\n variables.Variable.SaveSliceInfo(\"v1\", [1], [0], [1]))\n\n # The names are different and will work.\n slice_saver = saver_module.Saver({\"first\": v1, \"second\": v2})\n variables.global_variables_initializer().run()\n # Exports to meta_graph\n meta_graph_def = slice_saver.export_meta_graph(filename)\n\n with ops_lib.Graph().as_default():\n # Restores from MetaGraphDef.\n new_saver = saver_module.import_meta_graph(filename)\n self.assertIsNotNone(new_saver)\n # Generates a new MetaGraphDef.\n new_meta_graph_def = new_saver.export_meta_graph()\n # It should be the same as the original.\n test_util.assert_meta_graph_protos_equal(self, meta_graph_def,\n new_meta_graph_def)\n\n def _testGraphExtensionSave(self, test_dir):\n filename = os.path.join(test_dir, \"metafile\")\n saver0_ckpt = os.path.join(test_dir, \"saver0.ckpt\")\n # Creates an inference graph.\n # Hidden 1\n images = constant_op.constant(1.2, dtypes.float32, shape=[100, 28])\n with ops_lib.name_scope(\"hidden1\"):\n weights = variables.Variable(\n random_ops.truncated_normal(\n [28, 128], stddev=1.0 / math.sqrt(float(28))),\n name=\"weights\")\n # The use of control_flow_ops.cond here is purely for adding test coverage\n # the save and restore of control flow context (which doesn't make any\n # sense here from a machine learning perspective). The typical biases is\n # a simple Variable without the conditions.\n biases = variables.Variable(\n control_flow_ops.cond(\n math_ops.less(random.random(), 0.5),\n lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),\n name=\"biases\")\n hidden1 = nn_ops.relu(math_ops.matmul(images, weights) + biases)\n # Hidden 2\n with ops_lib.name_scope(\"hidden2\"):\n weights = variables.Variable(\n random_ops.truncated_normal(\n [128, 32], stddev=1.0 / math.sqrt(float(128))),\n name=\"weights\")\n\n # The use of control_flow_ops.while_loop here is purely for adding test\n # coverage the save and restore of control flow context (which doesn't\n # make any sense here from a machine learning perspective). 
The typical\n # biases is a simple Variable without the conditions.\n def loop_cond(it, _):\n return it < 2\n\n def loop_body(it, biases):\n biases += constant_op.constant(0.1, shape=[32])\n return it + 1, biases\n\n _, biases = control_flow_ops.while_loop(\n loop_cond, loop_body,\n [constant_op.constant(0), variables.Variable(array_ops.zeros([32]))])\n hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)\n # Linear\n with ops_lib.name_scope(\"softmax_linear\"):\n weights = variables.Variable(\n random_ops.truncated_normal(\n [32, 10], stddev=1.0 / math.sqrt(float(32))),\n name=\"weights\")\n biases = variables.Variable(array_ops.zeros([10]), name=\"biases\")\n logits = math_ops.matmul(hidden2, weights) + biases\n ops_lib.add_to_collection(\"logits\", logits)\n init_all_op = variables.global_variables_initializer()\n\n with self.test_session() as sess:\n # Initializes all the variables.\n sess.run(init_all_op)\n # Runs to logit.\n sess.run(logits)\n # Creates a saver.\n saver0 = saver_module.Saver()\n saver0.save(sess, saver0_ckpt)\n # Generates MetaGraphDef.\n saver0.export_meta_graph(filename)\n\n def _testGraphExtensionRestore(self, test_dir):\n filename = os.path.join(test_dir, \"metafile\")\n train_filename = os.path.join(test_dir, \"train_metafile\")\n saver0_ckpt = os.path.join(test_dir, \"saver0.ckpt\")\n with self.test_session(graph=ops_lib.Graph()) as sess:\n # Restores from MetaGraphDef.\n new_saver = saver_module.import_meta_graph(filename)\n # Generates a new MetaGraphDef.\n new_saver.export_meta_graph()\n # Restores from checkpoint.\n new_saver.restore(sess, saver0_ckpt)\n # Adds loss and train.\n labels = constant_op.constant(0, dtypes.int32, shape=[100], name=\"labels\")\n batch_size = array_ops.size(labels)\n labels = array_ops.expand_dims(labels, 1)\n indices = array_ops.expand_dims(math_ops.range(0, batch_size), 1)\n concated = array_ops.concat([indices, labels], 1)\n onehot_labels = sparse_ops.sparse_to_dense(\n concated, array_ops.stack([batch_size, 10]), 1.0, 0.0)\n logits = ops_lib.get_collection(\"logits\")[0]\n cross_entropy = nn_ops.softmax_cross_entropy_with_logits(\n labels=onehot_labels, logits=logits, name=\"xentropy\")\n loss = math_ops.reduce_mean(cross_entropy, name=\"xentropy_mean\")\n\n summary.scalar(\"loss\", loss)\n # Creates the gradient descent optimizer with the given learning rate.\n optimizer = gradient_descent.GradientDescentOptimizer(0.01)\n\n # Runs train_op.\n train_op = optimizer.minimize(loss)\n ops_lib.add_to_collection(\"train_op\", train_op)\n\n # Runs train_op.\n sess.run(train_op)\n\n # Generates MetaGraphDef.\n saver_module.export_meta_graph(train_filename)\n\n def _testRestoreFromTrainGraphWithControlContext(self, test_dir):\n train_filename = os.path.join(test_dir, \"train_metafile\")\n saver0_ckpt = os.path.join(test_dir, \"saver0.ckpt\")\n with self.test_session(graph=ops_lib.Graph()) as sess:\n # Restores from MetaGraphDef.\n new_saver = saver_module.import_meta_graph(train_filename)\n # Restores from checkpoint.\n new_saver.restore(sess, saver0_ckpt)\n train_op = ops_lib.get_collection(\"train_op\")[0]\n sess.run(train_op)\n\n def testGraphExtension(self):\n test_dir = self._get_test_dir(\"graph_extension\")\n self._testGraphExtensionSave(test_dir)\n self._testGraphExtensionRestore(test_dir)\n self._testRestoreFromTrainGraphWithControlContext(test_dir)\n\n def _testWhileLoopAndGradientSerDes(self, outer_body_fn):\n # Build a while loop with `outer_body_fn`, export it, and verify that it can\n # be imported and the 
gradient can be built and run correctly.\n\n test_dir = self._get_test_dir(\"nested_control_flow\")\n filename = os.path.join(test_dir, \"metafile\")\n saver_ckpt = os.path.join(test_dir, \"saver.ckpt\")\n\n # Create while loop using `outer_body_fn`.\n with ops_lib.Graph().as_default():\n var = variables.Variable(0)\n var_name = var.name\n _, output = control_flow_ops.while_loop(lambda i, x: i < 5, outer_body_fn,\n [0, var])\n output_name = output.name\n init_op = variables.global_variables_initializer()\n\n # Generate a MetaGraphDef containing the while loop.\n with session.Session() as sess:\n sess.run(init_op)\n sess.run(output)\n saver = saver_module.Saver()\n saver.save(sess, saver_ckpt)\n saver.export_meta_graph(filename)\n\n # Build and run the gradients of the while loop. We use this below to\n # verify that the gradients are correct with an imported MetaGraphDef.\n grad = gradients_impl.gradients([output], [var])\n with session.Session() as sess:\n sess.run(init_op)\n expected_grad_value = sess.run(grad)\n\n # Restore the MetaGraphDef into a new Graph.\n with ops_lib.Graph().as_default():\n with session.Session() as sess:\n saver = saver_module.import_meta_graph(filename)\n saver.restore(sess, saver_ckpt)\n\n # Make sure we can still build gradients and get the same result.\n var = ops_lib.get_default_graph().get_tensor_by_name(var_name)\n output = ops_lib.get_default_graph().get_tensor_by_name(output_name)\n grad = gradients_impl.gradients([output], [var])\n\n init_op = variables.global_variables_initializer()\n\n with session.Session() as sess:\n sess.run(init_op)\n actual_grad_value = sess.run(grad)\n self.assertEqual(expected_grad_value, actual_grad_value)\n\n def testNestedWhileLoopsSerDes(self):\n # Test two simple nested while loops.\n def body(i, x):\n _, r = control_flow_ops.while_loop(lambda j, y: j < 3,\n lambda j, y: (j + 1, y + x),\n [0, 0])\n return i + 1, x + r\n self._testWhileLoopAndGradientSerDes(body)\n\n def testNestedControlFlowSerDes(self):\n # Test while loop in a cond in a while loop.\n # pylint: disable=g-long-lambda\n def body(i, x):\n cond_result = control_flow_ops.cond(\n i > 0,\n lambda: control_flow_ops.while_loop(\n lambda j, y: j < 3,\n lambda j, y: (j + 1, y + x),\n [0, 0])[1],\n lambda: x)\n return i + 1, cond_result\n # pylint: enable=g-long-lambda\n self._testWhileLoopAndGradientSerDes(body)\n\n def testStrippedOpListDef(self):\n with self.test_session():\n # Creates a graph.\n v0 = variables.Variable(0.0)\n var = variables.Variable(10.0)\n math_ops.add(v0, var)\n\n @function.Defun(dtypes.float32)\n def minus_one(x):\n return x - 1\n\n minus_one(array_ops.identity(v0))\n save = saver_module.Saver({\"v0\": v0})\n variables.global_variables_initializer()\n\n # Generates MetaGraphDef.\n meta_graph_def = save.export_meta_graph()\n ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]\n if save._write_version is saver_pb2.SaverDef.V1:\n self.assertEqual(ops, [\n \"Add\", \"Assign\", \"Const\", \"Identity\", \"NoOp\", \"RestoreV2\",\n \"SaveSlices\", \"Sub\", \"VariableV2\"\n ])\n else:\n self.assertEqual(ops, [\n \"Add\", \"Assign\", \"Const\", \"Identity\", \"NoOp\", \"RestoreV2\", \"SaveV2\",\n \"Sub\", \"VariableV2\"\n ])\n\n # Test calling stripped_op_list_for_graph directly\n op_list = meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def)\n self.assertEqual(ops, [o.name for o in op_list.op])\n for o in op_list.op:\n self.assertEqual(o.summary, \"\")\n self.assertEqual(o.description, \"\")\n\n def 
testStripDefaultValuedAttrs(self):\n \"\"\"Verifies that default valued attrs are stripped, unless disabled.\"\"\"\n\n # With strip_default_attrs enabled, attributes \"T\" (float32) and \"Tout\"\n # (complex64) in the \"Complex\" op must be removed.\n with self.test_session():\n real_num = variables.Variable(1.0, dtype=dtypes.float32, name=\"real\")\n imag_num = variables.Variable(2.0, dtype=dtypes.float32, name=\"imag\")\n math_ops.complex(real_num, imag_num, name=\"complex\")\n\n save = saver_module.Saver({\"real_num\": real_num, \"imag_num\": imag_num})\n variables.global_variables_initializer()\n\n meta_graph_def = save.export_meta_graph(strip_default_attrs=True)\n node_def = test_util.get_node_def_from_graph(\"complex\",\n meta_graph_def.graph_def)\n self.assertNotIn(\"T\", node_def.attr)\n self.assertNotIn(\"Tout\", node_def.attr)\n\n # With strip_default_attrs disabled, attributes \"T\" (float32) and \"Tout\"\n # (complex64) in the \"Complex\" op must *not* be removed, even if they map\n # to their defaults.\n with self.test_session(graph=ops_lib.Graph()):\n real_num = variables.Variable(1.0, dtype=dtypes.float32, name=\"real\")\n imag_num = variables.Variable(2.0, dtype=dtypes.float32, name=\"imag\")\n math_ops.complex(real_num, imag_num, name=\"complex\")\n\n save = saver_module.Saver({\"real_num\": real_num, \"imag_num\": imag_num})\n variables.global_variables_initializer()\n\n meta_graph_def = save.export_meta_graph(strip_default_attrs=False)\n node_def = test_util.get_node_def_from_graph(\"complex\",\n meta_graph_def.graph_def)\n self.assertIn(\"T\", node_def.attr)\n self.assertIn(\"Tout\", node_def.attr)\n\n def testImportIntoNamescope(self):\n # Test that we can import a meta graph into a namescope.\n test_dir = self._get_test_dir(\"import_into_namescope\")\n filename = os.path.join(test_dir, \"ckpt\")\n image = array_ops.placeholder(dtypes.float32, [None, 784], name=\"image\")\n label = array_ops.placeholder(dtypes.float32, [None, 10], name=\"label\")\n with session.Session() as sess:\n weights = variables.Variable(\n random_ops.random_uniform([784, 10]), name=\"weights\")\n bias = variables.Variable(array_ops.zeros([10]), name=\"bias\")\n logit = nn_ops.relu(math_ops.matmul(image, weights) + bias, name=\"logits\")\n nn_ops.softmax(logit, name=\"prediction\")\n cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,\n logits=logit, name=\"cost\")\n adam.AdamOptimizer().minimize(cost, name=\"optimize\")\n saver = saver_module.Saver()\n sess.run(variables.global_variables_initializer())\n saver.save(sess, filename)\n\n graph = ops_lib.Graph()\n with session.Session(graph=graph) as sess:\n new_saver = saver_module.import_meta_graph(\n filename + \".meta\", graph=graph, import_scope=\"new_model\")\n new_saver.restore(sess, filename)\n sess.run([\"new_model/optimize\"], {\n \"new_model/image:0\": np.random.random([1, 784]),\n \"new_model/label:0\": np.random.randint(\n 10, size=[1, 10])\n })\n\n def testClearDevicesOnImport(self):\n # Test that we import a graph without its devices and run successfully.\n with ops_lib.Graph().as_default():\n with ops_lib.device(\"/job:ps/replica:0/task:0/device:GPU:0\"):\n image = array_ops.placeholder(dtypes.float32, [None, 784], name=\"image\")\n label = array_ops.placeholder(dtypes.float32, [None, 10], name=\"label\")\n weights = variables.Variable(\n random_ops.random_uniform([784, 10]), name=\"weights\")\n bias = variables.Variable(array_ops.zeros([10]), name=\"bias\")\n logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)\n 
nn_ops.softmax(logit, name=\"prediction\")\n cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,\n logits=logit)\n adam.AdamOptimizer().minimize(cost, name=\"optimize\")\n meta_graph_def = saver_module.export_meta_graph()\n\n with session.Session(graph=ops_lib.Graph()) as sess:\n saver_module.import_meta_graph(\n meta_graph_def, clear_devices=False, import_scope=\"new_model\")\n # Device refers to GPU, which is not available here.\n with self.assertRaises(errors_impl.InvalidArgumentError):\n sess.run(variables.global_variables_initializer())\n\n with session.Session(graph=ops_lib.Graph()) as sess:\n saver_module.import_meta_graph(\n meta_graph_def, clear_devices=True, import_scope=\"new_model\")\n sess.run(variables.global_variables_initializer())\n sess.run([\"new_model/optimize\"], {\n \"new_model/image:0\": np.random.random([1, 784]),\n \"new_model/label:0\": np.random.randint(\n 10, size=[1, 10])\n })\n\n def testClearDevicesOnExport(self):\n # Test that we export a graph without its devices and run successfully.\n with ops_lib.Graph().as_default():\n with ops_lib.device(\"/job:ps/replica:0/task:0/device:GPU:0\"):\n image = array_ops.placeholder(dtypes.float32, [None, 784], name=\"image\")\n label = array_ops.placeholder(dtypes.float32, [None, 10], name=\"label\")\n weights = variables.Variable(\n random_ops.random_uniform([784, 10]), name=\"weights\")\n bias = variables.Variable(array_ops.zeros([10]), name=\"bias\")\n logit = nn_ops.relu(math_ops.matmul(image, weights) + bias)\n nn_ops.softmax(logit, name=\"prediction\")\n cost = nn_ops.softmax_cross_entropy_with_logits(labels=label,\n logits=logit)\n adam.AdamOptimizer().minimize(cost, name=\"optimize\")\n meta_graph_def = saver_module.export_meta_graph(clear_devices=True)\n graph_io.write_graph(meta_graph_def, self.get_temp_dir(),\n \"meta_graph.pbtxt\")\n\n with session.Session(graph=ops_lib.Graph()) as sess:\n saver_module.import_meta_graph(meta_graph_def, import_scope=\"new_model\")\n sess.run(variables.global_variables_initializer())\n sess.run([\"new_model/optimize\"], {\n \"new_model/image:0\": np.random.random([1, 784]),\n \"new_model/label:0\": np.random.randint(\n 10, size=[1, 10])\n })\n\n def testPreserveDatasetAndFunctions(self):\n with ops_lib.Graph().as_default() as g:\n dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x)\n iterator = dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n _ = array_ops.identity(next_element, name=\"output\")\n\n # Generate three MetaGraphDef protos using different code paths.\n meta_graph_def_simple = saver_module.export_meta_graph()\n meta_graph_def_devices_cleared = saver_module.export_meta_graph(\n clear_devices=True)\n meta_graph_def_from_graph_def = saver_module.export_meta_graph(\n clear_devices=True, graph_def=g.as_graph_def())\n\n for meta_graph_def in [meta_graph_def_simple,\n meta_graph_def_devices_cleared,\n meta_graph_def_from_graph_def]:\n with session.Session(graph=ops_lib.Graph()) as sess:\n saver_module.import_meta_graph(meta_graph_def, import_scope=\"new_model\")\n sess.run(variables.global_variables_initializer())\n for i in range(10):\n self.assertEqual(i * i, sess.run(\"new_model/output:0\"))\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(\"new_model/output:0\")\n\n\n@test_util.with_c_api\nclass CheckpointReaderTest(test.TestCase):\n\n _WRITE_VERSION = saver_pb2.SaverDef.V1\n\n def testDebugString(self):\n # Builds a graph.\n v0 = variables.Variable(\n [[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name=\"v0\")\n v1 = 
variables.Variable(\n [[[1], [2]], [[3], [4]], [[5], [6]]], dtype=dtypes.float32, name=\"v1\")\n init_all_op = variables.global_variables_initializer()\n save = saver_module.Saver(\n {\n \"v0\": v0,\n \"v1\": v1\n }, write_version=self._WRITE_VERSION)\n save_path = os.path.join(self.get_temp_dir(),\n \"ckpt_for_debug_string\" + str(self._WRITE_VERSION))\n with self.test_session() as sess:\n sess.run(init_all_op)\n # Saves a checkpoint.\n save.save(sess, save_path)\n\n # Creates a reader.\n reader = pywrap_tensorflow.NewCheckpointReader(save_path)\n # Verifies that the tensors exist.\n self.assertTrue(reader.has_tensor(\"v0\"))\n self.assertTrue(reader.has_tensor(\"v1\"))\n debug_string = reader.debug_string()\n # Verifies that debug string contains the right strings.\n self.assertTrue(compat.as_bytes(\"v0 (DT_FLOAT) [2,3]\") in debug_string)\n self.assertTrue(compat.as_bytes(\"v1 (DT_FLOAT) [3,2,1]\") in debug_string)\n # Verifies get_variable_to_shape_map() returns the correct information.\n var_map = reader.get_variable_to_shape_map()\n self.assertEqual([2, 3], var_map[\"v0\"])\n self.assertEqual([3, 2, 1], var_map[\"v1\"])\n # Verifies get_tensor() returns the tensor value.\n v0_tensor = reader.get_tensor(\"v0\")\n v1_tensor = reader.get_tensor(\"v1\")\n self.assertAllEqual(v0.eval(), v0_tensor)\n self.assertAllEqual(v1.eval(), v1_tensor)\n # Verifies get_tensor() fails for non-existent tensors.\n with self.assertRaisesRegexp(errors.NotFoundError,\n \"v3 not found in checkpoint\"):\n reader.get_tensor(\"v3\")\n\n def testNonexistentPath(self):\n with self.assertRaisesRegexp(errors.NotFoundError,\n \"Unsuccessful TensorSliceReader\"):\n pywrap_tensorflow.NewCheckpointReader(\"non-existent\")\n\n\n@test_util.with_c_api\nclass CheckpointReaderForV2Test(CheckpointReaderTest):\n _WRITE_VERSION = saver_pb2.SaverDef.V2\n\n\n@test_util.with_c_api\nclass WriteGraphTest(test.TestCase):\n\n def _get_test_dir(self, dirname):\n test_dir = os.path.join(self.get_temp_dir(), dirname)\n gfile.MakeDirs(test_dir)\n return test_dir\n\n def testWriteGraph(self):\n test_dir = self._get_test_dir(\"write_graph_dir\")\n variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name=\"v0\")\n path = graph_io.write_graph(ops_lib.get_default_graph(),\n os.path.join(test_dir, \"l1\"), \"graph.pbtxt\")\n truth = os.path.join(test_dir, \"l1\", \"graph.pbtxt\")\n self.assertEqual(path, truth)\n self.assertTrue(os.path.exists(path))\n\n def testRecursiveCreate(self):\n test_dir = self._get_test_dir(\"deep_dir\")\n variables.Variable([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32, name=\"v0\")\n path = graph_io.write_graph(ops_lib.get_default_graph().as_graph_def(),\n os.path.join(test_dir, \"l1\", \"l2\", \"l3\"),\n \"graph.pbtxt\")\n truth = os.path.join(test_dir, \"l1\", \"l2\", \"l3\", \"graph.pbtxt\")\n self.assertEqual(path, truth)\n self.assertTrue(os.path.exists(path))\n\n\n@test_util.with_c_api\nclass SaverUtilsTest(test.TestCase):\n\n def setUp(self):\n self._base_dir = os.path.join(self.get_temp_dir(), \"saver_utils_test\")\n gfile.MakeDirs(self._base_dir)\n\n def tearDown(self):\n gfile.DeleteRecursively(self._base_dir)\n\n def testCheckpointExists(self):\n for sharded in (False, True):\n for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):\n with self.test_session(graph=ops_lib.Graph()) as sess:\n unused_v = variables.Variable(1.0, name=\"v\")\n variables.global_variables_initializer().run()\n saver = saver_module.Saver(sharded=sharded, write_version=version)\n\n path = 
os.path.join(self._base_dir, \"%s-%s\" % (sharded, version))\n self.assertFalse(\n saver_module.checkpoint_exists(path)) # Not saved yet.\n\n ckpt_prefix = saver.save(sess, path)\n self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))\n\n ckpt_prefix = saver_module.latest_checkpoint(self._base_dir)\n self.assertTrue(saver_module.checkpoint_exists(ckpt_prefix))\n\n def testGetCheckpointMtimes(self):\n prefixes = []\n for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):\n with self.test_session(graph=ops_lib.Graph()) as sess:\n unused_v = variables.Variable(1.0, name=\"v\")\n variables.global_variables_initializer().run()\n saver = saver_module.Saver(write_version=version)\n prefixes.append(\n saver.save(sess, os.path.join(self._base_dir, str(version))))\n\n mtimes = saver_module.get_checkpoint_mtimes(prefixes)\n self.assertEqual(2, len(mtimes))\n self.assertTrue(mtimes[1] >= mtimes[0])\n\n\n@test_util.with_c_api\nclass ScopedGraphTest(test.TestCase):\n\n def _get_test_dir(self, dirname):\n test_dir = os.path.join(self.get_temp_dir(), dirname)\n gfile.MakeDirs(test_dir)\n return test_dir\n\n def _testScopedSave(self, test_dir, exported_filename, ckpt_filename):\n graph = ops_lib.Graph()\n with graph.as_default():\n # Creates an inference graph.\n # Hidden 1\n images = constant_op.constant(\n 1.2, dtypes.float32, shape=[100, 28], name=\"images\")\n with ops_lib.name_scope(\"hidden1\"):\n weights1 = variables.Variable(\n random_ops.truncated_normal(\n [28, 128], stddev=1.0 / math.sqrt(float(28))),\n name=\"weights\")\n # The use of control_flow_ops.cond here is purely for adding test\n # coverage the save and restore of control flow context (which doesn't\n # make any sense here from a machine learning perspective). The typical\n # biases is a simple Variable without the conditions.\n biases1 = variables.Variable(\n control_flow_ops.cond(\n math_ops.less(random.random(), 0.5),\n lambda: array_ops.ones([128]), lambda: array_ops.zeros([128])),\n name=\"biases\")\n hidden1 = nn_ops.relu(math_ops.matmul(images, weights1) + biases1)\n\n # Hidden 2\n with ops_lib.name_scope(\"hidden2\"):\n weights2 = variables.Variable(\n random_ops.truncated_normal(\n [128, 32], stddev=1.0 / math.sqrt(float(128))),\n name=\"weights\")\n\n # The use of control_flow_ops.while_loop here is purely for adding test\n # coverage the save and restore of control flow context (which doesn't\n # make any sense here from a machine learning perspective). 
The typical\n # biases is a simple Variable without the conditions.\n def loop_cond(it, _):\n return it < 2\n\n def loop_body(it, biases2):\n biases2 += constant_op.constant(0.1, shape=[32])\n return it + 1, biases2\n\n _, biases2 = control_flow_ops.while_loop(loop_cond, loop_body, [\n constant_op.constant(0), variables.Variable(array_ops.zeros([32]))\n ])\n hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights2) + biases2)\n # Linear\n with ops_lib.name_scope(\"softmax_linear\"):\n weights3 = variables.Variable(\n random_ops.truncated_normal(\n [32, 10], stddev=1.0 / math.sqrt(float(32))),\n name=\"weights\")\n biases3 = variables.Variable(array_ops.zeros([10]), name=\"biases\")\n logits = math_ops.matmul(hidden2, weights3) + biases3\n ops_lib.add_to_collection(\"logits\", logits)\n\n # Adds user_defined proto in three formats: string, bytes and Any.\n # Any proto should just pass through.\n queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name=\"test_queue\")\n ops_lib.add_to_collection(\"user_defined_string_collection\",\n str(queue_runner))\n ops_lib.add_to_collection(\"user_defined_bytes_collection\",\n queue_runner.SerializeToString())\n any_buf = Any()\n any_buf.Pack(queue_runner)\n ops_lib.add_to_collection(\"user_defined_any_collection\", any_buf)\n\n _, var_list = meta_graph.export_scoped_meta_graph(\n filename=os.path.join(test_dir, exported_filename),\n graph=ops_lib.get_default_graph(),\n export_scope=\"hidden1\")\n self.assertEqual([\"biases:0\", \"weights:0\"], sorted(var_list.keys()))\n\n with self.test_session(graph=graph) as sess:\n sess.run(variables.global_variables_initializer())\n saver = saver_module.Saver(var_list=var_list, max_to_keep=1)\n saver.save(sess, os.path.join(test_dir, ckpt_filename), write_state=False)\n\n def _testScopedRestore(self, test_dir, exported_filename,\n new_exported_filename, ckpt_filename):\n graph = ops_lib.Graph()\n # Create all the missing inputs.\n with graph.as_default():\n new_image = constant_op.constant(\n 1.2, dtypes.float32, shape=[100, 28], name=\"images\")\n var_list = meta_graph.import_scoped_meta_graph(\n os.path.join(test_dir, exported_filename),\n graph=graph,\n input_map={\"$unbound_inputs_images\": new_image},\n import_scope=\"new_hidden1\")\n self.assertEqual([\"biases:0\", \"weights:0\"], sorted(var_list.keys()))\n hidden1 = graph.as_graph_element(\"new_hidden1/Relu:0\")\n weights1 = graph.as_graph_element(\"new_hidden1/weights:0\")\n biases1 = graph.as_graph_element(\"new_hidden1/biases:0\")\n\n with graph.as_default():\n # Hidden 2\n with ops_lib.name_scope(\"hidden2\"):\n weights = variables.Variable(\n random_ops.truncated_normal(\n [128, 32], stddev=1.0 / math.sqrt(float(128))),\n name=\"weights\")\n\n # The use of control_flow_ops.while_loop here is purely for adding test\n # coverage the save and restore of control flow context (which doesn't\n # make any sense here from a machine learning perspective). 
The typical\n # biases is a simple Variable without the conditions.\n def loop_cond(it, _):\n return it < 2\n\n def loop_body(it, biases):\n biases += constant_op.constant(0.1, shape=[32])\n return it + 1, biases\n\n _, biases = control_flow_ops.while_loop(loop_cond, loop_body, [\n constant_op.constant(0), variables.Variable(array_ops.zeros([32]))\n ])\n hidden2 = nn_ops.relu(math_ops.matmul(hidden1, weights) + biases)\n # Linear\n with ops_lib.name_scope(\"softmax_linear\"):\n weights = variables.Variable(\n random_ops.truncated_normal(\n [32, 10], stddev=1.0 / math.sqrt(float(32))),\n name=\"weights\")\n biases = variables.Variable(array_ops.zeros([10]), name=\"biases\")\n logits = math_ops.matmul(hidden2, weights) + biases\n ops_lib.add_to_collection(\"logits\", logits)\n\n # The rest of the variables.\n rest_variables = list(\n set(variables.global_variables()) - set(var_list.keys()))\n init_rest_op = variables.initialize_variables(rest_variables)\n\n with self.test_session(graph=graph) as sess:\n saver = saver_module.Saver(var_list=var_list, max_to_keep=1)\n saver.restore(sess, os.path.join(test_dir, ckpt_filename))\n # Verify that we have restored weights1 and biases1.\n sess.run([weights1, biases1])\n # Initialize the rest of the variables and run logits.\n sess.run(init_rest_op)\n sess.run(logits)\n\n # Verifies that we can save the subgraph under \"hidden1\" and restore it\n # into \"new_hidden1\" in the new graph.\n def testScopedSaveAndRestore(self):\n test_dir = self._get_test_dir(\"scoped_export_import\")\n ckpt_filename = \"ckpt\"\n self._testScopedSave(test_dir, \"exported_hidden1.pbtxt\", ckpt_filename)\n self._testScopedRestore(test_dir, \"exported_hidden1.pbtxt\",\n \"exported_new_hidden1.pbtxt\", ckpt_filename)\n\n # Verifies that we can copy the subgraph under \"hidden1\" and copy it\n # to different name scope in the same graph or different graph.\n def testCopyScopedGraph(self):\n test_dir = self._get_test_dir(\"scoped_copy\")\n saver0_ckpt = os.path.join(test_dir, \"saver0.ckpt\")\n graph1 = ops_lib.Graph()\n with graph1.as_default():\n with ops_lib.name_scope(\"hidden1\"):\n images = constant_op.constant(\n 1.0, dtypes.float32, shape=[3, 2], name=\"images\")\n weights1 = variables.Variable(\n [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name=\"weights\")\n biases1 = variables.Variable([0.1] * 3, name=\"biases\")\n nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name=\"relu\")\n\n # Run the graph and save scoped checkpoint.\n with self.test_session(graph=graph1) as sess:\n sess.run(variables.global_variables_initializer())\n _, var_list_1 = meta_graph.export_scoped_meta_graph(\n export_scope=\"hidden1\")\n saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)\n saver.save(sess, saver0_ckpt, write_state=False)\n\n expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))\n\n # Verifies copy to the same graph with the same name fails.\n with graph1.as_default():\n with self.assertRaisesWithPredicateMatch(\n ValueError, lambda e: \"need to be different\" in str(e)):\n meta_graph.copy_scoped_meta_graph(\n from_scope=\"hidden1\", to_scope=\"hidden1\")\n\n # Verifies copy to the same graph.\n with graph1.as_default():\n var_list_2 = meta_graph.copy_scoped_meta_graph(\n from_scope=\"hidden1\", to_scope=\"hidden2\")\n\n with self.test_session(graph=graph1) as sess:\n saver1 = saver_module.Saver(var_list=var_list_1, max_to_keep=1)\n saver1.restore(sess, saver0_ckpt)\n saver2 = saver_module.Saver(var_list=var_list_2, max_to_keep=1)\n saver2.restore(sess, 
saver0_ckpt)\n      self.assertAllClose(expected, sess.run(\"hidden1/relu:0\"))\n      self.assertAllClose(expected, sess.run(\"hidden2/relu:0\"))\n\n    # Verifies copy to different graph.\n    graph2 = ops_lib.Graph()\n    new_var_list_1 = meta_graph.copy_scoped_meta_graph(\n        from_scope=\"hidden1\",\n        to_scope=\"new_hidden1\",\n        from_graph=graph1,\n        to_graph=graph2)\n\n    with self.test_session(graph=graph2) as sess:\n      saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)\n      saver3.restore(sess, saver0_ckpt)\n      self.assertAllClose(expected, sess.run(\"new_hidden1/relu:0\"))\n\n  def testExportGraphDefWithScope(self):\n    test_dir = self._get_test_dir(\"export_graph_def\")\n    saver0_ckpt = os.path.join(test_dir, \"saver0.ckpt\")\n    graph1 = ops_lib.Graph()\n    with graph1.as_default():\n      with ops_lib.name_scope(\"hidden1\"):\n        images = constant_op.constant(\n            1.0, dtypes.float32, shape=[3, 2], name=\"images\")\n        weights1 = variables.Variable(\n            [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], name=\"weights\")\n        biases1 = variables.Variable([0.1] * 3, name=\"biases\")\n        nn_ops.relu(math_ops.matmul(images, weights1) + biases1, name=\"relu\")\n\n    # Run the graph and save scoped checkpoint.\n    with self.test_session(graph=graph1) as sess:\n      sess.run(variables.global_variables_initializer())\n      _, var_list_1 = meta_graph.export_scoped_meta_graph(\n          graph_def=graph1.as_graph_def(), export_scope=\"hidden1\")\n      saver = saver_module.Saver(var_list=var_list_1, max_to_keep=1)\n      saver.save(sess, saver0_ckpt, write_state=False)\n\n    expected = np.reshape([[5.0999999, 7.0999999, 9.10000038] * 3], (3, 3))\n\n    # Verifies that we can run successfully after restoring.\n    graph2 = ops_lib.Graph()\n    new_var_list_1 = meta_graph.copy_scoped_meta_graph(\n        from_scope=\"hidden1\",\n        to_scope=\"new_hidden1\",\n        from_graph=graph1,\n        to_graph=graph2)\n\n    with self.test_session(graph=graph2) as sess:\n      saver3 = saver_module.Saver(var_list=new_var_list_1, max_to_keep=1)\n      saver3.restore(sess, saver0_ckpt)\n      self.assertAllClose(expected, sess.run(\"new_hidden1/relu:0\"))\n\n  def testSerializeSaverWithScope(self):\n    test_dir = self._get_test_dir(\"export_graph_def\")\n    saver1_ckpt = os.path.join(test_dir, \"saver1.ckpt\")\n    saver2_ckpt = os.path.join(test_dir, \"saver2.ckpt\")\n    graph = ops_lib.Graph()\n    with graph.as_default():\n      with ops_lib.name_scope(\"hidden1\"):\n        variable1 = variables.Variable([1.0], name=\"variable1\")\n        saver1 = saver_module.Saver(var_list=[variable1])\n        graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver1)\n\n      with ops_lib.name_scope(\"hidden2\"):\n        variable2 = variables.Variable([2.0], name=\"variable2\")\n        saver2 = saver_module.Saver(var_list=[variable2], name=\"hidden2/\")\n        graph.add_to_collection(ops_lib.GraphKeys.SAVERS, saver2)\n\n    with self.test_session(graph=graph) as sess:\n      variables.global_variables_initializer().run()\n      saver1.save(sess, saver1_ckpt, write_state=False)\n      saver2.save(sess, saver2_ckpt, write_state=False)\n\n    graph1 = ops_lib.Graph()\n    var_dict1 = meta_graph.copy_scoped_meta_graph(\n        from_scope=\"hidden1\",\n        to_scope=\"new_hidden1\",\n        from_graph=graph,\n        to_graph=graph1)\n    self.assertEqual(1, len(var_dict1))\n\n    saver_list1 = graph1.get_collection(ops_lib.GraphKeys.SAVERS)\n    self.assertEqual(1, len(saver_list1))\n\n    with self.test_session(graph=graph1) as sess:\n      saver_list1[0].restore(sess, saver1_ckpt)\n      self.assertEqual(1.0, var_dict1[\"variable1:0\"].eval())\n\n    graph2 = ops_lib.Graph()\n    var_dict2 = meta_graph.copy_scoped_meta_graph(\n        from_scope=\"hidden2\",\n        
to_scope=\"new_hidden2\",\n from_graph=graph,\n to_graph=graph2)\n self.assertEqual(1, len(var_dict2))\n\n saver_list2 = graph2.get_collection(ops_lib.GraphKeys.SAVERS)\n self.assertEqual(1, len(saver_list2))\n\n with self.test_session(graph=graph2) as sess:\n saver_list2[0].restore(sess, saver2_ckpt)\n self.assertEqual(2.0, var_dict2[\"variable2:0\"].eval())\n\n\nclass _OwnsAVariableSimple(checkpointable.CheckpointableBase):\n \"\"\"A Checkpointable object which can be saved using a tf.train.Saver.\"\"\"\n\n def __init__(self):\n self.non_dep_variable = variable_scope.get_variable(\n name=\"non_dep_variable\", initializer=6., use_resource=True)\n\n def _gather_saveables_for_checkpoint(self):\n return {checkpointable.VARIABLE_VALUE_KEY: self.non_dep_variable}\n\n # The Saver sorts by name before parsing, so we need a name property.\n @property\n def name(self):\n return self.non_dep_variable.name\n\n\nclass _MirroringSaveable(\n saver_module.BaseSaverBuilder.ResourceVariableSaveable):\n\n def __init__(self, primary_variable, mirrored_variable):\n self._primary_variable = primary_variable\n self._mirrored_variable = mirrored_variable\n super(_MirroringSaveable, self).__init__(\n self._primary_variable, \"\", self._primary_variable.name)\n\n def restore(self, restored_tensors, restored_shapes):\n \"\"\"Restore the same value into both variables.\"\"\"\n tensor, = restored_tensors\n return control_flow_ops.group(\n self._primary_variable.assign(tensor),\n self._mirrored_variable.assign(tensor))\n\n\nclass _OwnsMirroredVariables(checkpointable.CheckpointableBase):\n \"\"\"A Checkpointable object which returns a more complex SaveableObject.\"\"\"\n\n def __init__(self):\n self.non_dep_variable = variable_scope.get_variable(\n name=\"non_dep_variable\", initializer=6., use_resource=True)\n self.mirrored = variable_scope.get_variable(\n name=\"mirrored\", initializer=15., use_resource=True)\n\n def _gather_saveables_for_checkpoint(self):\n saveable = _MirroringSaveable(\n primary_variable=self.non_dep_variable,\n mirrored_variable=self.mirrored)\n return {checkpointable.VARIABLE_VALUE_KEY: saveable}\n\n # The Saver sorts by name before parsing, so we need a name property.\n @property\n def name(self):\n return self.non_dep_variable.name\n\n\n@test_util.with_c_api\nclass CheckpointableCompatibilityTests(test.TestCase):\n\n # TODO(allenl): Track down python3 reference cycles in these tests.\n @test_util.run_in_graph_and_eager_modes()\n def testNotSaveableButIsCheckpointable(self):\n v = _OwnsAVariableSimple()\n saver = saver_module.Saver(var_list=[v])\n test_dir = self.get_temp_dir()\n prefix = os.path.join(test_dir, \"ckpt\")\n self.evaluate(v.non_dep_variable.assign(42.))\n with self.test_session() as sess:\n save_path = saver.save(sess, prefix)\n self.evaluate(v.non_dep_variable.assign(43.))\n saver.restore(sess, save_path)\n self.assertEqual(42., self.evaluate(v.non_dep_variable))\n\n @test_util.run_in_graph_and_eager_modes()\n def testMoreComplexSaveableReturned(self):\n v = _OwnsMirroredVariables()\n saver = saver_module.Saver(var_list=[v])\n test_dir = self.get_temp_dir()\n prefix = os.path.join(test_dir, \"ckpt\")\n self.evaluate(v.non_dep_variable.assign(42.))\n with self.test_session() as sess:\n save_path = saver.save(sess, prefix)\n self.evaluate(v.non_dep_variable.assign(43.))\n self.evaluate(v.mirrored.assign(44.))\n saver.restore(sess, save_path)\n self.assertEqual(42., self.evaluate(v.non_dep_variable))\n self.assertEqual(42., self.evaluate(v.mirrored))\n\n\nif __name__ == 
\"__main__\":\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Summary Operations.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.framework import summary_pb2\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gen_logging_ops\nfrom tensorflow.python.ops import summary_op_util\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_logging_ops import *\nfrom tensorflow.python.util.tf_export import tf_export\n# pylint: enable=wildcard-import\n\n\n@tf_export(\"summary.tensor_summary\")\ndef tensor_summary(name,\n tensor,\n summary_description=None,\n collections=None,\n summary_metadata=None,\n family=None,\n display_name=None):\n \"\"\"Outputs a `Summary` protocol buffer with a serialized tensor.proto.\n\n Args:\n name: A name for the generated node. If display_name is not set, it will\n also serve as the tag name in TensorBoard. (In that case, the tag\n name will inherit tf name scopes.)\n tensor: A tensor of any type and shape to serialize.\n summary_description: A long description of the summary sequence. Markdown\n is supported.\n collections: Optional list of graph collections keys. The new summary op is\n added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.\n summary_metadata: Optional SummaryMetadata proto (which describes which\n plugins may use the summary value).\n family: Optional; if provided, used as the prefix of the summary tag,\n which controls the name used for display on TensorBoard when\n display_name is not set.\n display_name: A string used to name this data in TensorBoard. If this is\n not set, then the node name will be used instead.\n\n Returns:\n A scalar `Tensor` of type `string`. The serialized `Summary` protocol\n buffer.\n \"\"\"\n\n if summary_metadata is None:\n summary_metadata = summary_pb2.SummaryMetadata()\n\n if summary_description is not None:\n summary_metadata.summary_description = summary_description\n\n if display_name is not None:\n summary_metadata.display_name = display_name\n\n serialized_summary_metadata = summary_metadata.SerializeToString()\n\n with summary_op_util.summary_scope(\n name, family, values=[tensor]) as (tag, scope):\n val = gen_logging_ops.tensor_summary_v2(\n tensor=tensor,\n tag=tag,\n name=scope,\n serialized_summary_metadata=serialized_summary_metadata)\n summary_op_util.collect(val, collections, [ops.GraphKeys.SUMMARIES])\n return val\n\nops.NotDifferentiable(\"TensorSummary\")\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Live value resolution.\n\nLive values are extracted from the known execution context.\n\nRequires activity analysis annotations.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gast\n\nfrom tensorflow.contrib.py2tf.pyct import anno\nfrom tensorflow.contrib.py2tf.pyct import transformer\nfrom tensorflow.contrib.py2tf.pyct.static_analysis.annos import NodeAnno\n\n\nclass LiveValueResolver(transformer.Base):\n \"\"\"Annotates nodes with live values.\"\"\"\n\n def __init__(self, context, literals):\n super(LiveValueResolver, self).__init__(context)\n self.literals = literals\n\n def visit_ClassDef(self, node):\n self.generic_visit(node)\n anno.setanno(node, 'live_val', self.context.namespace[node.name])\n return node\n\n def visit_Name(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, gast.Load):\n assert anno.hasanno(node, NodeAnno.IS_LOCAL), node\n symbol_is_local = anno.getanno(node, NodeAnno.IS_LOCAL)\n assert anno.hasanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY), node\n symbol_is_modified = anno.getanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY)\n assert anno.hasanno(node, NodeAnno.IS_PARAM), node\n symbol_is_param = anno.getanno(node, NodeAnno.IS_PARAM)\n\n if not symbol_is_local and not symbol_is_param:\n if node.id in self.literals:\n anno.setanno(node, 'live_val', self.literals[node.id])\n # TODO(mdan): Could live values have FQNs? i.e. 
'a'.join()\n elif node.id in self.context.namespace:\n obj = self.context.namespace[node.id]\n anno.setanno(node, 'live_val', obj)\n anno.setanno(node, 'fqn', (obj.__name__,))\n else:\n pass\n # TODO(mdan): Should we raise an error here?\n # Can encounter this when:\n # * a symbol truly lacks reference\n # * a symbol is new, like the new name of a function we just renamed.\n else:\n pass\n # TODO(mdan): Attempt to trace its value through the local chain.\n # TODO(mdan): Use type annotations as fallback.\n\n if not symbol_is_modified:\n if node.id in self.context.arg_values:\n obj = self.context.arg_values[node.id]\n anno.setanno(node, 'live_val', obj)\n anno.setanno(node, 'fqn', (obj.__class__.__name__,))\n return node\n\n def visit_Attribute(self, node):\n self.generic_visit(node)\n if anno.hasanno(node.value, 'live_val'):\n assert anno.hasanno(node.value, 'fqn')\n parent_object = anno.getanno(node.value, 'live_val')\n if not hasattr(parent_object, node.attr):\n raise AttributeError('%s has no attribute %s' % (parent_object,\n node.attr))\n anno.setanno(node, 'parent_type', type(parent_object))\n anno.setanno(node, 'live_val', getattr(parent_object, node.attr))\n anno.setanno(node, 'fqn', anno.getanno(node.value, 'fqn') + (node.attr,))\n # TODO(mdan): Investigate the role built-in annotations can play here.\n elif anno.hasanno(node.value, 'type'):\n parent_type = anno.getanno(node.value, 'type')\n if hasattr(parent_type, node.attr):\n # This should hold for static members like methods.\n # This would not hold for dynamic members like function attributes.\n # For the dynamic case, we simply leave the node without an annotation,\n # and let downstream consumers figure out what to do.\n anno.setanno(node, 'parent_type', parent_type)\n anno.setanno(node, 'live_val', getattr(parent_type, node.attr))\n anno.setanno(node, 'fqn',\n anno.getanno(node.value, 'type_fqn') + (node.attr,))\n elif isinstance(node.value, gast.Name):\n stem_name = node.value\n # All nonlocal symbols should be fully resolved.\n assert anno.hasanno(stem_name, NodeAnno.IS_LOCAL), stem_name\n # TODO(mdan): Figure out what to do when calling attribute on local object\n # Maybe just leave as-is?\n return node\n\n\ndef resolve(node, context, literals):\n return LiveValueResolver(context, literals).visit(node)\n",
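The resolver above hangs `live_val` annotations on AST nodes via `anno.setanno`. A simplified, standard-library sketch of the same idea, using a plain dict in place of the anno machinery (the function name and return shape are hypothetical, not part of the py2tf API):

```python
import ast

def resolve_live_values(source, namespace):
    """Map loaded Name nodes to the live objects a namespace provides."""
    tree = ast.parse(source)
    live_vals = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Load):
            if node.id in namespace:
                live_vals[node] = namespace[node.id]  # the "live value"
    return tree, live_vals

_, live = resolve_live_values("y = len(x)", {"len": len})
assert len in live.values()  # `len` resolved; `x` stayed unresolved
```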
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Relaxed OneHotCategorical distribution classes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom tensorflow.contrib.distributions.python.ops import bijectors\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops.distributions import distribution\nfrom tensorflow.python.ops.distributions import transformed_distribution\nfrom tensorflow.python.ops.distributions import util as distribution_util\n\n\nclass ExpRelaxedOneHotCategorical(distribution.Distribution):\n \"\"\"ExpRelaxedOneHotCategorical distribution with temperature and logits.\n\n An ExpRelaxedOneHotCategorical distribution is a log-transformed\n RelaxedOneHotCategorical distribution. The RelaxedOneHotCategorical is a\n distribution over random probability vectors, vectors of positive real\n values that sum to one, which continuously approximates a OneHotCategorical.\n The degree of approximation is controlled by a temperature: as the temperature\n goes to 0 the RelaxedOneHotCategorical becomes discrete with a distribution\n described by the logits, as the temperature goes to infinity the\n RelaxedOneHotCategorical becomes the constant distribution that is identically\n the constant vector of (1/event_size, ..., 1/event_size).\n\n Because computing log-probabilities of the RelaxedOneHotCategorical can\n suffer from underflow issues, this class is one solution for loss\n functions that depend on log-probabilities, such as the KL Divergence found\n in the variational autoencoder loss. The KL divergence between two\n distributions is invariant under invertible transformations, so evaluating\n KL divergences of ExpRelaxedOneHotCategorical samples, which are always\n followed by a `tf.exp` op, is equivalent to evaluating KL divergences of\n RelaxedOneHotCategorical samples. See the appendix of Maddison et al., 2016\n for more mathematical details, where this distribution is called the\n ExpConcrete.\n\n #### Examples\n\n Creates a continuous distribution, whose exp approximates a 3-class one-hot\n categorical distribution. The 2nd class is the most likely to be the\n largest component in samples drawn from this distribution. 
If those samples\n are followed by a `tf.exp` op, then they are distributed as a relaxed onehot\n categorical.\n\n ```python\n temperature = 0.5\n p = [0.1, 0.5, 0.4]\n dist = ExpRelaxedOneHotCategorical(temperature, probs=p)\n samples = dist.sample()\n exp_samples = tf.exp(samples)\n # exp_samples has the same distribution as samples from\n # RelaxedOneHotCategorical(temperature, probs=p)\n ```\n\n Creates a continuous distribution, whose exp approximates a 3-class one-hot\n categorical distribution. The 2nd class is the most likely to be the\n largest component in samples drawn from this distribution.\n\n ```python\n temperature = 0.5\n logits = [-2, 2, 0]\n dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)\n samples = dist.sample()\n exp_samples = tf.exp(samples)\n # exp_samples has the same distribution as samples from\n # RelaxedOneHotCategorical(temperature, probs=p)\n ```\n\n Creates a continuous distribution, whose exp approximates a 3-class one-hot\n categorical distribution. Because the temperature is very low, samples from\n this distribution are almost discrete, with one component almost 0 and the\n others very negative. The 2nd class is the most likely to be the largest\n component in samples drawn from this distribution.\n\n ```python\n temperature = 1e-5\n logits = [-2, 2, 0]\n dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)\n samples = dist.sample()\n exp_samples = tf.exp(samples)\n # exp_samples has the same distribution as samples from\n # RelaxedOneHotCategorical(temperature, probs=p)\n ```\n\n Creates a continuous distribution, whose exp approximates a 3-class one-hot\n categorical distribution. Because the temperature is very high, samples from\n this distribution are usually close to the (-log(3), -log(3), -log(3)) vector.\n The 2nd class is still the most likely to be the largest component\n in samples drawn from this distribution.\n\n ```python\n temperature = 10\n logits = [-2, 2, 0]\n dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)\n samples = dist.sample()\n exp_samples = tf.exp(samples)\n # exp_samples has the same distribution as samples from\n # RelaxedOneHotCategorical(temperature, probs=p)\n ```\n\n Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:\n A Continuous Relaxation of Discrete Random Variables. 2016.\n \"\"\"\n\n def __init__(\n self,\n temperature,\n logits=None,\n probs=None,\n dtype=None,\n validate_args=False,\n allow_nan_stats=True,\n name=\"ExpRelaxedOneHotCategorical\"):\n \"\"\"Initialize ExpRelaxedOneHotCategorical using class log-probabilities.\n\n Args:\n temperature: An 0-D `Tensor`, representing the temperature\n of a set of ExpRelaxedCategorical distributions. The temperature should\n be positive.\n logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities\n of a set of ExpRelaxedCategorical distributions. The first\n `N - 1` dimensions index into a batch of independent distributions and\n the last dimension represents a vector of logits for each class. Only\n one of `logits` or `probs` should be passed in.\n probs: An N-D `Tensor`, `N >= 1`, representing the probabilities\n of a set of ExpRelaxedCategorical distributions. The first\n `N - 1` dimensions index into a batch of independent distributions and\n the last dimension represents a vector of probabilities for each\n class. 
Only one of `logits` or `probs` should be passed in.\n dtype: The type of the event samples (default: inferred from\n logits/probs).\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n \"\"\"\n parameters = locals()\n with ops.name_scope(name, values=[logits, probs, temperature]):\n\n self._logits, self._probs = distribution_util.get_logits_and_probs(\n name=name, logits=logits, probs=probs, validate_args=validate_args,\n multidimensional=True)\n\n if dtype is None:\n dtype = self._logits.dtype\n if not validate_args:\n temperature = math_ops.cast(temperature, dtype)\n\n with ops.control_dependencies([check_ops.assert_positive(temperature)]\n if validate_args else []):\n self._temperature = array_ops.identity(temperature, name=\"temperature\")\n self._temperature_2d = array_ops.reshape(temperature, [-1, 1],\n name=\"temperature_2d\")\n\n logits_shape_static = self._logits.get_shape().with_rank_at_least(1)\n if logits_shape_static.ndims is not None:\n self._batch_rank = ops.convert_to_tensor(\n logits_shape_static.ndims - 1,\n dtype=dtypes.int32,\n name=\"batch_rank\")\n else:\n with ops.name_scope(name=\"batch_rank\"):\n self._batch_rank = array_ops.rank(self._logits) - 1\n\n with ops.name_scope(name=\"event_size\"):\n self._event_size = array_ops.shape(self._logits)[-1]\n\n super(ExpRelaxedOneHotCategorical, self).__init__(\n dtype=dtype,\n reparameterization_type=distribution.FULLY_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n graph_parents=[self._logits,\n self._probs,\n self._temperature],\n name=name)\n\n @property\n def event_size(self):\n \"\"\"Scalar `int32` tensor: the number of classes.\"\"\"\n return self._event_size\n\n @property\n def temperature(self):\n \"\"\"Batchwise temperature tensor of a RelaxedCategorical.\"\"\"\n return self._temperature\n\n @property\n def logits(self):\n \"\"\"Vector of coordinatewise logits.\"\"\"\n return self._logits\n\n @property\n def probs(self):\n \"\"\"Vector of probabilities summing to one.\"\"\"\n return self._probs\n\n def _batch_shape_tensor(self):\n return array_ops.shape(self._logits)[:-1]\n\n def _batch_shape(self):\n return self.logits.get_shape()[:-1]\n\n def _event_shape_tensor(self):\n return array_ops.shape(self.logits)[-1:]\n\n def _event_shape(self):\n return self.logits.get_shape().with_rank_at_least(1)[-1:]\n\n def _sample_n(self, n, seed=None):\n sample_shape = array_ops.concat([[n], array_ops.shape(self.logits)], 0)\n logits = self.logits * array_ops.ones(sample_shape, dtype=self.dtype)\n logits_2d = array_ops.reshape(logits, [-1, self.event_size])\n # Uniform variates must be sampled from the open-interval `(0, 1)` rather\n # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`\n # because it is the smallest, positive, \"normal\" number. A \"normal\" number\n # is such that the mantissa has an implicit leading 1. Normal, positive\n # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. 
In\n    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample\n    # 0.\n    uniform = random_ops.random_uniform(\n        shape=array_ops.shape(logits_2d),\n        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,\n        maxval=1.,\n        dtype=self.dtype,\n        seed=seed)\n    gumbel = -math_ops.log(-math_ops.log(uniform))\n    noisy_logits = math_ops.div(gumbel + logits_2d, self._temperature_2d)\n    samples = nn_ops.log_softmax(noisy_logits)\n    ret = array_ops.reshape(samples, sample_shape)\n    return ret\n\n  def _log_prob(self, x):\n    x = self._assert_valid_sample(x)\n    # broadcast logits or x if need be.\n    logits = self.logits\n    if (not x.get_shape().is_fully_defined() or\n        not logits.get_shape().is_fully_defined() or\n        x.get_shape() != logits.get_shape()):\n      logits = array_ops.ones_like(x, dtype=logits.dtype) * logits\n      x = array_ops.ones_like(logits, dtype=x.dtype) * x\n    logits_shape = array_ops.shape(math_ops.reduce_sum(logits, axis=[-1]))\n    logits_2d = array_ops.reshape(logits, [-1, self.event_size])\n    x_2d = array_ops.reshape(x, [-1, self.event_size])\n    # compute the normalization constant\n    k = math_ops.cast(self.event_size, x.dtype)\n    log_norm_const = (math_ops.lgamma(k)\n                      + (k - 1.)\n                      * math_ops.log(self.temperature))\n    # compute the unnormalized density\n    log_softmax = nn_ops.log_softmax(logits_2d - x_2d * self._temperature_2d)\n    log_unnorm_prob = math_ops.reduce_sum(log_softmax, [-1], keepdims=False)\n    # combine unnormalized density with normalization constant\n    log_prob = log_norm_const + log_unnorm_prob\n    # Reshapes log_prob to be consistent with shape of user-supplied logits\n    ret = array_ops.reshape(log_prob, logits_shape)\n    return ret\n\n  def _assert_valid_sample(self, x):\n    if not self.validate_args:\n      return x\n    return control_flow_ops.with_dependencies([\n        check_ops.assert_non_positive(x),\n        distribution_util.assert_close(\n            array_ops.zeros([], dtype=self.dtype),\n            math_ops.reduce_logsumexp(x, axis=[-1])),\n    ], x)\n\n\nclass RelaxedOneHotCategorical(\n    transformed_distribution.TransformedDistribution):\n  \"\"\"RelaxedOneHotCategorical distribution with temperature and logits.\n\n  The RelaxedOneHotCategorical is a distribution over random probability\n  vectors, vectors of positive real values that sum to one, which continuously\n  approximates a OneHotCategorical. The degree of approximation is controlled by\n  a temperature: as the temperature goes to 0 the RelaxedOneHotCategorical\n  becomes discrete with a distribution described by the `logits` or `probs`\n  parameters, as the temperature goes to infinity the RelaxedOneHotCategorical\n  becomes the constant distribution that is identically the constant vector of\n  (1/event_size, ..., 1/event_size).\n\n  The RelaxedOneHotCategorical distribution was concurrently introduced as the\n  Gumbel-Softmax (Jang et al., 2016) and Concrete (Maddison et al., 2016)\n  distributions for use as a reparameterized continuous approximation to the\n  `Categorical` one-hot distribution. If you use this distribution, please cite\n  both papers.\n\n  #### Examples\n\n  Creates a continuous distribution, which approximates a 3-class one-hot\n  categorical distribution. The 2nd class is the most likely to be the\n  largest component in samples drawn from this distribution.\n\n  ```python\n  temperature = 0.5\n  p = [0.1, 0.5, 0.4]\n  dist = RelaxedOneHotCategorical(temperature, probs=p)\n  ```\n\n  Creates a continuous distribution, which approximates a 3-class one-hot\n  categorical distribution. 
The 2nd class is the most likely to be the\n largest component in samples drawn from this distribution.\n\n ```python\n temperature = 0.5\n logits = [-2, 2, 0]\n dist = RelaxedOneHotCategorical(temperature, logits=logits)\n ```\n\n Creates a continuous distribution, which approximates a 3-class one-hot\n categorical distribution. Because the temperature is very low, samples from\n this distribution are almost discrete, with one component almost 1 and the\n others nearly 0. The 2nd class is the most likely to be the largest component\n in samples drawn from this distribution.\n\n ```python\n temperature = 1e-5\n logits = [-2, 2, 0]\n dist = RelaxedOneHotCategorical(temperature, logits=logits)\n ```\n\n Creates a continuous distribution, which approximates a 3-class one-hot\n categorical distribution. Because the temperature is very high, samples from\n this distribution are usually close to the (1/3, 1/3, 1/3) vector. The 2nd\n class is still the most likely to be the largest component\n in samples drawn from this distribution.\n\n ```python\n temperature = 10\n logits = [-2, 2, 0]\n dist = RelaxedOneHotCategorical(temperature, logits=logits)\n ```\n\n Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with\n Gumbel-Softmax. 2016.\n\n Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:\n A Continuous Relaxation of Discrete Random Variables. 2016.\n \"\"\"\n\n def __init__(\n self,\n temperature,\n logits=None,\n probs=None,\n dtype=None,\n validate_args=False,\n allow_nan_stats=True,\n name=\"RelaxedOneHotCategorical\"):\n \"\"\"Initialize RelaxedOneHotCategorical using class log-probabilities.\n\n Args:\n temperature: An 0-D `Tensor`, representing the temperature\n of a set of RelaxedOneHotCategorical distributions. The temperature\n should be positive.\n logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities\n of a set of RelaxedOneHotCategorical distributions. The first\n `N - 1` dimensions index into a batch of independent distributions and\n the last dimension represents a vector of logits for each class. Only\n one of `logits` or `probs` should be passed in.\n probs: An N-D `Tensor`, `N >= 1`, representing the probabilities\n of a set of RelaxedOneHotCategorical distributions. The first `N - 1`\n dimensions index into a batch of independent distributions and the last\n dimension represents a vector of probabilities for each class. Only one\n of `logits` or `probs` should be passed in.\n dtype: The type of the event samples (default: inferred from\n logits/probs).\n validate_args: Unused in this distribution.\n allow_nan_stats: Python `bool`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member. If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n name: A name for this distribution (optional).\n \"\"\"\n dist = ExpRelaxedOneHotCategorical(temperature,\n logits=logits,\n probs=probs,\n dtype=dtype,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats)\n super(RelaxedOneHotCategorical, self).__init__(dist,\n bijectors.Exp(event_ndims=1),\n name=name)\n",
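The `_sample_n` implementation above is the Gumbel-softmax trick: perturb the logits with Gumbel noise, divide by the temperature, then take a (log-)softmax. A NumPy sketch of the same computation, with illustrative values (not the library code itself):

```python
import numpy as np

def sample_relaxed_one_hot(logits, temperature, rng=np.random):
    # Gumbel(0, 1) noise via inverse transform; uniforms are drawn from the
    # open interval (0, 1), mirroring the `tiny` trick in _sample_n.
    u = rng.uniform(np.finfo(np.float64).tiny, 1.0, size=np.shape(logits))
    gumbel = -np.log(-np.log(u))
    noisy = (np.asarray(logits) + gumbel) / temperature
    e = np.exp(noisy - noisy.max())  # numerically stable softmax
    return e / e.sum()

s = sample_relaxed_one_hot([-2.0, 2.0, 0.0], temperature=0.5)
assert np.isclose(s.sum(), 1.0)  # a point on the probability simplex
```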
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport gc\nimport glob\nimport os\nimport shutil\nimport tempfile\nimport time\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n# pylint: disable=g-bad-import-order\nimport tensorflow.contrib.eager as tfe\nfrom tensorflow.contrib.eager.python.examples.spinn import data\nfrom third_party.examples.eager.spinn import spinn\nfrom tensorflow.contrib.summary import summary_test_util\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.training import checkpoint_utils\n# pylint: enable=g-bad-import-order\n\n\ndef _generate_synthetic_snli_data_batch(sequence_length,\n batch_size,\n vocab_size):\n \"\"\"Generate a fake batch of SNLI data for testing.\"\"\"\n with tf.device(\"cpu:0\"):\n labels = tf.random_uniform([batch_size], minval=1, maxval=4, dtype=tf.int64)\n prem = tf.random_uniform(\n (sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)\n prem_trans = tf.constant(np.array(\n [[3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3,\n 2, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2,\n 3, 2, 2]] * batch_size, dtype=np.int64).T)\n hypo = tf.random_uniform(\n (sequence_length, batch_size), maxval=vocab_size, dtype=tf.int64)\n hypo_trans = tf.constant(np.array(\n [[3, 3, 2, 3, 3, 3, 2, 2, 2, 3, 3, 3,\n 2, 3, 3, 2, 2, 3, 3, 3, 2, 2, 2, 2,\n 3, 2, 2]] * batch_size, dtype=np.int64).T)\n if tfe.num_gpus():\n labels = labels.gpu()\n prem = prem.gpu()\n prem_trans = prem_trans.gpu()\n hypo = hypo.gpu()\n hypo_trans = hypo_trans.gpu()\n return labels, prem, prem_trans, hypo, hypo_trans\n\n\ndef _test_spinn_config(d_embed, d_out, logdir=None, inference_sentences=None):\n \"\"\"Generate a config tuple for testing.\n\n Args:\n d_embed: Embedding dimensions.\n d_out: Model output dimensions.\n logdir: Optional logdir.\n inference_sentences: A 2-tuple of strings representing the sentences (with\n binary parsing result), e.g.,\n (\"( ( The dog ) ( ( is running ) . ) )\", \"( ( The dog ) ( moves . 
) )\").\n\n Returns:\n A config tuple.\n \"\"\"\n config_tuple = collections.namedtuple(\n \"Config\", [\"d_hidden\", \"d_proj\", \"d_tracker\", \"predict\",\n \"embed_dropout\", \"mlp_dropout\", \"n_mlp_layers\", \"d_mlp\",\n \"d_out\", \"projection\", \"lr\", \"batch_size\", \"epochs\",\n \"force_cpu\", \"logdir\", \"log_every\", \"dev_every\", \"save_every\",\n \"lr_decay_every\", \"lr_decay_by\", \"inference_premise\",\n \"inference_hypothesis\"])\n\n inference_premise = inference_sentences[0] if inference_sentences else None\n inference_hypothesis = inference_sentences[1] if inference_sentences else None\n return config_tuple(\n d_hidden=d_embed,\n d_proj=d_embed * 2,\n d_tracker=8,\n predict=False,\n embed_dropout=0.1,\n mlp_dropout=0.1,\n n_mlp_layers=2,\n d_mlp=32,\n d_out=d_out,\n projection=True,\n lr=2e-2,\n batch_size=2,\n epochs=20,\n force_cpu=False,\n logdir=logdir,\n log_every=1,\n dev_every=2,\n save_every=2,\n lr_decay_every=1,\n lr_decay_by=0.75,\n inference_premise=inference_premise,\n inference_hypothesis=inference_hypothesis)\n\n\nclass SpinnTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n super(SpinnTest, self).setUp()\n self._test_device = \"gpu:0\" if tfe.num_gpus() else \"cpu:0\"\n self._temp_data_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self._temp_data_dir)\n super(SpinnTest, self).tearDown()\n\n def testBundle(self):\n with tf.device(self._test_device):\n lstm_iter = [np.array([[0, 1], [2, 3]], dtype=np.float32),\n np.array([[0, -1], [-2, -3]], dtype=np.float32),\n np.array([[0, 2], [4, 6]], dtype=np.float32),\n np.array([[0, -2], [-4, -6]], dtype=np.float32)]\n out = spinn._bundle(lstm_iter)\n\n self.assertEqual(2, len(out))\n self.assertEqual(tf.float32, out[0].dtype)\n self.assertEqual(tf.float32, out[1].dtype)\n self.assertAllEqual(np.array([[0, 2, 0, -2, 0, 4, 0, -4]]).T,\n out[0].numpy())\n self.assertAllEqual(np.array([[1, 3, -1, -3, 2, 6, -2, -6]]).T,\n out[1].numpy())\n\n def testUnbunbdle(self):\n with tf.device(self._test_device):\n state = [np.array([[0, 1, 2], [3, 4, 5]], dtype=np.float32),\n np.array([[0, -1, -2], [-3, -4, -5]], dtype=np.float32)]\n out = spinn._unbundle(state)\n\n self.assertEqual(2, len(out))\n self.assertEqual(tf.float32, out[0].dtype)\n self.assertEqual(tf.float32, out[1].dtype)\n self.assertAllEqual(np.array([[0, 1, 2, 0, -1, -2]]),\n out[0].numpy())\n self.assertAllEqual(np.array([[3, 4, 5, -3, -4, -5]]),\n out[1].numpy())\n\n def testReducer(self):\n with tf.device(self._test_device):\n batch_size = 3\n size = 10\n tracker_size = 8\n reducer = spinn.Reducer(size, tracker_size=tracker_size)\n\n left_in = []\n right_in = []\n tracking = []\n for _ in range(batch_size):\n left_in.append(tf.random_normal((1, size * 2)))\n right_in.append(tf.random_normal((1, size * 2)))\n tracking.append(tf.random_normal((1, tracker_size * 2)))\n\n out = reducer(left_in, right_in, tracking=tracking)\n self.assertEqual(batch_size, len(out))\n self.assertEqual(tf.float32, out[0].dtype)\n self.assertEqual((1, size * 2), out[0].shape)\n\n def testReduceTreeLSTM(self):\n with tf.device(self._test_device):\n size = 10\n tracker_size = 8\n reducer = spinn.Reducer(size, tracker_size=tracker_size)\n\n lstm_in = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]],\n dtype=np.float32)\n c1 = np.array([[0, 1], [2, 3]], dtype=np.float32)\n c2 = np.array([[0, -1], [-2, -3]], dtype=np.float32)\n\n h, c = reducer._tree_lstm(c1, c2, lstm_in)\n self.assertEqual(tf.float32, h.dtype)\n 
self.assertEqual(tf.float32, c.dtype)\n self.assertEqual((2, 2), h.shape)\n self.assertEqual((2, 2), c.shape)\n\n def testTracker(self):\n with tf.device(self._test_device):\n batch_size = 2\n size = 10\n tracker_size = 8\n buffer_length = 18\n stack_size = 3\n\n tracker = spinn.Tracker(tracker_size, False)\n tracker.reset_state()\n\n # Create dummy inputs for testing.\n bufs = []\n buf = []\n for _ in range(buffer_length):\n buf.append(tf.random_normal((batch_size, size * 2)))\n bufs.append(buf)\n self.assertEqual(1, len(bufs))\n self.assertEqual(buffer_length, len(bufs[0]))\n self.assertEqual((batch_size, size * 2), bufs[0][0].shape)\n\n stacks = []\n stack = []\n for _ in range(stack_size):\n stack.append(tf.random_normal((batch_size, size * 2)))\n stacks.append(stack)\n self.assertEqual(1, len(stacks))\n self.assertEqual(3, len(stacks[0]))\n self.assertEqual((batch_size, size * 2), stacks[0][0].shape)\n\n for _ in range(2):\n out1, out2 = tracker(bufs, stacks)\n self.assertIsNone(out2)\n self.assertEqual(batch_size, len(out1))\n self.assertEqual(tf.float32, out1[0].dtype)\n self.assertEqual((1, tracker_size * 2), out1[0].shape)\n\n self.assertEqual(tf.float32, tracker.state.c.dtype)\n self.assertEqual((batch_size, tracker_size), tracker.state.c.shape)\n self.assertEqual(tf.float32, tracker.state.h.dtype)\n self.assertEqual((batch_size, tracker_size), tracker.state.h.shape)\n\n def testSPINN(self):\n with tf.device(self._test_device):\n embedding_dims = 10\n d_tracker = 8\n sequence_length = 15\n num_transitions = 27\n\n config_tuple = collections.namedtuple(\n \"Config\", [\"d_hidden\", \"d_proj\", \"d_tracker\", \"predict\"])\n config = config_tuple(\n embedding_dims, embedding_dims * 2, d_tracker, False)\n s = spinn.SPINN(config)\n\n # Create some fake data.\n buffers = tf.random_normal((sequence_length, 1, config.d_proj))\n transitions = tf.constant(\n [[3], [3], [2], [3], [3], [3], [2], [2], [2], [3], [3], [3],\n [2], [3], [3], [2], [2], [3], [3], [3], [2], [2], [2], [2],\n [3], [2], [2]], dtype=tf.int64)\n self.assertEqual(tf.int64, transitions.dtype)\n self.assertEqual((num_transitions, 1), transitions.shape)\n\n out = s(buffers, transitions, training=True)\n self.assertEqual(tf.float32, out.dtype)\n self.assertEqual((1, embedding_dims), out.shape)\n\n def testSNLIClassifierAndTrainer(self):\n with tf.device(self._test_device):\n vocab_size = 40\n batch_size = 2\n d_embed = 10\n sequence_length = 15\n d_out = 4\n\n config = _test_spinn_config(d_embed, d_out)\n\n # Create fake embedding matrix.\n embed = tf.random_normal((vocab_size, d_embed))\n\n model = spinn.SNLIClassifier(config, embed)\n trainer = spinn.SNLIClassifierTrainer(model, config.lr)\n\n (labels, prem, prem_trans, hypo,\n hypo_trans) = _generate_synthetic_snli_data_batch(sequence_length,\n batch_size,\n vocab_size)\n\n # Invoke model under non-training mode.\n logits = model(prem, prem_trans, hypo, hypo_trans, training=False)\n self.assertEqual(tf.float32, logits.dtype)\n self.assertEqual((batch_size, d_out), logits.shape)\n\n # Invoke model under training model.\n logits = model(prem, prem_trans, hypo, hypo_trans, training=True)\n self.assertEqual(tf.float32, logits.dtype)\n self.assertEqual((batch_size, d_out), logits.shape)\n\n # Calculate loss.\n loss1 = trainer.loss(labels, logits)\n self.assertEqual(tf.float32, loss1.dtype)\n self.assertEqual((), loss1.shape)\n\n loss2, logits = trainer.train_batch(\n labels, prem, prem_trans, hypo, hypo_trans)\n self.assertEqual(tf.float32, loss2.dtype)\n self.assertEqual((), 
loss2.shape)\n self.assertEqual(tf.float32, logits.dtype)\n self.assertEqual((batch_size, d_out), logits.shape)\n # Training on the batch should have led to a change in the loss value.\n self.assertNotEqual(loss1.numpy(), loss2.numpy())\n\n def _create_test_data(self, snli_1_0_dir):\n fake_train_file = os.path.join(snli_1_0_dir, \"snli_1.0_train.txt\")\n os.makedirs(snli_1_0_dir)\n\n # Four sentences in total.\n with open(fake_train_file, \"wt\") as f:\n f.write(\"gold_label\\tsentence1_binary_parse\\tsentence2_binary_parse\\t\"\n \"sentence1_parse\\tsentence2_parse\\tsentence1\\tsentence2\\t\"\n \"captionID\\tpairID\\tlabel1\\tlabel2\\tlabel3\\tlabel4\\tlabel5\\n\")\n f.write(\"neutral\\t( ( Foo bar ) . )\\t( ( foo . )\\t\"\n \"DummySentence1Parse\\tDummySentence2Parse\\t\"\n \"Foo bar.\\tfoo baz.\\t\"\n \"4705552913.jpg#2\\t4705552913.jpg#2r1n\\t\"\n \"neutral\\tentailment\\tneutral\\tneutral\\tneutral\\n\")\n f.write(\"contradiction\\t( ( Bar foo ) . )\\t( ( baz . )\\t\"\n \"DummySentence1Parse\\tDummySentence2Parse\\t\"\n \"Foo bar.\\tfoo baz.\\t\"\n \"4705552913.jpg#2\\t4705552913.jpg#2r1n\\t\"\n \"neutral\\tentailment\\tneutral\\tneutral\\tneutral\\n\")\n f.write(\"entailment\\t( ( Quux quuz ) . )\\t( ( grault . )\\t\"\n \"DummySentence1Parse\\tDummySentence2Parse\\t\"\n \"Foo bar.\\tfoo baz.\\t\"\n \"4705552913.jpg#2\\t4705552913.jpg#2r1n\\t\"\n \"neutral\\tentailment\\tneutral\\tneutral\\tneutral\\n\")\n f.write(\"entailment\\t( ( Quuz quux ) . )\\t( ( garply . )\\t\"\n \"DummySentence1Parse\\tDummySentence2Parse\\t\"\n \"Foo bar.\\tfoo baz.\\t\"\n \"4705552913.jpg#2\\t4705552913.jpg#2r1n\\t\"\n \"neutral\\tentailment\\tneutral\\tneutral\\tneutral\\n\")\n\n glove_dir = os.path.join(self._temp_data_dir, \"glove\")\n os.makedirs(glove_dir)\n glove_file = os.path.join(glove_dir, \"glove.42B.300d.txt\")\n\n words = [\".\", \"foo\", \"bar\", \"baz\", \"quux\", \"quuz\", \"grault\", \"garply\"]\n with open(glove_file, \"wt\") as f:\n for i, word in enumerate(words):\n f.write(\"%s \" % word)\n for j in range(data.WORD_VECTOR_LEN):\n f.write(\"%.5f\" % (i * 0.1))\n if j < data.WORD_VECTOR_LEN - 1:\n f.write(\" \")\n else:\n f.write(\"\\n\")\n\n return fake_train_file\n\n def testInferSpinnWorks(self):\n \"\"\"Test inference with the spinn model.\"\"\"\n snli_1_0_dir = os.path.join(self._temp_data_dir, \"snli/snli_1.0\")\n self._create_test_data(snli_1_0_dir)\n\n vocab = data.load_vocabulary(self._temp_data_dir)\n word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)\n\n config = _test_spinn_config(\n data.WORD_VECTOR_LEN, 4,\n logdir=os.path.join(self._temp_data_dir, \"logdir\"),\n inference_sentences=(\"( foo ( bar . ) )\", \"( bar ( foo . ) )\"))\n logits = spinn.train_or_infer_spinn(\n embed, word2index, None, None, None, config)\n self.assertEqual(tf.float32, logits.dtype)\n self.assertEqual((3,), logits.shape)\n\n def testInferSpinnThrowsErrorIfOnlyOneSentenceIsSpecified(self):\n snli_1_0_dir = os.path.join(self._temp_data_dir, \"snli/snli_1.0\")\n self._create_test_data(snli_1_0_dir)\n\n vocab = data.load_vocabulary(self._temp_data_dir)\n word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)\n\n config = _test_spinn_config(\n data.WORD_VECTOR_LEN, 4,\n logdir=os.path.join(self._temp_data_dir, \"logdir\"),\n inference_sentences=(\"( foo ( bar . 
) )\", None))\n with self.assertRaises(ValueError):\n spinn.train_or_infer_spinn(embed, word2index, None, None, None, config)\n\n def testTrainSpinn(self):\n \"\"\"Test with fake toy SNLI data and GloVe vectors.\"\"\"\n\n # 1. Create and load a fake SNLI data file and a fake GloVe embedding file.\n snli_1_0_dir = os.path.join(self._temp_data_dir, \"snli/snli_1.0\")\n fake_train_file = self._create_test_data(snli_1_0_dir)\n\n vocab = data.load_vocabulary(self._temp_data_dir)\n word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)\n\n train_data = data.SnliData(fake_train_file, word2index)\n dev_data = data.SnliData(fake_train_file, word2index)\n test_data = data.SnliData(fake_train_file, word2index)\n\n # 2. Create a fake config.\n config = _test_spinn_config(\n data.WORD_VECTOR_LEN, 4,\n logdir=os.path.join(self._temp_data_dir, \"logdir\"))\n\n # 3. Test training of a SPINN model.\n trainer = spinn.train_or_infer_spinn(\n embed, word2index, train_data, dev_data, test_data, config)\n\n # 4. Load train loss values from the summary files and verify that they\n # decrease with training.\n summary_file = glob.glob(os.path.join(config.logdir, \"events.out.*\"))[0]\n events = summary_test_util.events_from_file(summary_file)\n train_losses = [event.summary.value[0].simple_value for event in events\n if event.summary.value\n and event.summary.value[0].tag == \"train/loss\"]\n self.assertEqual(config.epochs, len(train_losses))\n self.assertLess(train_losses[-1], train_losses[0])\n\n # 5. Verify that checkpoints exist and contains all the expected variables.\n self.assertTrue(glob.glob(os.path.join(config.logdir, \"ckpt*\")))\n ckpt_variable_names = [\n item[0] for item in checkpoint_utils.list_variables(config.logdir)]\n self.assertIn(\"global_step\", ckpt_variable_names)\n for v in trainer.variables:\n variable_name = v.name[:v.name.index(\":\")] if \":\" in v.name else v.name\n self.assertIn(variable_name, ckpt_variable_names)\n\n\nclass EagerSpinnSNLIClassifierBenchmark(test.Benchmark):\n\n def benchmarkEagerSpinnSNLIClassifier(self):\n test_device = \"gpu:0\" if tfe.num_gpus() else \"cpu:0\"\n with tf.device(test_device):\n burn_in_iterations = 2\n benchmark_iterations = 10\n\n vocab_size = 1000\n batch_size = 128\n sequence_length = 15\n d_embed = 200\n d_out = 4\n\n embed = tf.random_normal((vocab_size, d_embed))\n\n config = _test_spinn_config(d_embed, d_out)\n model = spinn.SNLIClassifier(config, embed)\n trainer = spinn.SNLIClassifierTrainer(model, config.lr)\n\n (labels, prem, prem_trans, hypo,\n hypo_trans) = _generate_synthetic_snli_data_batch(sequence_length,\n batch_size,\n vocab_size)\n\n for _ in range(burn_in_iterations):\n trainer.train_batch(labels, prem, prem_trans, hypo, hypo_trans)\n\n gc.collect()\n start_time = time.time()\n for _ in xrange(benchmark_iterations):\n trainer.train_batch(labels, prem, prem_trans, hypo, hypo_trans)\n wall_time = time.time() - start_time\n # Named \"examples\"_per_sec to conform with other benchmarks.\n extras = {\"examples_per_sec\": benchmark_iterations / wall_time}\n self.report_benchmark(\n name=\"Eager_SPINN_SNLIClassifier_Benchmark\",\n iters=benchmark_iterations,\n wall_time=wall_time,\n extras=extras)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Home of estimator related functions.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.estimator import estimator as estimator_lib\nfrom tensorflow.python.estimator import export as export_lib\nfrom tensorflow.python.estimator import model_fn as model_fn_lib\nfrom tensorflow.python.estimator import run_config as run_config_lib\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import random_seed\nfrom tensorflow.python.framework import sparse_tensor as sparse_tensor_lib\nfrom tensorflow.python.keras._impl.keras import backend as K\nfrom tensorflow.python.keras._impl.keras import models\nfrom tensorflow.python.keras._impl.keras import optimizers\nfrom tensorflow.python.keras._impl.keras.engine.base_layer import Layer\nfrom tensorflow.python.keras._impl.keras.engine.network import Network\nfrom tensorflow.python.keras._impl.keras.utils.generic_utils import CustomObjectScope\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import metrics as metrics_module\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.util.tf_export import tf_export\n\n_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n\n\ndef _cast_tensor_to_floatx(x):\n \"\"\"Cast tensor to keras's floatx dtype if it is not already the same dtype.\"\"\"\n if x.dtype == K.floatx():\n return x\n else:\n return math_ops.cast(x, K.floatx())\n\n\ndef _create_ordered_io(keras_model, estimator_io, is_input=True):\n \"\"\"Create a list of tensors from IO dictionary based on Keras IO order.\n\n Args:\n keras_model: An instance of compiled keras model.\n estimator_io: The features or labels (dict or plain array) from model_fn.\n is_input: True if dictionary is for inputs.\n\n Returns:\n A list of tensors based on Keras IO order.\n\n Raises:\n ValueError: if dictionary keys cannot be found in Keras model input_names\n or output_names.\n \"\"\"\n if isinstance(estimator_io, (list, tuple)):\n # Case currently not supported by most built-in input_fn,\n # but it's good to have for sanity\n return [_cast_tensor_to_floatx(x) for x in estimator_io]\n elif isinstance(estimator_io, dict):\n if is_input:\n if keras_model._is_graph_network:\n keras_io_names = keras_model.input_names\n else:\n keras_io_names = [\n 'input_%d' % i for i in range(1, len(estimator_io) + 1)]\n else:\n if keras_model._is_graph_network:\n keras_io_names = keras_model.output_names\n else:\n keras_io_names = [\n 
'output_%d' % i for i in range(1, len(estimator_io) + 1)]\n\n    for key in estimator_io:\n      if key not in keras_io_names:\n        raise ValueError(\n            'Cannot find %s with name \"%s\" in Keras Model. '\n            'It needs to match one '\n            'of the following: %s' % ('input' if is_input else 'output', key,\n                                      ', '.join(keras_io_names)))\n    tensors = [_cast_tensor_to_floatx(estimator_io[io_name])\n               for io_name in keras_io_names]\n    return tensors\n  else:\n    # Plain array.\n    return _cast_tensor_to_floatx(estimator_io)\n\n\ndef _in_place_subclassed_model_reset(model):\n  \"\"\"Substitute for model cloning that works for subclassed models.\n\n  Subclassed models cannot be cloned because their topology is not serializable.\n  To \"instantiate\" an identical model in a new TF graph, we reuse the original\n  model object, but we clear its state.\n\n  After calling this function on a model instance, you can use the model\n  instance as if it were a model clone (in particular you can use it in a new\n  graph).\n\n  This method clears the state of the input model. It is thus destructive.\n  However the original state can be restored fully by calling\n  `_in_place_subclassed_model_state_restoration`.\n\n  Args:\n    model: Instance of a Keras model created via subclassing.\n\n  Raises:\n    ValueError: In case the model uses a subclassed model as inner layer.\n  \"\"\"\n  assert not model._is_graph_network  # Only makes sense for subclassed networks\n  # Retrieve all layers tracked by the model as well as their attribute names\n  attributes_cache = {}\n  for name in dir(model):\n    try:\n      value = getattr(model, name)\n    except (AttributeError, ValueError, TypeError):\n      continue\n    if isinstance(value, Layer):\n      attributes_cache[name] = value\n      assert value in model._layers\n    elif isinstance(value, (list, tuple)) and name not in ('layers', '_layers'):\n      # Handle case: list/tuple of layers (also tracked by the Network API).\n      if value and all(isinstance(val, Layer) for val in value):\n        raise ValueError('We do not support the use of list-of-layers '\n                         'attributes in subclassed models used with '\n                         '`model_to_estimator` at this time. Found list '\n                         'model: %s' % name)\n\n  # Replace layers on the model with fresh layers\n  layers_to_names = {value: key for key, value in attributes_cache.items()}\n  original_layers = model._layers[:]\n  model._layers = []\n  for layer in original_layers:  # We preserve layer order.\n    config = layer.get_config()\n    # This will not work for nested subclassed models used as layers.\n    # This would be theoretically possible to support, but would add complexity.\n    # Only do it if users complain.\n    if isinstance(layer, Network) and not layer._is_graph_network:\n      raise ValueError('We do not support the use of nested subclassed models '\n                       'in `model_to_estimator` at this time. 
Found nested '\n 'model: %s' % layer)\n fresh_layer = layer.__class__.from_config(config)\n name = layers_to_names[layer]\n setattr(model, name, fresh_layer)\n\n # Cache original model build attributes (in addition to layers)\n if (not hasattr(model, '_original_attributes_cache') or\n model._original_attributes_cache is None):\n if model.built:\n attributes_to_cache = [\n 'inputs',\n 'outputs',\n '_feed_outputs',\n '_feed_output_names',\n '_feed_output_shapes',\n '_feed_loss_fns',\n 'loss_weights_list',\n 'targets',\n '_feed_targets',\n 'sample_weight_modes',\n 'weighted_metrics',\n 'metrics_names',\n 'metrics_tensors',\n 'metrics_updates',\n 'stateful_metric_names',\n 'total_loss',\n 'sample_weights',\n '_feed_sample_weights',\n 'train_function',\n 'test_function',\n 'predict_function',\n '_collected_trainable_weights',\n '_feed_inputs',\n '_feed_input_names',\n '_feed_input_shapes',\n 'optimizer',\n ]\n for name in attributes_to_cache:\n attributes_cache[name] = getattr(model, name)\n model._original_attributes_cache = attributes_cache\n\n # Reset built state\n model.built = False\n model.inputs = None\n model.outputs = None\n\n\ndef _in_place_subclassed_model_state_restoration(model):\n \"\"\"Restores the original state of a model after it was \"reset\".\n\n This undoes this action of `_in_place_subclassed_model_reset`.\n\n Args:\n model: Instance of a Keras model created via subclassing, on which\n `_in_place_subclassed_model_reset` was previously called.\n \"\"\"\n assert not model._is_graph_network\n # Restore layers and build attributes\n if (hasattr(model, '_original_attributes_cache') and\n model._original_attributes_cache is not None):\n model._layers = []\n for name, value in model._original_attributes_cache.items():\n setattr(model, name, value)\n model._original_attributes_cache = None\n else:\n # Restore to the state of a never-called model.\n model.built = False\n model.inputs = None\n model.outputs = None\n\n\ndef _clone_and_build_model(mode,\n keras_model,\n custom_objects,\n features=None,\n labels=None):\n \"\"\"Clone and build the given keras_model.\n\n Args:\n mode: training mode.\n keras_model: an instance of compiled keras model.\n custom_objects: Dictionary for custom objects.\n features: Dict of tensors.\n labels: Dict of tensors, or single tensor instance.\n\n Returns:\n The newly built model.\n \"\"\"\n # Set to True during training, False for inference.\n K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)\n\n # Get list of inputs.\n if features is None:\n input_tensors = None\n else:\n input_tensors = _create_ordered_io(keras_model,\n estimator_io=features,\n is_input=True)\n # Get list of outputs.\n if labels is None:\n target_tensors = None\n elif isinstance(labels, dict):\n target_tensors = _create_ordered_io(keras_model,\n estimator_io=labels,\n is_input=False)\n else:\n target_tensors = [\n _cast_tensor_to_floatx(\n sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(labels))\n ]\n\n if keras_model._is_graph_network:\n if custom_objects:\n with CustomObjectScope(custom_objects):\n model = models.clone_model(keras_model, input_tensors=input_tensors)\n else:\n model = models.clone_model(keras_model, input_tensors=input_tensors)\n else:\n model = keras_model\n _in_place_subclassed_model_reset(model)\n if input_tensors is not None:\n model._set_inputs(input_tensors)\n\n # Compile/Build model\n if mode is model_fn_lib.ModeKeys.PREDICT:\n if isinstance(model, models.Sequential):\n model.build()\n else:\n if isinstance(keras_model.optimizer, 
optimizers.TFOptimizer):\n optimizer = keras_model.optimizer\n else:\n optimizer_config = keras_model.optimizer.get_config()\n optimizer = keras_model.optimizer.__class__.from_config(optimizer_config)\n optimizer.iterations = training_util.get_or_create_global_step()\n\n model.compile(\n optimizer,\n keras_model.loss,\n metrics=keras_model.metrics,\n loss_weights=keras_model.loss_weights,\n sample_weight_mode=keras_model.sample_weight_mode,\n weighted_metrics=keras_model.weighted_metrics,\n target_tensors=target_tensors)\n\n if isinstance(model, models.Sequential):\n model = model.model\n return model\n\n\ndef _create_keras_model_fn(keras_model, custom_objects=None):\n \"\"\"Creates model_fn for keras Estimator.\n\n Args:\n keras_model: an instance of compiled keras model.\n custom_objects: Dictionary for custom objects.\n\n Returns:\n The model_fn for a keras Estimator.\n \"\"\"\n\n def model_fn(features, labels, mode):\n \"\"\"model_fn for keras Estimator.\"\"\"\n model = _clone_and_build_model(mode, keras_model, custom_objects, features,\n labels)\n # Get inputs to EstimatorSpec\n predictions = dict(zip(model.output_names, model.outputs))\n\n loss = None\n train_op = None\n eval_metric_ops = None\n\n # Set loss and metric only during train and evaluate.\n if mode is not model_fn_lib.ModeKeys.PREDICT:\n if mode is model_fn_lib.ModeKeys.TRAIN:\n model._make_train_function() # pylint: disable=protected-access\n else:\n model._make_test_function() # pylint: disable=protected-access\n loss = model.total_loss\n\n if model.metrics:\n # TODO(fchollet): support stateful metrics\n eval_metric_ops = {}\n # When each metric maps to an output\n if isinstance(model.metrics, dict):\n for i, output_name in enumerate(model.metrics.keys()):\n metric_name = model.metrics[output_name]\n if callable(metric_name):\n metric_name = metric_name.__name__\n # When some outputs use the same metric\n if list(model.metrics.values()).count(metric_name) > 1:\n metric_name += '_' + output_name\n eval_metric_ops[metric_name] = metrics_module.mean(\n model.metrics_tensors[i - len(model.metrics)])\n else:\n for i, metric_name in enumerate(model.metrics):\n if callable(metric_name):\n metric_name = metric_name.__name__\n eval_metric_ops[metric_name] = metrics_module.mean(\n model.metrics_tensors[i])\n\n # Set train_op only during train.\n if mode is model_fn_lib.ModeKeys.TRAIN:\n train_op = model.train_function.updates_op\n\n if not model._is_graph_network:\n # Reset model state to original state,\n # to avoid `model_fn` being destructive for the initial model argument.\n _in_place_subclassed_model_state_restoration(keras_model)\n return model_fn_lib.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs={\n _DEFAULT_SERVING_KEY:\n export_lib.export_output.PredictOutput(predictions)\n })\n\n return model_fn\n\n\ndef _save_first_checkpoint(keras_model, estimator, custom_objects,\n keras_weights):\n \"\"\"Save first checkpoint for the keras Estimator.\n\n Args:\n keras_model: an instance of compiled keras model.\n estimator: keras estimator.\n custom_objects: Dictionary for custom objects.\n keras_weights: A flat list of Numpy arrays for weights of given keras_model.\n\n Returns:\n The model_fn for a keras Estimator.\n \"\"\"\n # Load weights and save to checkpoint if there is no checkpoint\n latest_path = saver_lib.latest_checkpoint(estimator.model_dir)\n if not latest_path:\n with ops.Graph().as_default():\n 
random_seed.set_random_seed(estimator.config.tf_random_seed)\n training_util.create_global_step()\n model = _clone_and_build_model(model_fn_lib.ModeKeys.TRAIN, keras_model,\n custom_objects)\n if isinstance(model, models.Sequential):\n model = model.model\n # save to checkpoint\n with session.Session(config=estimator._session_config) as sess:\n model.set_weights(keras_weights)\n # Make update ops and initialize all variables.\n if not model.train_function:\n # pylint: disable=protected-access\n model._make_train_function()\n K._initialize_variables(sess)\n # pylint: enable=protected-access\n saver = saver_lib.Saver()\n saver.save(sess, os.path.join(estimator.model_dir, 'keras_model.ckpt'))\n\n\n@tf_export('keras.estimator.model_to_estimator')\ndef model_to_estimator(keras_model=None,\n keras_model_path=None,\n custom_objects=None,\n model_dir=None,\n config=None):\n \"\"\"Constructs an `Estimator` instance from given keras model.\n\n For usage example, please see\n @{$programmers_guide/estimators$creating_estimators_from_keras_models}.\n\n Args:\n keras_model: Keras model in memory.\n keras_model_path: Directory to a keras model on disk.\n custom_objects: Dictionary for custom objects.\n model_dir: Directory to save Estimator model parameters, graph and etc.\n config: Configuration object.\n\n Returns:\n An Estimator from given keras model.\n\n Raises:\n ValueError: if neither keras_model nor keras_model_path was given.\n ValueError: if both keras_model and keras_model_path was given.\n ValueError: if the keras_model_path is a GCS URI.\n ValueError: if keras_model has not been compiled.\n \"\"\"\n if (not keras_model) and (not keras_model_path):\n raise ValueError(\n 'Either `keras_model` or `keras_model_path` needs to be provided.')\n if keras_model and keras_model_path:\n raise ValueError(\n 'Please specity either `keras_model` or `keras_model_path`, '\n 'but not both.')\n\n if not keras_model:\n if keras_model_path.startswith(\n 'gs://') or 'storage.googleapis.com' in keras_model_path:\n raise ValueError(\n '%s is not a local path. Please copy the model locally first.' %\n keras_model_path)\n logging.info('Loading models from %s', keras_model_path)\n keras_model = models.load_model(keras_model_path)\n else:\n logging.info('Using the Keras model provided.')\n keras_model = keras_model\n\n if not hasattr(keras_model, 'optimizer') or not keras_model.optimizer:\n raise ValueError(\n 'The given keras model has not been compiled yet. Please compile first '\n 'before calling `model_to_estimator`.')\n\n if isinstance(config, dict):\n config = run_config_lib.RunConfig(**config)\n\n keras_model_fn = _create_keras_model_fn(keras_model, custom_objects)\n estimator = estimator_lib.Estimator(\n keras_model_fn, model_dir=model_dir, config=config)\n\n # Pass the config into keras backend's default session.\n with session.Session(config=estimator._session_config) as sess:\n K.set_session(sess)\n\n keras_weights = keras_model.get_weights()\n if keras_model._is_graph_network:\n # TODO(yifeif): move checkpoint initialization to scaffold.init_fn\n _save_first_checkpoint(keras_model,\n estimator,\n custom_objects,\n keras_weights)\n elif keras_model.built:\n logging.warning('You are creating an Estimator from a Keras model '\n 'manually subclassed from `Model`, that was '\n 'already called on some inputs (and thus already had '\n 'weights). We are currently unable to preserve '\n 'the model\\'s state (its weights) '\n 'as part of the estimator '\n 'in this case. 
Be warned that the estimator '\n 'has been created using '\n 'a freshly initialized version of your model.\\n'\n 'Note that this doesn\\'t affect the state of the '\n 'model instance you passed as `keras_model` argument.')\n return estimator\n"
] | [
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.ops.array_ops.listdiff",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.eager.context.Context",
"tensorflow.python.ops.math_ops.approximate_equal",
"tensorflow.python.eager.context.context",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.eager.context.device",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"numpy.arange",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.eager.execute.args_to_matching_eager",
"tensorflow.python.eager.test.main",
"tensorflow.python.ops.array_ops.check_numerics",
"tensorflow.python.ops.sparse_ops.gen_sparse_ops.sparse_split",
"numpy.array",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.layers.core.Flatten",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.math_ops.subtract",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.nn_ops.softmax",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.framework.meta_graph.copy_scoped_meta_graph",
"tensorflow.python.ops.math_ops.less",
"tensorflow.python.framework.test_util.assert_meta_graph_protos_equal",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.training.saver.generate_checkpoint_state_proto",
"tensorflow.python.framework.ops.reset_default_graph",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.training.queue_runner_impl.add_queue_runner",
"tensorflow.python.training.adam.AdamOptimizer",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.training.saver_test_utils.CheckpointedOp",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.platform.test.mock.patch.object",
"tensorflow.python.platform.test.gpu_device_name",
"tensorflow.python.training.saver.export_meta_graph",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.core.protobuf.meta_graph_pb2.MetaGraphDef",
"tensorflow.python.framework.test_util.run_in_graph_and_eager_modes",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.python.training.saver.update_checkpoint_state",
"tensorflow.python.training.checkpoint_state_pb2.CheckpointState",
"tensorflow.python.platform.gfile.Remove",
"tensorflow.python.training.saver.latest_checkpoint",
"numpy.reshape",
"tensorflow.python.ops.resource_variable_ops.ResourceVariable",
"tensorflow.python.ops.nn_ops.softmax_cross_entropy_with_logits",
"tensorflow.python.platform.gfile.DeleteRecursively",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.ops.math_ops.add",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.meta_graph.stripped_op_list_for_graph",
"tensorflow.python.ops.variables.initialize_variables",
"tensorflow.python.ops.variables.global_variables",
"numpy.int64",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.framework.ops._default_graph_stack.reset",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.ops.variables.Variable.SaveSliceInfo",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.array_ops.identity",
"numpy.random.randint",
"tensorflow.python.ops.variables.report_uninitialized_variables",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.framework.function.Defun",
"tensorflow.python.platform.gfile.MakeDirs",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.framework.meta_graph.export_scoped_meta_graph",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.training.queue_runner_impl.QueueRunner",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.ops.data_flow_ops.FIFOQueue",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.training.saver._update_checkpoint_state",
"tensorflow.python.platform.gfile.Glob",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.training.saver.checkpoint_exists",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.summary.summary.scalar",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.ops.device",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.ops.partitioned_variables.fixed_size_partitioner",
"tensorflow.python.training.saver.get_checkpoint_state",
"tensorflow.python.training.saver.import_meta_graph",
"tensorflow.python.pywrap_tensorflow.NewCheckpointReader",
"tensorflow.python.framework.test_util.get_node_def_from_graph",
"tensorflow.python.ops.partitioned_variables.create_partitioned_variables",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.training.saver.get_checkpoint_mtimes",
"tensorflow.core.protobuf.queue_runner_pb2.QueueRunnerDef",
"tensorflow.python.ops.math_ops.complex",
"tensorflow.python.client.session.Session",
"numpy.random.random",
"tensorflow.python.eager.context.in_graph_mode",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.training.saver.Saver",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.ops.gen_logging_ops.tensor_summary_v2",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.summary_op_util.collect",
"tensorflow.core.framework.summary_pb2.SummaryMetadata",
"tensorflow.python.ops.summary_op_util.summary_scope"
],
[
"tensorflow.contrib.py2tf.pyct.anno.setanno",
"tensorflow.contrib.py2tf.pyct.anno.hasanno",
"tensorflow.contrib.py2tf.pyct.anno.getanno"
],
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.reduce_logsumexp",
"numpy.finfo",
"tensorflow.python.ops.math_ops.lgamma",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.div",
"tensorflow.python.ops.distributions.util.get_logits_and_probs",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.check_ops.assert_non_positive",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.check_ops.assert_positive",
"tensorflow.python.ops.nn_ops.log_softmax",
"tensorflow.contrib.distributions.python.ops.bijectors.Exp",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.reduce_sum"
],
[
"tensorflow.device",
"tensorflow.constant",
"tensorflow.python.eager.test.main",
"tensorflow.contrib.summary.summary_test_util.events_from_file",
"tensorflow.contrib.eager.python.examples.spinn.data.load_vocabulary",
"tensorflow.contrib.eager.python.examples.spinn.data.SnliData",
"tensorflow.python.training.checkpoint_utils.list_variables",
"tensorflow.contrib.eager.python.examples.spinn.data.load_word_vectors",
"tensorflow.contrib.eager.num_gpus",
"numpy.array",
"tensorflow.random_uniform",
"tensorflow.random_normal"
],
[
"tensorflow.python.estimator.estimator.Estimator",
"tensorflow.python.keras._impl.keras.backend.set_learning_phase",
"tensorflow.python.training.saver.latest_checkpoint",
"tensorflow.python.keras._impl.keras.models.clone_model",
"tensorflow.python.keras._impl.keras.backend.floatx",
"tensorflow.python.ops.metrics.mean",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.sparse_tensor.convert_to_tensor_or_sparse_tensor",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.keras._impl.keras.backend._initialize_variables",
"tensorflow.python.keras._impl.keras.utils.generic_utils.CustomObjectScope",
"tensorflow.python.client.session.Session",
"tensorflow.python.keras._impl.keras.models.load_model",
"tensorflow.python.training.training_util.create_global_step",
"tensorflow.python.framework.random_seed.set_random_seed",
"tensorflow.python.keras._impl.keras.backend.set_session",
"tensorflow.python.training.training_util.get_or_create_global_step",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.estimator.run_config.RunConfig",
"tensorflow.python.estimator.export.export_output.PredictOutput",
"tensorflow.python.training.saver.Saver"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.4"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"1.2"
]
}
] |
CommanderCero/RL_Algorithms | [
"fd8172e0075247b682a1dca752306147fa2ed3ba",
"fd8172e0075247b682a1dca752306147fa2ed3ba"
] | [
"A2C/utils.py",
"Rainbow/ptan/experience.py"
] | [
"import scipy.signal as signal\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport models\nimport gym\nimport wandb\n\ndef create_feedforward(sizes, activation=nn.ReLU): \n layers = []\n for i in range(len(sizes) - 1):\n layers.append(nn.Linear(sizes[i], sizes[i+1]))\n if i < len(sizes) - 2:\n layers.append(activation())\n return nn.Sequential(*layers)\n\ndef get_shape(shape):\n if shape is None:\n return ()\n return shape\n\ndef discounted_cumsum(rewards, reward_decay):\n \"\"\"Taken from https://stackoverflow.com/questions/47970683/vectorize-a-numpy-discount-calculation\"\"\"\n return signal.lfilter([1], [1, -reward_decay], x=rewards[::-1])[::-1]\n\nclass TrajectoryBuffer:\n def __init__(self, observation_shape, action_shape, size, reward_decay=0.99):\n self.max_size = size\n self.trajectory_start = 0\n self.pos = 0\n self.reward_decay = reward_decay\n \n self.observations = np.empty((size, *observation_shape), dtype=np.float32)\n self.actions = np.empty((size, *get_shape(action_shape)), dtype=np.float32)\n self.rewards = np.empty((size,), dtype=np.float32)\n self.returns = np.empty((size,), dtype=np.float32)\n self.dones = np.empty((size,), dtype=np.float32)\n \n def store(self, observation, action, reward, done):\n assert self.pos < self.max_size, \"Buffer Overflow\"\n \n self.observations[self.pos] = observation\n self.actions[self.pos] = action\n self.rewards[self.pos] = reward\n self.dones[self.pos] = done\n self.pos += 1\n \n def end_trajectory(self, value=0):\n # Compute return\n sl = slice(self.trajectory_start, self.pos)\n rewards = self.rewards[sl]\n rewards = np.append(rewards, value)\n self.returns[sl] = discounted_cumsum(rewards, self.reward_decay)[:-1]\n \n self.trajectory_start = self.pos\n \n def get_data(self):\n sl = slice(0, self.pos)\n data = dict(\n observations=self.observations[sl],\n actions=self.actions[sl],\n rewards=self.rewards[sl],\n returns=self.returns[sl],\n dones=self.dones[sl]\n )\n \n return {key : torch.from_numpy(value) for key, value in data.items()}\n \n def clear(self):\n self.pos = 0\n self.trajectory_start = 0\n \n \nclass VecTrajectoryBuffer:\n def __init__(self, observation_shape, action_shape, num_envs, size, reward_decay=0.99):\n self.max_size = size\n self.pos = 0\n self.reward_decay = reward_decay\n self.traj_starts = np.zeros((num_envs,), dtype=int)\n \n self.observations = np.empty((size, num_envs, *observation_shape), dtype=np.float32)\n self.actions = np.empty((size, num_envs, *get_shape(action_shape)), dtype=np.float32)\n self.rewards = np.empty((size, num_envs), dtype=np.float32)\n self.returns = np.empty((size, num_envs), dtype=np.float32)\n self.dones = np.empty((size, num_envs), dtype=np.float32)\n \n def store(self, observations, actions, rewards, dones):\n assert self.pos < self.max_size, \"Buffer Overflow\"\n \n self.observations[self.pos] = observations\n self.actions[self.pos] = actions\n self.rewards[self.pos] = rewards\n self.dones[self.pos] = dones\n self.pos += 1\n \n # Compute returns\n for env_index, done in enumerate(dones):\n if done:\n self._end_trajectory(env_index)\n \n def end_trajectory(self, values):\n for env_index, value in enumerate(values):\n self._end_trajectory(env_index, value)\n \n def _end_trajectory(self, env_index, value=0):\n # Compute return\n sl = slice(self.traj_starts[env_index], self.pos)\n rewards = self.rewards[sl, env_index]\n rewards = np.append(rewards, value)\n self.returns[sl, env_index] = discounted_cumsum(rewards, self.reward_decay)[:-1]\n \n # Update trajectory start\n 
self.traj_starts[env_index] = self.pos\n \n def get_data(self, device=torch.device('cpu')):\n sl = slice(0, self.pos)\n \n data = dict(\n observations=self._remove_env_axis(self.observations[sl]),\n actions=self._remove_env_axis(self.actions[sl]),\n rewards=self._remove_env_axis(self.rewards[sl]),\n returns=self._remove_env_axis(self.returns[sl]),\n dones=self._remove_env_axis(self.dones[sl])\n )\n \n return {key : torch.from_numpy(value).to(device) for key, value in data.items()}\n \n def clear(self):\n self.pos = 0\n self.traj_starts.fill(0)\n \n def _remove_env_axis(self, array):\n # array.shape = (size, num_envs, ???)\n shape = array.shape\n # Swap size with num_envs to ensure reshaping won't mix trajectories\n array = array.swapaxes(0, 1)\n # Flatten\n new_shape = (shape[0] * shape[1], *shape[2:])\n array = array.reshape(new_shape)\n return array\n \n \ndef play(model: models.Policy, env: gym.Env, repeats=10, device=torch.device('cpu')):\n for _ in range(repeats):\n state = env.reset()\n done = False\n while not done:\n inp = torch.FloatTensor([state]).to(device)\n action = model.get_actions(inp)[0]\n state, reward, done, _ = env.step(action)\n env.render()\n \n env.close()\n \ndef capture_video(model: models.Policy, env: gym.Env, fps=30, device=torch.device('cpu')):\n frames = []\n reward_sum = 0\n step_count = 0\n \n state = env.reset()\n done = False\n while not done:\n inp = torch.FloatTensor([state]).to(device)\n action = model.get_actions(inp)[0]\n state, reward, done, _ = env.step(action)\n frames.append(np.array(env.render(\"rgb_array\")))\n \n reward_sum += reward\n step_count += 1\n \n frames = np.array(frames) # (Time, Width, Height, Channels)\n frames = np.moveaxis(frames, 3, 1) # (Time, Channels, Width, Height)\n return wandb.Video(frames, caption=f\"RewardSum={reward_sum}; EpisodeLength={step_count}\", fps=fps)",
"import gym\nimport torch\nimport random\nimport collections\nfrom torch.autograd import Variable\n\nimport numpy as np\n\nfrom collections import namedtuple, deque\n\nfrom .agent import BaseAgent\nfrom .common import utils\n\n# one single experience step\nExperience = namedtuple('Experience', ['state', 'action', 'reward', 'done'])\n\n\nclass ExperienceSource:\n \"\"\"\n Simple n-step experience source using single or multiple environments\n\n Every experience contains n list of Experience entries\n \"\"\"\n def __init__(self, env, agent, steps_count=2, steps_delta=1, vectorized=False):\n \"\"\"\n Create simple experience source\n :param env: environment or list of environments to be used\n :param agent: callable to convert batch of states into actions to take\n :param steps_count: count of steps to track for every experience chain\n :param steps_delta: how many steps to do between experience items\n :param vectorized: support of vectorized envs from OpenAI universe\n \"\"\"\n assert isinstance(env, (gym.Env, list, tuple))\n assert isinstance(agent, BaseAgent)\n assert isinstance(steps_count, int)\n assert steps_count >= 1\n assert isinstance(vectorized, bool)\n if isinstance(env, (list, tuple)):\n self.pool = env\n else:\n self.pool = [env]\n self.agent = agent\n self.steps_count = steps_count\n self.steps_delta = steps_delta\n self.total_rewards = []\n self.total_steps = []\n self.vectorized = vectorized\n\n def __iter__(self):\n states, agent_states, histories, cur_rewards, cur_steps = [], [], [], [], []\n env_lens = []\n for env in self.pool:\n obs = env.reset()\n # if the environment is vectorized, all it's output is lists of results.\n # Details are here: https://github.com/openai/universe/blob/master/doc/env_semantics.rst\n if self.vectorized:\n obs_len = len(obs)\n states.extend(obs)\n else:\n obs_len = 1\n states.append(obs)\n env_lens.append(obs_len)\n\n for _ in range(obs_len):\n histories.append(deque(maxlen=self.steps_count))\n cur_rewards.append(0.0)\n cur_steps.append(0)\n agent_states.append(self.agent.initial_state())\n\n iter_idx = 0\n while True:\n actions = [None] * len(states)\n states_input = []\n states_indices = []\n for idx, state in enumerate(states):\n if state is None:\n actions[idx] = self.pool[0].action_space.sample() # assume that all envs are from the same family\n else:\n states_input.append(state)\n states_indices.append(idx)\n if states_input:\n states_actions, new_agent_states = self.agent(states_input, agent_states)\n for idx, action in enumerate(states_actions):\n g_idx = states_indices[idx]\n actions[g_idx] = action\n agent_states[g_idx] = new_agent_states[idx]\n grouped_actions = _group_list(actions, env_lens)\n\n global_ofs = 0\n for env_idx, (env, action_n) in enumerate(zip(self.pool, grouped_actions)):\n if self.vectorized:\n next_state_n, r_n, is_done_n, _ = env.step(action_n)\n else:\n next_state, r, is_done, _ = env.step(action_n[0])\n next_state_n, r_n, is_done_n = [next_state], [r], [is_done]\n\n for ofs, (action, next_state, r, is_done) in enumerate(zip(action_n, next_state_n, r_n, is_done_n)):\n idx = global_ofs + ofs\n state = states[idx]\n history = histories[idx]\n\n cur_rewards[idx] += r\n cur_steps[idx] += 1\n if state is not None:\n history.append(Experience(state=state, action=action, reward=r, done=is_done))\n if len(history) == self.steps_count and iter_idx % self.steps_delta == 0:\n yield tuple(history)\n states[idx] = next_state\n if is_done:\n # in case of very short episode (shorter than our steps count), send gathered 
history\n if 0 < len(history) < self.steps_count:\n yield tuple(history)\n # generate tail of history\n while len(history) > 1:\n history.popleft()\n yield tuple(history)\n self.total_rewards.append(cur_rewards[idx])\n self.total_steps.append(cur_steps[idx])\n cur_rewards[idx] = 0.0\n cur_steps[idx] = 0\n # vectorized envs are reset automatically\n states[idx] = env.reset() if not self.vectorized else None\n agent_states[idx] = self.agent.initial_state()\n history.clear()\n global_ofs += len(action_n)\n iter_idx += 1\n\n def pop_total_rewards(self):\n r = self.total_rewards\n if r:\n self.total_rewards = []\n self.total_steps = []\n return r\n\n def pop_rewards_steps(self):\n res = list(zip(self.total_rewards, self.total_steps))\n if res:\n self.total_rewards, self.total_steps = [], []\n return res\n\n\ndef _group_list(items, lens):\n \"\"\"\n Unflat the list of items by lens\n :param items: list of items\n :param lens: list of integers\n :return: list of list of items grouped by lengths\n \"\"\"\n res = []\n cur_ofs = 0\n for g_len in lens:\n res.append(items[cur_ofs:cur_ofs+g_len])\n cur_ofs += g_len\n return res\n\n\n# those entries are emitted from ExperienceSourceFirstLast. Reward is discounted over the trajectory piece\nExperienceFirstLast = collections.namedtuple('ExperienceFirstLast', ('state', 'action', 'reward', 'last_state'))\n\n\nclass ExperienceSourceFirstLast(ExperienceSource):\n \"\"\"\n This is a wrapper around ExperienceSource to prevent storing full trajectory in replay buffer when we need\n only first and last states. For every trajectory piece it calculates discounted reward and emits only first\n and last states and action taken in the first state.\n\n If we have partial trajectory at the end of episode, last_state will be None\n \"\"\"\n def __init__(self, env, agent, gamma, steps_count=1, steps_delta=1, vectorized=False):\n assert isinstance(gamma, float)\n super(ExperienceSourceFirstLast, self).__init__(env, agent, steps_count+1, steps_delta, vectorized=vectorized)\n self.gamma = gamma\n self.steps = steps_count\n\n def __iter__(self):\n for exp in super(ExperienceSourceFirstLast, self).__iter__():\n if exp[-1].done and len(exp) <= self.steps:\n last_state = None\n elems = exp\n else:\n last_state = exp[-1].state\n elems = exp[:-1]\n total_reward = 0.0\n for e in reversed(elems):\n total_reward *= self.gamma\n total_reward += e.reward\n yield ExperienceFirstLast(state=exp[0].state, action=exp[0].action,\n reward=total_reward, last_state=last_state)\n\n\ndef discount_with_dones(rewards, dones, gamma):\n discounted = []\n r = 0\n for reward, done in zip(rewards[::-1], dones[::-1]):\n r = reward + gamma*r*(1.-done)\n discounted.append(r)\n return discounted[::-1]\n\n\nclass ExperienceSourceRollouts:\n \"\"\"\n N-step rollout experience source following A3C rollouts scheme. Have to be used with agent,\n keeping the value in its state (for example, agent.ActorCriticAgent).\n\n Yields batches of num_envs * n_steps samples with the following arrays:\n 1. observations\n 2. actions\n 3. discounted rewards, with values approximation\n 4. 
values\n \"\"\"\n def __init__(self, env, agent, gamma, steps_count=5):\n \"\"\"\n Constructs the rollout experience source\n :param env: environment or list of environments to be used\n :param agent: callable to convert batch of states into actions\n :param steps_count: how many steps to perform rollouts\n \"\"\"\n assert isinstance(env, (gym.Env, list, tuple))\n assert isinstance(agent, BaseAgent)\n assert isinstance(gamma, float)\n assert isinstance(steps_count, int)\n assert steps_count >= 1\n\n if isinstance(env, (list, tuple)):\n self.pool = env\n else:\n self.pool = [env]\n self.agent = agent\n self.gamma = gamma\n self.steps_count = steps_count\n self.total_rewards = []\n self.total_steps = []\n\n def __iter__(self):\n pool_size = len(self.pool)\n states = [np.array(e.reset()) for e in self.pool]\n mb_states = np.zeros((pool_size, self.steps_count) + states[0].shape, dtype=states[0].dtype)\n mb_rewards = np.zeros((pool_size, self.steps_count), dtype=np.float32)\n mb_values = np.zeros((pool_size, self.steps_count), dtype=np.float32)\n mb_actions = np.zeros((pool_size, self.steps_count), dtype=np.int64)\n mb_dones = np.zeros((pool_size, self.steps_count), dtype=np.bool)\n total_rewards = [0.0] * pool_size\n total_steps = [0] * pool_size\n agent_states = None\n step_idx = 0\n\n while True:\n actions, agent_states = self.agent(states, agent_states)\n rewards = []\n dones = []\n new_states = []\n for env_idx, (e, action) in enumerate(zip(self.pool, actions)):\n o, r, done, _ = e.step(action)\n total_rewards[env_idx] += r\n total_steps[env_idx] += 1\n if done:\n o = e.reset()\n self.total_rewards.append(total_rewards[env_idx])\n self.total_steps.append(total_steps[env_idx])\n total_rewards[env_idx] = 0.0\n total_steps[env_idx] = 0\n new_states.append(np.array(o))\n dones.append(done)\n rewards.append(r)\n # we need an extra step to get values approximation for rollouts\n if step_idx == self.steps_count:\n # calculate rollout rewards\n for env_idx, (env_rewards, env_dones, last_value) in enumerate(zip(mb_rewards, mb_dones, agent_states)):\n env_rewards = env_rewards.tolist()\n env_dones = env_dones.tolist()\n if not env_dones[-1]:\n env_rewards = discount_with_dones(env_rewards + [last_value], env_dones + [False], self.gamma)[:-1]\n else:\n env_rewards = discount_with_dones(env_rewards, env_dones, self.gamma)\n mb_rewards[env_idx] = env_rewards\n yield mb_states.reshape((-1,) + mb_states.shape[2:]), mb_rewards.flatten(), mb_actions.flatten(), mb_values.flatten()\n step_idx = 0\n mb_states[:, step_idx] = states\n mb_rewards[:, step_idx] = rewards\n mb_values[:, step_idx] = agent_states\n mb_actions[:, step_idx] = actions\n mb_dones[:, step_idx] = dones\n step_idx += 1\n states = new_states\n\n def pop_total_rewards(self):\n r = self.total_rewards\n if r:\n self.total_rewards = []\n self.total_steps = []\n return r\n\n def pop_rewards_steps(self):\n res = list(zip(self.total_rewards, self.total_steps))\n if res:\n self.total_rewards, self.total_steps = [], []\n return res\n\n\nclass ExperienceSourceBuffer:\n \"\"\"\n The same as ExperienceSource, but takes episodes from the buffer\n \"\"\"\n def __init__(self, buffer, steps_count=1):\n \"\"\"\n Create buffered experience source\n :param buffer: list of episodes, each is a list of Experience object\n :param steps_count: count of steps in every entry\n \"\"\"\n self.update_buffer(buffer)\n self.steps_count = steps_count\n\n def update_buffer(self, buffer):\n self.buffer = buffer\n self.lens = list(map(len, buffer))\n\n def __iter__(self):\n 
\"\"\"\n Infinitely sample episode from the buffer and then sample item offset\n \"\"\"\n while True:\n episode = random.randrange(len(self.buffer))\n ofs = random.randrange(self.lens[episode] - self.steps_count - 1)\n yield self.buffer[episode][ofs:ofs+self.steps_count]\n\n\nclass ExperienceReplayBuffer:\n def __init__(self, experience_source, buffer_size):\n assert isinstance(experience_source, (ExperienceSource, type(None)))\n assert isinstance(buffer_size, int)\n self.experience_source_iter = None if experience_source is None else iter(experience_source)\n self.buffer = []\n self.capacity = buffer_size\n self.pos = 0\n\n def __len__(self):\n return len(self.buffer)\n\n def __iter__(self):\n return iter(self.buffer)\n\n def sample(self, batch_size):\n \"\"\"\n Get one random batch from experience replay\n TODO: implement sampling order policy\n :param batch_size:\n :return:\n \"\"\"\n if len(self.buffer) <= batch_size:\n return self.buffer\n # Warning: replace=False makes random.choice O(n)\n keys = np.random.choice(len(self.buffer), batch_size, replace=True)\n return [self.buffer[key] for key in keys]\n\n def _add(self, sample):\n if len(self.buffer) < self.capacity:\n self.buffer.append(sample)\n else:\n self.buffer[self.pos] = sample\n self.pos = (self.pos + 1) % self.capacity\n\n def populate(self, samples):\n \"\"\"\n Populates samples into the buffer\n :param samples: how many samples to populate\n \"\"\"\n for _ in range(samples):\n entry = next(self.experience_source_iter)\n self._add(entry)\n\nclass PrioReplayBufferNaive:\n def __init__(self, exp_source, buf_size, prob_alpha=0.6):\n self.exp_source_iter = iter(exp_source)\n self.prob_alpha = prob_alpha\n self.capacity = buf_size\n self.pos = 0\n self.buffer = []\n self.priorities = np.zeros((buf_size, ), dtype=np.float32)\n\n def __len__(self):\n return len(self.buffer)\n\n def populate(self, count):\n max_prio = self.priorities.max() if self.buffer else 1.0\n for _ in range(count):\n sample = next(self.exp_source_iter)\n if len(self.buffer) < self.capacity:\n self.buffer.append(sample)\n else:\n self.buffer[self.pos] = sample\n self.priorities[self.pos] = max_prio\n self.pos = (self.pos + 1) % self.capacity\n\n def sample(self, batch_size, beta=0.4):\n if len(self.buffer) == self.capacity:\n prios = self.priorities\n else:\n prios = self.priorities[:self.pos]\n probs = np.array(prios, dtype=np.float32) ** self.prob_alpha\n\n probs /= probs.sum()\n indices = np.random.choice(len(self.buffer), batch_size, p=probs, replace=True)\n samples = [self.buffer[idx] for idx in indices]\n total = len(self.buffer)\n weights = (total * probs[indices]) ** (-beta)\n weights /= weights.max()\n return samples, indices, np.array(weights, dtype=np.float32)\n\n def update_priorities(self, batch_indices, batch_priorities):\n for idx, prio in zip(batch_indices, batch_priorities):\n self.priorities[idx] = prio\n\n\nclass PrioritizedReplayBuffer(ExperienceReplayBuffer):\n def __init__(self, experience_source, buffer_size, alpha, beta):\n super(PrioritizedReplayBuffer, self).__init__(experience_source, buffer_size)\n assert alpha > 0\n assert beta > 0\n self._alpha = alpha\n self._beta = beta\n\n it_capacity = 1\n while it_capacity < buffer_size:\n it_capacity *= 2\n\n self._it_sum = utils.SumSegmentTree(it_capacity)\n self._it_min = utils.MinSegmentTree(it_capacity)\n self._max_priority = 1.0\n\n def _add(self, *args, **kwargs):\n idx = self.pos\n super()._add(*args, **kwargs)\n self._it_sum[idx] = self._max_priority ** self._alpha\n self._it_min[idx] 
= self._max_priority ** self._alpha\n\n def _sample_proportional(self, batch_size):\n res = []\n for _ in range(batch_size):\n mass = random.random() * self._it_sum.sum(0, len(self) - 1)\n idx = self._it_sum.find_prefixsum_idx(mass)\n res.append(idx)\n return res\n\n def sample(self, batch_size):\n idxes = self._sample_proportional(batch_size)\n\n weights = []\n p_min = self._it_min.min() / self._it_sum.sum()\n max_weight = (p_min * len(self)) ** (-self._beta)\n\n for idx in idxes:\n p_sample = self._it_sum[idx] / self._it_sum.sum()\n weight = (p_sample * len(self)) ** (-self._beta)\n weights.append(weight / max_weight)\n weights = np.array(weights, dtype=np.float32)\n samples = [self.buffer[idx] for idx in idxes]\n return samples, idxes, weights\n\n def update_priorities(self, idxes, priorities):\n assert len(idxes) == len(priorities)\n for idx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= idx < len(self)\n self._it_sum[idx] = priority ** self._alpha\n self._it_min[idx] = priority ** self._alpha\n\n self._max_priority = max(self._max_priority, priority)\n\n\nclass BatchPreprocessor:\n \"\"\"\n Abstract preprocessor class descendants to which converts experience\n batch to form suitable to learning.\n \"\"\"\n def preprocess(self, batch):\n raise NotImplementedError\n\n\nclass QLearningPreprocessor(BatchPreprocessor):\n \"\"\"\n Supports SimpleDQN, TargetDQN, DoubleDQN and can additionally feed TD-error back to\n experience replay buffer.\n\n To use different modes, use appropriate class method\n \"\"\"\n def __init__(self, model, target_model, use_double_dqn=False, batch_td_error_hook=None, gamma=0.99, device=\"cpu\"):\n self.model = model\n self.target_model = target_model\n self.use_double_dqn = use_double_dqn\n self.batch_dt_error_hook = batch_td_error_hook\n self.gamma = gamma\n self.device = device\n\n @staticmethod\n def simple_dqn(model, **kwargs):\n return QLearningPreprocessor(model=model, target_model=None, use_double_dqn=False, **kwargs)\n\n @staticmethod\n def target_dqn(model, target_model, **kwards):\n return QLearningPreprocessor(model, target_model, use_double_dqn=False, **kwards)\n\n @staticmethod\n def double_dqn(model, target_model, **kwargs):\n return QLearningPreprocessor(model, target_model, use_double_dqn=True, **kwargs)\n\n def _calc_Q(self, states_first, states_last):\n \"\"\"\n Calculates apropriate q values for first and last states. 
Way of calculate depends on our settings.\n :param states_first: numpy array of first states\n :param states_last: numpy array of last states\n :return: tuple of numpy arrays of q values\n \"\"\"\n # here we need both first and last values calculated using our main model, so we\n # combine both states into one batch for efficiency and separate results later\n if self.target_model is None or self.use_double_dqn:\n states_t = torch.tensor(np.concatenate((states_first, states_last), axis=0)).to(self.device)\n res_both = self.model(states_t).data.cpu().numpy()\n return res_both[:len(states_first)], res_both[len(states_first):]\n\n # in this case we have target_model set and use_double_dqn==False\n # so, we should calculate first_q and last_q using different models\n states_first_v = torch.tensor(states_first).to(self.device)\n states_last_v = torch.tensor(states_last).to(self.device)\n q_first = self.model(states_first_v).data\n q_last = self.target_model(states_last_v).data\n return q_first.cpu().numpy(), q_last.cpu().numpy()\n\n def _calc_target_rewards(self, states_last, q_last):\n \"\"\"\n Calculate rewards from final states according to variants from our construction:\n 1. simple DQN: max(Q(states, model))\n 2. target DQN: max(Q(states, target_model))\n 3. double DQN: Q(states, target_model)[argmax(Q(states, model)]\n :param states_last: numpy array of last states from the games\n :param q_last: numpy array of last q values\n :return: vector of target rewards\n \"\"\"\n # in this case we handle both simple DQN and target DQN\n if self.target_model is None or not self.use_double_dqn:\n return q_last.max(axis=1)\n\n # here we have target_model set and use_double_dqn==True\n actions = q_last.argmax(axis=1)\n # calculate Q values using target net\n states_last_v = torch.tensor(states_last).to(self.device)\n q_last_target = self.target_model(states_last_v).data.cpu().numpy()\n return q_last_target[range(q_last_target.shape[0]), actions]\n\n def preprocess(self, batch):\n \"\"\"\n Calculates data for Q learning from batch of observations\n :param batch: list of lists of Experience objects\n :return: tuple of numpy arrays:\n 1. states -- observations\n 2. target Q-values\n 3. vector of td errors for every batch entry\n \"\"\"\n # first and last states for every entry\n state_0 = np.array([exp[0].state for exp in batch], dtype=np.float32)\n state_L = np.array([exp[-1].state for exp in batch], dtype=np.float32)\n\n q0, qL = self._calc_Q(state_0, state_L)\n rewards = self._calc_target_rewards(state_L, qL)\n\n td = np.zeros(shape=(len(batch),))\n\n for idx, (total_reward, exps) in enumerate(zip(rewards, batch)):\n # game is done, no final reward\n if exps[-1].done:\n total_reward = 0.0\n for exp in reversed(exps[:-1]):\n total_reward *= self.gamma\n total_reward += exp.reward\n # update total reward and calculate td error\n act = exps[0].action\n td[idx] = q0[idx][act] - total_reward\n q0[idx][act] = total_reward\n\n return state_0, q0, td\n"
] | [
[
"torch.nn.Sequential",
"torch.from_numpy",
"torch.nn.Linear",
"numpy.append",
"torch.FloatTensor",
"numpy.moveaxis",
"torch.device",
"scipy.signal.lfilter",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LeanderLXZ/oracle-recognition | [
"c82976333d4a72218b06fffc94192238d95fcf9e"
] | [
"src/capsulesEM_V1/capsules/nets.py"
] | [
"\"\"\"An implementation of matrix capsules with EM routing.\n\"\"\"\n\nimport tensorflow as tf\n\nfrom core import _conv2d_wrapper, capsules_init, capsules_conv, capsules_fc\n\nslim = tf.contrib.slim\n\n# ------------------------------------------------------------------------------#\n# -------------------------------- capsules net --------------------------------#\n# ------------------------------------------------------------------------------#\n\ndef capsules_v0(inputs, num_classes, iterations, name='CapsuleEM-V0'):\n \"\"\"Replicate the network in `Matrix Capsules with EM Routing.`\n \"\"\"\n\n with tf.variable_scope(name) as scope:\n\n # inputs [N, H, W, C] -> conv2d, 5x5, strides 2, channels 32 -> nets [N, OH, OW, 32]\n nets = _conv2d_wrapper(\n inputs, shape=[5, 5, 1, 32], strides=[1, 2, 2, 1], padding='SAME', add_bias=True, activation_fn=tf.nn.relu, name='conv1'\n )\n # inputs [N, H, W, C] -> conv2d, 1x1, strides 1, channels 32x(4x4+1) -> (poses, activations)\n nets = capsules_init(\n nets, shape=[1, 1, 32, 32], strides=[1, 1, 1, 1], padding='VALID', pose_shape=[4, 4], name='capsule_init'\n )\n # inputs: (poses, activations) -> capsule-conv 3x3x32x32x4x4, strides 2 -> (poses, activations)\n nets = capsules_conv(\n nets, shape=[3, 3, 32, 32], strides=[1, 2, 2, 1], iterations=iterations, name='capsule_conv1'\n )\n # inputs: (poses, activations) -> capsule-conv 3x3x32x32x4x4, strides 1 -> (poses, activations)\n nets = capsules_conv(\n nets, shape=[3, 3, 32, 32], strides=[1, 1, 1, 1], iterations=iterations, name='capsule_conv2'\n )\n # inputs: (poses, activations) -> capsule-fc 1x1x32x10x4x4 shared view transform matrix within each channel -> (poses, activations)\n nets = capsules_fc(\n nets, num_classes, iterations=iterations, name='capsule_fc'\n )\n\n poses, activations = nets\n\n return poses, activations\n\n# ------------------------------------------------------------------------------#\n# ------------------------------------ loss ------------------------------------#\n# ------------------------------------------------------------------------------#\n\ndef spread_loss(labels, activations, margin, name):\n \"\"\"This adds spread loss to total loss.\n\n :param labels: [N, O], where O is number of output classes, one hot vector, tf.uint8.\n :param activations: [N, O], activations.\n :param margin: margin 0.2 - 0.9 fixed schedule during training.\n\n :return: spread loss\n \"\"\"\n\n activations_shape = activations.get_shape().as_list()\n\n with tf.variable_scope(name) as scope:\n\n mask_t = tf.equal(labels, 1)\n mask_i = tf.equal(labels, 0)\n\n activations_t = tf.reshape(\n tf.boolean_mask(activations, mask_t), [activations_shape[0], 1]\n )\n activations_i = tf.reshape(\n tf.boolean_mask(activations, mask_i), [activations_shape[0], activations_shape[1] - 1]\n )\n\n # margin = tf.Print(\n # margin, [margin], 'margin', summarize=20\n # )\n\n gap_mit = tf.reduce_sum(\n tf.square(\n tf.nn.relu(\n margin - (activations_t - activations_i)\n )\n )\n )\n\n # tf.add_to_collection(\n # tf.GraphKeys.LOSSES, gap_mit\n # )\n #\n # total_loss = tf.add_n(\n # tf.get_collection(\n # tf.GraphKeys.LOSSES\n # ), name='total_loss'\n # )\n\n tf.losses.add_loss(gap_mit)\n\n return gap_mit\n\n# ------------------------------------------------------------------------------#\n\n"
] | [
[
"tensorflow.boolean_mask",
"tensorflow.nn.relu",
"tensorflow.equal",
"tensorflow.variable_scope",
"tensorflow.losses.add_loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
broadinstitute/lincs-profiling-comparison | [
"075c3bc60eeb3934fc42c30bae6aeed8cda1cd6d",
"075c3bc60eeb3934fc42c30bae6aeed8cda1cd6d"
] | [
"2.MOA-prediction/4.model_viz/scripts/nbconverted/0.blend_test_predictions.py",
"1.Data-exploration/Profiles_level4/cell_painting/scripts/nbconverted/8.cellpainting_calculate_nonspherized_null_p_values.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# ### - Ensemble/Blend the 4 model predictions into a single prediction\n\n# In[1]:\n\n\nimport os\nimport datetime\nfrom time import time\nimport pathlib\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\nfrom collections import Counter\n\n\n# In[2]:\n\n\nfrom sklearn.metrics import precision_recall_curve,average_precision_score\nfrom sklearn.metrics import log_loss, roc_curve\nfrom sklearn.metrics import auc,roc_auc_score\n\n\n# In[3]:\n\n\nfrom numba import njit\nfrom scipy.optimize import minimize, fsolve\n\n\n# In[4]:\n\n\n# The two options here are \"\" and \"_subsample\"\nfile_indicator = \"\"\ndata_dir = pathlib.Path(\"../2.data_split/model_data\")\n\n\n# In[5]:\n\n\ncp_test = pathlib.Path(f\"{data_dir}/cp/test_lvl4_data{file_indicator}.csv.gz\")\nL1000_test = pathlib.Path(f\"{data_dir}/L1/test_lvl4_data.csv.gz\")\ncp_L1000_test = pathlib.Path(f\"{data_dir}/merged/test_lvl4_data.csv.gz\")\n\n\n# In[6]:\n\n\nmodel_preds_dir = '../L1000_CP_model_predictions/'\n\n\n# In[7]:\n\n\ndf_cp_test = pd.read_csv(cp_test, compression='gzip',low_memory = False)\ndf_L1000_test = pd.read_csv(L1000_test, compression='gzip',low_memory = False)\ndf_cp_L1000_test = pd.read_csv(cp_L1000_test, compression='gzip',low_memory = False)\n\n\n# In[8]:\n\n\ndf_cp_L1000_test.shape\n\n\n# In[9]:\n\n\n##resnet\ndf_cp_resnet_test = pd.read_csv(os.path.join(model_preds_dir, f'cp_test_preds_resnet{file_indicator}.csv'))\ndf_L1000_resnet_test = pd.read_csv(os.path.join(model_preds_dir, 'L1000_test_preds_resnet.csv'))\ndf_cp_L1000_resnet_test = pd.read_csv(os.path.join(model_preds_dir, 'cp_L1000_test_preds_resnet.csv'))\n\n\n# In[10]:\n\n\nprint(df_cp_L1000_resnet_test.shape)\ndf_cp_L1000_resnet_test.head()\n\n\n# In[11]:\n\n\n##1-d cnn\ndf_cp_cnn_test = pd.read_csv(os.path.join(model_preds_dir, f'cp_test_preds_1dcnn{file_indicator}.csv'))\ndf_L1000_cnn_test = pd.read_csv(os.path.join(model_preds_dir, 'L1000_test_preds_1dcnn.csv'))\ndf_cp_L1000_cnn_test = pd.read_csv(os.path.join(model_preds_dir, 'cp_L1000_test_preds_1dcnn.csv'))\n\n\n# In[12]:\n\n\nprint(df_cp_L1000_cnn_test.shape)\ndf_cp_L1000_cnn_test.head()\n\n\n# In[13]:\n\n\n##tabnet\ndf_cp_tabnet_test = pd.read_csv(os.path.join(model_preds_dir, f'cp_test_preds_tabnet{file_indicator}.csv'))\ndf_L1000_tabnet_test = pd.read_csv(os.path.join(model_preds_dir, 'L1000_test_preds_tabnet.csv'))\ndf_cp_L1000_tabnet_test = pd.read_csv(os.path.join(model_preds_dir, 'cp_L1000_test_preds_tabnet.csv'))\n\n\n# In[14]:\n\n\ndf_cp_L1000_tabnet_test.shape\n\n\n# In[15]:\n\n\n##stagedNN\ndf_cp_simplenn_test = pd.read_csv(os.path.join(model_preds_dir, f'cp_test_preds_simplenn{file_indicator}.csv'))\ndf_L1000_simplenn_test = pd.read_csv(os.path.join(model_preds_dir, 'L1000_test_preds_simplenn.csv'))\ndf_cp_L1000_simplenn_test = pd.read_csv(os.path.join(model_preds_dir, 'cp_L1000_test_preds_simplenn.csv'))\n\n\n# In[16]:\n\n\ndf_cp_L1000_simplenn_test.shape\n\n\n# In[17]:\n\n\ndf_cp_tst_targets = df_cp_test[df_cp_cnn_test.columns]\ndf_L1000_tst_targets = df_L1000_test[df_L1000_cnn_test.columns]\ndf_cp_L1000_tst_targets = df_cp_L1000_test[df_cp_L1000_cnn_test.columns]\n\n\n# In[18]:\n\n\ndf_cp_tst_targets.shape\n\n\n# In[19]:\n\n\ndf_L1000_tst_targets.shape\n\n\n# In[20]:\n\n\ndf_cp_L1000_tst_targets.shape\n\n\n# #### - Resnet, 1d-cnn, Tabnet, Simplenn --> 4 model predictions\n\n# In[21]:\n\n\n# CPMP's logloss from https://www.kaggle.com/c/lish-moa/discussion/183010\ndef log_loss_numpy(y_true, y_pred):\n 
y_true_ravel = np.asarray(y_true).ravel()\n y_pred = np.asarray(y_pred).ravel()\n y_pred = np.clip(y_pred, 1e-15, 1 - 1e-15)\n loss = np.where(y_true_ravel == 1, - np.log(y_pred), - np.log(1 - y_pred))\n return loss.mean()\n\ndef func_numpy_metric(weights, oof, y_true):\n oof_blend = np.tensordot(weights, oof, axes = ((0), (0)))\n return log_loss_numpy(y_true, oof_blend)\n\ndef grad_func(weights, oof, y_true):\n oof_clip = np.clip(oof, 1e-15, 1 - 1e-15)\n gradients = np.zeros(oof.shape[0])\n for i in range(oof.shape[0]):\n a, b, c = y_true, oof_clip[i], np.zeros((oof.shape[1], oof.shape[2]))\n for j in range(oof.shape[0]):\n if j != i:\n c += weights[j] * oof_clip[j]\n gradients[i] = -np.mean((-a*b+(b**2)*weights[i]+b*c)/((b**2)*(weights[i]**2)+2*b*c*weights[i]-b*weights[i]+(c**2)-c))\n return gradients\n\n@njit\ndef grad_func_jit(weights, oof, y_true):\n oof_clip = np.minimum(1 - 1e-15, np.maximum(oof, 1e-15))\n gradients = np.zeros(oof.shape[0])\n for i in range(oof.shape[0]):\n a, b, c = y_true, oof_clip[i], np.zeros((oof.shape[1], oof.shape[2]))\n for j in range(oof.shape[0]):\n if j != i:\n c += weights[j] * oof_clip[j]\n gradients[i] = -np.mean((-a*b+(b**2)*weights[i]+b*c)/((b**2)*(weights[i]**2)+2*b*c*weights[i]-b*weights[i]+(c**2)-c))\n return gradients\n\n\n# In[22]:\n\n\ncp_model_preds = [df_cp_cnn_test, df_cp_resnet_test, df_cp_tabnet_test, df_cp_simplenn_test]\nL1000_model_preds = [df_L1000_cnn_test, df_L1000_resnet_test, df_L1000_tabnet_test, df_L1000_simplenn_test]\ncp_L1000_model_preds = [df_cp_L1000_cnn_test, df_cp_L1000_resnet_test, df_cp_L1000_tabnet_test, df_cp_L1000_simplenn_test]\n\n\n# In[23]:\n\n\nmodels_name = ['1d-Cnn', 'Resnet', 'Tabnet', 'SimpleNN']\n\ndef get_optmized_blended_weights(model_oofs, df_targets, num_of_models = 4, models_name = models_name):\n \"\"\"\n This function assign weights to each of the models used in predicting MOAs based on the log-loss obtained \n when comparing each model prediction results with the actual MOA (Mechanism of actions) test labels.\n\n for more info:https://www.kaggle.com/gogo827jz/optimise-blending-weights-with-bonus-0/notebook\n \"\"\"\n model_oof_preds = np.zeros((num_of_models, df_targets.shape[0], df_targets.shape[1]))\n for idx in range(num_of_models):\n model_oof_preds[idx] = model_oofs[idx].values\n score_oof = log_loss_numpy(df_targets, model_oof_preds[idx])\n print(f'{idx} {models_name[idx]}, Test loss:\\t', score_oof)\n \n tol = 1e-10\n init_guess = [1 / model_oof_preds.shape[0]] * model_oof_preds.shape[0]\n bnds = [(0, 1) for _ in range(model_oof_preds.shape[0])]\n cons = {\n 'type': 'eq',\n 'fun': lambda x: np.sum(x) - 1,\n 'jac': lambda x: [1] * len(x)\n }\n print('Inital Blend OOF:', func_numpy_metric(init_guess, model_oof_preds, df_targets.values))\n \n start_time = time()\n \n res_scipy = minimize(fun = func_numpy_metric, x0 = init_guess, \n args=(model_oof_preds, df_targets.values), \n method = 'SLSQP', ##L-BFGS-B ##SLSQP\n jac = grad_func_jit, # grad_func \n bounds = bnds, constraints = cons, tol = tol)\n print(f'[{str(datetime.timedelta(seconds = time() - start_time))[2:7]}] Optimised Blend OOF:', res_scipy.fun)\n print('Optimised Weights:', res_scipy.x)\n return model_oof_preds, res_scipy.x\n\n\n# In[24]:\n\n\n_, L1000_model_weights = get_optmized_blended_weights(L1000_model_preds, df_L1000_tst_targets,)\n\n\n# In[25]:\n\n\n_, cp_model_weights = get_optmized_blended_weights(cp_model_preds, df_cp_tst_targets,)\n\n\n# In[26]:\n\n\n_, cp_L1000_model_weights = 
get_optmized_blended_weights(cp_L1000_model_preds, df_cp_L1000_tst_targets)\n\n\n# In[27]:\n\n\ndef model_eval_results(df_tst, df_tst_y, df_preds):\n \"\"\"\n This function prints out the model evaluation results from the train and test predictions.\n The evaluation metrics used in assessing the performance of the models are: ROC AUC score,\n log loss and Precision-Recall AUC score\n \"\"\"\n eval_metrics = ['log loss', 'ROC AUC score', 'PR-AUC/Average_precision_score',]\n moa_class_list = df_tst['moa'].unique()\n val_moas = [moa for moa_list in moa_class_list for moa in moa_list.split('|')]\n print('-' * 10, 'Test data prediction results', '-' * 10)\n print(f'{eval_metrics[0]}:', log_loss(np.ravel(df_tst_y), np.ravel(df_preds)))\n print(f'{eval_metrics[1]}:', roc_auc_score(df_tst_y[val_moas],df_preds[val_moas], average='macro'))\n print(f'{eval_metrics[2]}:', average_precision_score(df_tst_y[val_moas], df_preds[val_moas], average=\"micro\"))\n\n\n# In[28]:\n\n\n##[1.57502187e-01,1.15142271e-16,0.00000000e+00,8.42497813e-01] <-- modify the model weights\ndf_L1000_blend = pd.DataFrame(np.zeros(df_L1000_cnn_test.shape), columns = df_L1000_cnn_test.columns)\ndf_L1000_blend = df_L1000_cnn_test*0.45 + df_L1000_resnet_test*0.05 + df_L1000_tabnet_test*0.05 + df_L1000_simplenn_test*0.45\n\n\n# In[29]:\n\n\n0.45+(0.05*2)+0.45\n\n\n# In[30]:\n\n\nmodel_eval_results(df_L1000_test, df_L1000_tst_targets, df_L1000_blend)\n\n\n# In[31]:\n\n\n##[4.29598527e-01 3.27312317e-01 2.43089156e-01 5.42101086e-18] <-- modify the model weights\ndf_cp_blend = pd.DataFrame(np.zeros(df_cp_cnn_test.shape), columns = df_cp_cnn_test.columns)\ndf_cp_blend = df_cp_cnn_test*0.35 + df_cp_resnet_test*0.35 + df_cp_tabnet_test*0.25 + df_cp_simplenn_test*0.05\n\n\n# In[32]:\n\n\n0.35+0.35+0.25+0.05\n\n\n# In[33]:\n\n\nmodel_eval_results(df_cp_test, df_cp_tst_targets, df_cp_blend)\n\n\n# In[34]:\n\n\n##[0.28574384 0.09796798 0.06528908 0.5509991 ] <-- modify the model weights\ndf_cp_L1000_blend = pd.DataFrame(np.zeros(df_cp_L1000_cnn_test.shape), columns = df_cp_L1000_cnn_test.columns)\ndf_cp_L1000_blend = df_cp_L1000_cnn_test*0.30 + df_cp_L1000_resnet_test*0.20 + df_cp_L1000_tabnet_test*0.15 + df_cp_L1000_simplenn_test*0.35\n\n\n# In[35]:\n\n\n0.30+0.20+0.15+0.35\n\n\n# In[36]:\n\n\nmodel_eval_results(df_cp_L1000_test, df_cp_L1000_tst_targets, df_cp_L1000_blend)\n\n\n# In[37]:\n\n\ndef save_to_csv(df, path, file_name, compress=None):\n \"\"\"save dataframes to csv\"\"\"\n \n if not os.path.exists(path):\n os.mkdir(path)\n \n df.to_csv(os.path.join(path, file_name), index=False, compression=compress)\n\n\n# In[38]:\n\n\nsave_to_csv(df_cp_blend, model_preds_dir, f'cp_test_preds_blend{file_indicator}.csv')\nsave_to_csv(df_L1000_blend, model_preds_dir, 'L1000_test_preds_blend.csv')\nsave_to_csv(df_cp_L1000_blend, model_preds_dir, 'cp_L1000_test_preds_blend.csv')\n\n",
"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Calculate null distribution from median Cell Painting scores with same sample size as L1000\n# \n# Code modified from @adeboyeML\n\n# In[1]:\n\n\nimport os\nimport pathlib\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\nfrom pycytominer import feature_select\nfrom statistics import median\nimport random\nfrom scipy import stats\nimport pickle\n\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nnp.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)\n\n\n# In[2]:\n\n\nnp.random.seed(42)\n\n\n# In[3]:\n\n\n# Load common compounds\ncommon_file = pathlib.Path(\n \"..\", \"..\", \"..\", \"6.paper_figures\", \"data\", \"significant_compounds_by_threshold_both_assays.tsv.gz\"\n)\ncommon_df = pd.read_csv(common_file, sep=\"\\t\")\n\ncommon_compounds = common_df.compound.unique()\nprint(len(common_compounds))\n\n\n# In[4]:\n\n\ncp_level4_path = \"cellpainting_lvl4_cpd_replicate_datasets\"\n\n\n# In[5]:\n\n\ndf_level4 = pd.read_csv(os.path.join(cp_level4_path, 'cp_level4_cpd_replicates_nonspherized.csv.gz'), \n compression='gzip',low_memory = False)\n\nprint(df_level4.shape)\ndf_level4.head()\n\n\n# In[6]:\n\n\ndf_cpd_med_scores = pd.read_csv(os.path.join(cp_level4_path, 'cpd_replicate_median_scores_nonspherized.csv'))\ndf_cpd_med_scores = df_cpd_med_scores.set_index('cpd').rename_axis(None, axis=0).copy()\n\n# Subset to common compound measurements\ndf_cpd_med_scores = df_cpd_med_scores.loc[df_cpd_med_scores.index.isin(common_compounds), :].rename(columns={\"cpd_size\": \"no_of_replicates\"})\n\nprint(df_cpd_med_scores.shape)\ndf_cpd_med_scores .head()\n\n\n# In[7]:\n\n\ndef get_cpds_replicates(df, df_lvl4):\n \"\"\"\n This function returns all replicates id/names found in each compound \n and in all doses(1-6)\n \"\"\"\n \n dose_list = list(set(df_lvl4['Metadata_dose_recode'].unique().tolist()))[1:7]\n replicates_in_all = []\n cpds_replicates = {}\n for dose in dose_list:\n rep_list = []\n df_doses = df_lvl4[df_lvl4['Metadata_dose_recode'] == dose].copy()\n for cpd in df.index:\n replicate_names = df_doses[df_doses['pert_iname'] == cpd]['replicate_name'].values.tolist()\n rep_list += replicate_names\n if cpd not in cpds_replicates:\n cpds_replicates[cpd] = [replicate_names]\n else:\n cpds_replicates[cpd] += [replicate_names]\n replicates_in_all.append(rep_list)\n \n return replicates_in_all, cpds_replicates\n\n\n# In[8]:\n\n\nreplicates_in_all, cpds_replicates = get_cpds_replicates(df_cpd_med_scores, df_level4)\n\n\n# In[9]:\n\n\ndef get_replicates_classes_per_dose(df, df_lvl4, cpds_replicates):\n \n \"\"\"\n This function gets all replicates ids for each distinct \n no_of_replicates (i.e. 
number of replicates per cpd) class per dose (1-6)\n \n Returns replicate_class_dict dictionary, with no_of_replicate classes as the keys, \n and all the replicate_ids for each no_of_replicate class as the values\n \"\"\"\n \n df['replicate_id'] = list(cpds_replicates.values())\n dose_list = list(set(df_lvl4['Metadata_dose_recode'].unique().tolist()))[1:7]\n replicate_class_dict = {}\n for dose in dose_list:\n for size in df['no_of_replicates'].unique():\n rep_lists = []\n for idx in range(df[df['no_of_replicates'] == size].shape[0]):\n rep_ids = df[df['no_of_replicates'] == size]['replicate_id'].values.tolist()[idx][dose-1]\n rep_lists += rep_ids\n if size not in replicate_class_dict:\n replicate_class_dict[size] = [rep_lists]\n else:\n replicate_class_dict[size] += [rep_lists]\n \n return replicate_class_dict\n\n\n# In[10]:\n\n\ncpd_replicate_class_dict = get_replicates_classes_per_dose(df_cpd_med_scores, df_level4, cpds_replicates)\n\n\n# In[11]:\n\n\ncpd_replicate_class_dict.keys()\n\n\n# In[12]:\n\n\ndef check_similar_replicates(replicates, dose, cpd_dict):\n \"\"\"This function checks if two replicates are of the same compounds\"\"\"\n \n for x in range(len(replicates)):\n for y in range(x+1, len(replicates)):\n for kys in cpd_dict:\n if all(i in cpd_dict[kys][dose-1] for i in [replicates[x], replicates[y]]):\n return True\n return False\n\n\n# In[13]:\n\n\ndef get_random_replicates(all_replicates, no_of_replicates, dose, replicates_ids, cpd_replicate_dict):\n \"\"\"\n This function return a list of random replicates that are not of the same compounds\n or found in the current cpd's size list\n \"\"\"\n while (True):\n random_replicates = random.sample(all_replicates, no_of_replicates)\n if not (any(rep in replicates_ids for rep in random_replicates) & \n (check_similar_replicates(random_replicates, dose, cpd_replicate_dict))):\n break\n return random_replicates\n\n\n# In[14]:\n\n\ndef get_null_distribution_replicates(\n cpd_replicate_class_dict,\n dose_list,\n replicates_lists,\n cpd_replicate_dict,\n rand_num = 1000\n):\n \n \"\"\"\n This function returns a null distribution dictionary, with no_of_replicates(replicate class) \n as the keys and 1000 lists of randomly selected replicate combinations as the values\n for each no_of_replicates class per DOSE(1-6)\n \"\"\"\n random.seed(1903)\n null_distribution_reps = {}\n for dose in dose_list:\n for replicate_class in cpd_replicate_class_dict:\n replicates_ids = cpd_replicate_class_dict[replicate_class][dose-1]\n replicate_list = []\n for idx in range(rand_num):\n start_again = True\n while (start_again):\n rand_cpds = get_random_replicates(replicates_lists[dose-1], replicate_class, dose, \n replicates_ids, cpd_replicate_dict)\n if rand_cpds not in replicate_list:\n start_again = False\n replicate_list.append(rand_cpds)\n if replicate_class not in null_distribution_reps:\n null_distribution_reps[replicate_class] = [replicate_list]\n else:\n null_distribution_reps[replicate_class] += [replicate_list]\n \n return null_distribution_reps\n\n\n# In[15]:\n\n\nlen(cpds_replicates.keys())\n\n\n# In[16]:\n\n\ndose_list = list(set(df_level4['Metadata_dose_recode'].unique().tolist()))[1:7]\n\nnull_distribution_replicates = get_null_distribution_replicates(\n cpd_replicate_class_dict, dose_list, replicates_in_all, cpds_replicates\n)\n\n\n# In[17]:\n\n\ndef save_to_pickle(null_distribution, path, file_name):\n \"\"\"This function saves the null distribution replicates ids into a pickle file\"\"\"\n \n if not os.path.exists(path):\n 
os.mkdir(path)\n \n with open(os.path.join(path, file_name), 'wb') as handle:\n pickle.dump(null_distribution, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\n# In[18]:\n\n\n#save the null_distribution_moa to pickle\nsave_to_pickle(null_distribution_replicates, cp_level4_path, 'null_distribution_nonspherized.pickle')\n\n\n# In[19]:\n\n\n##load the null_distribution_moa from pickle\nwith open(os.path.join(cp_level4_path, 'null_distribution_nonspherized.pickle'), 'rb') as handle:\n null_distribution_replicates = pickle.load(handle)\n\n\n# In[20]:\n\n\ndef assert_null_distribution(null_distribution_reps, dose_list):\n \n \"\"\"\n This function assert that each of the list in the 1000 lists of random replicate \n combination (per dose) for each no_of_replicate class are distinct with no duplicates\n \"\"\"\n duplicates_reps = {}\n for dose in dose_list:\n for keys in null_distribution_reps:\n null_dist = null_distribution_reps[keys][dose-1]\n for reps in null_dist:\n dup_reps = []\n new_list = list(filter(lambda x: x != reps, null_dist))\n if (len(new_list) != len(null_dist) - 1):\n dup_reps.append(reps)\n if dup_reps:\n if keys not in duplicates_reps:\n duplicates_reps[keys] = [dup_reps]\n else:\n duplicates_reps[keys] += [dup_reps]\n return duplicates_reps\n\n\n# In[21]:\n\n\nduplicate_replicates = assert_null_distribution(null_distribution_replicates, dose_list)\n\n\n# In[22]:\n\n\nduplicate_replicates ##no duplicates\n\n\n# In[23]:\n\n\ndef calc_null_dist_median_scores(df, dose_num, replicate_lists):\n \"\"\"\n This function calculate the median of the correlation \n values for each list in the 1000 lists of random replicate \n combination for each no_of_replicate class per dose\n \"\"\"\n df_dose = df[df['Metadata_dose_recode'] == dose_num].copy()\n df_dose = df_dose.set_index('replicate_name').rename_axis(None, axis=0)\n df_dose.drop(['Metadata_broad_sample', 'Metadata_pert_id', 'Metadata_dose_recode', \n 'Metadata_Plate', 'Metadata_Well', 'Metadata_broad_id', 'Metadata_moa', \n 'broad_id', 'pert_iname', 'moa'], \n axis = 1, inplace = True)\n median_corr_list = []\n for rep_list in replicate_lists:\n df_reps = df_dose.loc[rep_list].copy()\n reps_corr = df_reps.astype('float64').T.corr(method = 'pearson').values\n median_corr_val = median(list(reps_corr[np.triu_indices(len(reps_corr), k = 1)]))\n median_corr_list.append(median_corr_val)\n return median_corr_list\n\n\n# In[24]:\n\n\ndef get_null_dist_median_scores(null_distribution_cpds, dose_list, df):\n \"\"\" \n This function calculate the median correlation scores for all \n 1000 lists of randomly combined compounds for each no_of_replicate class \n across all doses (1-6)\n \"\"\"\n null_distribution_medians = {}\n for key in null_distribution_cpds:\n median_score_list = []\n for dose in dose_list:\n replicate_median_scores = calc_null_dist_median_scores(df, dose, null_distribution_cpds[key][dose-1])\n median_score_list.append(replicate_median_scores)\n null_distribution_medians[key] = median_score_list\n return null_distribution_medians\n\n\n# In[25]:\n\n\nnull_distribution_medians = get_null_dist_median_scores(null_distribution_replicates, dose_list, df_level4)\n\n\n# In[26]:\n\n\ndef compute_dose_median_scores(null_dist_medians, dose_list):\n \"\"\"\n This function align median scores per dose, and return a dictionary, \n with keys as dose numbers and values as all median null distribution/non-replicate correlation \n scores for each dose\n \"\"\"\n median_scores_per_dose = {}\n for dose in dose_list:\n median_list = []\n for keys in 
null_distribution_medians:\n dose_median_list = null_distribution_medians[keys][dose-1]\n median_list += dose_median_list\n median_scores_per_dose[dose] = median_list\n return median_scores_per_dose\n\n\n# In[27]:\n\n\ndose_null_medians = compute_dose_median_scores(null_distribution_medians, dose_list)\n\n\n# In[28]:\n\n\n#save the null_distribution_medians_per_dose to pickle\nsave_to_pickle(dose_null_medians, cp_level4_path, 'null_dist_medians_per_dose_nonspherized.pickle')\n\n\n# In[29]:\n\n\ndef get_p_value(median_scores_list, df, dose_name, cpd_name):\n \"\"\"\n This function calculate the p-value from the \n null_distribution median scores for each compound\n \"\"\"\n actual_med = df.loc[cpd_name, dose_name]\n p_value = np.sum(median_scores_list >= actual_med) / len(median_scores_list)\n return p_value\n\n\n# In[30]:\n\n\ndef get_moa_p_vals(null_dist_median, dose_list, df_med_values):\n \"\"\"\n This function returns a dict, with compounds as the keys and the compound's \n p-values for each dose (1-6) as the values\n \"\"\"\n null_p_vals = {}\n for key in null_dist_median:\n df_replicate_class = df_med_values[df_med_values['no_of_replicates'] == key]\n for cpd in df_replicate_class.index:\n dose_p_values = []\n for num in dose_list:\n dose_name = 'dose_' + str(num)\n cpd_p_value = get_p_value(null_dist_median[key][num-1], df_replicate_class, dose_name, cpd)\n dose_p_values.append(cpd_p_value)\n null_p_vals[cpd] = dose_p_values\n sorted_null_p_vals = {key:value for key, value in sorted(null_p_vals.items(), key=lambda item: item[0])}\n return sorted_null_p_vals\n\n\n# In[31]:\n\n\nnull_p_vals = get_moa_p_vals(null_distribution_medians, dose_list, df_cpd_med_scores)\n\n\n# In[32]:\n\n\ndf_null_p_vals = pd.DataFrame.from_dict(null_p_vals, orient='index', columns = ['dose_' + str(x) for x in dose_list])\n\n\n# In[33]:\n\n\ndf_null_p_vals['no_of_replicates'] = df_cpd_med_scores['no_of_replicates']\n\n\n# In[34]:\n\n\ndf_null_p_vals.head(10)\n\n\n# In[35]:\n\n\ndef save_to_csv(df, path, file_name):\n \"\"\"saves dataframes to csv\"\"\"\n \n if not os.path.exists(path):\n os.mkdir(path)\n \n df.to_csv(os.path.join(path, file_name), index = False)\n\n\n# In[36]:\n\n\nsave_to_csv(df_null_p_vals.reset_index().rename({'index':'cpd'}, axis = 1), cp_level4_path, \n 'cpd_replicate_p_values_nonspherized.csv')\n\n\n# In[37]:\n\n\ncpd_summary_file = pathlib.Path(cp_level4_path, 'cpd_replicate_p_values_melted_nonspherized.csv')\n\ndose_recode_info = {\n 'dose_1': '0.04 uM', 'dose_2':'0.12 uM', 'dose_3':'0.37 uM',\n 'dose_4': '1.11 uM', 'dose_5':'3.33 uM', 'dose_6':'10 uM'\n}\n\n# Melt the p values\ncpd_score_summary_pval_df = (\n df_null_p_vals\n .reset_index()\n .rename(columns={\"index\": \"compound\"})\n .melt(\n id_vars=[\"compound\", \"no_of_replicates\"],\n value_vars=[\"dose_1\", \"dose_2\", \"dose_3\", \"dose_4\", \"dose_5\", \"dose_6\"],\n var_name=\"dose\",\n value_name=\"p_value\"\n )\n)\n\ncpd_score_summary_pval_df.dose = cpd_score_summary_pval_df.dose.replace(dose_recode_info)\n\n# Melt the median matching scores\ncpd_score_summary_df = (\n df_cpd_med_scores\n .reset_index()\n .rename(columns={\"index\": \"compound\"})\n .melt(\n id_vars=[\"compound\", \"no_of_replicates\"],\n value_vars=[\"dose_1\", \"dose_2\", \"dose_3\", \"dose_4\", \"dose_5\", \"dose_6\"],\n var_name=\"dose\",\n value_name=\"matching_score\"\n )\n\n)\n\ncpd_score_summary_df.dose = cpd_score_summary_df.dose.replace(dose_recode_info)\n\nsummary_df = (\n cpd_score_summary_pval_df\n .merge(cpd_score_summary_df, 
on=[\"compound\", \"no_of_replicates\", \"dose\"], how=\"inner\")\n .assign(\n assay=\"Cell Painting\",\n normalization=\"non_spherized\",\n category=\"all_data\"\n )\n)\n\nsummary_df.to_csv(cpd_summary_file, sep=\"\\t\", index=False)\n\nprint(summary_df.shape)\nsummary_df.head()\n\n"
] | [
[
"sklearn.metrics.roc_auc_score",
"numpy.log",
"pandas.read_csv",
"numpy.maximum",
"numpy.clip",
"numpy.asarray",
"scipy.optimize.minimize",
"numpy.tensordot",
"numpy.mean",
"sklearn.metrics.average_precision_score",
"numpy.ravel",
"numpy.zeros",
"numpy.sum"
],
[
"pandas.read_csv",
"numpy.sum",
"numpy.random.seed",
"numpy.warnings.filterwarnings"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
dbis-uibk/MediaEval2021 | [
"14d754d9cea36415090aaa115db81f5ace465964"
] | [
"plans/fixed_ensemble_vggish_linear_4.py"
] | [
"\"\"\"Ensemble plan manually split by type mood/theme.\"\"\"\nimport json\n\nfrom dbispipeline.evaluators import FixedSplitEvaluator\nfrom dbispipeline.evaluators import ModelCallbackWrapper\nimport numpy as np\nfrom sklearn.pipeline import Pipeline\n\nfrom mediaeval2021 import common\nfrom mediaeval2021.dataloaders.melspectrograms import MelSpectPickleLoader\nfrom mediaeval2021.models.ensemble import Ensemble\nfrom mediaeval2021.models.wrapper import TorchWrapper\n\ndataloader = MelSpectPickleLoader('data/mediaeval2020/melspect_1366.pickle')\n\nlabel_splits = [\n    np.arange(0, 14, 1),\n    np.arange(14, 28, 1),\n    np.arange(28, 42, 1),\n    np.arange(42, 56, 1),\n]\n\npipeline = Pipeline([\n    ('model',\n     Ensemble(\n         base_estimator=TorchWrapper(\n             model_name='CNN',\n             dataloader=dataloader,\n             batch_size=64,\n         ),\n         label_splits=label_splits,\n         epochs=100,\n     )),\n])\n\nevaluator = ModelCallbackWrapper(\n    FixedSplitEvaluator(**common.fixed_split_params()),\n    lambda model: common.store_prediction(model, dataloader),\n)\n\nresult_handlers = [\n    lambda results: print(json.dumps(results, indent=4)),\n]\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
veronicatozzo/regain | [
"5eaa9685eb34afa77abaf80a4e5764444bc95dd7",
"5eaa9685eb34afa77abaf80a4e5764444bc95dd7",
"5eaa9685eb34afa77abaf80a4e5764444bc95dd7"
] | [
"regain/covariance/time_graphical_lasso_.py",
"regain/forward_backward/time_graphical_lasso_laplacian.py",
"regain/wrapper/paspal/wrapper.py"
] | [
"# BSD 3-Clause License\n\n# Copyright (c) 2017, Federico T.\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Sparse inverse covariance selection over time via ADMM.\n\nMore information can be found in the paper linked at:\nhttps://arxiv.org/abs/1703.01958\n\"\"\"\nfrom __future__ import division\n\nimport warnings\n\nimport numpy as np\nfrom scipy import linalg\nfrom six.moves import map, range, zip\nfrom sklearn.covariance import empirical_covariance, log_likelihood\nfrom sklearn.utils.extmath import squared_norm\nfrom sklearn.utils.validation import check_X_y\n\nfrom regain.covariance.graphical_lasso_ import GraphicalLasso, logl\nfrom regain.norm import l1_od_norm\nfrom regain.prox import prox_logdet, soft_thresholding\nfrom regain.update_rules import update_rho\nfrom regain.utils import convergence, error_norm_time\nfrom regain.validation import check_norm_prox\n\n\ndef loss(S, K, n_samples=None):\n \"\"\"Loss function for time-varying graphical lasso.\"\"\"\n if n_samples is None:\n n_samples = np.ones(S.shape[0])\n return sum(\n -ni * logl(emp_cov, precision)\n for emp_cov, precision, ni in zip(S, K, n_samples))\n\n\ndef objective(n_samples, S, K, Z_0, Z_1, Z_2, alpha, beta, psi):\n \"\"\"Objective function for time-varying graphical lasso.\"\"\"\n obj = loss(S, K, n_samples=n_samples)\n\n if isinstance(alpha, np.ndarray):\n obj += sum(l1_od_norm(a * z) for a, z in zip(alpha, Z_0))\n else:\n obj += alpha * sum(map(l1_od_norm, Z_0))\n\n if isinstance(beta, np.ndarray):\n obj += sum(b[0][0] * m for b, m in zip(beta, map(psi, Z_2 - Z_1)))\n else:\n obj += beta * sum(map(psi, Z_2 - Z_1))\n\n return obj\n\n\ndef init_precision(emp_cov, mode='empirical'):\n if isinstance(mode, np.ndarray):\n return mode.copy()\n\n if mode == 'empirical':\n n_times, _, n_features = emp_cov.shape\n covariance_ = emp_cov.copy()\n covariance_ *= 0.95\n K = np.empty_like(emp_cov)\n for i, (c, e) in enumerate(zip(covariance_, emp_cov)):\n c.flat[::n_features + 1] = e.flat[::n_features + 1]\n K[i] = linalg.pinvh(c)\n elif mode == 'zeros':\n K = 
np.zeros_like(emp_cov)\n\n return K\n\n\ndef time_graphical_lasso(\n emp_cov, alpha=0.01, rho=1, beta=1, max_iter=100, n_samples=None,\n verbose=False, psi='laplacian', tol=1e-4, rtol=1e-4,\n return_history=False, return_n_iter=True, mode='admm',\n compute_objective=True, stop_at=None, stop_when=1e-4,\n update_rho_options=None, init='empirical'):\n \"\"\"Time-varying graphical lasso solver.\n\n Solves the following problem via ADMM:\n min sum_{i=1}^T -n_i log_likelihood(S_i, K_i) + alpha*||K_i||_{od,1}\n + beta sum_{i=2}^T Psi(K_i - K_{i-1})\n\n where S_i = (1/n_i) X_i^T \\times X_i is the empirical covariance of data\n matrix X (training observations by features).\n\n Parameters\n ----------\n emp_cov : ndarray, shape (n_features, n_features)\n Empirical covariance of data.\n alpha, beta : float, optional\n Regularisation parameter.\n rho : float, optional\n Augmented Lagrangian parameter.\n max_iter : int, optional\n Maximum number of iterations.\n n_samples : ndarray\n Number of samples available for each time point.\n tol : float, optional\n Absolute tolerance for convergence.\n rtol : float, optional\n Relative tolerance for convergence.\n return_history : bool, optional\n Return the history of computed values.\n return_n_iter : bool, optional\n Return the number of iteration before convergence.\n verbose : bool, default False\n Print info at each iteration.\n update_rho_options : dict, optional\n Arguments for the rho update.\n See regain.update_rules.update_rho function for more information.\n compute_objective : bool, default True\n Choose to compute the objective value.\n init : {'empirical', 'zero', ndarray}\n Choose how to initialize the precision matrix, with the inverse\n empirical covariance, zero matrix or precomputed.\n\n Returns\n -------\n K : numpy.array, 3-dimensional (T x d x d)\n Solution to the problem for each time t=1...T .\n history : list\n If return_history, then also a structure that contains the\n objective value, the primal and dual residual norms, and tolerances\n for the primal and dual residual norms at each iteration.\n\n \"\"\"\n psi, prox_psi, psi_node_penalty = check_norm_prox(psi)\n\n Z_0 = init_precision(emp_cov, mode=init)\n Z_1 = Z_0.copy()[:-1] # np.zeros_like(emp_cov)[:-1]\n Z_2 = Z_0.copy()[1:] # np.zeros_like(emp_cov)[1:]\n\n U_0 = np.zeros_like(Z_0)\n U_1 = np.zeros_like(Z_1)\n U_2 = np.zeros_like(Z_2)\n\n Z_0_old = np.zeros_like(Z_0)\n Z_1_old = np.zeros_like(Z_1)\n Z_2_old = np.zeros_like(Z_2)\n\n # divisor for consensus variables, accounting for two less matrices\n divisor = np.full(emp_cov.shape[0], 3, dtype=float)\n divisor[0] -= 1\n divisor[-1] -= 1\n\n if n_samples is None:\n n_samples = np.ones(emp_cov.shape[0])\n\n checks = [\n convergence(\n obj=objective(\n n_samples, emp_cov, Z_0, Z_0, Z_1, Z_2, alpha, beta, psi))\n ]\n for iteration_ in range(max_iter):\n # update K\n A = Z_0 - U_0\n A[:-1] += Z_1 - U_1\n A[1:] += Z_2 - U_2\n A /= divisor[:, None, None]\n # soft_thresholding_ = partial(soft_thresholding, lamda=alpha / rho)\n # K = np.array(map(soft_thresholding_, A))\n A += A.transpose(0, 2, 1)\n A /= 2.\n\n A *= -rho * divisor[:, None, None] / n_samples[:, None, None]\n A += emp_cov\n\n K = np.array(\n [\n prox_logdet(a, lamda=ni / (rho * div))\n for a, div, ni in zip(A, divisor, n_samples)\n ])\n\n # update Z_0\n A = K + U_0\n A += A.transpose(0, 2, 1)\n A /= 2.\n Z_0 = soft_thresholding(A, lamda=alpha / rho)\n\n # other Zs\n A_1 = K[:-1] + U_1\n A_2 = K[1:] + U_2\n if not psi_node_penalty:\n prox_e = prox_psi(A_2 - A_1, 
lamda=2. * beta / rho)\n Z_1 = .5 * (A_1 + A_2 - prox_e)\n Z_2 = .5 * (A_1 + A_2 + prox_e)\n else:\n Z_1, Z_2 = prox_psi(\n np.concatenate((A_1, A_2), axis=1), lamda=.5 * beta / rho,\n rho=rho, tol=tol, rtol=rtol, max_iter=max_iter)\n\n # update residuals\n U_0 += K - Z_0\n U_1 += K[:-1] - Z_1\n U_2 += K[1:] - Z_2\n\n # diagnostics, reporting, termination checks\n rnorm = np.sqrt(\n squared_norm(K - Z_0) + squared_norm(K[:-1] - Z_1) +\n squared_norm(K[1:] - Z_2))\n\n snorm = rho * np.sqrt(\n squared_norm(Z_0 - Z_0_old) + squared_norm(Z_1 - Z_1_old) +\n squared_norm(Z_2 - Z_2_old))\n\n obj = objective(\n n_samples, emp_cov, Z_0, K, Z_1, Z_2, alpha, beta, psi) \\\n if compute_objective else np.nan\n\n # if np.isinf(obj):\n # Z_0 = Z_0_old\n # break\n\n check = convergence(\n obj=obj,\n rnorm=rnorm,\n snorm=snorm,\n e_pri=np.sqrt(K.size + 2 * Z_1.size) * tol + rtol * max(\n np.sqrt(\n squared_norm(Z_0) + squared_norm(Z_1) + squared_norm(Z_2)),\n np.sqrt(\n squared_norm(K) + squared_norm(K[:-1]) +\n squared_norm(K[1:]))),\n e_dual=np.sqrt(K.size + 2 * Z_1.size) * tol + rtol * rho *\n np.sqrt(squared_norm(U_0) + squared_norm(U_1) + squared_norm(U_2)),\n # precision=Z_0.copy()\n )\n Z_0_old = Z_0.copy()\n Z_1_old = Z_1.copy()\n Z_2_old = Z_2.copy()\n\n if verbose:\n print(\n \"obj: %.4f, rnorm: %.4f, snorm: %.4f,\"\n \"eps_pri: %.4f, eps_dual: %.4f\" % check[:5])\n\n checks.append(check)\n if stop_at is not None:\n if abs(check.obj - stop_at) / abs(stop_at) < stop_when:\n break\n\n if check.rnorm <= check.e_pri and check.snorm <= check.e_dual:\n break\n\n rho_new = update_rho(\n rho, rnorm, snorm, iteration=iteration_,\n **(update_rho_options or {}))\n # scaled dual variables should be also rescaled\n U_0 *= rho / rho_new\n U_1 *= rho / rho_new\n U_2 *= rho / rho_new\n rho = rho_new\n\n # assert is_pos_def(Z_0)\n else:\n warnings.warn(\"Objective did not converge.\")\n\n covariance_ = np.array([linalg.pinvh(x) for x in Z_0])\n return_list = [Z_0, covariance_]\n if return_history:\n return_list.append(checks)\n if return_n_iter:\n return_list.append(iteration_ + 1)\n return return_list\n\n\nclass TimeGraphicalLasso(GraphicalLasso):\n \"\"\"Sparse inverse covariance estimation with an l1-penalized estimator.\n\n Parameters\n ----------\n alpha : positive float, default 0.01\n Regularization parameter for precision matrix. 
The higher alpha,\n the more regularization, the sparser the inverse covariance.\n\n beta : positive float, default 1\n Regularization parameter to constrain precision matrices in time.\n The higher beta, the more regularization,\n and consecutive precision matrices in time are more similar.\n\n psi : {'laplacian', 'l1', 'l2', 'linf', 'node'}, default 'laplacian'\n Type of norm to enforce for consecutive precision matrices in time.\n\n rho : positive float, default 1\n Augmented Lagrangian parameter.\n\n over_relax : positive float, deafult 1\n Over-relaxation parameter (typically between 1.0 and 1.8).\n\n tol : positive float, default 1e-4\n Absolute tolerance to declare convergence.\n\n rtol : positive float, default 1e-4\n Relative tolerance to declare convergence.\n\n max_iter : integer, default 100\n The maximum number of iterations.\n\n verbose : boolean, default False\n If verbose is True, the objective function, rnorm and snorm are\n printed at each iteration.\n\n assume_centered : boolean, default False\n If True, data are not centered before computation.\n Useful when working with data whose mean is almost, but not exactly\n zero.\n If False, data are centered before computation.\n\n time_on_axis : {'first', 'last'}, default 'first'\n If data have time as the last dimension, set this to 'last'.\n Useful to use scikit-learn functions as train_test_split.\n\n update_rho_options : dict, default None\n Options for the update of rho. See `update_rho` function for details.\n\n compute_objective : boolean, default True\n Choose if compute the objective function during iterations\n (only useful if `verbose=True`).\n\n init : {'empirical', 'zeros', ndarray}, default 'empirical'\n How to initialise the inverse covariance matrix. Default is take\n the empirical covariance and inverting it.\n\n Attributes\n ----------\n covariance_ : array-like, shape (n_times, n_features, n_features)\n Estimated covariance matrix\n\n precision_ : array-like, shape (n_times, n_features, n_features)\n Estimated precision matrix.\n\n n_iter_ : int\n Number of iterations run.\n\n \"\"\"\n\n def __init__(\n self, alpha=0.01, beta=1., mode='admm', rho=1., tol=1e-4,\n rtol=1e-4, psi='laplacian', max_iter=100, verbose=False,\n assume_centered=False, return_history=False,\n update_rho_options=None, compute_objective=True, stop_at=None,\n stop_when=1e-4, suppress_warn_list=False, init='empirical'):\n super(TimeGraphicalLasso, self).__init__(\n alpha=alpha, rho=rho, tol=tol, rtol=rtol, max_iter=max_iter,\n verbose=verbose, assume_centered=assume_centered, mode=mode,\n update_rho_options=update_rho_options,\n compute_objective=compute_objective, init=init)\n self.beta = beta\n self.psi = psi\n self.return_history = return_history\n self.stop_at = stop_at\n self.stop_when = stop_when\n self.suppress_warn_list = suppress_warn_list\n\n def get_observed_precision(self):\n \"\"\"Getter for the observed precision matrix.\n\n Returns\n -------\n precision_ : array-like,\n The precision matrix associated to the current covariance object.\n\n \"\"\"\n return self.get_precision()\n\n def _fit(self, emp_cov, n_samples):\n \"\"\"Fit the TimeGraphicalLasso model to X.\n\n Parameters\n ----------\n emp_cov : ndarray, shape (n_time, n_features, n_features)\n Empirical covariance of data.\n\n \"\"\"\n\n out = time_graphical_lasso(\n emp_cov, alpha=self.alpha, rho=self.rho, beta=self.beta,\n mode=self.mode, n_samples=n_samples, tol=self.tol, rtol=self.rtol,\n psi=self.psi, max_iter=self.max_iter, verbose=self.verbose,\n 
return_n_iter=True, return_history=self.return_history,\n update_rho_options=self.update_rho_options,\n compute_objective=self.compute_objective, stop_at=self.stop_at,\n stop_when=self.stop_when, init=self.init)\n if self.return_history:\n self.precision_, self.covariance_, self.history_, self.n_iter_ = \\\n out\n else:\n self.precision_, self.covariance_, self.n_iter_ = out\n return self\n\n def fit(self, X, y):\n \"\"\"Fit the TimeGraphicalLasso model to X.\n\n Parameters\n ----------\n X : ndarray, shape = (n_samples * n_times, n_dimensions)\n Data matrix.\n y : ndarray, shape = (n_times,)\n Indicate the temporal belonging of each sample.\n\n \"\"\"\n # Covariance does not make sense for a single feature\n X, y = check_X_y(\n X, y, accept_sparse=False, dtype=np.float64, order=\"C\",\n ensure_min_features=2, estimator=self)\n\n n_dimensions = X.shape[1]\n self.classes_, n_samples = np.unique(y, return_counts=True)\n n_times = self.classes_.size\n\n # n_samples = np.array([x.shape[0] for x in X])\n if self.assume_centered:\n self.location_ = np.zeros((n_times, n_dimensions))\n else:\n self.location_ = np.array(\n [X[y == cl].mean(0) for cl in self.classes_])\n\n emp_cov = np.array(\n [\n empirical_covariance(\n X[y == cl], assume_centered=self.assume_centered)\n for cl in self.classes_\n ])\n\n return self._fit(emp_cov, n_samples)\n\n def score(self, X, y):\n \"\"\"Computes the log-likelihood of a Gaussian data set with\n `self.covariance_` as an estimator of its covariance matrix.\n\n Parameters\n ----------\n X : array-like, shape = (n_samples, n_features)\n Test data of which we compute the likelihood, where n_samples is\n the number of samples and n_features is the number of features.\n X is assumed to be drawn from the same distribution than\n the data used in fit (including centering).\n\n y : array-like, shape = (n_samples,)\n Class of samples.\n\n Returns\n -------\n res : float\n The likelihood of the data set with `self.covariance_` as an\n estimator of its covariance matrix.\n\n \"\"\"\n # Covariance does not make sense for a single feature\n X, y = check_X_y(\n X, y, accept_sparse=False, dtype=np.float64, order=\"C\",\n ensure_min_features=2, estimator=self)\n\n # compute empirical covariance of the test set\n test_cov = np.array(\n [\n empirical_covariance(\n X[y == cl] - self.location_[i], assume_centered=True)\n for i, cl in enumerate(self.classes_)\n ])\n\n res = sum(\n X[y == cl].shape[0] * log_likelihood(S, K) for S, K, cl in zip(\n test_cov, self.get_observed_precision(), self.classes_))\n\n return res\n\n def error_norm(\n self, comp_cov, norm='frobenius', scaling=True, squared=True):\n \"\"\"Compute the Mean Squared Error between two covariance estimators.\n (In the sense of the Frobenius norm).\n\n Parameters\n ----------\n comp_cov : array-like, shape = [n_features, n_features]\n The covariance to compare with.\n\n norm : str\n The type of norm used to compute the error. 
Available error types:\n - 'frobenius' (default): sqrt(tr(A^t.A))\n - 'spectral': sqrt(max(eigenvalues(A^t.A))\n where A is the error ``(comp_cov - self.covariance_)``.\n\n scaling : bool\n If True (default), the squared error norm is divided by n_features.\n If False, the squared error norm is not rescaled.\n\n squared : bool\n Whether to compute the squared error norm or the error norm.\n If True (default), the squared error norm is returned.\n If False, the error norm is returned.\n\n Returns\n -------\n The Mean Squared Error (in the sense of the Frobenius norm) between\n `self` and `comp_cov` covariance estimators.\n\n \"\"\"\n return error_norm_time(\n self.covariance_, comp_cov, norm=norm, scaling=scaling,\n squared=squared)\n",
"\"\"\"Time graph lasso via forward_backward (for now only in case of l1 norm).\"\"\"\nfrom __future__ import division, print_function\n\nimport warnings\nfrom functools import partial\n\nimport numpy as np\nfrom scipy import linalg\nfrom six.moves import map, range, zip\nfrom sklearn.covariance import empirical_covariance\nfrom sklearn.utils.extmath import squared_norm\n\nfrom regain.covariance.graph_lasso_ import logl\nfrom regain.covariance.time_graph_lasso_ import TimeGraphLasso\nfrom regain.norm import l1_od_norm, vector_p_norm\nfrom regain.prox import prox_FL, soft_thresholding, soft_thresholding_od\nfrom regain.utils import convergence, positive_definite\n\n\ndef loss(S, K, beta=0, n_samples=None, vareps=0):\n \"\"\"Loss function for time-varying graphical lasso.\"\"\"\n if n_samples is None:\n n_samples = np.ones(S.shape[0])\n loss_ = sum(-ni * logl(emp_cov, precision)\n for emp_cov, precision, ni in zip(S, K, n_samples))\n # loss_ += vareps / 2. * squared_norm(K)\n loss_ += vareps / 2. * _scalar_product(K, K)\n\n loss_ += beta * squared_norm(K[1:] - K[:-1])\n\n return loss_\n\n\ndef grad_loss(x, emp_cov, beta=0, n_samples=None, x_inv=None, vareps=0):\n \"\"\"Gradient of the loss function for the time-varying graphical lasso.\"\"\"\n if x_inv is None:\n x_inv = np.array([linalg.pinvh(_) for _ in x])\n grad = emp_cov - x_inv\n grad *= n_samples[:, None, None]\n\n # add coercitive term\n grad += vareps * x\n\n aux = np.empty_like(x)\n aux[0] = x[0] - x[1]\n aux[-1] = x[-1] - x[-2]\n for t in range(1, x.shape[0] - 1):\n aux[t] = 2 * x[t] - x[t-1] - x[t+1]\n aux *= 2 * beta\n grad += aux\n\n return grad\n\n\ndef penalty(precision, alpha):\n \"\"\"Penalty for time-varying graphical lasso.\"\"\"\n if isinstance(alpha, np.ndarray):\n obj = sum(a[0][0] * m for a, m in zip(alpha, map(l1_od_norm, precision)))\n else:\n obj = alpha * sum(map(l1_od_norm, precision))\n # obj += beta * psi(precision[1:] - precision[:-1])\n return obj\n\ndef prox_penalty(precision, alpha):\n # return soft_thresholding(precision, alpha)\n return np.array([soft_thresholding_od(p, alpha) for p in precision])\n\n\ndef objective(n_samples, emp_cov, precision, alpha, beta, vareps=0):\n \"\"\"Objective function for time-varying graphical lasso.\"\"\"\n obj = loss(emp_cov, precision, beta=beta, n_samples=n_samples, vareps=vareps)\n obj += penalty(precision, alpha)\n return obj\n\n\ndef _scalar_product(x, y):\n return (x * y).sum()\n\n\ndef choose_gamma(gamma, x, emp_cov, n_samples, beta, alpha, lamda, grad,\n delta=1e-4, eps=0.5, max_iter=1000, p=1, x_inv=None,\n vareps=1e-5, choose='gamma'):\n \"\"\"Choose gamma for backtracking.\n\n References\n ----------\n Salzo S. (2017). 
https://doi.org/10.1137/16M1073741\n\n \"\"\"\n # if grad is None:\n # grad = grad_loss(x, emp_cov, n_samples, x_inv=x_inv)\n\n partial_f = partial(loss, beta=beta,\n n_samples=n_samples, S=emp_cov, vareps=vareps)\n fx = partial_f(K=x)\n for i in range(max_iter):\n prox = prox_penalty(x - gamma * grad, alpha * gamma)\n if positive_definite(prox) and choose != \"gamma\":\n break\n\n if choose == \"gamma\":\n y_minus_x = prox - x\n loss_diff = partial_f(K=x + lamda * y_minus_x) - fx\n\n tolerance = _scalar_product(y_minus_x, grad)\n tolerance += delta / gamma * _scalar_product(y_minus_x, y_minus_x)\n if loss_diff <= lamda * tolerance:\n break\n gamma *= eps\n\n return gamma, prox\n\n\ndef choose_lamda(lamda, x, emp_cov, n_samples, beta, alpha, gamma, delta=1e-4,\n eps=0.5, max_iter=1000, criterion='b', p=1, x_inv=None,\n grad=None, prox=None, min_eigen_x=None,\n vareps=1e-5):\n \"\"\"Choose lambda for backtracking.\n\n References\n ----------\n Salzo S. (2017). https://doi.org/10.1137/16M1073741\n\n \"\"\"\n # if x_inv is None:\n # x_inv = np.array([linalg.pinvh(_) for _ in x])\n # if grad is None:\n # grad = grad_loss(x, emp_cov, n_samples, x_inv=x_inv)\n # if prox is None:\n # prox = prox_FL(x - gamma * grad, beta * gamma, alpha * gamma, p=p, symmetric=True)\n\n partial_f = partial(loss, beta=beta, n_samples=n_samples, S=emp_cov,\n vareps=vareps)\n fx = partial_f(K=x)\n\n # min_eigen_y = np.min([np.linalg.eigh(z)[0] for z in prox])\n\n y_minus_x = prox - x\n if criterion == 'b':\n tolerance = _scalar_product(y_minus_x, grad)\n tolerance += delta / gamma * _scalar_product(y_minus_x, y_minus_x)\n elif criterion == 'c':\n psi = partial(vector_p_norm, p=p)\n gx = penalty(x, alpha, beta, psi)\n gy = penalty(prox, alpha, beta, psi)\n objective_x = objective(\n n_samples, emp_cov, x, alpha, beta, psi, vareps=vareps)\n tolerance = (1 - delta) * (gy - gx + _scalar_product(y_minus_x, grad))\n\n for i in range(max_iter):\n # line-search\n x1 = x + lamda * y_minus_x\n\n if criterion == 'a':\n iter_diff = x1 - x\n gradx1 = grad_loss(x1, emp_cov, n_samples)\n grad_diff = gradx1 - grad\n norm_grad_diff = np.sqrt(_scalar_product(grad_diff, grad_diff))\n norm_iter_diff = np.sqrt(_scalar_product(iter_diff, iter_diff))\n tolerance = delta * norm_iter_diff / (gamma * lamda)\n if norm_grad_diff <= tolerance:\n break\n elif criterion == 'b':\n loss_diff = partial_f(K=x1) - fx\n if loss_diff <= lamda * tolerance and positive_definite(x1):\n break\n elif criterion == 'c':\n obj_diff = objective(\n n_samples, emp_cov, x1, alpha, beta, psi, vareps=vareps) \\\n - objective_x\n # if positive_definite(x1) and obj_diff <= lamda * tolerance:\n cond = True # lamda > 0 if min_eigen_y >= 0 else lamda < min_eigen_x / (min_eigen_x - min_eigen_y)\n if cond and obj_diff <= lamda * tolerance:\n break\n else:\n raise ValueError(criterion)\n lamda *= eps\n return lamda, i + 1\n\n\ndef fista_step(Y, Y_diff, t):\n t_next = (1. 
+ np.sqrt(1.0 + 4.0 * t*t)) / 2.\n return Y + ((t - 1.0)/t_next) * Y_diff, t_next\n\n\ndef upper_diag_3d(x):\n \"\"\"Return the flattened upper diagonal of a 3d matrix.\"\"\"\n # n_times, n_dim, _ = x.shape\n # upper_idx = np.triu_indices(n_dim, 1)\n # return np.array([xx[upper_idx] for xx in x])\n return np.triu(x, 1)\n\n\ndef time_graph_lasso(\n emp_cov, n_samples, alpha=0.01, beta=1., max_iter=100, verbose=False,\n tol=1e-4, delta=1e-4, gamma=1., lamda=1., eps=0.5, debug=False,\n return_history=False, return_n_iter=True, choose='gamma',\n lamda_criterion='b', time_norm=1, compute_objective=True,\n return_n_linesearch=False, vareps=1e-5, stop_at=None, stop_when=1e-4):\n \"\"\"Time-varying graphical lasso solver.\n\n Solves the following problem via ADMM:\n minimize trace(S*X) - log det X + lambda*||X||_1\n\n where S is the empirical covariance of the data\n matrix D (training observations by features).\n\n Parameters\n ----------\n data_list : list of 2-dimensional matrices.\n Input matrices.\n lamda : float, optional\n Regularisation parameter.\n rho : float, optional\n Augmented Lagrangian parameter.\n alpha : float, optional\n Over-relaxation parameter (typically between 1.0 and 1.8).\n max_iter : int, optional\n Maximum number of iterations.\n tol : float, optional\n Absolute tolerance for convergence.\n rtol : float, optional\n Relative tolerance for convergence.\n return_history : bool, optional\n Return the history of computed values.\n\n Returns\n -------\n X : numpy.array, 2-dimensional\n Solution to the problem.\n history : list\n If return_history, then also a structure that contains the\n objective value, the primal and dual residual norms, and tolerances\n for the primal and dual residual norms at each iteration.\n\n \"\"\"\n available_choose = ('gamma', 'lamda', 'fixed', 'both')\n if choose not in available_choose:\n raise ValueError(\"`choose` parameter must be one of %s.\" %\n available_choose)\n\n n_times, _, n_features = emp_cov.shape\n covariance_ = emp_cov.copy()\n covariance_ *= 0.95\n\n K = np.empty_like(emp_cov)\n for i, (c, e) in enumerate(zip(covariance_, emp_cov)):\n c.flat[::n_features + 1] = e.flat[::n_features + 1]\n K[i] = linalg.pinvh(c)\n\n # K = np.array([np.eye(s.shape[0]) for s in emp_cov])\n # Y = K.copy()\n # assert positive_definite(K)\n\n obj_partial = partial(\n objective, n_samples=n_samples, emp_cov=emp_cov,\n alpha=alpha, beta=beta, vareps=vareps)\n\n max_residual = -np.inf\n n_linesearch = 0\n checks = [convergence(obj=obj_partial(precision=K))]\n for iteration_ in range(max_iter):\n # if not positive_definite(K):\n # print(\"precision is not positive definite.\")\n # break\n\n k_previous = K.copy()\n x_inv = np.array([linalg.pinvh(x) for x in K])\n # x_inv = []\n # eigens = []\n # for x in K:\n # es, Q = np.linalg.eigh(x)\n # Inv = np.linalg.multi_dot((Q, np.diag(1. 
/ es), Q.T))\n # x_inv.append(Inv)\n # eigens.append(es)\n # x_inv = np.array(x_inv)\n # eigens = np.array(eigens)\n\n grad = grad_loss(K, emp_cov, beta=beta, n_samples=n_samples, x_inv=x_inv, vareps=vareps)\n if choose in ['gamma', 'both']:\n gamma, y = choose_gamma(\n gamma / eps if iteration_ > 0 else gamma, K, emp_cov,\n n_samples=n_samples,\n beta=beta, alpha=alpha, lamda=lamda, grad=grad,\n delta=delta, eps=eps, max_iter=200, p=time_norm, x_inv=x_inv,\n vareps=vareps, choose=choose)\n # gamma = min(gamma, 0.249)\n # print(gamma)\n\n x_hat = K - gamma * grad\n if choose not in ['gamma', 'both']:\n y = prox_penalty(x_hat, alpha * gamma)\n\n if choose in ['lamda', 'both']:\n lamda, n_ls = choose_lamda(\n min(lamda / eps if iteration_ > 0 else lamda, 1),\n K, emp_cov, n_samples=n_samples,\n beta=beta, alpha=alpha, gamma=gamma, delta=delta, eps=eps,\n criterion=lamda_criterion, max_iter=200, p=time_norm,\n x_inv=x_inv, grad=grad, prox=y,\n # min_eigen_x=np.min(eigens),\n vareps=vareps)\n n_linesearch += n_ls\n # print (\"lambda: \", lamda, n_ls)\n\n K = K + min(max(lamda, 0), 1) * (y - K)\n # K, t = fista_step(Y, Y - Y_old, t)\n\n check = convergence(\n obj=obj_partial(precision=K),\n rnorm=np.linalg.norm(upper_diag_3d(K) - upper_diag_3d(k_previous)),\n snorm=np.linalg.norm(\n obj_partial(precision=K) - obj_partial(precision=k_previous)),\n e_pri=np.sqrt(upper_diag_3d(K).size) * tol + tol * max(\n np.linalg.norm(upper_diag_3d(K)),\n np.linalg.norm(upper_diag_3d(k_previous))),\n e_dual=tol,\n # precision=K.copy()\n )\n\n if verbose and iteration_ % (50 if verbose < 2 else 1) == 0:\n print(\"obj: %.4f, rnorm: %.7f, snorm: %.4f,\"\n \"eps_pri: %.4f, eps_dual: %.4f\" % check[:5])\n\n if return_history:\n checks.append(check)\n\n if np.isnan(check.rnorm) or np.isnan(check.snorm):\n warnings.warn(\"precision is not positive definite.\")\n\n if stop_at is not None:\n if abs(check.obj - stop_at) / abs(stop_at) < stop_when:\n break\n\n # use this convergence criterion\n subgrad = (x_hat - K) / gamma\n if 0:\n grad = grad_loss(K, emp_cov, n_samples, vareps=vareps)\n res_norm = np.linalg.norm(grad + subgrad)\n\n if iteration_ == 0:\n normalizer = res_norm + 1e-6\n max_residual = max(np.linalg.norm(grad),\n np.linalg.norm(subgrad)) + 1e-6\n else:\n res_norm = np.linalg.norm(K - k_previous) / gamma\n max_residual = max(max_residual, res_norm)\n normalizer = max(np.linalg.norm(grad),\n np.linalg.norm(subgrad)) + 1e-6\n\n r_rel = res_norm / max_residual\n r_norm = res_norm / normalizer\n\n if not debug and (r_rel <= tol or r_norm <= tol) and iteration_ > 0: # or (\n # check.rnorm <= check.e_pri and iteration_ > 0):\n break\n # pass\n # if check.rnorm <= check.e_pri and iteration_ > 0:\n # # and check.snorm <= check.e_dual:\n # break\n else:\n warnings.warn(\"Objective did not converge.\")\n\n # for i in range(K.shape[0]):\n # covariance_[i] = linalg.pinvh(K[i])\n\n return_list = [K, covariance_]\n if return_history:\n return_list.append(checks)\n if return_n_iter:\n return_list.append(iteration_ + 1)\n if return_n_linesearch:\n return_list.append(n_linesearch)\n return return_list\n\n\nclass TimeGraphLassoForwardBackward(TimeGraphLasso):\n \"\"\"Sparse inverse covariance estimation with an l1-penalized estimator.\n\n Parameters\n ----------\n alpha : positive float, default 0.01\n Regularization parameter for precision matrix. 
The higher alpha,\n the more regularization, the sparser the inverse covariance.\n\n beta : positive float, default 1\n Regularization parameter to constrain precision matrices in time.\n The higher beta, the more regularization,\n and consecutive precision matrices in time are more similar.\n\n psi : {'laplacian', 'l1', 'l2', 'linf', 'node'}, default 'laplacian'\n Type of norm to enforce for consecutive precision matrices in time.\n\n rho : positive float, default 1\n Augmented Lagrangian parameter.\n\n over_relax : positive float, deafult 1\n Over-relaxation parameter (typically between 1.0 and 1.8).\n\n tol : positive float, default 1e-4\n Absolute tolerance to declare convergence.\n\n rtol : positive float, default 1e-4\n Relative tolerance to declare convergence.\n\n max_iter : integer, default 100\n The maximum number of iterations.\n\n verbose : boolean, default False\n If verbose is True, the objective function, rnorm and snorm are\n printed at each iteration.\n\n assume_centered : boolean, default False\n If True, data are not centered before computation.\n Useful when working with data whose mean is almost, but not exactly\n zero.\n If False, data are centered before computation.\n\n time_on_axis : {'first', 'last'}, default 'first'\n If data have time as the last dimension, set this to 'last'.\n Useful to use scikit-learn functions as train_test_split.\n\n update_rho_options : dict, default None\n Options for the update of rho. See `update_rho` function for details.\n\n compute_objective : boolean, default True\n Choose if compute the objective function during iterations\n (only useful if `verbose=True`).\n\n mode : {'admm'}, default 'admm'\n Minimisation algorithm. At the moment, only 'admm' is available,\n so this is ignored.\n\n Attributes\n ----------\n covariance_ : array-like, shape (n_times, n_features, n_features)\n Estimated covariance matrix\n\n precision_ : array-like, shape (n_times, n_features, n_features)\n Estimated pseudo inverse matrix.\n\n n_iter_ : int\n Number of iterations run.\n\n \"\"\"\n\n def __init__(self, alpha=0.01, beta=1., time_on_axis='first', tol=1e-4,\n max_iter=100, verbose=False, assume_centered=False,\n compute_objective=True, eps=0.5, choose='gamma', lamda=1,\n delta=1e-4, gamma=1., lamda_criterion='b', time_norm=1,\n return_history=False, debug=False,\n return_n_linesearch=False,\n vareps=1e-5, stop_at=None, stop_when=1e-4):\n super(TimeGraphLassoForwardBackward, self).__init__(\n alpha=alpha, tol=tol, max_iter=max_iter,\n verbose=verbose, assume_centered=assume_centered,\n compute_objective=compute_objective, beta=beta,\n time_on_axis=time_on_axis)\n self.delta = delta\n self.gamma = gamma\n self.lamda_criterion = lamda_criterion\n self.time_norm = time_norm\n self.eps = eps\n self.choose = choose\n self.lamda = lamda\n self.return_history = return_history\n self.debug = debug\n self.return_n_linesearch = return_n_linesearch\n self.vareps = vareps\n self.stop_at = stop_at\n self.stop_when = stop_when\n\n def _fit(self, emp_cov, n_samples):\n \"\"\"Fit the TimeGraphLasso model to X.\n\n Parameters\n ----------\n emp_cov : ndarray, shape (n_time, n_features, n_features)\n Empirical covariance of data.\n\n \"\"\"\n if self.alpha == 'max':\n # use sklearn alpha max\n self.alpha = self.alpha_max(emp_cov, is_covariance=True)\n\n out = time_graph_lasso(\n emp_cov, n_samples=n_samples, alpha=self.alpha, beta=self.beta,\n tol=self.tol, max_iter=self.max_iter, verbose=self.verbose,\n return_n_iter=True, return_history=self.return_history,\n 
compute_objective=self.compute_objective,\n            time_norm=self.time_norm, lamda_criterion=self.lamda_criterion,\n            gamma=self.gamma, delta=self.delta, eps=self.eps,\n            choose=self.choose, lamda=self.lamda, debug=self.debug,\n            return_n_linesearch=self.return_n_linesearch,\n            vareps=self.vareps,\n            stop_at=self.stop_at, stop_when=self.stop_when)\n\n        if self.return_history:\n            if self.return_n_linesearch:\n                self.precision_, self.covariance_, self.history_, self.n_iter_, self.n_linesearch_ = out\n            else:\n                self.precision_, self.covariance_, self.history_, self.n_iter_ = out\n        else:\n            if self.return_n_linesearch:\n                self.precision_, self.covariance_, self.n_iter_, self.n_linesearch_ = out\n            else:\n                self.precision_, self.covariance_, self.n_iter_ = out\n        return self\n\n    def alpha_max(self, X, is_covariance=False):\n        \"\"\"Compute the alpha_max for the problem at hand, based on sklearn.\"\"\"\n        from sklearn.covariance.graph_lasso_ import alpha_max\n        if is_covariance:\n            emp_cov = X\n        else:\n            emp_cov = np.array([empirical_covariance(\n                x, assume_centered=self.assume_centered) for x in X])\n        return max(alpha_max(e) for e in emp_cov)\n\n\ndef get_lipschitz(data):\n    \"\"\"Get the Lipschitz constant for a specific loss function.\n\n    Only square loss implemented.\n\n    Parameters\n    ----------\n    data : (n, d) float ndarray\n        data matrix\n\n    Returns\n    -------\n    L : float\n        the Lipschitz constant\n    \"\"\"\n    n, p = data.shape\n\n    if p > n:\n        tmp = np.dot(data, data.T)\n    else:\n        tmp = np.dot(data.T, data)\n    return np.linalg.norm(tmp, 2)\n",
"\"\"\"Wrapper for PASPAL (Matlab implementation).\"\"\"\nimport os\n\nimport matlab\nimport matlab.engine\nimport numpy as np\n\nmatlab_engine = None  # matlab.engine.start_matlab()\n\n\ndef check_matlab_engine(verbose=False):\n    \"\"\"Start the Matlab engine if it is not already running.\"\"\"\n    global matlab_engine\n    if matlab_engine is None or not matlab_engine._check_matlab():\n        if verbose:\n            print(\"Starting matlab engine ...\")\n        # close_engine = True\n        matlab_engine = matlab.engine.start_matlab()\n    # else:\n    #     close_engine = False\n\n\ndef lvglasso(emp_cov, alpha, tau, rho=1, verbose=False):\n    global matlab_engine\n    check_matlab_engine(verbose=verbose)\n\n    lvglasso_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n                                 'lvglasso_')\n    matlab_engine.addpath(lvglasso_path, nargout=0)\n\n    if emp_cov.ndim > 2:\n        result = matlab_engine.LVGLASSO(matlab.double(emp_cov.tolist()),\n                                        float(alpha), float(tau), float(rho))\n    else:\n        result = matlab_engine.LVGLASSO_single_time(\n            matlab.double(emp_cov.tolist()), float(alpha), float(tau),\n            float(rho))\n    return result\n\n\ndef total_variation_condat(y, lamda, verbose=False):\n    global matlab_engine\n    check_matlab_engine(verbose=verbose)\n\n    tv_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n                           'tv_condat')\n    matlab_engine.addpath(tv_path, nargout=0)\n\n    if verbose:\n        print(\"Start TV Condat algorithm ...\")\n    x = matlab_engine.TV_Condat_v2(matlab.double(y[:, None].tolist()),\n                                   float(lamda))\n\n    # if close_engine:\n    #     matlab_engine.quit()\n    x = np.asarray(x).ravel()\n    return x\n\n\ndef group_lasso_overlap_paspal(X,\n                               y,\n                               groups=(),\n                               lamda=0.1,\n                               verbose=False,\n                               **kwargs):\n    \"\"\"Group Lasso with Overlap via PASPAL (Matlab implementation).\n\n    Parameters\n    ----------\n    X : ndarray\n        Data.\n    y : ndarray\n        Classes.\n    groups : list-type\n        Groups of variables.\n    lamda : float\n        Regularization parameter.\n    verbose : boolean\n        If True, print debug information.\n\n    Returns\n    -------\n    coef_\n        Coefficient of the Lasso algorithm for each feature.\n\n    \"\"\"\n    global matlab_engine\n    if matlab_engine is None or not matlab_engine._check_matlab():\n        if verbose:\n            print(\"Starting matlab engine ...\")\n        # close_engine = True\n        matlab_engine = matlab.engine.start_matlab()\n    # else:\n    #     close_engine = False\n\n    matlab_engine.addpath(os.path.join(\n        os.path.abspath(os.path.dirname(__file__)),\n        'matlab/GLO_PRIMAL_DUAL_TOOLBOX/'),\n        nargout=0)\n\n    if verbose:\n        print(\"Start GLOPRIDU algorithm ...\")\n    coef_ = matlab_engine.glopridu_algorithm(\n        matlab.double(X.tolist()),\n        matlab.double(y[:, None].tolist()),\n        [matlab.int32((np.array(x) + 1).tolist())\n         for x in groups],  # +1 because of the change of indices\n        float(lamda))\n\n    # if close_engine:\n    #     matlab_engine.quit()\n    coef_ = np.asarray(coef_).ravel()\n    return coef_, None, np.nan\n\n\ndef test():\n    \"\"\"Test function for the module.\"\"\"\n    from sklearn.datasets import make_regression\n    X, y, coef = make_regression(n_features=10, coef=True, n_informative=5)\n\n    group_lasso_overlap_paspal(X, y, np.ones(10), 0.1)\n"
] | [
[
"numpy.sqrt",
"numpy.unique",
"sklearn.covariance.log_likelihood",
"numpy.empty_like",
"numpy.ones",
"numpy.full",
"numpy.concatenate",
"sklearn.covariance.empirical_covariance",
"numpy.zeros_like",
"sklearn.utils.validation.check_X_y",
"sklearn.utils.extmath.squared_norm",
"scipy.linalg.pinvh",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.isnan",
"numpy.empty_like",
"numpy.linalg.norm",
"numpy.ones",
"sklearn.covariance.empirical_covariance",
"sklearn.covariance.graph_lasso_.alpha_max",
"sklearn.utils.extmath.squared_norm",
"scipy.linalg.pinvh",
"numpy.triu"
],
[
"numpy.asarray",
"sklearn.datasets.make_regression",
"numpy.array",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
peterataylor/evalml | [
"917f07845c4a319bb08c7aaa8df9e09623df11c8",
"917f07845c4a319bb08c7aaa8df9e09623df11c8",
"917f07845c4a319bb08c7aaa8df9e09623df11c8"
] | [
"evalml/tests/component_tests/test_prophet_regressor.py",
"evalml/tests/data_checks_tests/test_invalid_targets_data_check.py",
"evalml/tests/component_tests/test_components.py"
] | [
"import numpy as np\nimport pandas as pd\nimport pytest\nfrom pytest import importorskip\n\nfrom evalml.model_family import ModelFamily\nfrom evalml.pipelines.components import ProphetRegressor\nfrom evalml.problem_types import ProblemTypes\n\nprophet = importorskip(\"prophet\", reason=\"Skipping test because prophet not installed\")\n\n\ndef test_model_family():\n    assert ProphetRegressor.model_family == ModelFamily.PROPHET\n\n\ndef test_cmdstanpy_backend():\n    m = prophet.Prophet(stan_backend=\"CMDSTANPY\")\n    assert m.stan_backend.get_type() == \"CMDSTANPY\"\n\n\ndef test_problem_types():\n    assert set(ProphetRegressor.supported_problem_types) == {\n        ProblemTypes.TIME_SERIES_REGRESSION\n    }\n\n\ndef test_init_with_other_params():\n    clf = ProphetRegressor(\n        daily_seasonality=True,\n        mcmc_samples=5,\n        interval_width=0.8,\n        uncertainty_samples=0,\n    )\n    assert clf.parameters == {\n        \"changepoint_prior_scale\": 0.05,\n        \"daily_seasonality\": True,\n        \"date_index\": None,\n        \"holidays_prior_scale\": 10,\n        \"interval_width\": 0.8,\n        \"mcmc_samples\": 5,\n        \"seasonality_mode\": \"additive\",\n        \"seasonality_prior_scale\": 10,\n        \"uncertainty_samples\": 0,\n        \"stan_backend\": \"CMDSTANPY\",\n    }\n\n\ndef test_feature_importance(ts_data):\n    X, y = ts_data\n    clf = ProphetRegressor(uncertainty_samples=False, changepoint_prior_scale=2.0)\n    clf.fit(X, y)\n    assert np.array_equal(clf.feature_importance, np.zeros(1))\n\n\ndef test_get_params(ts_data):\n    clf = ProphetRegressor()\n    assert clf.get_params() == {\n        \"changepoint_prior_scale\": 0.05,\n        \"date_index\": None,\n        \"seasonality_prior_scale\": 10,\n        \"holidays_prior_scale\": 10,\n        \"seasonality_mode\": \"additive\",\n        \"stan_backend\": \"CMDSTANPY\",\n    }\n\n\ndef test_fit_predict_ts_with_X_index(ts_data):\n    X, y = ts_data\n    assert isinstance(X.index, pd.DatetimeIndex)\n\n    p_clf = prophet.Prophet(uncertainty_samples=False, changepoint_prior_scale=2.0)\n    prophet_df = ProphetRegressor.build_prophet_df(X=X, y=y, date_column=\"ds\")\n\n    p_clf.fit(prophet_df)\n    y_pred_p = p_clf.predict(prophet_df)[\"yhat\"]\n\n    clf = ProphetRegressor(uncertainty_samples=False, changepoint_prior_scale=2.0)\n    clf.fit(X, y)\n    y_pred = clf.predict(X)\n    assert np.array_equal(y_pred_p.values, y_pred.values)\n\n\ndef test_fit_predict_ts_with_y_index(ts_data):\n    X, y = ts_data\n    X = X.reset_index(drop=True)\n    assert isinstance(y.index, pd.DatetimeIndex)\n\n    p_clf = prophet.Prophet(uncertainty_samples=False, changepoint_prior_scale=2.0)\n    prophet_df = ProphetRegressor.build_prophet_df(X=X, y=y, date_column=\"ds\")\n\n    p_clf.fit(prophet_df)\n    y_pred_p = p_clf.predict(prophet_df)[\"yhat\"]\n\n    clf = ProphetRegressor(uncertainty_samples=False, changepoint_prior_scale=2.0)\n    clf.fit(X, y)\n    y_pred = clf.predict(X, y)\n\n    assert np.array_equal(y_pred_p.values, y_pred.values)\n\n\ndef test_fit_predict_ts_no_X(ts_data):\n    y = pd.Series(\n        range(1, 32), name=\"dates\", index=pd.date_range(\"2020-10-01\", \"2020-10-31\")\n    )\n\n    p_clf = prophet.Prophet(uncertainty_samples=False, changepoint_prior_scale=2.0)\n    prophet_df = ProphetRegressor.build_prophet_df(\n        X=pd.DataFrame(), y=y, date_column=\"ds\"\n    )\n    p_clf.fit(prophet_df)\n    y_pred_p = p_clf.predict(prophet_df)[\"yhat\"]\n\n    clf = ProphetRegressor(uncertainty_samples=False, changepoint_prior_scale=2.0)\n    clf.fit(X=None, y=y)\n    y_pred = clf.predict(X=None, y=y)\n\n    assert np.array_equal(y_pred_p.values, y_pred.values)\n\n\ndef test_fit_predict_date_col(ts_data):\n    X = pd.DataFrame(\n        {\n            \"features\": range(100),\n            \"these_dates\": pd.date_range(\"1/1/21\", periods=100),\n            \"more_dates\": pd.date_range(\"7/4/1987\", periods=100),\n        }\n    )\n    y = pd.Series(np.random.randint(1, 5, 100), name=\"y\")\n\n    clf = ProphetRegressor(\n        date_index=\"these_dates\", uncertainty_samples=False, changepoint_prior_scale=2.0\n    )\n    clf.fit(X, y)\n    y_pred = clf.predict(X)\n\n    p_clf = prophet.Prophet(uncertainty_samples=False, changepoint_prior_scale=2.0)\n    prophet_df = ProphetRegressor.build_prophet_df(X=X, y=y, date_column=\"these_dates\")\n\n    p_clf.fit(prophet_df)\n    y_pred_p = p_clf.predict(prophet_df)[\"yhat\"]\n\n    assert np.array_equal(y_pred_p.values, y_pred.values)\n\n\ndef test_fit_predict_no_date_col_or_index(ts_data):\n    X, y = ts_data\n    X = X.reset_index(drop=True)\n    y = y.reset_index(drop=True)\n    assert not isinstance(X.index, pd.DatetimeIndex)\n    assert not isinstance(y.index, pd.DatetimeIndex)\n\n    clf = ProphetRegressor()\n    with pytest.raises(\n        ValueError,\n        match=\"Prophet estimator requires input data X to have a datetime column\",\n    ):\n        clf.fit(X, y)\n",
"import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom evalml.automl import get_default_primary_search_objective\nfrom evalml.data_checks import (\n DataCheckAction,\n DataCheckActionCode,\n DataCheckError,\n DataCheckMessageCode,\n DataChecks,\n DataCheckWarning,\n InvalidTargetDataCheck,\n)\nfrom evalml.exceptions import DataCheckInitError\nfrom evalml.objectives import (\n MAPE,\n MeanSquaredLogError,\n RootMeanSquaredLogError,\n)\nfrom evalml.problem_types import (\n ProblemTypes,\n is_binary,\n is_multiclass,\n is_regression,\n)\nfrom evalml.utils.woodwork_utils import numeric_and_boolean_ww\n\ninvalid_targets_data_check_name = InvalidTargetDataCheck.name\n\n\ndef test_invalid_target_data_check_invalid_n_unique():\n with pytest.raises(\n ValueError, match=\"`n_unique` must be a non-negative integer value.\"\n ):\n InvalidTargetDataCheck(\n \"regression\",\n get_default_primary_search_objective(\"regression\"),\n n_unique=-1,\n )\n\n\ndef test_invalid_target_data_check_nan_error():\n X = pd.DataFrame({\"col\": [1, 2, 3]})\n invalid_targets_check = InvalidTargetDataCheck(\n \"regression\", get_default_primary_search_objective(\"regression\")\n )\n\n assert invalid_targets_check.validate(X, y=pd.Series([1, 2, 3])) == {\n \"warnings\": [],\n \"errors\": [],\n \"actions\": [],\n }\n assert invalid_targets_check.validate(X, y=pd.Series([np.nan, np.nan, np.nan])) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=\"Target is either empty or fully null.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_IS_EMPTY_OR_FULLY_NULL,\n details={},\n ).to_dict(),\n ],\n \"actions\": [],\n }\n\n\ndef test_invalid_target_data_check_numeric_binary_classification_valid_float():\n y = pd.Series([0.0, 1.0, 0.0, 1.0])\n X = pd.DataFrame({\"col\": range(len(y))})\n invalid_targets_check = InvalidTargetDataCheck(\n \"binary\", get_default_primary_search_objective(\"binary\")\n )\n assert invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [],\n \"actions\": [],\n }\n\n\ndef test_invalid_target_data_check_multiclass_two_examples_per_class():\n y = pd.Series([0] + [1] * 19 + [2] * 80)\n X = pd.DataFrame({\"col\": range(len(y))})\n invalid_targets_check = InvalidTargetDataCheck(\n \"multiclass\", get_default_primary_search_objective(\"binary\")\n )\n expected_message = \"Target does not have at least two instances per class which is required for multiclass classification\"\n\n # with 1 class not having min 2 instances\n assert invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=expected_message,\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_MULTICLASS_NOT_TWO_EXAMPLES_PER_CLASS,\n details={\"least_populated_class_labels\": [0]},\n ).to_dict()\n ],\n \"actions\": [],\n }\n\n y = pd.Series([0] + [1] + [2] * 98)\n X = pd.DataFrame({\"col\": range(len(y))})\n # with 2 classes not having min 2 instances\n assert invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=expected_message,\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_MULTICLASS_NOT_TWO_EXAMPLES_PER_CLASS,\n details={\"least_populated_class_labels\": [0, 1]},\n ).to_dict()\n ],\n \"actions\": [],\n }\n\n\[email protected](\n \"pd_type\", [\"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\", \"bool\"]\n)\ndef 
test_invalid_target_data_check_invalid_pandas_data_types_error(pd_type):\n y = pd.Series([0, 1, 0, 0, 1, 0, 1, 0])\n y = y.astype(pd_type)\n X = pd.DataFrame({\"col\": range(len(y))})\n\n invalid_targets_check = InvalidTargetDataCheck(\n \"binary\", get_default_primary_search_objective(\"binary\")\n )\n\n assert invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [],\n \"actions\": [],\n }\n\n y = pd.Series(pd.date_range(\"2000-02-03\", periods=5, freq=\"W\"))\n X = pd.DataFrame({\"col\": range(len(y))})\n\n unique_values = y.value_counts().index.tolist()\n assert invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=\"Target is unsupported {} type. Valid Woodwork logical types include: {}\".format(\n \"Datetime\",\n \", \".join([ltype for ltype in numeric_and_boolean_ww]),\n ),\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_UNSUPPORTED_TYPE,\n details={\"unsupported_type\": \"datetime\"},\n ).to_dict(),\n DataCheckError(\n message=\"Binary class targets require exactly two unique values.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,\n details={\"target_values\": unique_values},\n ).to_dict(),\n ],\n \"actions\": [],\n }\n\n\ndef test_invalid_target_y_none():\n invalid_targets_check = InvalidTargetDataCheck(\n \"binary\", get_default_primary_search_objective(\"binary\")\n )\n assert invalid_targets_check.validate(pd.DataFrame(), y=None) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=\"Target is None\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_IS_NONE,\n details={},\n ).to_dict()\n ],\n \"actions\": [],\n }\n\n\ndef test_invalid_target_data_input_formats():\n invalid_targets_check = InvalidTargetDataCheck(\n \"binary\", get_default_primary_search_objective(\"binary\")\n )\n\n # test empty pd.Series\n X = pd.DataFrame()\n messages = invalid_targets_check.validate(X, pd.Series())\n assert messages == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=\"Target is either empty or fully null.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_IS_EMPTY_OR_FULLY_NULL,\n details={},\n ).to_dict()\n ],\n \"actions\": [],\n }\n\n expected = {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=\"3 row(s) (75.0%) of target values are null\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_HAS_NULL,\n details={\"num_null_rows\": 3, \"pct_null_rows\": 75},\n ).to_dict(),\n DataCheckError(\n message=\"Binary class targets require exactly two unique values.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,\n details={\"target_values\": [0]},\n ).to_dict(),\n ],\n \"actions\": [\n DataCheckAction(\n DataCheckActionCode.IMPUTE_COL,\n data_check_name=invalid_targets_data_check_name,\n metadata={\n \"is_target\": True,\n \"impute_strategy\": \"most_frequent\",\n },\n ).to_dict()\n ],\n }\n # test Woodwork\n y = pd.Series([None, None, None, 0])\n X = pd.DataFrame({\"col\": range(len(y))})\n\n messages = invalid_targets_check.validate(X, y)\n assert messages == expected\n\n # test list\n y = [np.nan, np.nan, np.nan, 0]\n X = pd.DataFrame({\"col\": range(len(y))})\n\n messages = invalid_targets_check.validate(X, y)\n assert messages == 
expected\n\n # test np.array\n y = np.array([np.nan, np.nan, np.nan, 0])\n X = pd.DataFrame({\"col\": range(len(y))})\n\n messages = invalid_targets_check.validate(X, y)\n assert messages == expected\n\n\[email protected](\n \"problem_type\", [ProblemTypes.BINARY, ProblemTypes.TIME_SERIES_BINARY]\n)\ndef test_invalid_target_data_check_n_unique(problem_type):\n y = pd.Series(list(range(100, 200)) + list(range(200)))\n unique_values = y.value_counts().index.tolist()[:100] # n_unique defaults to 100\n X = pd.DataFrame({\"col\": range(len(y))})\n\n invalid_targets_check = InvalidTargetDataCheck(\n problem_type, get_default_primary_search_objective(problem_type)\n )\n # Test default value of n_unique\n assert invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=\"Binary class targets require exactly two unique values.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,\n details={\"target_values\": unique_values},\n ).to_dict()\n ],\n \"actions\": [],\n }\n\n # Test number of unique values < n_unique\n y = pd.Series(range(20))\n X = pd.DataFrame({\"col\": range(len(y))})\n\n unique_values = y.value_counts().index.tolist()\n assert invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=\"Binary class targets require exactly two unique values.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,\n details={\"target_values\": unique_values},\n ).to_dict()\n ],\n \"actions\": [],\n }\n\n # Test n_unique is None\n invalid_targets_check = InvalidTargetDataCheck(\n \"binary\", get_default_primary_search_objective(\"binary\"), n_unique=None\n )\n y = pd.Series(range(150))\n X = pd.DataFrame({\"col\": range(len(y))})\n\n unique_values = y.value_counts().index.tolist()\n assert invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=\"Binary class targets require exactly two unique values.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,\n details={\"target_values\": unique_values},\n ).to_dict()\n ],\n \"actions\": [],\n }\n\n\[email protected](\n \"objective\",\n [\n \"Root Mean Squared Log Error\",\n \"Mean Squared Log Error\",\n \"Mean Absolute Percentage Error\",\n ],\n)\ndef test_invalid_target_data_check_invalid_labels_for_nonnegative_objective_names(\n objective,\n):\n X = pd.DataFrame({\"column_one\": [100, 200, 100, 200, 200, 100, 200, 100] * 25})\n y = pd.Series([2, 2, 3, 3, -1, -1, 1, 1] * 25)\n\n data_checks = DataChecks(\n [InvalidTargetDataCheck],\n {\n \"InvalidTargetDataCheck\": {\n \"problem_type\": \"multiclass\",\n \"objective\": objective,\n }\n },\n )\n assert data_checks.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=f\"Target has non-positive values which is not supported for {objective}\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_INCOMPATIBLE_OBJECTIVE,\n details={\n \"Count of offending values\": sum(\n val <= 0 for val in y.values.flatten()\n )\n },\n ).to_dict()\n ],\n \"actions\": [],\n }\n\n X = pd.DataFrame({\"column_one\": [100, 200, 100, 200, 100]})\n y = pd.Series([2, 3, 0, 1, 1])\n\n invalid_targets_check = InvalidTargetDataCheck(\n problem_type=\"regression\", objective=objective\n )\n\n assert 
invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=f\"Target has non-positive values which is not supported for {objective}\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_INCOMPATIBLE_OBJECTIVE,\n details={\n \"Count of offending values\": sum(\n val <= 0 for val in y.values.flatten()\n )\n },\n ).to_dict()\n ],\n \"actions\": [],\n }\n\n\[email protected](\n \"objective\", [RootMeanSquaredLogError(), MeanSquaredLogError(), MAPE()]\n)\ndef test_invalid_target_data_check_invalid_labels_for_nonnegative_objective_instances(\n objective,\n):\n X = pd.DataFrame({\"column_one\": [100, 200, 100, 200, 200, 100, 200, 100] * 25})\n y = pd.Series([2, 2, 3, 3, -1, -1, 1, 1] * 25)\n\n data_checks = DataChecks(\n [InvalidTargetDataCheck],\n {\n \"InvalidTargetDataCheck\": {\n \"problem_type\": \"multiclass\",\n \"objective\": objective,\n }\n },\n )\n\n assert data_checks.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=f\"Target has non-positive values which is not supported for {objective.name}\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_INCOMPATIBLE_OBJECTIVE,\n details={\n \"Count of offending values\": sum(\n val <= 0 for val in y.values.flatten()\n )\n },\n ).to_dict()\n ],\n \"actions\": [],\n }\n\n\ndef test_invalid_target_data_check_invalid_labels_for_objectives(\n time_series_core_objectives,\n):\n X = pd.DataFrame({\"column_one\": [100, 200, 100, 200, 200, 100, 200, 100] * 25})\n y = pd.Series([2, 2, 3, 3, -1, -1, 1, 1] * 25)\n\n for objective in time_series_core_objectives:\n if not objective.positive_only:\n data_checks = DataChecks(\n [InvalidTargetDataCheck],\n {\n \"InvalidTargetDataCheck\": {\n \"problem_type\": \"multiclass\",\n \"objective\": objective,\n }\n },\n )\n assert data_checks.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [],\n \"actions\": [],\n }\n\n X = pd.DataFrame({\"column_one\": [100, 200, 100, 200, 100]})\n y = pd.Series([2, 3, 0, 1, 1])\n\n for objective in time_series_core_objectives:\n if not objective.positive_only:\n invalid_targets_check = InvalidTargetDataCheck(\n problem_type=\"regression\", objective=objective\n )\n assert invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [],\n \"actions\": [],\n }\n\n\[email protected](\n \"objective\",\n [\n \"Root Mean Squared Log Error\",\n \"Mean Squared Log Error\",\n \"Mean Absolute Percentage Error\",\n ],\n)\ndef test_invalid_target_data_check_valid_labels_for_nonnegative_objectives(objective):\n X = pd.DataFrame({\"column_one\": [100, 100, 200, 300, 100, 200, 100] * 25})\n y = pd.Series([2, 2, 3, 3, 1, 1, 1] * 25)\n\n data_checks = DataChecks(\n [InvalidTargetDataCheck],\n {\n \"InvalidTargetDataCheck\": {\n \"problem_type\": \"multiclass\",\n \"objective\": objective,\n }\n },\n )\n assert data_checks.validate(X, y) == {\"warnings\": [], \"errors\": [], \"actions\": []}\n\n\ndef test_invalid_target_data_check_initialize_with_none_objective():\n with pytest.raises(DataCheckInitError, match=\"Encountered the following error\"):\n DataChecks(\n [InvalidTargetDataCheck],\n {\n \"InvalidTargetDataCheck\": {\n \"problem_type\": \"multiclass\",\n \"objective\": None,\n }\n },\n )\n\n\ndef test_invalid_target_data_check_regression_problem_nonnumeric_data():\n y_categorical = pd.Series([\"Peace\", \"Is\", \"A\", \"Lie\"] * 100)\n y_mixed_cat_numeric = pd.Series([\"Peace\", 2, \"A\", 4] * 100)\n 
y_integer = pd.Series([1, 2, 3, 4])\n y_float = pd.Series([1.1, 2.2, 3.3, 4.4])\n y_numeric = pd.Series([1, 2.2, 3, 4.4])\n\n data_check_error = DataCheckError(\n message=f\"Target data type should be numeric for regression type problems.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_UNSUPPORTED_TYPE,\n details={},\n ).to_dict()\n\n invalid_targets_check = InvalidTargetDataCheck(\n \"regression\", get_default_primary_search_objective(\"regression\")\n )\n assert invalid_targets_check.validate(\n X=pd.DataFrame({\"col\": range(len(y_categorical))}), y=y_categorical\n ) == {\"warnings\": [], \"errors\": [data_check_error], \"actions\": []}\n assert invalid_targets_check.validate(\n X=pd.DataFrame({\"col\": range(len(y_mixed_cat_numeric))}), y=y_mixed_cat_numeric\n ) == {\"warnings\": [], \"errors\": [data_check_error], \"actions\": []}\n assert invalid_targets_check.validate(\n X=pd.DataFrame({\"col\": range(len(y_integer))}), y=y_integer\n ) == {\"warnings\": [], \"errors\": [], \"actions\": []}\n assert invalid_targets_check.validate(\n X=pd.DataFrame({\"col\": range(len(y_float))}), y=y_float\n ) == {\"warnings\": [], \"errors\": [], \"actions\": []}\n assert invalid_targets_check.validate(\n X=pd.DataFrame({\"col\": range(len(y_numeric))}), y=y_numeric\n ) == {\"warnings\": [], \"errors\": [], \"actions\": []}\n\n\ndef test_invalid_target_data_check_multiclass_problem_binary_data():\n y_multiclass = pd.Series([1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3] * 25)\n y_binary = pd.Series([0, 1, 1, 1, 0, 0] * 25)\n\n data_check_error = DataCheckError(\n message=f\"Target has two or less classes, which is too few for multiclass problems. Consider changing to binary.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_MULTICLASS_NOT_ENOUGH_CLASSES,\n details={\"num_classes\": len(set(y_binary))},\n ).to_dict()\n\n invalid_targets_check = InvalidTargetDataCheck(\n \"multiclass\", get_default_primary_search_objective(\"multiclass\")\n )\n assert invalid_targets_check.validate(\n X=pd.DataFrame({\"col\": range(len(y_multiclass))}), y=y_multiclass\n ) == {\"warnings\": [], \"errors\": [], \"actions\": []}\n assert invalid_targets_check.validate(\n X=pd.DataFrame({\"col\": range(len(y_binary))}), y=y_binary\n ) == {\"warnings\": [], \"errors\": [data_check_error], \"actions\": []}\n\n\[email protected](\n \"problem_type\", [ProblemTypes.MULTICLASS, ProblemTypes.TIME_SERIES_MULTICLASS]\n)\ndef test_invalid_target_data_check_multiclass_problem_almost_continuous_data(\n problem_type,\n):\n invalid_targets_check = InvalidTargetDataCheck(\n problem_type, get_default_primary_search_objective(problem_type)\n )\n y_multiclass_high_classes = pd.Series(\n list(range(0, 100)) * 3\n ) # 100 classes, 300 samples, .33 class/sample ratio\n X = pd.DataFrame({\"col\": range(len(y_multiclass_high_classes))})\n data_check_warning = DataCheckWarning(\n message=f\"Target has a large number of unique values, could be regression type problem.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_MULTICLASS_HIGH_UNIQUE_CLASS,\n details={\"class_to_value_ratio\": 1 / 3},\n ).to_dict()\n assert invalid_targets_check.validate(X, y=y_multiclass_high_classes) == {\n \"warnings\": [data_check_warning],\n \"errors\": [],\n \"actions\": [],\n }\n\n y_multiclass_med_classes = pd.Series(\n list(range(0, 5)) * 20\n ) # 5 classes, 100 samples, .05 class/sample ratio\n X = pd.DataFrame({\"col\": 
range(len(y_multiclass_med_classes))})\n    data_check_warning = DataCheckWarning(\n        message=f\"Target has a large number of unique values, could be regression type problem.\",\n        data_check_name=invalid_targets_data_check_name,\n        message_code=DataCheckMessageCode.TARGET_MULTICLASS_HIGH_UNIQUE_CLASS,\n        details={\"class_to_value_ratio\": 0.05},\n    ).to_dict()\n    assert invalid_targets_check.validate(X, y=y_multiclass_med_classes) == {\n        \"warnings\": [data_check_warning],\n        \"errors\": [],\n        \"actions\": [],\n    }\n\n    y_multiclass_low_classes = pd.Series(\n        list(range(0, 3)) * 100\n    )  # 3 classes, 300 samples, .01 class/sample ratio\n    X = pd.DataFrame({\"col\": range(len(y_multiclass_low_classes))})\n    assert invalid_targets_check.validate(X, y=y_multiclass_low_classes) == {\n        \"warnings\": [],\n        \"errors\": [],\n        \"actions\": [],\n    }\n\n\ndef test_invalid_target_data_check_mismatched_indices():\n    X = pd.DataFrame({\"col\": [1, 2, 3]})\n    y_same_index = pd.Series([1, 0, 1])\n    y_diff_index = pd.Series([0, 1, 0], index=[1, 5, 10])\n    y_diff_index_order = pd.Series([0, 1, 0], index=[0, 2, 1])\n\n    invalid_targets_check = InvalidTargetDataCheck(\n        \"binary\", get_default_primary_search_objective(\"binary\")\n    )\n    assert invalid_targets_check.validate(X=None, y=y_same_index) == {\n        \"warnings\": [],\n        \"errors\": [],\n        \"actions\": [],\n    }\n    assert invalid_targets_check.validate(X, y_same_index) == {\n        \"warnings\": [],\n        \"errors\": [],\n        \"actions\": [],\n    }\n\n    X_index_missing = list(set(y_diff_index.index) - set(X.index))\n    y_index_missing = list(set(X.index) - set(y_diff_index.index))\n    assert invalid_targets_check.validate(X, y_diff_index) == {\n        \"warnings\": [\n            DataCheckWarning(\n                message=\"Input target and features have mismatched indices\",\n                data_check_name=invalid_targets_data_check_name,\n                message_code=DataCheckMessageCode.MISMATCHED_INDICES,\n                details={\n                    \"indices_not_in_features\": X_index_missing,\n                    \"indices_not_in_target\": y_index_missing,\n                },\n            ).to_dict()\n        ],\n        \"errors\": [],\n        \"actions\": [],\n    }\n    assert invalid_targets_check.validate(X, y_diff_index_order) == {\n        \"warnings\": [\n            DataCheckWarning(\n                message=\"Input target and features have mismatched indices order\",\n                data_check_name=invalid_targets_data_check_name,\n                message_code=DataCheckMessageCode.MISMATCHED_INDICES_ORDER,\n                details={},\n            ).to_dict()\n        ],\n        \"errors\": [],\n        \"actions\": [],\n    }\n\n    # Test that we only store ten mismatches when there are more than 10 differences in indices found\n    X_large = pd.DataFrame({\"col\": range(20)})\n    y_more_than_ten_diff_indices = pd.Series([0, 1] * 10, index=range(20, 40))\n    X_index_missing = list(set(y_more_than_ten_diff_indices.index) - set(X_large.index))\n    y_index_missing = list(set(X_large.index) - set(y_more_than_ten_diff_indices.index))\n    assert invalid_targets_check.validate(X_large, y_more_than_ten_diff_indices) == {\n        \"warnings\": [\n            DataCheckWarning(\n                message=\"Input target and features have mismatched indices\",\n                data_check_name=invalid_targets_data_check_name,\n                message_code=DataCheckMessageCode.MISMATCHED_INDICES,\n                details={\n                    \"indices_not_in_features\": X_index_missing[:10],\n                    \"indices_not_in_target\": y_index_missing[:10],\n                },\n            ).to_dict()\n        ],\n        \"errors\": [],\n        \"actions\": [],\n    }\n\n\ndef test_invalid_target_data_check_different_lengths():\n    X = pd.DataFrame({\"col\": [1, 2, 3]})\n    y_diff_len = pd.Series([0, 1])\n    invalid_targets_check = InvalidTargetDataCheck(\n        \"binary\", get_default_primary_search_objective(\"binary\")\n    )\n    assert 
invalid_targets_check.validate(X, y_diff_len) == {\n \"warnings\": [\n DataCheckWarning(\n message=\"Input target and features have different lengths\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.MISMATCHED_LENGTHS,\n details={\n \"features_length\": len(X.index),\n \"target_length\": len(y_diff_len.index),\n },\n ).to_dict(),\n DataCheckWarning(\n message=\"Input target and features have mismatched indices\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.MISMATCHED_INDICES,\n details={\"indices_not_in_features\": [], \"indices_not_in_target\": [2]},\n ).to_dict(),\n ],\n \"errors\": [],\n \"actions\": [],\n }\n\n\ndef test_invalid_target_data_check_numeric_binary_does_not_return_warnings():\n y = pd.Series([1, 5, 1, 5, 1, 1])\n X = pd.DataFrame({\"col\": range(len(y))})\n invalid_targets_check = InvalidTargetDataCheck(\n \"binary\", get_default_primary_search_objective(\"binary\")\n )\n assert invalid_targets_check.validate(X, y) == {\n \"warnings\": [],\n \"errors\": [],\n \"actions\": [],\n }\n\n\[email protected](\"problem_type\", ProblemTypes.all_problem_types)\ndef test_invalid_target_data_action_for_data_with_null(problem_type):\n y = pd.Series([None, None, None, 0, 0, 0, 0, 0, 0, 0])\n X = pd.DataFrame({\"col\": range(len(y))})\n invalid_targets_check = InvalidTargetDataCheck(\n problem_type, get_default_primary_search_objective(problem_type)\n )\n impute_strategy = \"mean\" if is_regression(problem_type) else \"most_frequent\"\n\n expected = {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=\"3 row(s) (30.0%) of target values are null\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_HAS_NULL,\n details={\"num_null_rows\": 3, \"pct_null_rows\": 30.0},\n ).to_dict()\n ],\n \"actions\": [\n DataCheckAction(\n DataCheckActionCode.IMPUTE_COL,\n data_check_name=invalid_targets_data_check_name,\n metadata={\n \"is_target\": True,\n \"impute_strategy\": impute_strategy,\n },\n ).to_dict()\n ],\n }\n if is_binary(problem_type):\n expected[\"errors\"].append(\n DataCheckError(\n message=\"Binary class targets require exactly two unique values.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_BINARY_NOT_TWO_UNIQUE_VALUES,\n details={\"target_values\": [0]},\n ).to_dict()\n )\n elif is_multiclass(problem_type):\n expected[\"errors\"].append(\n DataCheckError(\n message=f\"Target has two or less classes, which is too few for multiclass problems. 
Consider changing to binary.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_MULTICLASS_NOT_ENOUGH_CLASSES,\n details={\"num_classes\": 1},\n ).to_dict()\n )\n expected[\"warnings\"].append(\n DataCheckWarning(\n message=f\"Target has a large number of unique values, could be regression type problem.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_MULTICLASS_HIGH_UNIQUE_CLASS,\n details={\"class_to_value_ratio\": 0.1},\n ).to_dict()\n )\n\n messages = invalid_targets_check.validate(X, y)\n assert messages == expected\n\n\[email protected](\"problem_type\", ProblemTypes.all_problem_types)\ndef test_invalid_target_data_action_for_all_null(problem_type):\n invalid_targets_check = InvalidTargetDataCheck(\n problem_type, get_default_primary_search_objective(problem_type)\n )\n\n y_all_null = pd.Series([None, None, None])\n X = pd.DataFrame({\"col\": range(len(y_all_null))})\n\n expected = {\n \"warnings\": [],\n \"errors\": [\n DataCheckError(\n message=\"Target is either empty or fully null.\",\n data_check_name=invalid_targets_data_check_name,\n message_code=DataCheckMessageCode.TARGET_IS_EMPTY_OR_FULLY_NULL,\n details={},\n ).to_dict(),\n ],\n \"actions\": [],\n }\n messages = invalid_targets_check.validate(X, y_all_null)\n assert messages == expected\n",
"import importlib\nimport inspect\nimport os\nimport warnings\nfrom unittest.mock import patch\n\nimport cloudpickle\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom skopt.space import Categorical\n\nfrom evalml.exceptions import (\n ComponentNotYetFittedError,\n EnsembleMissingPipelinesError,\n MethodPropertyNotFoundError,\n)\nfrom evalml.model_family import ModelFamily\nfrom evalml.pipelines import BinaryClassificationPipeline\nfrom evalml.pipelines.components import (\n LSA,\n PCA,\n ARIMARegressor,\n BaselineClassifier,\n BaselineRegressor,\n CatBoostClassifier,\n CatBoostRegressor,\n ComponentBase,\n DateTimeFeaturizer,\n DFSTransformer,\n DropColumns,\n DropNullColumns,\n DropRowsTransformer,\n ElasticNetClassifier,\n ElasticNetRegressor,\n Estimator,\n ExtraTreesClassifier,\n ExtraTreesRegressor,\n Imputer,\n LightGBMClassifier,\n LightGBMRegressor,\n LinearDiscriminantAnalysis,\n LinearRegressor,\n LogisticRegressionClassifier,\n NaturalLanguageFeaturizer,\n OneHotEncoder,\n Oversampler,\n PerColumnImputer,\n PolynomialDetrender,\n ProphetRegressor,\n RandomForestClassifier,\n RandomForestRegressor,\n RFClassifierSelectFromModel,\n RFRegressorSelectFromModel,\n SelectByType,\n SelectColumns,\n SimpleImputer,\n StandardScaler,\n SVMClassifier,\n SVMRegressor,\n TargetImputer,\n TimeSeriesBaselineEstimator,\n TimeSeriesFeaturizer,\n Transformer,\n Undersampler,\n XGBoostClassifier,\n XGBoostRegressor,\n)\nfrom evalml.pipelines.components.ensemble import (\n StackedEnsembleBase,\n StackedEnsembleClassifier,\n StackedEnsembleRegressor,\n)\nfrom evalml.pipelines.components.estimators.classifiers.vowpal_wabbit_classifiers import (\n VowpalWabbitBinaryClassifier,\n VowpalWabbitMulticlassClassifier,\n)\nfrom evalml.pipelines.components.estimators.regressors.vowpal_wabbit_regressor import (\n VowpalWabbitRegressor,\n)\nfrom evalml.pipelines.components.transformers.encoders.label_encoder import (\n LabelEncoder,\n)\nfrom evalml.pipelines.components.transformers.preprocessing.log_transformer import (\n LogTransformer,\n)\nfrom evalml.pipelines.components.transformers.samplers.base_sampler import (\n BaseSampler,\n)\nfrom evalml.pipelines.components.utils import (\n _all_estimators,\n _all_transformers,\n all_components,\n generate_component_code,\n)\nfrom evalml.problem_types import ProblemTypes\n\n\[email protected](scope=\"module\")\ndef test_classes():\n class MockComponent(ComponentBase):\n name = \"Mock Component\"\n modifies_features = True\n modifies_target = False\n training_only = False\n\n class MockEstimator(Estimator):\n name = \"Mock Estimator\"\n model_family = ModelFamily.LINEAR_MODEL\n supported_problem_types = [\"binary\"]\n\n class MockTransformer(Transformer):\n name = \"Mock Transformer\"\n\n def transform(self, X, y=None):\n return X\n\n return MockComponent, MockEstimator, MockTransformer\n\n\[email protected](scope=\"module\")\ndef test_estimator_needs_fitting_false():\n class MockEstimatorNeedsFittingFalse(Estimator):\n name = \"Mock Estimator Needs Fitting False\"\n model_family = ModelFamily.LINEAR_MODEL\n supported_problem_types = [\"binary\"]\n needs_fitting = False\n\n def predict(self, X):\n pass\n\n return MockEstimatorNeedsFittingFalse\n\n\nclass MockFitComponent(ComponentBase):\n name = \"Mock Fit Component\"\n modifies_features = True\n modifies_target = False\n training_only = False\n\n def __init__(self, param_a=2, param_b=10, random_seed=0):\n parameters = {\"param_a\": param_a, \"param_b\": param_b}\n super().__init__(parameters=parameters, 
component_obj=None, random_seed=random_seed)\n\n    def fit(self, X, y=None):\n        pass\n\n    def predict(self, X):\n        return np.array(\n            [self.parameters[\"param_a\"] * 2, self.parameters[\"param_b\"] * 10]\n        )\n\n\ndef test_init(test_classes):\n    MockComponent, MockEstimator, MockTransformer = test_classes\n    assert MockComponent().name == \"Mock Component\"\n    assert MockEstimator().name == \"Mock Estimator\"\n    assert MockTransformer().name == \"Mock Transformer\"\n\n\ndef test_describe(test_classes):\n    MockComponent, MockEstimator, MockTransformer = test_classes\n    params = {\"param_a\": \"value_a\", \"param_b\": 123}\n    component = MockComponent(parameters=params)\n    assert component.describe(return_dict=True) == {\n        \"name\": \"Mock Component\",\n        \"parameters\": params,\n    }\n    estimator = MockEstimator(parameters=params)\n    assert estimator.describe(return_dict=True) == {\n        \"name\": \"Mock Estimator\",\n        \"parameters\": params,\n    }\n    transformer = MockTransformer(parameters=params)\n    assert transformer.describe(return_dict=True) == {\n        \"name\": \"Mock Transformer\",\n        \"parameters\": params,\n    }\n\n\ndef test_describe_component():\n    enc = OneHotEncoder()\n    imputer = Imputer()\n    simple_imputer = SimpleImputer(\"mean\")\n    column_imputer = PerColumnImputer({\"a\": \"mean\", \"b\": (\"constant\", 100)})\n    scaler = StandardScaler()\n    feature_selection_clf = RFClassifierSelectFromModel(\n        n_estimators=10, number_features=5, percent_features=0.3, threshold=-np.inf\n    )\n    feature_selection_reg = RFRegressorSelectFromModel(\n        n_estimators=10, number_features=5, percent_features=0.3, threshold=-np.inf\n    )\n    drop_col_transformer = DropColumns(columns=[\"col_one\", \"col_two\"])\n    drop_null_transformer = DropNullColumns()\n    datetime = DateTimeFeaturizer()\n    natural_language_featurizer = NaturalLanguageFeaturizer()\n    lsa = LSA()\n    pca = PCA()\n    lda = LinearDiscriminantAnalysis()\n    ft = DFSTransformer()\n    us = Undersampler()\n    assert enc.describe(return_dict=True) == {\n        \"name\": \"One Hot Encoder\",\n        \"parameters\": {\n            \"top_n\": 10,\n            \"features_to_encode\": None,\n            \"categories\": None,\n            \"drop\": \"if_binary\",\n            \"handle_unknown\": \"ignore\",\n            \"handle_missing\": \"error\",\n        },\n    }\n    assert imputer.describe(return_dict=True) == {\n        \"name\": \"Imputer\",\n        \"parameters\": {\n            \"categorical_impute_strategy\": \"most_frequent\",\n            \"categorical_fill_value\": None,\n            \"numeric_impute_strategy\": \"mean\",\n            \"numeric_fill_value\": None,\n        },\n    }\n    assert simple_imputer.describe(return_dict=True) == {\n        \"name\": \"Simple Imputer\",\n        \"parameters\": {\"impute_strategy\": \"mean\", \"fill_value\": None},\n    }\n    assert column_imputer.describe(return_dict=True) == {\n        \"name\": \"Per Column Imputer\",\n        \"parameters\": {\n            \"impute_strategies\": {\"a\": \"mean\", \"b\": (\"constant\", 100)},\n            \"default_impute_strategy\": \"most_frequent\",\n        },\n    }\n    assert scaler.describe(return_dict=True) == {\n        \"name\": \"Standard Scaler\",\n        \"parameters\": {},\n    }\n    assert feature_selection_clf.describe(return_dict=True) == {\n        \"name\": \"RF Classifier Select From Model\",\n        \"parameters\": {\n            \"number_features\": 5,\n            \"n_estimators\": 10,\n            \"max_depth\": None,\n            \"percent_features\": 0.3,\n            \"threshold\": -np.inf,\n            \"n_jobs\": -1,\n        },\n    }\n    assert feature_selection_reg.describe(return_dict=True) == {\n        \"name\": \"RF Regressor Select From Model\",\n        \"parameters\": {\n            \"number_features\": 5,\n            \"n_estimators\": 10,\n            \"max_depth\": None,\n            \"percent_features\": 0.3,\n            \"threshold\": -np.inf,\n            \"n_jobs\": -1,\n        },\n    
}\n assert drop_col_transformer.describe(return_dict=True) == {\n \"name\": \"Drop Columns Transformer\",\n \"parameters\": {\"columns\": [\"col_one\", \"col_two\"]},\n }\n assert drop_null_transformer.describe(return_dict=True) == {\n \"name\": \"Drop Null Columns Transformer\",\n \"parameters\": {\"pct_null_threshold\": 1.0},\n }\n assert datetime.describe(return_dict=True) == {\n \"name\": \"DateTime Featurization Component\",\n \"parameters\": {\n \"features_to_extract\": [\"year\", \"month\", \"day_of_week\", \"hour\"],\n \"encode_as_categories\": False,\n \"date_index\": None,\n },\n }\n assert natural_language_featurizer.describe(return_dict=True) == {\n \"name\": \"Natural Language Featurization Component\",\n \"parameters\": {},\n }\n assert lsa.describe(return_dict=True) == {\n \"name\": \"LSA Transformer\",\n \"parameters\": {},\n }\n assert pca.describe(return_dict=True) == {\n \"name\": \"PCA Transformer\",\n \"parameters\": {\"n_components\": None, \"variance\": 0.95},\n }\n assert lda.describe(return_dict=True) == {\n \"name\": \"Linear Discriminant Analysis Transformer\",\n \"parameters\": {\"n_components\": None},\n }\n assert ft.describe(return_dict=True) == {\n \"name\": \"DFS Transformer\",\n \"parameters\": {\"index\": \"index\"},\n }\n assert us.describe(return_dict=True) == {\n \"name\": \"Undersampler\",\n \"parameters\": {\n \"sampling_ratio\": 0.25,\n \"sampling_ratio_dict\": None,\n \"min_samples\": 100,\n \"min_percentage\": 0.1,\n },\n }\n try:\n oversampler = Oversampler()\n assert oversampler.describe(return_dict=True) == {\n \"name\": \"Oversampler\",\n \"parameters\": {\n \"sampling_ratio\": 0.25,\n \"sampling_ratio_dict\": None,\n \"k_neighbors_default\": 5,\n \"n_jobs\": -1,\n },\n }\n except ImportError:\n pass\n # testing estimators\n base_classifier = BaselineClassifier()\n base_regressor = BaselineRegressor()\n lr_classifier = LogisticRegressionClassifier()\n en_classifier = ElasticNetClassifier()\n en_regressor = ElasticNetRegressor()\n et_classifier = ExtraTreesClassifier(n_estimators=10, max_features=\"auto\")\n et_regressor = ExtraTreesRegressor(n_estimators=10, max_features=\"auto\")\n rf_classifier = RandomForestClassifier(n_estimators=10, max_depth=3)\n rf_regressor = RandomForestRegressor(n_estimators=10, max_depth=3)\n linear_regressor = LinearRegressor()\n svm_classifier = SVMClassifier()\n svm_regressor = SVMRegressor()\n assert base_classifier.describe(return_dict=True) == {\n \"name\": \"Baseline Classifier\",\n \"parameters\": {\"strategy\": \"mode\"},\n }\n assert base_regressor.describe(return_dict=True) == {\n \"name\": \"Baseline Regressor\",\n \"parameters\": {\"strategy\": \"mean\"},\n }\n assert lr_classifier.describe(return_dict=True) == {\n \"name\": \"Logistic Regression Classifier\",\n \"parameters\": {\n \"penalty\": \"l2\",\n \"C\": 1.0,\n \"n_jobs\": -1,\n \"multi_class\": \"auto\",\n \"solver\": \"lbfgs\",\n },\n }\n assert en_classifier.describe(return_dict=True) == {\n \"name\": \"Elastic Net Classifier\",\n \"parameters\": {\n \"C\": 1.0,\n \"l1_ratio\": 0.15,\n \"n_jobs\": -1,\n \"multi_class\": \"auto\",\n \"solver\": \"saga\",\n \"penalty\": \"elasticnet\",\n },\n }\n assert en_regressor.describe(return_dict=True) == {\n \"name\": \"Elastic Net Regressor\",\n \"parameters\": {\n \"alpha\": 0.0001,\n \"l1_ratio\": 0.15,\n \"max_iter\": 1000,\n \"normalize\": False,\n },\n }\n assert et_classifier.describe(return_dict=True) == {\n \"name\": \"Extra Trees Classifier\",\n \"parameters\": {\n \"n_estimators\": 10,\n 
\"max_features\": \"auto\",\n \"max_depth\": 6,\n \"min_samples_split\": 2,\n \"min_weight_fraction_leaf\": 0.0,\n \"n_jobs\": -1,\n },\n }\n assert et_regressor.describe(return_dict=True) == {\n \"name\": \"Extra Trees Regressor\",\n \"parameters\": {\n \"n_estimators\": 10,\n \"max_features\": \"auto\",\n \"max_depth\": 6,\n \"min_samples_split\": 2,\n \"min_weight_fraction_leaf\": 0.0,\n \"n_jobs\": -1,\n },\n }\n assert rf_classifier.describe(return_dict=True) == {\n \"name\": \"Random Forest Classifier\",\n \"parameters\": {\"n_estimators\": 10, \"max_depth\": 3, \"n_jobs\": -1},\n }\n assert rf_regressor.describe(return_dict=True) == {\n \"name\": \"Random Forest Regressor\",\n \"parameters\": {\"n_estimators\": 10, \"max_depth\": 3, \"n_jobs\": -1},\n }\n assert linear_regressor.describe(return_dict=True) == {\n \"name\": \"Linear Regressor\",\n \"parameters\": {\"fit_intercept\": True, \"normalize\": False, \"n_jobs\": -1},\n }\n assert svm_classifier.describe(return_dict=True) == {\n \"name\": \"SVM Classifier\",\n \"parameters\": {\n \"C\": 1.0,\n \"kernel\": \"rbf\",\n \"gamma\": \"auto\",\n \"probability\": True,\n },\n }\n assert svm_regressor.describe(return_dict=True) == {\n \"name\": \"SVM Regressor\",\n \"parameters\": {\"C\": 1.0, \"kernel\": \"rbf\", \"gamma\": \"auto\"},\n }\n try:\n xgb_classifier = XGBoostClassifier(\n eta=0.1, min_child_weight=1, max_depth=3, n_estimators=75\n )\n xgb_regressor = XGBoostRegressor(\n eta=0.1, min_child_weight=1, max_depth=3, n_estimators=75\n )\n assert xgb_classifier.describe(return_dict=True) == {\n \"name\": \"XGBoost Classifier\",\n \"parameters\": {\n \"eta\": 0.1,\n \"max_depth\": 3,\n \"min_child_weight\": 1,\n \"n_estimators\": 75,\n \"n_jobs\": 12,\n \"eval_metric\": \"logloss\",\n },\n }\n assert xgb_regressor.describe(return_dict=True) == {\n \"name\": \"XGBoost Regressor\",\n \"parameters\": {\n \"eta\": 0.1,\n \"max_depth\": 3,\n \"min_child_weight\": 1,\n \"n_estimators\": 75,\n \"n_jobs\": 12,\n },\n }\n except ImportError:\n pass\n try:\n cb_classifier = CatBoostClassifier()\n cb_regressor = CatBoostRegressor()\n assert cb_classifier.describe(return_dict=True) == {\n \"name\": \"CatBoost Classifier\",\n \"parameters\": {\n \"allow_writing_files\": False,\n \"n_estimators\": 10,\n \"eta\": 0.03,\n \"max_depth\": 6,\n \"bootstrap_type\": None,\n \"silent\": True,\n \"n_jobs\": -1,\n },\n }\n assert cb_regressor.describe(return_dict=True) == {\n \"name\": \"CatBoost Regressor\",\n \"parameters\": {\n \"allow_writing_files\": False,\n \"n_estimators\": 10,\n \"eta\": 0.03,\n \"max_depth\": 6,\n \"bootstrap_type\": None,\n \"silent\": False,\n \"n_jobs\": -1,\n },\n }\n except ImportError:\n pass\n try:\n lg_classifier = LightGBMClassifier()\n lg_regressor = LightGBMRegressor()\n assert lg_classifier.describe(return_dict=True) == {\n \"name\": \"LightGBM Classifier\",\n \"parameters\": {\n \"boosting_type\": \"gbdt\",\n \"learning_rate\": 0.1,\n \"n_estimators\": 100,\n \"max_depth\": 0,\n \"num_leaves\": 31,\n \"min_child_samples\": 20,\n \"n_jobs\": -1,\n \"bagging_fraction\": 0.9,\n \"bagging_freq\": 0,\n },\n }\n assert lg_regressor.describe(return_dict=True) == {\n \"name\": \"LightGBM Regressor\",\n \"parameters\": {\n \"boosting_type\": \"gbdt\",\n \"learning_rate\": 0.1,\n \"n_estimators\": 20,\n \"max_depth\": 0,\n \"num_leaves\": 31,\n \"min_child_samples\": 20,\n \"n_jobs\": -1,\n \"bagging_fraction\": 0.9,\n \"bagging_freq\": 0,\n },\n }\n except ImportError:\n pass\n try:\n prophet_regressor = 
ProphetRegressor()\n assert prophet_regressor.describe(return_dict=True) == {\n \"name\": \"Prophet Regressor\",\n \"parameters\": {\n \"changepoint_prior_scale\": 0.05,\n \"date_index\": None,\n \"holidays_prior_scale\": 10,\n \"seasonality_mode\": \"additive\",\n \"seasonality_prior_scale\": 10,\n \"stan_backend\": \"CMDSTANPY\",\n },\n }\n except ImportError:\n pass\n try:\n vw_binary_classifier = VowpalWabbitBinaryClassifier(\n loss_function=\"classic\",\n learning_rate=0.1,\n decay_learning_rate=1.0,\n power_t=0.1,\n passes=1,\n )\n vw_multi_classifier = VowpalWabbitMulticlassClassifier(\n loss_function=\"classic\",\n learning_rate=0.1,\n decay_learning_rate=1.0,\n power_t=0.1,\n passes=1,\n )\n vw_regressor = VowpalWabbitRegressor(\n learning_rate=0.1, decay_learning_rate=1.0, power_t=0.1, passes=1\n )\n\n assert vw_binary_classifier.describe(return_dict=True) == {\n \"name\": \"Vowpal Wabbit Binary Classifier\",\n \"parameters\": {\n \"loss_function\": \"classic\",\n \"learning_rate\": 0.1,\n \"decay_learning_rate\": 1.0,\n \"power_t\": 0.1,\n \"passes\": 1,\n },\n }\n assert vw_multi_classifier.describe(return_dict=True) == {\n \"name\": \"Vowpal Wabbit Multiclass Classifier\",\n \"parameters\": {\n \"loss_function\": \"classic\",\n \"learning_rate\": 0.1,\n \"decay_learning_rate\": 1.0,\n \"power_t\": 0.1,\n \"passes\": 1,\n },\n }\n assert vw_regressor.describe(return_dict=True) == {\n \"name\": \"Vowpal Wabbit Regressor\",\n \"parameters\": {\n \"learning_rate\": 0.1,\n \"decay_learning_rate\": 1.0,\n \"power_t\": 0.1,\n \"passes\": 1,\n },\n }\n except ImportError:\n pass\n\n\ndef test_missing_attributes(X_y_binary):\n class MockComponentName(ComponentBase):\n pass\n\n with pytest.raises(TypeError):\n MockComponentName()\n\n class MockComponentModelFamily(ComponentBase):\n name = \"Mock Component\"\n\n with pytest.raises(TypeError):\n MockComponentModelFamily()\n\n class MockEstimatorWithoutAttribute(Estimator):\n name = \"Mock Estimator\"\n model_family = ModelFamily.LINEAR_MODEL\n\n with pytest.raises(TypeError):\n MockEstimatorWithoutAttribute()\n\n\ndef test_missing_methods_on_components(X_y_binary, test_classes):\n X, y = X_y_binary\n MockComponent, MockEstimator, MockTransformer = test_classes\n\n component = MockComponent()\n with pytest.raises(\n MethodPropertyNotFoundError,\n match=\"Component requires a fit method or a component_obj that implements fit\",\n ):\n component.fit(X)\n\n estimator = MockEstimator()\n estimator._is_fitted = True\n with pytest.raises(\n MethodPropertyNotFoundError,\n match=\"Estimator requires a predict method or a component_obj that implements predict\",\n ):\n estimator.predict(X)\n with pytest.raises(\n MethodPropertyNotFoundError,\n match=\"Estimator requires a predict_proba method or a component_obj that implements predict_proba\",\n ):\n estimator.predict_proba(X)\n with pytest.raises(\n MethodPropertyNotFoundError,\n match=\"Estimator requires a feature_importance property or a component_obj that implements feature_importances_\",\n ):\n estimator.feature_importance\n\n transformer = MockTransformer()\n transformer._is_fitted = True\n with pytest.raises(\n MethodPropertyNotFoundError,\n match=\"Component requires a fit method or a component_obj that implements fit\",\n ):\n transformer.fit(X, y)\n transformer.transform(X)\n with pytest.raises(\n MethodPropertyNotFoundError,\n match=\"Component requires a fit method or a component_obj that implements fit\",\n ):\n transformer.fit_transform(X)\n\n\ndef test_component_fit(X_y_binary):\n 
X, y = X_y_binary\n\n class MockEstimator:\n def fit(self, X, y):\n pass\n\n class MockComponent(Estimator):\n name = \"Mock Estimator\"\n model_family = ModelFamily.LINEAR_MODEL\n supported_problem_types = [\"binary\"]\n hyperparameter_ranges = {}\n\n def __init__(self):\n parameters = {}\n est = MockEstimator()\n super().__init__(parameters=parameters, component_obj=est, random_seed=0)\n\n est = MockComponent()\n assert isinstance(est.fit(X, y), ComponentBase)\n\n\ndef test_component_fit_transform(X_y_binary):\n X, y = X_y_binary\n\n class MockTransformerWithFitTransform(Transformer):\n name = \"Mock Transformer\"\n hyperparameter_ranges = {}\n\n def fit_transform(self, X, y=None):\n return X\n\n def transform(self, X, y=None):\n return X\n\n def __init__(self):\n parameters = {}\n super().__init__(parameters=parameters, component_obj=None, random_seed=0)\n\n class MockTransformerWithFitTransformButError(Transformer):\n name = \"Mock Transformer\"\n hyperparameter_ranges = {}\n\n def fit_transform(self, X, y=None):\n raise RuntimeError\n\n def transform(self, X, y=None):\n return X\n\n def __init__(self):\n parameters = {}\n super().__init__(parameters=parameters, component_obj=None, random_seed=0)\n\n class MockTransformerWithFitAndTransform(Transformer):\n name = \"Mock Transformer\"\n hyperparameter_ranges = {}\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n return X\n\n def __init__(self):\n parameters = {}\n super().__init__(parameters=parameters, component_obj=None, random_seed=0)\n\n # convert data to pd DataFrame, because the component classes don't\n # standardize to pd DataFrame\n X = pd.DataFrame(X)\n y = pd.Series(y)\n\n component = MockTransformerWithFitTransform()\n assert isinstance(component.fit_transform(X, y), pd.DataFrame)\n\n component = MockTransformerWithFitTransformButError()\n with pytest.raises(RuntimeError):\n component.fit_transform(X, y)\n\n component = MockTransformerWithFitAndTransform()\n assert isinstance(component.fit_transform(X, y), pd.DataFrame)\n\n\ndef test_model_family_components(test_classes):\n _, MockEstimator, _ = test_classes\n\n assert MockEstimator.model_family == ModelFamily.LINEAR_MODEL\n\n\ndef test_regressor_call_predict_proba(test_classes):\n X = np.array([])\n _, MockEstimator, _ = test_classes\n component = MockEstimator()\n component._is_fitted = True\n with pytest.raises(MethodPropertyNotFoundError):\n component.predict_proba(X)\n\n\ndef test_component_describe(test_classes, caplog):\n MockComponent, _, _ = test_classes\n component = MockComponent()\n component.describe(print_name=True)\n out = caplog.text\n assert \"Mock Component\" in out\n\n\ndef test_component_parameters_getter(test_classes):\n MockComponent, _, _ = test_classes\n component = MockComponent({\"test\": \"parameter\"})\n assert component.parameters == {\"test\": \"parameter\"}\n component.parameters[\"test\"] = \"new\"\n assert component.parameters == {\"test\": \"parameter\"}\n\n\ndef test_component_parameters_init(\n logistic_regression_binary_pipeline_class, linear_regression_pipeline_class\n):\n for component_class in all_components():\n print(\"Testing component {}\".format(component_class.name))\n component = component_class()\n parameters = component.parameters\n\n component2 = component_class(**parameters)\n parameters2 = component2.parameters\n\n assert parameters == parameters2\n\n\ndef test_clone_init():\n params = {\"param_a\": 2, \"param_b\": 11}\n clf = MockFitComponent(**params)\n clf_clone = clf.clone()\n assert 
clf.parameters == clf_clone.parameters\n assert clf_clone.random_seed == clf.random_seed\n\n\ndef test_clone_fitted(X_y_binary):\n X, y = X_y_binary\n params = {\"param_a\": 3, \"param_b\": 7}\n clf = MockFitComponent(**params)\n clf.fit(X, y)\n predicted = clf.predict(X)\n\n clf_clone = clf.clone()\n assert clf_clone.random_seed == clf.random_seed\n assert clf.parameters == clf_clone.parameters\n\n with pytest.raises(ComponentNotYetFittedError, match=\"You must fit\"):\n clf_clone.predict(X)\n\n clf_clone.fit(X, y)\n predicted_clone = clf_clone.predict(X)\n np.testing.assert_almost_equal(predicted, predicted_clone)\n\n\ndef test_components_init_kwargs():\n for component_class in all_components():\n try:\n component = component_class()\n except EnsembleMissingPipelinesError:\n continue\n if component._component_obj is None:\n continue\n if isinstance(component, StackedEnsembleBase):\n continue\n\n obj_class = component._component_obj.__class__.__name__\n module = component._component_obj.__module__\n importlib.import_module(module, obj_class)\n patched = module + \".\" + obj_class + \".__init__\"\n if component_class == LabelEncoder:\n # scikit-learn's LabelEncoder found in different module than where we import from\n patched = module[: module.rindex(\".\")] + \".\" + obj_class + \".__init__\"\n\n def all_init(self, *args, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n with patch(patched, new=all_init) as _:\n component = component_class(test_arg=\"test\")\n component_with_different_kwargs = component_class(diff_test_arg=\"test\")\n assert component.parameters[\"test_arg\"] == \"test\"\n if not isinstance(component, (PolynomialDetrender, LabelEncoder)):\n assert component._component_obj.test_arg == \"test\"\n # Test equality of different components with same or different kwargs\n assert component == component_class(test_arg=\"test\")\n assert component != component_with_different_kwargs\n\n\ndef test_component_has_random_seed():\n for component_class in all_components():\n params = inspect.signature(component_class.__init__).parameters\n assert \"random_seed\" in params\n\n\ndef test_transformer_transform_output_type(X_y_binary):\n X_np, y_np = X_y_binary\n assert isinstance(X_np, np.ndarray)\n assert isinstance(y_np, np.ndarray)\n y_list = list(y_np)\n X_df_no_col_names = pd.DataFrame(X_np)\n range_index = pd.RangeIndex(start=0, stop=X_np.shape[1], step=1)\n X_df_with_col_names = pd.DataFrame(\n X_np, columns=[\"x\" + str(i) for i in range(X_np.shape[1])]\n )\n y_series_no_name = pd.Series(y_np)\n y_series_with_name = pd.Series(y_np, name=\"target\")\n datatype_combos = [\n (X_np, y_np, range_index),\n (X_np, y_list, range_index),\n (X_df_no_col_names, y_series_no_name, range_index),\n (X_df_with_col_names, y_series_with_name, X_df_with_col_names.columns),\n ]\n\n for component_class in _all_transformers():\n if component_class in [PolynomialDetrender, LogTransformer, LabelEncoder]:\n # Skipping because these tests are handled in their respective test files\n continue\n print(\"Testing transformer {}\".format(component_class.name))\n for X, y, X_cols_expected in datatype_combos:\n print(\n 'Checking output of transform for transformer \"{}\" on X type {} cols {}, y type {} name {}'.format(\n component_class.name,\n type(X),\n X.columns if isinstance(X, pd.DataFrame) else None,\n type(y),\n y.name if isinstance(y, pd.Series) else None,\n )\n )\n\n component = component_class()\n # SMOTE will throw an error if we pass a ratio lower than the current class balance\n if 
\"Oversampler\" == component_class.name:\n # we cover this case in test_oversamplers\n continue\n elif component_class == TimeSeriesFeaturizer:\n # covered in test_delayed_feature_transformer.py\n continue\n\n component.fit(X, y=y)\n transform_output = component.transform(X, y=y)\n\n if component.modifies_target:\n assert isinstance(transform_output[0], pd.DataFrame)\n assert isinstance(transform_output[1], pd.Series)\n else:\n assert isinstance(transform_output, pd.DataFrame)\n\n if isinstance(component, SelectColumns) or isinstance(\n component, SelectByType\n ):\n assert transform_output.shape == (X.shape[0], 0)\n elif isinstance(component, PCA) or isinstance(\n component, LinearDiscriminantAnalysis\n ):\n assert transform_output.shape[0] == X.shape[0]\n assert transform_output.shape[1] <= X.shape[1]\n elif isinstance(component, DFSTransformer):\n assert transform_output.shape[0] == X.shape[0]\n assert transform_output.shape[1] >= X.shape[1]\n elif component.modifies_target:\n assert transform_output[0].shape == X.shape\n assert transform_output[1].shape[0] == X.shape[0]\n assert len(transform_output[1].shape) == 1\n else:\n assert transform_output.shape == X.shape\n assert list(transform_output.columns) == list(X_cols_expected)\n\n transform_output = component.fit_transform(X, y=y)\n if component.modifies_target:\n assert isinstance(transform_output[0], pd.DataFrame)\n assert isinstance(transform_output[1], pd.Series)\n else:\n assert isinstance(transform_output, pd.DataFrame)\n\n if isinstance(component, SelectColumns) or isinstance(\n component, SelectByType\n ):\n assert transform_output.shape == (X.shape[0], 0)\n elif isinstance(component, PCA) or isinstance(\n component, LinearDiscriminantAnalysis\n ):\n assert transform_output.shape[0] == X.shape[0]\n assert transform_output.shape[1] <= X.shape[1]\n elif isinstance(component, DFSTransformer):\n assert transform_output.shape[0] == X.shape[0]\n assert transform_output.shape[1] >= X.shape[1]\n elif component.modifies_target:\n assert transform_output[0].shape == X.shape\n assert transform_output[1].shape[0] == X.shape[0]\n assert len(transform_output[1].shape) == 1\n\n else:\n assert transform_output.shape == X.shape\n assert list(transform_output.columns) == list(X_cols_expected)\n\n\[email protected](\n \"cls\",\n [\n cls\n for cls in all_components()\n if cls\n not in [\n StackedEnsembleClassifier,\n StackedEnsembleRegressor,\n ]\n ],\n)\ndef test_default_parameters(cls):\n assert (\n cls.default_parameters == cls().parameters\n ), f\"{cls.__name__}'s default parameters don't match __init__.\"\n\n\[email protected](\"cls\", [cls for cls in all_components()])\ndef test_default_parameters_raise_no_warnings(cls):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n cls()\n assert len(w) == 0\n\n\ndef test_estimator_check_for_fit(X_y_binary):\n class MockEstimatorObj:\n def __init__(self):\n pass\n\n def fit(self, X, y):\n return self\n\n def predict(self, X):\n series = pd.Series([0] * len(X))\n series.ww.init()\n return series\n\n def predict_proba(self, X):\n df = pd.DataFrame({0: [0] * len(X)})\n df.ww.init()\n return df\n\n class MockEstimator(Estimator):\n name = \"Mock Estimator\"\n model_family = ModelFamily.LINEAR_MODEL\n supported_problem_types = [\"binary\"]\n\n def __init__(self, parameters=None, component_obj=None, random_seed=0):\n est = MockEstimatorObj()\n super().__init__(\n parameters=parameters, component_obj=est, random_seed=random_seed\n )\n\n X, y = X_y_binary\n est = 
MockEstimator()\n    with pytest.raises(ComponentNotYetFittedError, match=\"You must fit\"):\n        est.predict(X)\n    with pytest.raises(ComponentNotYetFittedError, match=\"You must fit\"):\n        est.predict_proba(X)\n\n    est.fit(X, y)\n    est.predict(X)\n    est.predict_proba(X)\n\n\ndef test_transformer_check_for_fit(X_y_binary):\n    class MockTransformerObj:\n        def __init__(self):\n            pass\n\n        def fit(self, X, y):\n            return self\n\n        def transform(self, X, y=None):\n            return X\n\n        def fit_transform(self, X, y=None):\n            return X\n\n    class MockTransformer(Transformer):\n        name = \"Mock Transformer\"\n\n        def __init__(self, parameters=None, component_obj=None, random_seed=0):\n            transformer = MockTransformerObj()\n            super().__init__(\n                parameters=parameters,\n                component_obj=transformer,\n                random_seed=random_seed,\n            )\n\n        def transform(self, X, y=None):\n            return X\n\n        def inverse_transform(self, X, y=None):\n            return X, y\n\n    X, y = X_y_binary\n    trans = MockTransformer()\n    with pytest.raises(ComponentNotYetFittedError, match=\"You must fit\"):\n        trans.transform(X)\n\n    with pytest.raises(ComponentNotYetFittedError, match=\"You must fit\"):\n        trans.inverse_transform(X, y)\n\n    trans.fit(X, y)\n    trans.transform(X)\n    trans.fit_transform(X, y)\n    trans.inverse_transform(X, y)\n\n\ndef test_transformer_check_for_fit_with_overrides(X_y_binary):\n    class MockTransformerWithOverride(Transformer):\n        name = \"Mock Transformer\"\n\n        def fit(self, X, y):\n            return self\n\n        def transform(self, X, y=None):\n            df = pd.DataFrame()\n            df.ww.init()\n            return df\n\n    class MockTransformerWithOverrideSubclass(Transformer):\n        name = \"Mock Transformer Subclass\"\n\n        def fit(self, X, y):\n            return self\n\n        def transform(self, X, y=None):\n            df = pd.DataFrame()\n            df.ww.init()\n            return df\n\n    X, y = X_y_binary\n    transformer = MockTransformerWithOverride()\n    transformer_subclass = MockTransformerWithOverrideSubclass()\n\n    with pytest.raises(ComponentNotYetFittedError, match=\"You must fit\"):\n        transformer.transform(X)\n    with pytest.raises(ComponentNotYetFittedError, match=\"You must fit\"):\n        transformer_subclass.transform(X)\n\n    transformer.fit(X, y)\n    transformer.transform(X)\n    transformer_subclass.fit(X, y)\n    transformer_subclass.transform(X)\n\n\ndef test_all_transformers_needs_fitting():\n    for component_class in _all_transformers() + _all_estimators():\n        if component_class.__name__ in [\n            \"DropColumns\",\n            \"SelectColumns\",\n            \"SelectByType\",\n        ]:\n            assert not component_class.needs_fitting\n        else:\n            assert component_class.needs_fitting\n\n\ndef test_all_transformers_check_fit(X_y_binary, ts_data_binary):\n    for component_class in _all_transformers():\n        X, y = X_y_binary\n        if not component_class.needs_fitting:\n            continue\n\n        component = component_class()\n        # SMOTE will throw errors if we call it but cannot oversample\n        if \"Oversampler\" == component_class.name:\n            component = component_class(sampling_ratio=1)\n        elif component_class == TimeSeriesFeaturizer:\n            X, y = ts_data_binary\n            component = component_class(date_index=\"date\")\n\n        with pytest.raises(\n            ComponentNotYetFittedError, match=f\"You must fit {component_class.__name__}\"\n        ):\n            component.transform(X, y)\n\n        component.fit(X, y)\n        component.transform(X, y)\n\n        component = component_class()\n        if \"Oversampler\" == component_class.name:\n            component = component_class(sampling_ratio=1)\n        elif component_class == TimeSeriesFeaturizer:\n            component = component_class(date_index=\"date\")\n        component.fit_transform(X, y)\n        component.transform(X, y)\n\n\ndef test_all_estimators_check_fit(\n    X_y_binary, ts_data, test_estimator_needs_fitting_false, helper_functions\n):\n    estimators_to_check = [\n        estimator\n        for estimator in _all_estimators()\n        if estimator\n        not in [\n            StackedEnsembleClassifier,\n            StackedEnsembleRegressor,\n            TimeSeriesBaselineEstimator,\n            VowpalWabbitBinaryClassifier,\n            VowpalWabbitMulticlassClassifier,\n            VowpalWabbitRegressor,\n        ]\n    ] + [test_estimator_needs_fitting_false]\n    for component_class in estimators_to_check:\n        if not component_class.needs_fitting:\n            continue\n\n        if (\n            ProblemTypes.TIME_SERIES_REGRESSION\n            in component_class.supported_problem_types\n        ):\n            X, y = ts_data\n        else:\n            X, y = X_y_binary\n\n        component = helper_functions.safe_init_component_with_njobs_1(component_class)\n\n        with patch.object(component, \"_component_obj\") as mock_component_obj:\n            with patch.object(\n                mock_component_obj, \"predict\"\n            ) as mock_component_obj_predict:\n                mock_component_obj_predict.return_value = pd.Series([0] * len(y))\n\n                if \"Prophet\" in component.name:\n                    mock_component_obj_predict.return_value = {\n                        \"yhat\": pd.Series([0] * len(y)),\n                        \"ds\": pd.Series([0] * len(y)),\n                    }\n\n                with pytest.raises(\n                    ComponentNotYetFittedError,\n                    match=f\"You must fit {component_class.__name__}\",\n                ):\n                    component.predict(X)\n                if (\n                    ProblemTypes.BINARY in component.supported_problem_types\n                    or ProblemTypes.MULTICLASS in component.supported_problem_types\n                ):\n                    with pytest.raises(\n                        ComponentNotYetFittedError,\n                        match=f\"You must fit {component_class.__name__}\",\n                    ):\n                        component.predict_proba(X)\n\n                with pytest.raises(\n                    ComponentNotYetFittedError,\n                    match=f\"You must fit {component_class.__name__}\",\n                ):\n                    component.feature_importance\n\n                component.fit(X, y)\n\n                if (\n                    ProblemTypes.BINARY in component.supported_problem_types\n                    or ProblemTypes.MULTICLASS in component.supported_problem_types\n                ):\n                    component.predict_proba(X)\n                component.predict(X)\n                component.feature_importance\n\n\n@pytest.mark.parametrize(\"data_type\", [\"li\", \"np\", \"pd\", \"ww\"])\ndef test_all_transformers_check_fit_input_type(\n    data_type, X_y_binary, make_data_type, ts_data_binary\n):\n\n    for component_class in _all_transformers():\n        X, y = X_y_binary\n        X = make_data_type(data_type, X)\n        y = make_data_type(data_type, y)\n        kwargs = {}\n        if not component_class.needs_fitting or \"Oversampler\" in component_class.name:\n            # since SMOTE determines categorical columns through the logical type, it can only accept ww data\n            continue\n        if component_class == TimeSeriesFeaturizer:\n            X, y = ts_data_binary\n            kwargs = {\"date_index\": \"date\"}\n\n        component = component_class(**kwargs)\n        component.fit(X, y)\n\n\ndef test_no_fitting_required_components(\n    X_y_binary, test_estimator_needs_fitting_false, helper_functions\n):\n    X, y = X_y_binary\n    for component_class in all_components() + [test_estimator_needs_fitting_false]:\n        if not component_class.needs_fitting:\n            component = helper_functions.safe_init_component_with_njobs_1(\n                component_class\n            )\n            if issubclass(component_class, Estimator):\n                component.predict(X)\n            else:\n                component.transform(X, y)\n\n\ndef test_serialization(X_y_binary, ts_data, tmpdir, helper_functions):\n    path = os.path.join(str(tmpdir), \"component.pkl\")\n    requires_date_index = [ARIMARegressor, ProphetRegressor, TimeSeriesFeaturizer]\n    for component_class in all_components():\n        print(\"Testing serialization of component {}\".format(component_class.name))\n        component = helper_functions.safe_init_component_with_njobs_1(component_class)\n        if component_class in requires_date_index:\n            component = component_class(date_index=\"date\")\n            X, y = ts_data\n        else:\n            X, y = X_y_binary\n\n        component.fit(X, y)\n\n        for pickle_protocol in range(cloudpickle.DEFAULT_PROTOCOL + 1):\n            component.save(path, pickle_protocol=pickle_protocol)\n            loaded_component = ComponentBase.load(path)\n            assert component.parameters == loaded_component.parameters\n            assert component.describe(return_dict=True) == loaded_component.describe(\n                return_dict=True\n            )\n            if issubclass(component_class, Estimator) and not (\n                isinstance(\n                    component,\n                    (\n                        StackedEnsembleClassifier,\n                        StackedEnsembleRegressor,\n                        VowpalWabbitBinaryClassifier,\n                        VowpalWabbitMulticlassClassifier,\n                        VowpalWabbitRegressor,\n                    ),\n                )\n            ):\n                assert (\n                    component.feature_importance == loaded_component.feature_importance\n                ).all()\n\n\n@patch(\"cloudpickle.dump\")\ndef test_serialization_protocol(mock_cloudpickle_dump, tmpdir):\n    path = os.path.join(str(tmpdir), \"pipe.pkl\")\n    component = LogisticRegressionClassifier()\n\n    component.save(path)\n    assert len(mock_cloudpickle_dump.call_args_list) == 1\n    assert (\n        mock_cloudpickle_dump.call_args_list[0][1][\"protocol\"]\n        == cloudpickle.DEFAULT_PROTOCOL\n    )\n\n    mock_cloudpickle_dump.reset_mock()\n\n    component.save(path, pickle_protocol=42)\n    assert len(mock_cloudpickle_dump.call_args_list) == 1\n    assert mock_cloudpickle_dump.call_args_list[0][1][\"protocol\"] == 42\n\n\n@pytest.mark.parametrize(\"estimator_class\", _all_estimators())\ndef test_estimators_accept_all_kwargs(\n    estimator_class,\n    logistic_regression_binary_pipeline_class,\n    linear_regression_pipeline_class,\n):\n    estimator = estimator_class()\n    if estimator._component_obj is None:\n        pytest.skip(\n            f\"Skipping {estimator_class} because does not have component object.\"\n        )\n    if estimator_class.model_family == ModelFamily.ENSEMBLE:\n        params = estimator.parameters\n    elif estimator_class.model_family == ModelFamily.PROPHET:\n        params = estimator.get_params()\n    else:\n        params = estimator._component_obj.get_params()\n    if \"random_state\" in params:\n        del params[\"random_state\"]\n    estimator_class(**params)\n\n\ndef test_component_equality_different_classes():\n    # Tests that two classes which are equivalent are not equal\n    class MockComponent(ComponentBase):\n        name = \"Mock Component\"\n        model_family = ModelFamily.NONE\n        modifies_features = True\n        modifies_target = False\n        training_only = False\n\n    class MockComponentWithADifferentName(ComponentBase):\n        name = \"Mock Component\"\n        model_family = ModelFamily.NONE\n        modifies_features = True\n        modifies_target = False\n        training_only = False\n\n    assert MockComponent() != MockComponentWithADifferentName()\n\n\ndef test_component_equality_subclasses():\n    class MockComponent(ComponentBase):\n        name = \"Mock Component\"\n        model_family = ModelFamily.NONE\n        modifies_features = True\n        modifies_target = False\n        training_only = False\n\n    class MockEstimatorSubclass(MockComponent):\n        pass\n\n    assert MockComponent() != MockEstimatorSubclass()\n\n\ndef test_component_equality():\n    class MockComponent(ComponentBase):\n        name = \"Mock Component\"\n        model_family = ModelFamily.NONE\n        modifies_features = True\n        modifies_target = False\n        training_only = False\n\n        def __init__(self, param_1=0, param_2=0, random_seed=0, **kwargs):\n            parameters = {\"param_1\": param_1, \"param_2\": param_2}\n            parameters.update(kwargs)\n            super().__init__(\n                parameters=parameters, component_obj=None, random_seed=random_seed\n            )\n\n        def fit(self, X, y=None):\n            return self\n\n    # Test self-equality\n    mock_component = MockComponent()\n    assert mock_component == mock_component\n\n    # Test defaults\n    assert MockComponent() == MockComponent()\n\n    # Test random_state and random_seed\n    assert MockComponent(random_seed=10) == MockComponent(random_seed=10)\n    assert MockComponent(random_seed=10) != MockComponent(random_seed=0)\n\n    # Test parameters\n    assert MockComponent(1, 2) == MockComponent(1, 2)\n    assert MockComponent(1, 2) != MockComponent(1, 0)\n    assert MockComponent(0, 2) != MockComponent(1, 2)\n\n    # Test fitted equality\n    mock_component.fit(pd.DataFrame({}))\n    assert mock_component != MockComponent()\n\n\n@pytest.mark.parametrize(\"component_class\", all_components())\ndef test_component_equality_all_components(\n    component_class,\n    logistic_regression_binary_pipeline_class,\n    linear_regression_pipeline_class,\n):\n    component = component_class()\n    parameters = component.parameters\n    equal_component = component_class(**parameters)\n    assert component == equal_component\n\n\ndef test_component_equality_with_subclasses(test_classes):\n    MockComponent, MockEstimator, MockTransformer = test_classes\n    mock_component = MockComponent()\n    mock_estimator = MockEstimator()\n    mock_transformer = MockTransformer()\n    assert mock_component != mock_estimator\n    assert mock_component != mock_transformer\n    assert mock_estimator != mock_component\n    assert mock_estimator != mock_transformer\n    assert mock_transformer != mock_component\n    assert mock_transformer != mock_estimator\n\n\ndef test_mock_component_str(test_classes):\n    MockComponent, MockEstimator, MockTransformer = test_classes\n\n    assert str(MockComponent()) == \"Mock Component\"\n    assert str(MockEstimator()) == \"Mock Estimator\"\n    assert str(MockTransformer()) == \"Mock Transformer\"\n\n\ndef test_mock_component_repr():\n    component = MockFitComponent()\n    assert repr(component) == \"MockFitComponent(param_a=2, param_b=10)\"\n\n    component_with_params = MockFitComponent(param_a=29, param_b=None, random_seed=42)\n    assert repr(component_with_params) == \"MockFitComponent(param_a=29, param_b=None)\"\n\n    component_with_nan = MockFitComponent(param_a=np.nan, param_b=float(\"nan\"))\n    assert (\n        repr(component_with_nan) == \"MockFitComponent(param_a=np.nan, param_b=np.nan)\"\n    )\n\n    component_with_inf = MockFitComponent(param_a=np.inf, param_b=float(\"-inf\"))\n    assert (\n        repr(component_with_inf)\n        == \"MockFitComponent(param_a=float('inf'), param_b=float('-inf'))\"\n    )\n\n\n@pytest.mark.parametrize(\"component_class\", all_components())\ndef test_component_str(\n    component_class,\n    logistic_regression_binary_pipeline_class,\n    linear_regression_pipeline_class,\n):\n    component = component_class()\n    assert str(component) == component.name\n\n\n@pytest.mark.parametrize(\n    \"categorical\",\n    [\n        {\n            \"type\": Categorical([\"mean\", \"median\", \"mode\"]),\n            \"categories\": Categorical([\"blue\", \"green\"]),\n        },\n        {\"type\": [\"mean\", \"median\", \"mode\"], \"categories\": [\"blue\", \"green\"]},\n    ],\n)\ndef test_categorical_hyperparameters(X_y_binary, categorical):\n    X, y = X_y_binary\n\n    class MockEstimator:\n        def fit(self, X, y):\n            pass\n\n    class MockComponent(Estimator):\n        name = \"Mock Estimator\"\n        model_family = ModelFamily.LINEAR_MODEL\n        supported_problem_types = [\"binary\"]\n        hyperparameter_ranges = categorical\n\n        def __init__(self, agg_type, category=\"green\"):\n            parameters = {\"type\": agg_type, \"categories\": category}\n            est = MockEstimator()\n            super().__init__(parameters=parameters, component_obj=est, random_seed=0)\n\n    assert MockComponent(agg_type=\"mean\").fit(X, y)\n    assert MockComponent(agg_type=\"moat\", category=\"blue\").fit(X, y)\n\n\ndef test_generate_code_errors():\n    with pytest.raises(ValueError, match=\"Element must be a component instance\"):\n        generate_component_code(BinaryClassificationPipeline([RandomForestClassifier]))\n\n    with pytest.raises(ValueError, match=\"Element must be a component instance\"):\n        generate_component_code(LinearRegressor)\n\n    with pytest.raises(ValueError, match=\"Element must be a component instance\"):\n        generate_component_code(Imputer)\n\n    with pytest.raises(ValueError, match=\"Element must be a component instance\"):\n        generate_component_code(ComponentBase)\n\n\ndef test_generate_code():\n    expected_code = (\n        \"from evalml.pipelines.components.estimators.classifiers.logistic_regression_classifier import LogisticRegressionClassifier\"\n        \"\\n\\nlogisticRegressionClassifier = LogisticRegressionClassifier(**{'penalty': 'l2', 'C': 1.0, 'n_jobs': -1, 'multi_class': 'auto', 'solver': 'lbfgs'})\"\n    )\n    component_code = generate_component_code(LogisticRegressionClassifier())\n    assert component_code == expected_code\n\n    expected_code = (\n        \"from evalml.pipelines.components.estimators.regressors.et_regressor import ExtraTreesRegressor\"\n        \"\\n\\nextraTreesRegressor = ExtraTreesRegressor(**{'n_estimators': 50, 'max_features': 'auto', 'max_depth': 6, 'min_samples_split': 2, 'min_weight_fraction_leaf': 0.0, 'n_jobs': -1})\"\n    )\n    component_code = generate_component_code(ExtraTreesRegressor(n_estimators=50))\n    assert component_code == expected_code\n\n    expected_code = (\n        \"from evalml.pipelines.components.transformers.imputers.imputer import Imputer\"\n        \"\\n\\nimputer = Imputer(**{'categorical_impute_strategy': 'most_frequent', 'numeric_impute_strategy': 'mean', 'categorical_fill_value': None, 'numeric_fill_value': None})\"\n    )\n    component_code = generate_component_code(Imputer())\n    assert component_code == expected_code\n\n\ndef test_generate_code_custom(test_classes):\n    MockComponent, MockEstimator, MockTransformer = test_classes\n\n    expected_code = \"mockComponent = MockComponent(**{})\"\n    component_code = generate_component_code(MockComponent())\n    assert component_code == expected_code\n\n    expected_code = \"mockEstimator = MockEstimator(**{})\"\n    component_code = generate_component_code(MockEstimator())\n    assert component_code == expected_code\n\n    expected_code = \"mockTransformer = MockTransformer(**{})\"\n    component_code = generate_component_code(MockTransformer())\n    assert component_code == expected_code\n\n\n@pytest.mark.parametrize(\"transformer_class\", _all_transformers())\n@pytest.mark.parametrize(\"use_custom_index\", [True, False])\ndef test_transformer_fit_and_transform_respect_custom_indices(\n    use_custom_index, transformer_class, X_y_binary, ts_data_binary\n):\n    check_names = True\n    if transformer_class == DFSTransformer:\n        check_names = False\n        if use_custom_index:\n            pytest.skip(\"The DFSTransformer changes the index so we skip it.\")\n    if transformer_class == PolynomialDetrender:\n        pytest.skip(\n            \"Skipping PolynomialDetrender because we test that it respects custom indices in \"\n            \"test_polynomial_detrender.py\"\n        )\n\n    X, y = X_y_binary\n\n    kwargs = {}\n    if transformer_class == TimeSeriesFeaturizer:\n        kwargs.update({\"date_index\": \"date\"})\n        X, y = ts_data_binary\n\n    X = pd.DataFrame(X)\n    y = pd.Series(y)\n\n    if use_custom_index:\n        custom_index = range(100, 100 + X.shape[0])\n        X.index = custom_index\n        y.index = custom_index\n\n    X_original_index = X.index.copy()\n    y_original_index = y.index.copy()\n\n    transformer = transformer_class(**kwargs)\n\n    transformer.fit(X, y)\n    pd.testing.assert_index_equal(X.index, X_original_index)\n    pd.testing.assert_index_equal(y.index, y_original_index)\n\n    if isinstance(transformer, BaseSampler):\n        return\n    elif transformer_class.modifies_target:\n        X_t, y_t = transformer.transform(X, y)\n        pd.testing.assert_index_equal(\n            y_t.index, y_original_index, check_names=check_names\n        )\n    else:\n        X_t = transformer.transform(X, y)\n        pd.testing.assert_index_equal(\n            y.index, y_original_index, check_names=check_names\n        )\n\n    if hasattr(transformer_class, \"inverse_transform\"):\n        y_inv = transformer.inverse_transform(y)\n        pd.testing.assert_index_equal(\n            y_inv.index, y_original_index, check_names=check_names\n        )\n    pd.testing.assert_index_equal(X_t.index, X_original_index, check_names=check_names)\n\n\n@pytest.mark.parametrize(\"estimator_class\", _all_estimators())\n@pytest.mark.parametrize(\"use_custom_index\", [True, False])\ndef test_estimator_fit_respects_custom_indices(\n    use_custom_index,\n    estimator_class,\n    X_y_binary,\n    X_y_regression,\n    ts_data,\n    helper_functions,\n):\n\n    input_pipelines = []\n    supported_problem_types = estimator_class.supported_problem_types\n\n    ts_problem = False\n    if ProblemTypes.REGRESSION in supported_problem_types:\n        X, y = X_y_regression\n    elif ProblemTypes.TIME_SERIES_REGRESSION in supported_problem_types:\n        X, y = ts_data\n        ts_problem = True\n    else:\n        X, y = X_y_binary\n\n    X = pd.DataFrame(X)\n    y = pd.Series(y)\n\n    if use_custom_index and ts_problem:\n        X.index = pd.date_range(\"2020-10-01\", \"2020-10-31\")\n        y.index = pd.date_range(\"2020-10-01\", \"2020-10-31\")\n    elif use_custom_index and not ts_problem:\n        custom_index = range(100, 100 + X.shape[0])\n        X.index = custom_index\n        y.index = custom_index\n\n    X_original_index = X.index.copy()\n    y_original_index = y.index.copy()\n\n    if input_pipelines:\n        estimator = estimator_class(n_jobs=1, input_pipelines=input_pipelines)\n    else:\n        estimator = helper_functions.safe_init_component_with_njobs_1(estimator_class)\n\n    estimator.fit(X, y)\n    pd.testing.assert_index_equal(X.index, X_original_index)\n    pd.testing.assert_index_equal(y.index, y_original_index)\n\n\ndef test_component_modifies_feature_or_target():\n    for component_class in all_components():\n        if (\n            issubclass(component_class, BaseSampler)\n            or hasattr(component_class, \"inverse_transform\")\n            or component_class in [TargetImputer, DropRowsTransformer]\n        ):\n            assert component_class.modifies_target\n        else:\n            assert not component_class.modifies_target\n        if hasattr(component_class, \"inverse_transform\") or component_class in [\n            TargetImputer\n        ]:\n            assert not component_class.modifies_features\n        else:\n            assert component_class.modifies_features\n\n\ndef test_component_parameters_supported_by_list_API():\n    for component_class in all_components():\n        if (\n            issubclass(component_class, BaseSampler)\n            or hasattr(component_class, \"inverse_transform\")\n            or component_class in [TargetImputer, DropRowsTransformer]\n        ):\n            assert not component_class._supported_by_list_API\n        else:\n            assert component_class._supported_by_list_API\n"
] | [
[
"numpy.array_equal",
"pandas.DataFrame",
"pandas.date_range",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.array",
"pandas.Series",
"pandas.DataFrame",
"pandas.date_range"
],
[
"pandas.Series",
"pandas.RangeIndex",
"pandas.DataFrame",
"numpy.testing.assert_almost_equal",
"pandas.date_range",
"pandas.testing.assert_index_equal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
choderalab/saltswap | [
"d30804beb158960a62f94182c694df6dd9130fb8"
] | [
"development/performance/langevin_error_example.py"
] | [
"import numpy as np\nfrom simtk import openmm, unit\nfrom simtk.openmm import app\nfrom openmmtools.testsystems import WaterBox\nfrom openmmtools.integrators import ExternalPerturbationLangevinIntegrator\nprint('OpenMM version: ', openmm.version.full_version)\n\n# Using one CPU thread\nimport os\nos.environ['OPENMM_CPU_THREADS'] = '1'\n\n# Long range method\nnonbonded_method = 'CutoffPeriodic'\n\n# Creating a waterbox\nwbox = WaterBox(box_edge=21.0*unit.angstrom , nonbondedMethod=getattr(app, nonbonded_method))\nwbox.system.addForce(openmm.MonteCarloBarostat(1*unit.atmospheres, 300*unit.kelvin))\n\n# Extracting the nonbonded force\nnon_bonded_force = wbox.system.getForce(2)\n\n# The integrator to perform the equilibrium dynamics\nintegrator = ExternalPerturbationLangevinIntegrator(temperature=300*unit.kelvin, collision_rate=50.0 / unit.picosecond, timestep=1.0 * unit.femtosecond)\n\n# Creating the context\nplatform = openmm.Platform.getPlatformByName('CPU')\ncontext = openmm.Context(wbox.system, integrator, platform)\ncontext.setPositions(wbox.positions)\n\n# Running some equilibrium dynamics\nintegrator.step(100)\n\n# The number of NCMC type iterations and NCMC steps per iteration.\nniterations = 20\nncmc_steps = 10\n\ninternal_work = np.zeros(niterations)\nexternal_work = np.zeros(niterations)\n\n# Whether to call updateParametersInContext. If True, then assertion below will fail.\nupdate_parameters = True\n\n# A model of NCMC without perturbation but using updateParametersInContext\nfor i in range(niterations):\n #integrator.reset_protocol_work()\n #integrator.setGlobalVariableByName('first_step',0)\n integrator.setGlobalVariableByName('protocol_work',0)\n for s in range(ncmc_steps):\n integrator.step(1)\n initial_external_energy = context.getState(getEnergy=True).getPotentialEnergy() / unit.kilojoule_per_mole\n ###---- Not perturbing the system but updating parameters anyway----###\n if update_parameters:\n non_bonded_force.updateParametersInContext(context)\n final_external_energy = context.getState(getEnergy=True).getPotentialEnergy() / unit.kilojoule_per_mole\n integrator.step(1)\n internal_work[i] = integrator.getGlobalVariableByName('protocol_work')\n external_work[i] = final_external_energy - initial_external_energy\nassert np.all(np.abs(internal_work - external_work) < 1E-5)\n"
] | [
[
"numpy.zeros",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |