repo_name: llv22/jax-macOS-cuda
hexsha: [ "fd1f8e6d612ae3eee24cfa5ee19e8d16ed89aecb" ]
file_path: [ "jax/_src/numpy/lax_numpy.py" ]
code:
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pytype: skip-file\n\"\"\"\nImplements the NumPy API, using the primitives in :mod:`jax.lax`.\n\nNumPy operations are implemented in Python in terms of the primitive operations\nin :mod:`jax.lax`. Since NumPy operations are not primitive and instead are\nimplemented in terms of :mod:`jax.lax` operations, we do not need to define\ntransformation rules such as gradient or batching rules. Instead,\ntransformations for NumPy primitives can be derived from the transformation\nrules for the underlying :code:`lax` primitives.\n\"\"\"\n\nimport builtins\nimport collections\nimport operator\nimport os\nimport types\nfrom typing import Sequence, FrozenSet, Tuple, Union, Iterable\nfrom textwrap import dedent as _dedent\nimport warnings\n\nimport numpy as np\nimport opt_einsum\n\nimport jax\nfrom jax import jit, custom_jvp\nfrom .vectorize import vectorize\nfrom .util import _wraps\nfrom jax import core\nfrom jax import dtypes\nfrom jax.abstract_arrays import UnshapedArray, ShapedArray, ConcreteArray, canonicalize_shape\nfrom jax.config import flags, config\nfrom jax.interpreters.xla import DeviceArray\nfrom jax.interpreters.masking import Poly\nfrom jax import lax\nfrom jax._src.lax.lax import _device_put_raw\nfrom jax import ops\nfrom jax.util import (partial, unzip2, prod as _prod,\n subvals, safe_zip, canonicalize_axis as _canonicalize_axis)\nfrom jax.tree_util import tree_leaves, tree_flatten\n\nFLAGS = flags.FLAGS\nflags.DEFINE_enum(\n 'jax_numpy_rank_promotion', os.getenv('JAX_NUMPY_RANK_PROMOTION', 'allow'),\n enum_values=['allow', 'warn', 'raise'],\n help=\n 'Control NumPy-style automatic rank promotion broadcasting '\n '(\"allow\", \"warn\", or \"raise\").')\n\nnewaxis = None\n\n# Common docstring additions:\n\n_PRECISION_DOC = \"\"\"\\\nIn addition to the original NumPy arguments listed below, also supports\n``precision`` for extra control over matrix-multiplication precision\non supported devices. ``precision`` may be set to ``None``, which means\ndefault precision for the backend, a ``lax.Precision`` enum value\n(``Precision.DEFAULT``, ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple\nof two ``lax.Precision`` enums indicating separate precision for each argument.\n\"\"\"\n\n# We replace some builtin names to follow Numpy's API, so we capture here.\n_abs = builtins.abs\n_all = builtins.all\n_any = builtins.any\n_max = builtins.max\n_min = builtins.min\n_sum = builtins.sum\n_divmod = builtins.divmod\n\n# NumPy constants\n\npi = np.pi\ne = np.e\neuler_gamma = np.euler_gamma\ninf = np.inf\nNINF = np.NINF\nPZERO = np.PZERO\nNZERO = np.NZERO\nnan = np.nan\n\n# And some numpy utility functions\nset_printoptions = np.set_printoptions\n\n# We want isinstance(x, np.ndarray) checks in user code to work with the our\n# array-like types, including DeviceArray and UnshapedArray (i.e. the abstract\n# array base class). 
We can override the isinstance behavior directly, without\n# having the complexity of multiple inheritance on those classes, by defining\n# the ndarray class to have a metaclass with special __instancecheck__ behavior.\n_arraylike_types = (np.ndarray, UnshapedArray, DeviceArray)\n\nclass _ArrayMeta(type(np.ndarray)): # type: ignore\n \"\"\"Metaclass for overriding ndarray isinstance checks.\"\"\"\n\n def __instancecheck__(self, instance):\n try:\n return isinstance(instance.aval, _arraylike_types)\n except AttributeError:\n return isinstance(instance, _arraylike_types)\n\nclass ndarray(np.ndarray, metaclass=_ArrayMeta):\n dtype: np.dtype\n shape: Tuple[int, ...]\n size: int\n\n def __init__(shape, dtype=None, buffer=None, offset=0, strides=None,\n order=None):\n raise TypeError(\"jax.numpy.ndarray() should not be instantiated explicitly.\"\n \" Use jax.numpy.array, or jax.numpy.zeros instead.\")\n\n\niscomplexobj = np.iscomplexobj\n\nshape = _shape = np.shape\nndim = _ndim = np.ndim\nsize = np.size\n_dtype = dtypes.result_type\n\n# At present JAX doesn't have a reason to distinguish between scalars and arrays\n# in its object system. Further, we want JAX scalars to have the same type\n# promotion behaviors as JAX arrays. Rather than introducing a new type of JAX\n# scalar object with JAX promotion behaviors, instead we make the JAX scalar\n# types return JAX arrays when instantiated.\n\nclass _ScalarMeta(type):\n def __hash__(self):\n return hash(self.dtype.type)\n\n def __eq__(self, other):\n return id(self) == id(other) or self.dtype.type == other\n\n def __ne__(self, other):\n return not (self == other)\n\n def __call__(self, x):\n return array(x, dtype=self.dtype)\n\ndef _make_scalar_type(np_scalar_type):\n return _ScalarMeta(np_scalar_type.__name__, (object,),\n {\"dtype\": np.dtype(np_scalar_type)})\n\nbool_ = _make_scalar_type(np.bool_)\nuint8 = _make_scalar_type(np.uint8)\nuint16 = _make_scalar_type(np.uint16)\nuint32 = _make_scalar_type(np.uint32)\nuint64 = _make_scalar_type(np.uint64)\nint8 = _make_scalar_type(np.int8)\nint16 = _make_scalar_type(np.int16)\nint32 = _make_scalar_type(np.int32)\nint64 = _make_scalar_type(np.int64)\nbfloat16 = _make_scalar_type(dtypes.bfloat16)\nfloat16 = _make_scalar_type(np.float16)\nfloat32 = single = _make_scalar_type(np.float32)\nfloat64 = double = _make_scalar_type(np.float64)\ncomplex64 = csingle = _make_scalar_type(np.complex64)\ncomplex128 = cdouble = _make_scalar_type(np.complex128)\n\nint_ = int32 if dtypes.int_ == np.int32 else int64\nfloat_ = float32 if dtypes.float_ == np.float32 else float64\ncomplex_ = complex64 if dtypes.complex_ == np.complex64 else complex128\n\nnumber = np.number\ninexact = np.inexact\ncomplexfloating = np.complexfloating\nfloating = np.floating\ninteger = np.integer\nsignedinteger = np.signedinteger\nunsignedinteger = np.unsignedinteger\n\nflexible = np.flexible\ncharacter = np.character\nobject_ = np.object_\n\niinfo = dtypes.iinfo\n\ndtype = np.dtype\ncan_cast = dtypes.can_cast\nissubsctype = dtypes.issubsctype\npromote_types = dtypes.promote_types\n\nComplexWarning = np.ComplexWarning\n\narray_str = np.array_str\narray_repr = np.array_repr\n\nsave = np.save\nsavez = np.savez\nload = np.load\n\n\n### utility functions\n\n_DEFAULT_TYPEMAP = {\n np.bool_: bool_,\n np.int_: int_,\n np.float_: float_,\n np.complex_: complex_\n}\n\n_INT_DTYPES = {\n 16: np.int16,\n 32: np.int32,\n 64: np.int64,\n}\n\ndef _np_array(obj, dtype=None, **kwargs):\n \"\"\"Return a properly-typed numpy array.\n\n `_np_array(obj, **kwds)` 
is equivalent to `np.array(obj, **kwds)`, with the\n exception that when obj.dtype is not defined and dtype is not specified, it\n uses Jax's default dtypes.\n \"\"\"\n arr = np.array(obj, dtype=dtype, **kwargs)\n obj_dtype = getattr(obj, 'dtype', None)\n arr_dtype = np.dtype(arr.dtype).type\n if dtype is None and obj_dtype is None and arr_dtype in _DEFAULT_TYPEMAP:\n arr = arr.astype(_DEFAULT_TYPEMAP[arr_dtype])\n return arr\n\n_np_asarray = partial(_np_array, copy=False)\n\ndef _promote_shapes(fun_name, *args):\n \"\"\"Prepend implicit leading singleton dimensions for Numpy broadcasting.\"\"\"\n if len(args) < 2:\n return args\n else:\n shapes = [shape(arg) for arg in args]\n nonscalar_ranks = [len(shp) for shp in shapes if shp]\n if not nonscalar_ranks or len(set(nonscalar_ranks)) == 1:\n return args\n else:\n if FLAGS.jax_numpy_rank_promotion != \"allow\":\n _rank_promotion_warning_or_error(fun_name, shapes)\n result_rank = len(lax.broadcast_shapes(*shapes))\n return [broadcast_to(arg, (1,) * (result_rank - len(shp)) + shp)\n for arg, shp in zip(args, shapes)]\n\ndef _rank_promotion_warning_or_error(fun_name, shapes):\n if FLAGS.jax_numpy_rank_promotion == \"warn\":\n msg = (\"Following NumPy automatic rank promotion for {} on shapes {}. \"\n \"Set the jax_numpy_rank_promotion config option to 'allow' to \"\n \"disable this warning; for more information, see \"\n \"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.\")\n warnings.warn(msg.format(fun_name, ' '.join(map(str, shapes))))\n elif FLAGS.jax_numpy_rank_promotion == \"raise\":\n msg = (\"Operands could not be broadcast together for {} on shapes {} \"\n \"and with the config option jax_numpy_rank_promotion='raise'. \"\n \"For more information, see \"\n \"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.\")\n raise ValueError(msg.format(fun_name, ' '.join(map(str, shapes))))\n\ndef _promote_dtypes(*args):\n \"\"\"Convenience function to apply Numpy argument dtype promotion.\"\"\"\n # TODO(dougalm,mattjj): This is a performance bottleneck. 
Consider memoizing.\n if len(args) < 2:\n return args\n else:\n to_dtype = result_type(*args)\n return [lax.convert_element_type(x, to_dtype) for x in args]\n\ndef _promote_dtypes_inexact(*args):\n \"\"\"Convenience function to apply Numpy argument dtype promotion.\n\n Promotes arguments to an inexact type.\"\"\"\n to_dtype = _to_inexact_dtype(result_type(*args))\n return [lax.convert_element_type(x, to_dtype) for x in args]\n\ndef _to_inexact_dtype(dtype):\n \"\"\"Promotes a dtype into an inexact dtype, if it is not already one.\"\"\"\n return dtype if issubdtype(dtype, inexact) else promote_types(dtype, float_)\n\ndef _complex_elem_type(dtype):\n \"\"\"Returns the float type of the real/imaginary parts of a complex dtype.\"\"\"\n return np.abs(np.zeros((), dtype)).dtype\n\ndef _result_dtype(op, *args):\n \"\"\"Compute result dtype of applying op to arguments with given dtypes.\"\"\"\n args = [np.ones((0,) * ndim(arg), _dtype(arg)) for arg in args]\n return _dtype(op(*args))\n\n\ndef _arraylike(x): return isinstance(x, ndarray) or isscalar(x)\ndef _check_arraylike(fun_name, *args):\n \"\"\"Check if all args fit JAX's definition of arraylike (ndarray or scalar).\"\"\"\n if _any(not _arraylike(arg) for arg in args):\n pos, arg = next((i, arg) for i, arg in enumerate(args)\n if not _arraylike(arg))\n msg = \"{} requires ndarray or scalar arguments, got {} at position {}.\"\n raise TypeError(msg.format(fun_name, type(arg), pos))\n\ndef _check_no_float0s(fun_name, *args):\n \"\"\"Check if none of the args have dtype float0.\"\"\"\n if _any(dtypes.dtype(arg) is dtypes.float0 for arg in args):\n raise TypeError(\n f\"Called {fun_name} with a float0 array. \"\n \"float0s do not support any operations by design because they \"\n \"are not compatible with non-trivial vector spaces. No implicit dtype \"\n \"conversion is done. You can use np.zeros_like(arr, dtype=np.float) \"\n \"to cast a float0 array to a regular zeros array. 
\\n\"\n \"If you didn't expect to get a float0 you might have accidentally \"\n \"taken a gradient with respect to an integer argument.\")\n\ndef _promote_args(fun_name, *args):\n \"\"\"Convenience function to apply Numpy argument shape and dtype promotion.\"\"\"\n _check_arraylike(fun_name, *args)\n _check_no_float0s(fun_name, *args)\n return _promote_shapes(fun_name, *_promote_dtypes(*args))\n\ndef _promote_args_inexact(fun_name, *args):\n \"\"\"Convenience function to apply Numpy argument shape and dtype promotion.\n\n Promotes non-inexact types to an inexact type.\"\"\"\n _check_arraylike(fun_name, *args)\n _check_no_float0s(fun_name, *args)\n return _promote_shapes(fun_name, *_promote_dtypes_inexact(*args))\n\ndef _constant_like(x, const):\n return np.array(const, dtype=_dtype(x))\n\n### implementations of numpy functions in terms of lax\n\n@_wraps(np.fmin)\ndef fmin(x1, x2):\n return where((x1 < x2) | isnan(x2), x1, x2)\n\n@_wraps(np.fmax)\ndef fmax(x1, x2):\n return where((x1 > x2) | isnan(x2), x1, x2)\n\n@_wraps(np.finfo)\ndef finfo(dtype):\n return dtypes.finfo(dtype)\n\n@_wraps(np.issubdtype)\ndef issubdtype(arg1, arg2):\n return dtypes.issubdtype(arg1, arg2)\n\n@_wraps(np.isscalar)\ndef isscalar(element):\n return dtypes.is_python_scalar(element) or np.isscalar(element)\n\niterable = np.iterable\n\n@_wraps(np.result_type)\ndef result_type(*args):\n return dtypes.result_type(*args)\n\ndef _one_to_one_unop(numpy_fn, lax_fn, promote_to_inexact=False, lax_doc=False):\n if promote_to_inexact:\n fn = lambda x: lax_fn(*_promote_args_inexact(numpy_fn.__name__, x))\n else:\n fn = lambda x: lax_fn(*_promote_args(numpy_fn.__name__, x))\n if lax_doc:\n doc = _dedent('\\n\\n'.join(lax_fn.__doc__.split('\\n\\n')[1:])).strip()\n return _wraps(numpy_fn, lax_description=doc)(fn)\n else:\n return _wraps(numpy_fn)(fn)\n\ndef _one_to_one_binop(numpy_fn, lax_fn, promote_to_inexact=False, lax_doc=False):\n if promote_to_inexact:\n fn = lambda x1, x2: lax_fn(*_promote_args_inexact(numpy_fn.__name__, x1, x2))\n else:\n fn = lambda x1, x2: lax_fn(*_promote_args(numpy_fn.__name__, x1, x2))\n if lax_doc:\n doc = _dedent('\\n\\n'.join(lax_fn.__doc__.split('\\n\\n')[1:])).strip()\n return _wraps(numpy_fn, lax_description=doc)(fn)\n else:\n return _wraps(numpy_fn)(fn)\n\ndef _maybe_bool_binop(numpy_fn, lax_fn, bool_lax_fn, lax_doc=False):\n def fn(x1, x2):\n x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)\n return lax_fn(x1, x2) if x1.dtype != bool_ else bool_lax_fn(x1, x2)\n return _wraps(numpy_fn)(fn)\n if lax_doc:\n doc = _dedent('\\n\\n'.join(lax_fn.__doc__.split('\\n\\n')[1:])).strip()\n return _wraps(numpy_fn, lax_description=doc)(fn)\n else:\n return _wraps(numpy_fn)(fn)\n\nfabs = _one_to_one_unop(np.fabs, lax.abs, True)\nbitwise_not = _one_to_one_unop(np.bitwise_not, lax.bitwise_not)\ninvert = _one_to_one_unop(np.invert, lax.bitwise_not)\nnegative = _one_to_one_unop(np.negative, lax.neg)\npositive = _one_to_one_unop(np.positive, lambda x: x)\n\nfloor = _one_to_one_unop(np.floor, lax.floor, True)\nceil = _one_to_one_unop(np.ceil, lax.ceil, True)\nexp = _one_to_one_unop(np.exp, lax.exp, True)\nlog = _one_to_one_unop(np.log, lax.log, True)\nexpm1 = _one_to_one_unop(np.expm1, lax.expm1, True)\nlog1p = _one_to_one_unop(np.log1p, lax.log1p, True)\nsin = _one_to_one_unop(np.sin, lax.sin, True)\ncos = _one_to_one_unop(np.cos, lax.cos, True)\ntan = _one_to_one_unop(np.tan, lax.tan, True)\narcsin = _one_to_one_unop(np.arcsin, lax.asin, True)\narccos = _one_to_one_unop(np.arccos, lax.acos, True)\narctan = 
_one_to_one_unop(np.arctan, lax.atan, True)\nsinh = _one_to_one_unop(np.sinh, lax.sinh, True)\ncosh = _one_to_one_unop(np.cosh, lax.cosh, True)\narcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)\ntanh = _one_to_one_unop(np.tanh, lax.tanh, True)\narcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)\narccosh = _one_to_one_unop(np.arccosh, lax.acosh, True)\narctanh = _one_to_one_unop(np.arctanh, lax.atanh, True)\nsqrt = _one_to_one_unop(np.sqrt, lax.sqrt, True)\n\n\nadd = _maybe_bool_binop(np.add, lax.add, lax.bitwise_or)\nbitwise_and = _one_to_one_binop(np.bitwise_and, lax.bitwise_and)\nbitwise_or = _one_to_one_binop(np.bitwise_or, lax.bitwise_or)\nbitwise_xor = _one_to_one_binop(np.bitwise_xor, lax.bitwise_xor)\nleft_shift = _one_to_one_binop(np.left_shift, lax.shift_left)\nequal = _one_to_one_binop(np.equal, lax.eq)\nmultiply = _maybe_bool_binop(np.multiply, lax.mul, lax.bitwise_and)\nnot_equal = _one_to_one_binop(np.not_equal, lax.ne)\nsubtract = _one_to_one_binop(np.subtract, lax.sub)\narctan2 = _one_to_one_binop(np.arctan2, lax.atan2, True)\nminimum = _one_to_one_binop(np.minimum, lax.min)\nmaximum = _one_to_one_binop(np.maximum, lax.max)\nfloat_power = _one_to_one_binop(np.float_power, lax.pow, True)\nnextafter = _one_to_one_binop(np.nextafter, lax.nextafter, True, True)\n\n\ndef _comparison_op(numpy_fn, lax_fn):\n def fn(x1, x2):\n x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)\n # Comparison on complex types are defined as a lexicographic ordering on\n # the (real, imag) pair.\n if issubdtype(_dtype(x1), complexfloating):\n rx = lax.real(x1)\n ry = lax.real(x2)\n return lax.select(lax.eq(rx, ry), lax_fn(lax.imag(x1), lax.imag(x2)),\n lax_fn(rx, ry))\n return lax_fn(x1, x2)\n return _wraps(numpy_fn)(fn)\n\ngreater_equal = _comparison_op(np.greater_equal, lax.ge)\ngreater = _comparison_op(np.greater, lax.gt)\nless_equal = _comparison_op(np.less_equal, lax.le)\nless = _comparison_op(np.less, lax.lt)\n\n\ndef _logical_op(np_op, bitwise_op):\n @_wraps(np_op, update_doc=False)\n def op(*args):\n zero = lambda x: lax.full_like(x, shape=(), fill_value=0)\n args = (x if issubdtype(_dtype(x), bool_) else lax.ne(x, zero(x))\n for x in args)\n return bitwise_op(*_promote_args(np_op.__name__, *args))\n return op\n\nlogical_and = _logical_op(np.logical_and, lax.bitwise_and)\nlogical_not = _logical_op(np.logical_not, lax.bitwise_not)\nlogical_or = _logical_op(np.logical_or, lax.bitwise_or)\nlogical_xor = _logical_op(np.logical_xor, lax.bitwise_xor)\n\n\n@_wraps(np.right_shift)\ndef right_shift(x1, x2):\n x1, x2 = _promote_args(np.right_shift.__name__, x1, x2)\n lax_fn = lax.shift_right_logical if \\\n np.issubdtype(x1.dtype, np.unsignedinteger) else lax.shift_right_arithmetic\n return lax_fn(x1, x2)\n\n\n@_wraps(np.absolute)\ndef absolute(x):\n _check_arraylike('absolute', x)\n dt = _dtype(x)\n return x if dt == bool_ or issubdtype(dt, unsignedinteger) else lax.abs(x)\nabs = _wraps(np.abs)(absolute)\n\n\n@_wraps(np.rint)\ndef rint(x):\n _check_arraylike('rint', x)\n dtype = _dtype(x)\n if issubdtype(dtype, integer):\n return lax.convert_element_type(x, float_)\n if issubdtype(dtype, complexfloating):\n return lax.complex(rint(lax.real(x)), rint(lax.imag(x)))\n return _round_to_nearest_even(x)\n\n\n@_wraps(np.sign)\ndef sign(x):\n _check_arraylike('sign', x)\n dtype = _dtype(x)\n if issubdtype(dtype, complexfloating):\n re = lax.real(x)\n return lax.complex(\n lax.sign(where(re != 0, re, lax.imag(x))), _constant_like(re, 0))\n return lax.sign(x)\n\n\n@_wraps(np.copysign)\ndef 
copysign(x1, x2):\n x1, x2 = _promote_args_inexact(\"copysign\", x1, x2)\n if issubdtype(_dtype(x1), complexfloating):\n raise TypeError(\"copysign does not support complex-valued inputs\")\n return where(signbit(x2), -lax.abs(x1), lax.abs(x1))\n\n\n@_wraps(np.true_divide)\ndef true_divide(x1, x2):\n x1, x2 = _promote_args_inexact(\"true_divide\", x1, x2)\n return lax.div(x1, x2)\n\ndivide = true_divide\n\n@_wraps(np.floor_divide)\ndef floor_divide(x1, x2):\n x1, x2 = _promote_args(\"floor_divide\", x1, x2)\n dtype = _dtype(x1)\n if issubdtype(dtype, integer):\n quotient = lax.div(x1, x2)\n select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0)\n # TODO(mattjj): investigate why subtracting a scalar was causing promotion\n return where(select, quotient - np.array(1, _dtype(quotient)), quotient)\n elif issubdtype(dtype, complexfloating):\n x1r = lax.real(x1)\n x1i = lax.imag(x1)\n x2r = lax.real(x2)\n x2i = lax.imag(x2)\n which = lax.ge(lax.abs(x2r), lax.abs(x2i))\n rat1 = where(which, lax._const(x2i, 1), lax.div(x2r, x2i))\n rat2 = where(which, lax.div(x2i, x2r), lax._const(x2i, 1))\n out = lax.floor(lax.div(lax.add(lax.mul(x1r, rat1), lax.mul(x1i, rat2)),\n lax.add(lax.mul(x2r, rat1), lax.mul(x2i, rat2))))\n return lax.convert_element_type(out, dtype)\n else:\n return _float_divmod(x1, x2)[0]\n\n\n@_wraps(np.divmod)\ndef divmod(x1, x2):\n x1, x2 = _promote_args(\"divmod\", x1, x2)\n if issubdtype(_dtype(x1), integer):\n return floor_divide(x1, x2), remainder(x1, x2)\n else:\n return _float_divmod(x1, x2)\n\n\ndef _float_divmod(x1, x2):\n # see float_divmod in floatobject.c of CPython\n mod = lax.rem(x1, x2)\n div = lax.div(lax.sub(x1, mod), x2)\n\n ind = lax.bitwise_and(mod != 0, lax.sign(x2) != lax.sign(mod))\n mod = lax.select(ind, mod + x2, mod)\n div = lax.select(ind, div - _constant_like(div, 1), div)\n\n return lax.round(div), mod\n\n\n@_wraps(np.power)\ndef power(x1, x2):\n # Special case for small positive integer scalars: use binary exponentiation.\n # Using lax.pow may be imprecise for floating-point values; the goal of this\n # code path is to make sure we end up with a precise output for the common\n # pattern ``x ** 2`` or similar.\n if isinstance(x2, int):\n return lax.integer_pow(x1, x2)\n\n x1, x2 = _promote_args(\"power\", x1, x2)\n dtype = _dtype(x1)\n if not issubdtype(dtype, integer):\n return lax.pow(x1, x2)\n\n # Integer power => use binary exponentiation.\n\n # TODO(phawkins): add integer pow support to XLA.\n bits = 6 # Anything more would overflow for any x1 > 1\n acc = ones(shape(x1), dtype=dtype)\n for _ in range(bits):\n acc = where(lax.bitwise_and(x2, _constant_like(x2, 1)),\n lax.mul(acc, x1), acc)\n x1 = lax.mul(x1, x1)\n x2 = lax.shift_right_logical(x2, _constant_like(x2, 1))\n return acc\n\n\n@custom_jvp\n@_wraps(np.logaddexp)\ndef logaddexp(x1, x2):\n x1, x2 = _promote_shapes(\"logaddexp\", *_promote_dtypes_inexact(x1, x2))\n amax = lax.max(x1, x2)\n delta = lax.sub(x1, x2)\n return lax.select(isnan(delta),\n lax.add(x1, x2), # NaNs or infinities of the same sign.\n lax.add(amax, lax.log1p(lax.exp(-lax.abs(delta)))))\n\[email protected]\ndef _logaddexp_jvp(primals, tangents):\n x1, x2 = primals\n t1, t2 = tangents\n x1, x2, t1, t2 = broadcast_arrays(x1, x2, t1, t2)\n primal_out = logaddexp(x1, x2)\n tangent_out = (t1 * exp(_replace_inf(x1) - _replace_inf(primal_out)) +\n t2 * exp(_replace_inf(x2) - _replace_inf(primal_out)))\n return primal_out, tangent_out\n\ndef _replace_inf(x):\n return lax.select(isposinf(x), zeros_like(x), 
x)\n\n\n@custom_jvp\n@_wraps(np.logaddexp2)\ndef logaddexp2(x1, x2):\n x1, x2 = _promote_shapes(\"logaddexp2\", *_promote_dtypes_inexact(x1, x2))\n amax = lax.max(x1, x2)\n delta = lax.sub(x1, x2)\n return lax.select(isnan(delta),\n lax.add(x1, x2), # NaNs or infinities of the same sign.\n lax.add(amax, lax.div(lax.log1p(exp2(-lax.abs(delta))),\n _constant_like(x1, np.log(2)))))\[email protected]\ndef _logaddexp2_jvp(primals, tangents):\n x1, x2 = primals\n t1, t2 = tangents\n x1, x2, t1, t2 = broadcast_arrays(x1, x2, t1, t2)\n primal_out = logaddexp2(x1, x2)\n tangent_out = (t1 * 2 ** (_replace_inf(x1) - _replace_inf(primal_out)) +\n t2 * 2 ** (_replace_inf(x2) - _replace_inf(primal_out)))\n return primal_out, tangent_out\n\n\n@_wraps(np.log2)\ndef log2(x):\n x, = _promote_dtypes_inexact(x)\n return lax.div(lax.log(x), lax.log(_constant_like(x, 2)))\n\n\n@_wraps(np.log10)\ndef log10(x):\n x, = _promote_dtypes_inexact(x)\n return lax.div(lax.log(x), lax.log(_constant_like(x, 10)))\n\n\n@_wraps(np.exp2)\ndef exp2(x):\n x, = _promote_dtypes_inexact(x)\n return lax.exp(lax.mul(lax.log(_constant_like(x, 2)), x))\n\n@_wraps(np.signbit)\ndef signbit(x):\n x, = _promote_shapes(\"signbit\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, integer):\n return lax.lt(x, _constant_like(x, 0))\n elif issubdtype(dtype, bool_):\n return full_like(x, False, dtype=bool_)\n elif not issubdtype(dtype, floating):\n raise ValueError(\n \"jax.numpy.signbit is not well defined for %s\" % dtype)\n\n # TPU supports BF16 but not S16 types, so as a workaround, convert BF16 to\n # F32.\n if dtype == bfloat16:\n dtype = float32\n x = lax.convert_element_type(x, float32)\n\n info = finfo(dtype)\n if info.bits not in _INT_DTYPES:\n raise NotImplementedError(\n \"jax.numpy.signbit only supports 16, 32, and 64-bit types.\")\n int_type = _INT_DTYPES[info.bits]\n x = lax.bitcast_convert_type(x, int_type)\n return lax.convert_element_type(x >> (info.nexp + info.nmant), np.bool_)\n\n\n@_wraps(np.trapz)\ndef trapz(y, x=None, dx=1.0, axis=-1):\n _check_arraylike('trapz', y)\n y = moveaxis(y, axis, -1)\n if x is not None:\n if ndim(x) == 1:\n dx = diff(x)\n else:\n dx = moveaxis(diff(x, axis=axis), axis, -1)\n return 0.5 * (dx * (y[..., 1:] + y[..., :-1])).sum(-1)\n\n\n@_wraps(np.trunc)\ndef trunc(x):\n _check_arraylike('trunc', x)\n return where(lax.lt(x, lax._const(x, 0)), ceil(x), floor(x))\n\n\ndef _conv(x, y, mode, op, precision):\n if issubdtype(_dtype(x), complexfloating) or issubdtype(_dtype(y), complexfloating):\n raise NotImplementedError(f\"{op}() does not support complex inputs\")\n if ndim(x) != 1 or ndim(y) != 1:\n raise ValueError(f\"{op}() only support 1-dimensional inputs.\")\n x, y = _promote_dtypes_inexact(x, y)\n if len(x) == 0 or len(y) == 0:\n raise ValueError(f\"{op}: inputs cannot be empty, got shapes {x.shape} and {y.shape}.\")\n\n out_order = slice(None)\n if len(x) < len(y):\n x, y = y, x\n if op == \"correlate\":\n out_order = slice(None, None, -1)\n if op == 'convolve':\n y = y[::-1]\n\n if mode == 'valid':\n padding = [(0, 0)]\n elif mode == 'same':\n padding = [(y.shape[0] // 2, y.shape[0] - y.shape[0] // 2 - 1)]\n elif mode == 'full':\n padding = [(y.shape[0] - 1, y.shape[0] - 1)]\n else:\n raise ValueError(\"mode must be one of ['full', 'same', 'valid']\")\n\n result = lax.conv_general_dilated(x[None, None, :], y[None, None, :], (1,),\n padding, precision=precision)\n return result[0, 0, out_order]\n\n\n@_wraps(np.convolve, lax_description=_PRECISION_DOC)\ndef convolve(a, v, mode='full', *, 
precision=None):\n _check_arraylike(\"convolve\", a, v)\n return _conv(a, v, mode, 'convolve', precision)\n\n\n@_wraps(np.correlate, lax_description=_PRECISION_DOC)\ndef correlate(a, v, mode='valid', *, precision=None):\n _check_arraylike(\"correlate\", a, v)\n return _conv(a, v, mode, 'correlate', precision)\n\n\ndef _normalize_float(x):\n info = finfo(_dtype(x))\n cond = lax.abs(x) < info.tiny\n x1 = where(cond, x * lax._const(x, 1 << info.nmant), x)\n x2 = where(cond, lax._const(np.int32, -info.nmant), lax._const(np.int32, 0))\n int_type = _INT_DTYPES[info.bits]\n return lax.bitcast_convert_type(x1, int_type), x2\n\n\n@_wraps(np.ldexp)\n@jit\ndef ldexp(x1, x2):\n dtype = dtypes.canonicalize_dtype(_result_dtype(np.ldexp, x1, x2))\n x1, x2 = _promote_shapes(\"ldexp\", x1, x2)\n x1 = lax.convert_element_type(x1, dtype)\n\n info = finfo(dtype)\n mask = (1 << info.nexp) - 1\n bias = ((1 << info.nexp) - 1) >> 1\n\n int_type = _INT_DTYPES[info.bits]\n\n x, e = _normalize_float(x1)\n x2 += e + ((x >> info.nmant) & mask) - bias\n\n # find underflow/overflow before denormalization\n underflow_cond = x2 < -(bias + info.nmant)\n overflow_cond = x2 > bias\n\n m = ones_like(x, dtype=dtype)\n\n # denormals\n cond = x2 < -bias + 1\n x2 = where(cond, x2 + info.nmant, x2)\n m = where(cond, m / (1 << info.nmant), m)\n\n x2 = lax.convert_element_type(x2, np.int32)\n x &= ~(mask << info.nmant)\n x |= ((lax.convert_element_type(x2, int_type) + bias) << info.nmant)\n\n x = lax.convert_element_type(m, dtype) * lax.bitcast_convert_type(x, dtype)\n\n # underflow\n x = where(underflow_cond, zeros_like(x, dtype=dtype), x)\n # overflow\n x = where(overflow_cond, lax.sign(x1) * full_like(x, np.inf), x)\n # ldexp(x1, x2) = x1 for x1 = inf, -inf, nan, 0\n return where(isinf(x1) | isnan(x1) | (x1 == 0), x1, x)\n\n\n@_wraps(np.frexp)\n@jit\ndef frexp(x):\n x = asarray(x)\n if issubdtype(x.dtype, complexfloating):\n raise TypeError(\"frexp does not support complex-valued inputs\")\n elif not issubdtype(x.dtype, floating):\n x = lax.convert_element_type(x, float_)\n\n dtype = _dtype(x)\n info = finfo(dtype)\n mask = (1 << info.nexp) - 1\n bias = ((1 << info.nexp) - 1) >> 1\n\n x1, x2 = _normalize_float(x)\n x2 += ((x1 >> info.nmant) & mask) - bias + 1\n x1 &= ~(mask << info.nmant)\n x1 |= (bias - 1) << info.nmant\n x1 = lax.bitcast_convert_type(x1, dtype)\n\n cond = isinf(x) | isnan(x) | (x == 0)\n x2 = where(cond, zeros_like(x2), x2)\n return where(cond, x, x1), lax.convert_element_type(x2, int32)\n\n\n@_wraps(np.remainder)\ndef remainder(x1, x2):\n x1, x2 = _promote_args(\"remainder\", x1, x2)\n zero = _constant_like(x1, 0)\n trunc_mod = lax.rem(x1, x2)\n trunc_mod_not_zero = lax.ne(trunc_mod, zero)\n do_plus = lax.bitwise_and(\n lax.ne(lax.lt(trunc_mod, zero), lax.lt(x2, zero)), trunc_mod_not_zero)\n return lax.select(do_plus, lax.add(trunc_mod, x2), trunc_mod)\nmod = _wraps(np.mod)(remainder)\n\n\n@_wraps(np.fmod)\ndef fmod(x1, x2):\n _check_arraylike(\"fmod\", x1, x2)\n if issubdtype(_dtype(x1, x2), integer):\n x2 = where(x2 == 0, 1, x2)\n return lax.rem(*_promote_args(\"fmod\", x1, x2))\n\n\n@_wraps(np.cbrt)\ndef cbrt(x):\n _check_arraylike(\"cbrt\", x)\n x, = _promote_dtypes_inexact(x)\n return lax.sign(x) * power(lax.abs(x), _constant_like(x, 1. 
/ 3.))\n\n\n@_wraps(np.square)\ndef square(x):\n _check_arraylike(\"square\", x)\n return lax.integer_pow(x, 2)\n\n\n@_wraps(np.deg2rad)\ndef deg2rad(x):\n _check_arraylike(\"deg2rad\", x)\n x, = _promote_dtypes_inexact(x)\n return lax.mul(x, lax._const(x, pi / 180))\n\n\n@_wraps(np.rad2deg)\ndef rad2deg(x):\n _check_arraylike(\"rad2deg\", x)\n x, = _promote_dtypes_inexact(x)\n return lax.mul(x, lax._const(x, 180 / pi))\n\n\ndegrees = rad2deg\nradians = deg2rad\n\n\n@_wraps(np.histogram_bin_edges)\ndef histogram_bin_edges(a, bins=10, range=None, weights=None):\n if isinstance(bins, str):\n raise NotImplementedError(\"string values for `bins` not implemented.\")\n a = ravel(a)\n b = array(bins)\n if b.ndim == 1:\n return b\n if range is None:\n range = (a.min(), a.max())\n assert len(range) == 2\n range = asarray(range)\n range = (where(ptp(range) == 0, range[0] - 0.5, range[0]),\n where(ptp(range) == 0, range[1] + 0.5, range[1]))\n dtype = _dtype(a)\n if issubdtype(dtype, integer):\n dtype = promote_types(dtype, float32)\n return linspace(range[0], range[1], bins + 1, dtype=dtype)\n\n\n@_wraps(np.histogram)\ndef histogram(a, bins=10, range=None, weights=None, density=None):\n if weights is not None and a.shape != weights.shape:\n raise ValueError(\"weights should have the same shape as a.\")\n a = ravel(a)\n if weights is not None:\n weights = ravel(weights)\n else:\n weights = ones_like(a)\n bin_edges = histogram_bin_edges(a, bins, range, weights)\n bin_idx = searchsorted(bin_edges, a, side='right')\n bin_idx = where(a == bin_edges[-1], len(bin_edges) - 1, bin_idx)\n counts = bincount(bin_idx, weights, length=len(bin_edges))[1:]\n if density:\n bin_widths = diff(bin_edges)\n counts = counts / bin_widths / counts.sum()\n return counts, bin_edges\n\n@_wraps(np.histogram2d)\ndef histogram2d(x, y, bins=10, range=None, weights=None, density=None):\n\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1 and N != 2:\n x_edges = y_edges = asarray(bins)\n bins = [x_edges, y_edges]\n\n sample = transpose(asarray([x, y]))\n hist, edges = histogramdd(sample, bins, range, weights, density)\n return hist, edges[0], edges[1]\n\n@_wraps(np.histogramdd)\ndef histogramdd(sample, bins=10, range=None, weights=None, density=None):\n _check_arraylike(\"histogramdd\", sample)\n N, D = shape(sample)\n\n if weights is not None and weights.shape != (N,):\n raise ValueError(\"should have one weight for each sample.\")\n\n try:\n num_bins = len(bins)\n if num_bins != D:\n raise ValueError(\"should be a bin for each dimension.\")\n except TypeError:\n # when bin_size is integer, the same bin is used for each dimension\n bins = D * [bins]\n\n bin_idx_by_dim = D*[None]\n nbins = np.empty(D, int)\n bin_edges_by_dim = D*[None]\n dedges = D*[None]\n\n for i in builtins.range(D):\n bin_edges = histogram_bin_edges(sample[:, i], bins[i], range, weights)\n bin_idx = searchsorted(bin_edges, sample[:, i], side='right')\n bin_idx = where(sample[:, i] == bin_edges[-1], bin_idx - 1, bin_idx)\n bin_idx_by_dim[i] = bin_idx\n nbins[i] = len(bin_edges) + 1\n bin_edges_by_dim[i] = bin_edges\n dedges[i] = diff(bin_edges_by_dim[i])\n\n xy = ravel_multi_index(bin_idx_by_dim, nbins, mode='clip')\n hist = bincount(xy, weights, length=nbins.prod())\n hist = reshape(hist, nbins)\n core = D*(slice(1, -1),)\n hist = hist[core]\n\n if density:\n s = sum(hist)\n for i in builtins.range(D):\n _shape = np.ones(D, int)\n _shape[i] = nbins[i] - 2\n hist = hist / reshape(dedges[i], _shape)\n\n hist /= s\n\n return hist, 
bin_edges_by_dim\n\n@_wraps(np.heaviside)\ndef heaviside(x1, x2):\n _check_arraylike(\"heaviside\", x1, x2)\n x1, x2 = _promote_dtypes_inexact(x1, x2)\n zero = lax._const(x1, 0)\n return where(lax.lt(x1, zero), zero,\n where(lax.gt(x1, zero), lax._const(x1, 1), x2))\n\n\n@_wraps(np.hypot)\ndef hypot(x1, x2):\n _check_arraylike(\"hypot\", x1, x2)\n x1, x2 = _promote_dtypes_inexact(x1, x2)\n return lax.sqrt(x1*x1 + x2*x2)\n\n\n@_wraps(np.reciprocal)\ndef reciprocal(x):\n _check_arraylike(\"reciprocal\", x)\n x, = _promote_dtypes_inexact(x)\n return lax.integer_pow(x, -1)\n\n\n@_wraps(np.sinc, update_doc=False)\ndef sinc(x):\n _check_arraylike(\"sinc\", x)\n x, = _promote_dtypes_inexact(x)\n eq_zero = lax.eq(x, lax._const(x, 0))\n safe_x = where(eq_zero, lax._const(x, 0), x)\n pi_x = lax.mul(lax._const(x, pi), safe_x)\n return where(eq_zero,\n lax._const(x, 1), lax.div(lax.sin(pi_x), pi_x))\n\n\n@_wraps(np.transpose)\ndef transpose(a, axes=None):\n _check_arraylike(\"transpose\", a)\n axes = np.arange(ndim(a))[::-1] if axes is None else axes\n return lax.transpose(a, axes)\n\n\n@_wraps(np.rot90)\ndef rot90(m, k=1, axes=(0, 1)):\n _check_arraylike(\"rot90\", m)\n ax1, ax2 = axes\n ax1 = _canonicalize_axis(ax1, ndim(m))\n ax2 = _canonicalize_axis(ax2, ndim(m))\n if ax1 == ax2:\n raise ValueError(\"Axes must be different\") # same as numpy error\n k = k % 4\n if k == 0:\n return m\n elif k == 2:\n return flip(flip(m, ax1), ax2)\n else:\n perm = list(range(m.ndim))\n perm[ax1], perm[ax2] = perm[ax2], perm[ax1]\n if k == 1:\n return transpose(flip(m, ax2), perm)\n else:\n return flip(transpose(m, perm), ax2)\n\n\n@_wraps(np.flip)\ndef flip(m, axis=None):\n _check_arraylike(\"flip\", m)\n if axis is None:\n return lax.rev(m, list(range(len(shape(m)))))\n return lax.rev(m, [_canonicalize_axis(axis, ndim(m))])\n\n\n@_wraps(np.fliplr)\ndef fliplr(m):\n return flip(m, 1)\n\n\n@_wraps(np.flipud)\ndef flipud(m):\n return flip(m, 0)\n\n\n@_wraps(np.conjugate)\ndef conjugate(x):\n _check_arraylike(\"conjugate\", x)\n return lax.conj(x) if iscomplexobj(x) else x\nconj = conjugate\n\n\n@_wraps(np.imag)\ndef imag(val):\n _check_arraylike(\"imag\", val)\n return lax.imag(val) if iscomplexobj(val) else zeros_like(val)\n\n\n@_wraps(np.real)\ndef real(val):\n _check_arraylike(\"real\", val)\n return lax.real(val) if iscomplexobj(val) else val\n\n\n@_wraps(np.iscomplex)\ndef iscomplex(x):\n i = imag(x)\n return lax.ne(i, lax._const(i, 0))\n\n@_wraps(np.isreal)\ndef isreal(x):\n i = imag(x)\n return lax.eq(i, lax._const(i, 0))\n\n@_wraps(np.angle)\ndef angle(z):\n re = real(z)\n im = imag(z)\n dtype = _dtype(re)\n if not issubdtype(dtype, inexact) or (\n issubdtype(_dtype(z), floating) and ndim(z) == 0):\n dtype = dtypes.canonicalize_dtype(float_)\n re = lax.convert_element_type(re, dtype)\n im = lax.convert_element_type(im, dtype)\n return lax.atan2(im, re)\n\n\n@_wraps(np.diff)\ndef diff(a, n=1, axis=-1):\n _check_arraylike(\"diff\", a)\n if n == 0:\n return a\n if n < 0:\n raise ValueError(f\"order must be non-negative but got {n}\")\n if ndim(a) == 0:\n raise ValueError(f\"diff requires input that is at least one dimensional; got {a}\")\n\n nd = a.ndim\n\n slice1 = [slice(None)] * nd\n slice2 = [slice(None)] * nd\n slice1[axis] = slice(1, None)\n slice2[axis] = slice(None, -1)\n slice1 = tuple(slice1)\n slice2 = tuple(slice2)\n\n op = not_equal if a.dtype == np.bool_ else subtract\n for _ in range(n):\n a = op(a[slice1], a[slice2])\n\n return a\n\n_EDIFF1D_DOC = \"\"\"\\\nUnlike NumPy's implementation of 
ediff1d, :py:func:`jax.numpy.ediff1d` will not\nissue an error if casting ``to_end`` or ``to_begin`` to the type of ``ary``\nloses precision.\n\"\"\"\n\n@_wraps(np.ediff1d, lax_description=_EDIFF1D_DOC)\ndef ediff1d(ary, to_end=None, to_begin=None):\n ary = ravel(asarray(ary))\n result = lax.sub(ary[1:], ary[:-1])\n if to_begin is not None:\n result = concatenate((ravel(asarray(to_begin, dtype=ary.dtype)), result))\n if to_end is not None:\n result = concatenate((result, ravel(asarray(to_end, dtype=ary.dtype))))\n return result\n\n\n@partial(jit, static_argnums=2)\ndef _gradient(a, varargs, axis):\n def gradient_along_axis(a, h, axis):\n sliced = partial(lax.slice_in_dim, a, axis=axis)\n a_grad = concatenate((\n (sliced(1, 2) - sliced(0, 1)), # upper edge\n (sliced(2, None) - sliced(None, -2)) * 0.5, # inner\n (sliced(-1, None) - sliced(-2, -1)), # lower edge\n ), axis)\n return a_grad / h\n\n if axis is None:\n axis = range(a.ndim)\n else:\n if isinstance(axis, int):\n axis = (axis,)\n if not isinstance(axis, tuple) and not isinstance(axis, list):\n raise ValueError(\"Give `axis` either as int or iterable\")\n elif len(axis) == 0:\n return []\n axis = [_canonicalize_axis(i, a.ndim) for i in axis]\n\n if _min([s for i, s in enumerate(a.shape) if i in axis]) < 2:\n raise ValueError(\"Shape of array too small to calculate \"\n \"a numerical gradient, \"\n \"at least 2 elements are required.\")\n len_axes = len(axis)\n n = len(varargs)\n if n == 0 or varargs is None:\n # no spacing\n dx = [1.0] * len_axes\n elif n == 1:\n # single value for all axes\n dx = varargs * len_axes\n elif n == len_axes:\n dx = varargs\n else:\n TypeError(\"Invalid number of spacing arguments %d\" % n)\n\n if ndim(dx[0]) != 0:\n raise NotImplementedError(\"Non-constant spacing not implemented\")\n\n # TODO: use jax.lax loop tools if possible\n a_grad = [gradient_along_axis(a, h, ax) for ax, h in zip(axis, dx)]\n\n if len(axis) == 1:\n a_grad = a_grad[0]\n\n return a_grad\n\n\n@_wraps(np.gradient)\ndef gradient(f, *args, **kwargs):\n axis = kwargs.pop(\"axis\", None)\n if not len(kwargs) == 0:\n raise ValueError(\"Only `axis` keyword is implemented\")\n return _gradient(f, args, axis)\n\n\n@_wraps(np.isrealobj)\ndef isrealobj(x):\n return not iscomplexobj(x)\n\n\n@_wraps(np.reshape)\ndef reshape(a, newshape, order=\"C\"):\n try:\n return a.reshape(newshape, order=order) # forward to method for ndarrays\n except AttributeError:\n return _reshape(a, newshape, order=order)\n\ndef _compute_newshape(a, newshape):\n \"\"\"Fixes a -1 value in newshape, if present.\"\"\"\n # other errors, like having more than one -1, are caught downstream\n try: iter(newshape)\n except: iterable = False\n else: iterable = True\n def check(size):\n return size if type(size) is Poly else core.concrete_or_error(\n int, size, \"The error arose in jax.numpy.reshape.\")\n newshape = [check(size) for size in newshape] if iterable else check(newshape)\n if np.any(np.equal(newshape, -1)):\n fix = -a.size // (newshape if type(newshape) is Poly else _prod(newshape))\n return [d if d != -1 else fix for d in newshape]\n else:\n return newshape\n\ndef _reshape(a, newshape, order=\"C\"):\n computed_newshape = _compute_newshape(a, newshape)\n if order == \"C\":\n return lax.reshape(a, computed_newshape, None)\n elif order == \"F\":\n dims = np.arange(ndim(a))[::-1]\n return lax.reshape(a, computed_newshape[::-1], dims).T\n elif order == \"A\":\n raise NotImplementedError(\"np.reshape order=A is not implemented.\")\n else:\n raise ValueError(\"Unexpected 
value for 'order' argument: {}.\".format(order))\n\ndef _reshape_method(a, *newshape, **kwargs):\n order = kwargs.pop(\"order\", \"C\")\n if len(kwargs) == 1:\n invalid_kwarg, = kwargs\n msg = \"'{}' is an invalid keyword argument for this function\"\n raise TypeError(msg.format(invalid_kwarg)) # same as NumPy error\n elif kwargs:\n invalid_kwargs = \"'{}'\".format(\"'\".join(kwargs))\n msg = \"{} are invalid keyword arguments for this function\"\n raise TypeError(msg.format(invalid_kwargs)) # different from NumPy error\n if (len(newshape) == 1 and not isinstance(newshape[0], int) and\n type(newshape[0]) is not Poly):\n newshape = newshape[0]\n return _reshape(a, newshape, order=order)\n\n\n@_wraps(np.ravel)\ndef ravel(a, order=\"C\"):\n if order == \"K\":\n raise NotImplementedError(\"Ravel not implemented for order='K'.\")\n return reshape(a, (size(a),), order)\n\n\n@_wraps(np.ravel_multi_index)\ndef ravel_multi_index(multi_index, dims, mode='raise', order='C'):\n assert len(multi_index) == len(dims), f\"len(multi_index)={len(multi_index)} != len(dims)={len(dims)}\"\n dims = tuple(core.concrete_or_error(int, d, \"in `dims` argument of ravel_multi_index().\") for d in dims)\n _check_arraylike(\"ravel_multi_index\", *multi_index)\n for index in multi_index:\n if mode == 'raise':\n core.concrete_or_error(array, index,\n \"The error occurred because ravel_multi_index was jit-compiled\"\n \" with mode='raise'. Use mode='wrap' or mode='clip' instead.\")\n if not issubdtype(_dtype(index), integer):\n raise TypeError(\"only int indices permitted\")\n if mode == \"raise\":\n if _any(any((i < 0) | (i >= d)) for i, d in zip(multi_index, dims)):\n raise ValueError(\"invalid entry in coordinates array\")\n elif mode == \"clip\":\n multi_index = [clip(i, 0, d - 1) for i, d in zip(multi_index, dims)]\n elif mode == \"wrap\":\n multi_index = [i % d for i, d in zip(multi_index, dims)]\n else:\n raise ValueError(f\"invalid mode={mode!r}. Expected 'raise', 'wrap', or 'clip'\")\n\n if order == \"F\":\n strides = np.cumprod((1,) + dims[:-1])\n elif order == \"C\":\n strides = np.cumprod((1,) + dims[1:][::-1])[::-1]\n else:\n raise ValueError(f\"invalid order={order!r}. 
Expected 'C' or 'F'\")\n\n result = 0\n for i, s in zip(multi_index, strides):\n result = result + i * s\n return result\n\n\n_UNRAVEL_INDEX_DOC = \"\"\"\\\nUnlike numpy's implementation of unravel_index, negative indices are accepted\nand out-of-bounds indices are clipped.\n\"\"\"\n\n@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)\ndef unravel_index(indices, shape):\n indices = asarray(indices)\n sizes = pad(shape, (0, 1), constant_values=1)\n cumulative_sizes = cumprod(sizes[::-1])[::-1]\n total_size = cumulative_sizes[0]\n # Clip so raveling and unraveling an oob index will not change the behavior\n clipped_indices = clip(indices, -total_size, total_size - 1)\n # Add enough trailing dims to avoid conflict with flat_index\n cumulative_sizes = cumulative_sizes.reshape([-1] + [1] * indices.ndim)\n idx = clipped_indices % cumulative_sizes[:-1] // cumulative_sizes[1:]\n return tuple(idx)\n\n\n@_wraps(np.squeeze)\ndef squeeze(a, axis: Union[int, Tuple[int, ...]] = None):\n _check_arraylike(\"squeeze\", a)\n if axis is None:\n a_shape = shape(a)\n axis = tuple(i for i, d in enumerate(a_shape) if d == 1)\n elif not isinstance(axis, tuple):\n axis = (axis,)\n return lax.squeeze(a, axis)\n\n\n@_wraps(np.expand_dims)\ndef expand_dims(a, axis: Union[int, Tuple[int, ...]]):\n _check_arraylike(\"expand_dims\", a)\n if not isinstance(axis, tuple):\n axis = (axis,)\n return lax.expand_dims(a, axis)\n\n\n@_wraps(np.swapaxes)\ndef swapaxes(a, axis1, axis2):\n _check_arraylike(\"swapaxes\", a)\n perm = np.arange(ndim(a))\n perm[axis1], perm[axis2] = perm[axis2], perm[axis1]\n return lax.transpose(a, perm)\n\n\n@_wraps(np.moveaxis)\ndef moveaxis(a, source, destination):\n _check_arraylike(\"moveaxis\", a)\n try:\n source = (operator.index(source),)\n except TypeError:\n pass\n try:\n destination = (operator.index(destination),)\n except TypeError:\n pass\n source = tuple(_canonicalize_axis(i, ndim(a)) for i in source)\n destination = tuple(_canonicalize_axis(i, ndim(a)) for i in destination)\n if len(source) != len(destination):\n raise ValueError(\"Inconsistent number of elements: {} vs {}\"\n .format(len(source), len(destination)))\n perm = [i for i in range(ndim(a)) if i not in source]\n for dest, src in sorted(zip(destination, source)):\n perm.insert(dest, src)\n return lax.transpose(a, perm)\n\n\n@_wraps(np.isclose)\ndef isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = _promote_args(\"isclose\", asarray(a), asarray(b))\n dtype = _dtype(a)\n if issubdtype(dtype, inexact):\n if issubdtype(dtype, complexfloating):\n dtype = _complex_elem_type(dtype)\n rtol = lax.convert_element_type(rtol, dtype)\n atol = lax.convert_element_type(atol, dtype)\n out = lax.le(\n lax.abs(lax.sub(a, b)),\n lax.add(atol, lax.mul(rtol, lax.abs(b))))\n # This corrects the comparisons for infinite and nan values\n a_inf = isinf(a)\n b_inf = isinf(b)\n any_inf = logical_or(a_inf, b_inf)\n both_inf = logical_and(a_inf, b_inf)\n # Make all elements where either a or b are infinite to False\n out = logical_and(out, logical_not(any_inf))\n # Make all elements where both a or b are the same inf to True\n same_value = lax.eq(a, b)\n same_inf = logical_and(both_inf, same_value)\n out = logical_or(out, same_inf)\n\n # Make all elements where either a or b is NaN to False\n a_nan = isnan(a)\n b_nan = isnan(b)\n any_nan = logical_or(a_nan, b_nan)\n out = logical_and(out, logical_not(any_nan))\n if equal_nan:\n # Make all elements where both a and b is NaN to True\n both_nan = logical_and(a_nan, b_nan)\n out = 
logical_or(out, both_nan)\n return _maybe_numpy_1_13_isclose_behavior(a, out)\n else:\n return lax.eq(a, b)\n\nnumpy_version = tuple(map(int, np.version.version.split('.')[:2]))\nif numpy_version < (1, 14):\n # see discussion at https://github.com/numpy/numpy/pull/9720\n def _maybe_numpy_1_13_isclose_behavior(a, out):\n if size(out) == 1 and issubdtype(_dtype(a), complexfloating):\n return lax.reshape(out, (1,))\n else:\n return out\nelse:\n def _maybe_numpy_1_13_isclose_behavior(a, out):\n return out\n\n@_wraps(np.interp)\ndef interp(x, xp, fp, left=None, right=None, period=None):\n if shape(xp) != shape(fp) or ndim(xp) != 1:\n raise ValueError(\"xp and fp must be one-dimensional arrays of equal size\")\n x, xp, fp = map(asarray, _promote_dtypes_inexact(x, xp, fp))\n if period is not None:\n if period == 0:\n raise ValueError(f\"period must be a non-zero value; got {period}\")\n period = abs(period)\n x = x % period\n xp = xp % period\n xp, fp = lax.sort_key_val(xp, fp)\n xp = concatenate([xp[-1:] - period, xp, xp[:1] + period])\n fp = concatenate([fp[-1:], fp, fp[:1]])\n\n i = clip(searchsorted(xp, x, side='right'), 1, len(xp) - 1)\n df = fp[i] - fp[i - 1]\n dx = xp[i] - xp[i - 1]\n delta = x - xp[i - 1]\n f = where((dx == 0), fp[i], fp[i - 1] + (delta / dx) * df)\n\n if period is None:\n f = where(x < xp[0], fp[0] if left is None else left, f)\n f = where(x > xp[-1], fp[-1] if right is None else right, f)\n return f\n\n\n@_wraps(np.in1d, lax_description=\"\"\"\nIn the JAX version, the `assume_unique` argument is not referenced.\n\"\"\")\ndef in1d(ar1, ar2, assume_unique=False, invert=False):\n ar1 = ravel(ar1)\n ar2 = ravel(ar2)\n # Note: an algorithm based on searchsorted has better scaling, but in practice\n # is very slow on accelerators because it relies on lax control flow. 
If XLA\n # ever supports binary search natively, we should switch to this:\n # ar2 = jnp.sort(ar2)\n # ind = jnp.searchsorted(ar2, ar1)\n # if invert:\n # return ar1 != ar2[ind]\n # else:\n # return ar1 == ar2[ind]\n if invert:\n return (ar1[:, None] != ar2).all(-1)\n else:\n return (ar1[:, None] == ar2).any(-1)\n\n@_wraps(np.setdiff1d, lax_description=\"\"\"\nIn the JAX version, the `assume_unique` argument is not referenced.\n\"\"\")\ndef setdiff1d(ar1, ar2, assume_unique=False):\n ar1 = core.concrete_or_error(asarray, ar1, \"The error arose in setdiff1d()\")\n ar2 = core.concrete_or_error(asarray, ar2, \"The error arose in setdiff1d()\")\n\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n\n idx = in1d(ar1, ar2, invert=True)\n return ar1[idx]\n\n@partial(jit, static_argnums=2)\ndef _intersect1d_sorted_mask(ar1, ar2, return_indices=False):\n \"\"\"\n Helper function for intersect1d which is jit-able\n \"\"\"\n ar = concatenate((ar1, ar2))\n if return_indices:\n iota = lax.broadcasted_iota(np.int64, shape(ar), dimension=0)\n aux, indices = lax.sort_key_val(ar, iota)\n else:\n aux = sort(ar)\n\n mask = aux[1:] == aux[:-1]\n if return_indices:\n return aux, mask, indices\n else:\n return aux, mask\n\n\n@_wraps(np.intersect1d)\ndef intersect1d(ar1, ar2, assume_unique=False, return_indices=False):\n ar1 = core.concrete_or_error(asarray, ar1, \"The error arose in intersect1d()\")\n ar2 = core.concrete_or_error(asarray, ar2, \"The error arose in intersect1d()\")\n\n if not assume_unique:\n if return_indices:\n ar1, ind1 = unique(ar1, return_index=True)\n ar2, ind2 = unique(ar2, return_index=True)\n else:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n else:\n ar1 = ravel(ar1)\n ar2 = ravel(ar2)\n\n if return_indices:\n aux, mask, aux_sort_indices = _intersect1d_sorted_mask(ar1, ar2, return_indices)\n else:\n aux, mask = _intersect1d_sorted_mask(ar1, ar2, return_indices)\n\n int1d = aux[:-1][mask]\n\n if return_indices:\n ar1_indices = aux_sort_indices[:-1][mask]\n ar2_indices = aux_sort_indices[1:][mask] - ar1.size\n if not assume_unique:\n ar1_indices = ind1[ar1_indices]\n ar2_indices = ind2[ar2_indices]\n\n return int1d, ar1_indices, ar2_indices\n else:\n return int1d\n\n\n@_wraps(np.isin, lax_description=\"\"\"\nIn the JAX version, the `assume_unique` argument is not referenced.\n\"\"\")\ndef isin(element, test_elements, assume_unique=False, invert=False):\n result = in1d(element, test_elements, assume_unique=assume_unique, invert=invert)\n return result.reshape(shape(element))\n\n\n# The `jit` on `where` exists to avoid materializing constants in cases like\n# `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to\n# materialize the broadcast forms of scalar arguments.\n@jit\ndef _where(condition, x=None, y=None):\n if x is None or y is None:\n raise ValueError(\"Either both or neither of the x and y arguments should \"\n \"be provided to jax.numpy.where, got {} and {}.\"\n .format(x, y))\n if not issubdtype(_dtype(condition), bool_):\n condition = lax.ne(condition, zeros_like(condition))\n x, y = _promote_dtypes(x, y)\n condition, x, y = broadcast_arrays(condition, x, y)\n return lax.select(condition, x, y) if np.size(x) else x\n\n\n_WHERE_DOC = \"\"\"\\\nAt present, JAX does not support JIT-compilation of the single-argument form\nof :py:func:`jax.numpy.where` because its output shape is data-dependent. 
The\nthree-argument form does not have a data-dependent shape and can be JIT-compiled\nsuccessfully.\n\"\"\"\n\n@_wraps(np.where, update_doc=False, lax_description=_WHERE_DOC)\ndef where(condition, x=None, y=None):\n if x is None and y is None:\n return nonzero(asarray(condition))\n else:\n return _where(condition, x, y)\n\n\n@_wraps(np.select)\ndef select(condlist, choicelist, default=0):\n if len(condlist) != len(choicelist):\n msg = \"condlist must have length equal to choicelist ({} vs {})\"\n raise ValueError(msg.format(len(condlist), len(choicelist)))\n if len(condlist) == 0:\n raise ValueError(\"condlist must be non-empty\")\n choices = _promote_dtypes(default, *choicelist)\n choicelist = choices[1:]\n output = choices[0]\n for cond, choice in zip(condlist[::-1], choicelist[::-1]):\n output = where(cond, choice, output)\n return output\n\n\n@_wraps(np.bincount, lax_description=\"\"\"\\\nJax adds the optional `length` parameter which specifies the output length, and\ndefaults to ``x.max() + 1``. It must be specified for bincount to be compilable.\nValues larger than the specified length will be discarded.\n\nAdditionally, while ``np.bincount`` raises an error if the input array contains\nnegative values, ``jax.numpy.bincount`` treats negative values as zero.\n\"\"\")\ndef bincount(x, weights=None, minlength=0, *, length=None):\n _check_arraylike(\"bincount\", x)\n if not issubdtype(_dtype(x), integer):\n msg = f\"x argument to bincount must have an integer type; got {x.dtype}\"\n raise TypeError(msg)\n if length is None:\n x = core.concrete_or_error(array, x,\n \"The error occured because of argument 'x' of jnp.bincount. \"\n \"To avoid this error, pass a static `length` argument.\")\n length = max(x) + 1\n length = _max(length, minlength)\n if ndim(x) != 1:\n raise ValueError(\"only 1-dimensional input supported.\")\n if weights is None:\n weights = array(1, dtype=int32)\n else:\n if shape(x) != shape(weights):\n raise ValueError(\"shape of weights must match shape of x.\")\n return ops.index_add(zeros((length,), _dtype(weights)), ops.index[clip(x, 0)], weights)\n\n\ndef broadcast_arrays(*args):\n \"\"\"Like Numpy's broadcast_arrays but doesn't return views.\"\"\"\n shapes = [shape(arg) for arg in args]\n if len(set(shapes)) == 1:\n return [arg if isinstance(arg, ndarray) or isscalar(arg) else array(arg)\n for arg in args]\n result_shape = lax.broadcast_shapes(*shapes)\n return [broadcast_to(arg, result_shape) for arg in args]\n\n\n@_wraps(np.broadcast_to, lax_description=\"\"\"\\\nThe JAX version does not necessarily return a view of the input.\n\"\"\")\ndef broadcast_to(arr, shape):\n arr = arr if isinstance(arr, ndarray) else array(arr)\n shape = canonicalize_shape(shape) # check that shape is concrete\n arr_shape = _shape(arr)\n if arr_shape == shape:\n return arr\n else:\n nlead = len(shape) - len(arr_shape)\n compatible = np.equal(arr_shape, shape[nlead:]) | np.equal(arr_shape, 1)\n if nlead < 0 or not np.all(compatible):\n msg = \"Incompatible shapes for broadcasting: {} and requested shape {}\"\n raise ValueError(msg.format(arr_shape, shape))\n diff, = np.where(np.not_equal(shape[nlead:], arr_shape))\n new_dims = tuple(range(nlead)) + tuple(nlead + diff)\n kept_dims = tuple(np.delete(np.arange(len(shape)), new_dims))\n return lax.broadcast_in_dim(squeeze(arr, tuple(diff)), shape, kept_dims)\n\ndef _split(op, ary, indices_or_sections, axis=0):\n axis = core.concrete_or_error(int, axis, f\"in jax.numpy.{op} argument `axis`\")\n size = ary.shape[axis]\n if 
isinstance(indices_or_sections, (tuple, list) + _arraylike_types):\n indices_or_sections = [core.concrete_or_error(int, i_s, f\"in jax.numpy.{op} argument 1\")\n for i_s in indices_or_sections]\n split_indices = np.concatenate([[0], indices_or_sections, [size]])\n else:\n indices_or_sections = core.concrete_or_error(int, indices_or_sections,\n f\"in jax.numpy.{op} argument 1\")\n part_size, r = _divmod(size, indices_or_sections)\n if r == 0:\n split_indices = np.arange(indices_or_sections + 1) * part_size\n elif op == \"array_split\":\n split_indices = np.concatenate([np.arange(r + 1) * (part_size + 1),\n np.arange(indices_or_sections - r) * part_size\n + ((r + 1) * (part_size + 1) - 1)])\n else:\n raise ValueError(\"array split does not result in an equal division\")\n split_indices = split_indices.astype(int)\n starts, ends = [0] * ndim(ary), shape(ary)\n _subval = lambda x, i, v: subvals(x, [(i, v)])\n return [lax.slice(ary, _subval(starts, axis, start), _subval(ends, axis, end))\n for start, end in zip(split_indices[:-1], split_indices[1:])]\n\n@_wraps(np.split)\ndef split(ary, indices_or_sections, axis=0):\n return _split(\"split\", ary, indices_or_sections, axis=axis)\n\ndef _split_on_axis(np_fun, axis):\n @_wraps(np_fun, update_doc=False)\n def f(ary, indices_or_sections):\n return split(ary, indices_or_sections, axis=axis)\n return f\n\nvsplit = _split_on_axis(np.vsplit, axis=0)\nhsplit = _split_on_axis(np.hsplit, axis=1)\ndsplit = _split_on_axis(np.dsplit, axis=2)\n\n@_wraps(np.array_split)\ndef array_split(ary, indices_or_sections, axis=0):\n return _split(\"array_split\", ary, indices_or_sections, axis=axis)\n\n@_wraps(np.clip)\ndef clip(a, a_min=None, a_max=None, out=None):\n _check_arraylike(\"clip\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.clip is not supported.\")\n if a_min is None and a_max is None:\n raise ValueError(\"At most one of a_min and a_max may be None\")\n if a_min is not None:\n a = maximum(a_min, a)\n if a_max is not None:\n a = minimum(a_max, a)\n return a\n\n\ndef _round_to_nearest_even(x):\n half = lax._const(x, 0.5)\n one = lax._const(x, 1)\n round_val = lax.floor(x)\n fraction = x - round_val\n nearest_even_int = lax.sub(\n round_val, lax.mul(lax._const(x, 2), lax.floor(lax.mul(half, x))))\n is_odd = lax.eq(nearest_even_int, one)\n return lax.select(\n lax.bitwise_or(lax.gt(fraction, half),\n lax.bitwise_and(lax.eq(fraction, half), is_odd)),\n lax.add(round_val, one), round_val)\n\n@_wraps(np.round, update_doc=False)\ndef round(a, decimals=0, out=None):\n _check_arraylike(\"round\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.round is not supported.\")\n dtype = _dtype(a)\n if issubdtype(dtype, integer):\n if decimals < 0:\n raise NotImplementedError(\n \"integer np.round not implemented for decimals < 0\")\n return a # no-op on integer types\n\n def _round_float(x):\n if decimals == 0:\n return _round_to_nearest_even(x)\n\n # TODO(phawkins): the strategy of rescaling the value isn't necessarily a\n # good one since we may be left with an incorrectly rounded value at the\n # end due to precision problems. 
As a workaround for float16, convert to\n # float32.\n x = lax.convert_element_type(x, np.float32) if dtype == np.float16 else x\n factor = _constant_like(x, 10 ** decimals)\n out = lax.div(_round_to_nearest_even(lax.mul(x, factor)), factor)\n return lax.convert_element_type(out, dtype) if dtype == np.float16 else out\n\n if issubdtype(dtype, complexfloating):\n return lax.complex(_round_float(lax.real(a)), _round_float(lax.imag(a)))\n else:\n return _round_float(a)\naround = round\n\n\n@_wraps(np.fix)\ndef fix(x, out=None):\n _check_arraylike(\"fix\", x)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.fix is not supported.\")\n zero = lax._const(x, 0)\n return where(lax.ge(x, zero), floor(x), ceil(x))\n\n\n@_wraps(np.modf)\ndef modf(x, out=None):\n _check_arraylike(\"modf\", x)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.modf is not supported.\")\n whole = fix(x)\n return x - whole, whole\n\n\n@_wraps(np.isfinite)\ndef isfinite(x):\n _check_arraylike(\"isfinite\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, floating):\n return lax.is_finite(x)\n elif issubdtype(dtype, complexfloating):\n return lax.bitwise_and(lax.is_finite(real(x)), lax.is_finite(imag(x)))\n else:\n return full_like(x, True, dtype=bool_)\n\n@_wraps(np.isinf)\ndef isinf(x):\n _check_arraylike(\"isinf\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, floating):\n return lax.eq(lax.abs(x), _constant_like(x, inf))\n elif issubdtype(dtype, complexfloating):\n re = lax.real(x)\n im = lax.imag(x)\n return lax.bitwise_or(lax.eq(lax.abs(re), _constant_like(re, inf)),\n lax.eq(lax.abs(im), _constant_like(im, inf)))\n else:\n return full_like(x, False, dtype=bool_)\n\ndef _isposneginf(infinity, x, out):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to isneginf/isposinf is not supported.\")\n dtype = _dtype(x)\n if issubdtype(dtype, floating):\n return lax.eq(x, _constant_like(x, infinity))\n elif issubdtype(dtype, complexfloating):\n raise ValueError(\"isposinf/isneginf are not well defined for complex types\")\n else:\n return full_like(x, False, dtype=bool_)\n\nisposinf = _wraps(np.isposinf)(lambda x, out=None: _isposneginf(inf, x, out))\n\nisneginf = _wraps(np.isneginf)(lambda x, out=None: _isposneginf(-inf, x, out))\n\n@_wraps(np.isnan)\ndef isnan(x):\n _check_arraylike(\"isnan\", x)\n return lax.ne(x, x)\n\n@_wraps(np.nan_to_num)\ndef nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):\n del copy\n _check_arraylike(\"nan_to_num\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, complexfloating):\n return lax.complex(\n nan_to_num(lax.real(x), nan=nan, posinf=posinf, neginf=neginf),\n nan_to_num(lax.imag(x), nan=nan, posinf=posinf, neginf=neginf))\n info = finfo(dtypes.canonicalize_dtype(dtype))\n posinf = info.max if posinf is None else posinf\n neginf = info.min if neginf is None else neginf\n x = where(isnan(x), _constant_like(x, nan), x)\n x = where(isposinf(x), _constant_like(x, posinf), x)\n x = where(isneginf(x), _constant_like(x, neginf), x)\n return x\n\n### Reducers\n\n\ndef _make_reduction(name, np_fun, op, init_val, preproc=None, bool_op=None,\n upcast_f16_for_computation=False):\n \"\"\"Creates reduction function given a binary operation and monoid identity.\"\"\"\n\n bool_op = bool_op or op\n\n @_wraps(np_fun)\n def reduction(a, axis=None, dtype=None, out=None, keepdims=False):\n if out is not None:\n raise NotImplementedError(f\"The 'out' argument to jnp.{name} is not supported.\")\n _check_arraylike(name, a)\n 
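\n# --- Illustrative aside (not part of the original source): `_round_to_nearest_even`\n# above implements IEEE round-half-to-even, so halfway cases round toward the\n# even neighbor rather than away from zero. Sketch with hypothetical values:\n#\n#   >>> import jax.numpy as jnp\n#   >>> jnp.round(jnp.array([0.5, 1.5, 2.5, 3.5]))\n#   [0., 2., 2., 4.]\n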
lax._check_user_dtype_supported(dtype, name)\n axis = core.concrete_or_error(None, axis, f\"axis argument to jnp.{name}().\")\n\n a = a if isinstance(a, ndarray) else asarray(a)\n a = preproc(a) if preproc else a\n dims = _reduction_dims(a, axis)\n result_dtype = dtype or _dtype(np_fun(np.ones((), dtype=_dtype(a))))\n if upcast_f16_for_computation and issubdtype(result_dtype, inexact):\n computation_dtype = promote_types(result_dtype, float32)\n else:\n computation_dtype = result_dtype\n a = lax.convert_element_type(a, computation_dtype)\n result = lax.reduce(a, _reduction_init_val(a, init_val),\n op if computation_dtype != np.bool_ else bool_op, dims)\n if keepdims:\n result = expand_dims(result, dims)\n return lax.convert_element_type(result, dtype or result_dtype)\n\n return reduction\n\ndef _reduction_dims(a, axis):\n if axis is None:\n return tuple(range(ndim(a)))\n elif isinstance(axis, (np.ndarray, tuple, list)):\n if len(axis) != len(set(axis)):\n raise ValueError(f\"duplicate value in 'axis': {axis}\")\n return tuple(_canonicalize_axis(x, ndim(a)) for x in axis)\n elif isinstance(axis, int):\n return (_canonicalize_axis(axis, ndim(a)),)\n else:\n raise TypeError(\"Unexpected type of axis argument: {}\".format(type(axis)))\n\ndef _reduction_init_val(a, init_val):\n a_dtype = dtypes.canonicalize_dtype(_dtype(a))\n if a_dtype == 'bool':\n return np.array(init_val > 0, dtype=a_dtype)\n try:\n return np.array(init_val, dtype=a_dtype)\n except OverflowError:\n assert issubdtype(a_dtype, integer)\n sign, info = np.sign(init_val), iinfo(a_dtype)\n return np.array(info.min if sign < 0 else info.max, dtype=a_dtype)\n\n_cast_to_bool = partial(lax.convert_element_type, new_dtype=bool_)\n\nsum = _make_reduction(\"sum\", np.sum, lax.add, 0, upcast_f16_for_computation=True,\n bool_op=lax.bitwise_or)\nproduct = prod = _make_reduction(\"prod\", np.prod, lax.mul, 1, bool_op=lax.bitwise_and,\n upcast_f16_for_computation=True)\namax = max = _make_reduction(\"max\", np.max, lax.max, -np.inf)\namin = min = _make_reduction(\"min\", np.min, lax.min, np.inf)\nall = alltrue = _make_reduction(\"all\", np.all, lax.bitwise_and, True, _cast_to_bool)\nany = sometrue = _make_reduction(\"any\", np.any, lax.bitwise_or, False, _cast_to_bool)\n\n\n@_wraps(np.mean)\ndef mean(a, axis=None, dtype=None, out=None, keepdims=False):\n _check_arraylike(\"mean\", a)\n lax._check_user_dtype_supported(dtype, \"mean\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.mean is not supported.\")\n\n if axis is None:\n normalizer = size(a)\n else:\n normalizer = np.prod(np.take(shape(a), axis))\n if dtype is None:\n if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):\n dtype = float_\n else:\n dtype = _dtype(a)\n dtype = dtypes.canonicalize_dtype(dtype)\n\n return lax.div(\n sum(a, axis, dtype=dtype, keepdims=keepdims),\n lax.convert_element_type(normalizer, dtype))\n\n@_wraps(np.average)\ndef average(a, axis=None, weights=None, returned=False):\n a = asarray(a)\n\n if weights is None: # Treat all weights as 1\n avg = mean(a, axis=axis)\n if axis is None:\n weights_sum = full((), size(a), dtype=avg.dtype)\n else:\n weights_sum = full_like(avg, a.shape[axis], dtype=avg.dtype)\n else:\n weights = asarray(weights)\n\n if issubdtype(a.dtype, inexact):\n out_dtype = result_type(a.dtype, weights.dtype)\n else:\n out_dtype = result_type(a.dtype, weights.dtype, float_)\n out_dtype = dtypes.canonicalize_dtype(out_dtype)\n\n a_shape = shape(a)\n a_ndim = len(a_shape)\n weights_shape = 
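\n# --- Illustrative aside (not part of the original source): the reducers built\n# by `_make_reduction` above follow numpy semantics for `axis` and `keepdims`.\n# Sketch with hypothetical values:\n#\n#   >>> import jax.numpy as jnp\n#   >>> a = jnp.array([[1, 2], [3, 4]])\n#   >>> jnp.sum(a, axis=0)\n#   [4, 6]\n#   >>> jnp.sum(a, axis=0, keepdims=True).shape\n#   (1, 2)\n#   >>> jnp.mean(a)   # integer input promotes to float\n#   2.5\n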
shape(weights)\n axis = None if axis is None else _canonicalize_axis(axis, a_ndim)\n\n if a_shape != weights_shape:\n # Make sure the dimensions work out\n if axis is None:\n raise ValueError(\"Axis must be specified when shapes of a and \"\n \"weights differ.\")\n if len(weights_shape) != 1:\n raise ValueError(\"1D weights expected when shapes of a and \"\n \"weights differ.\")\n if weights_shape[0] != a_shape[axis]:\n raise ValueError(\"Length of weights not \"\n \"compatible with specified axis.\")\n\n weights = broadcast_to(weights, (a_ndim - 1) * (1,) + weights_shape)\n weights = moveaxis(weights, -1, axis)\n\n weights_sum = sum(weights, axis=axis, dtype=out_dtype)\n avg = sum(multiply(a, weights), axis=axis, dtype=out_dtype) / weights_sum\n\n if returned:\n if avg.shape != weights_sum.shape:\n weights_sum = broadcast_to(weights_sum, avg.shape)\n return avg, weights_sum\n return avg\n\n\n@_wraps(np.var)\ndef var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n _check_arraylike(\"var\", a)\n lax._check_user_dtype_supported(dtype, \"var\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.var is not supported.\")\n\n a_dtype, dtype = _var_promote_types(_dtype(a), dtype)\n a_mean = mean(a, axis, dtype=a_dtype, keepdims=True)\n centered = a - a_mean\n if issubdtype(centered.dtype, complexfloating):\n centered = lax.real(lax.mul(centered, lax.conj(centered)))\n else:\n centered = lax.square(centered)\n\n if axis is None:\n normalizer = size(a)\n else:\n normalizer = np.prod(np.take(shape(a), axis))\n normalizer = normalizer - ddof\n\n result = sum(centered, axis, keepdims=keepdims)\n out = lax.div(result, lax.convert_element_type(normalizer, result.dtype))\n return lax.convert_element_type(out, dtype)\n\n\ndef _var_promote_types(a_dtype, dtype):\n if dtype:\n if (not issubdtype(dtype, complexfloating) and\n issubdtype(a_dtype, complexfloating)):\n msg = (\"jax.numpy.var does not yet support real dtype parameters when \"\n \"computing the variance of an array of complex values. The \"\n \"semantics of numpy.var seem unclear in this case. 
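\n# --- Illustrative aside (not part of the original source): `average` above\n# computes sum(a * weights) / sum(weights) along the requested axis. Sketch\n# with hypothetical values:\n#\n#   >>> import jax.numpy as jnp\n#   >>> jnp.average(jnp.array([1., 2., 3., 4.]), weights=jnp.array([4., 3., 2., 1.]))\n#   2.0\n#   >>> jnp.average(jnp.array([1., 2., 3., 4.]), weights=jnp.array([4., 3., 2., 1.]), returned=True)\n#   (2.0, 10.0)\n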
Please comment \"\n \"on https://github.com/google/jax/issues/2283 if this behavior is \"\n \"important to you.\")\n raise ValueError(msg)\n a_dtype = promote_types(a_dtype, dtype)\n else:\n if not issubdtype(a_dtype, inexact):\n dtype = a_dtype = dtypes.canonicalize_dtype(float_)\n else:\n dtype = _complex_elem_type(a_dtype)\n a_dtype = promote_types(a_dtype, float32)\n return a_dtype, dtype\n\n\n@_wraps(np.std)\ndef std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n _check_arraylike(\"std\", a)\n lax._check_user_dtype_supported(dtype, \"std\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.std is not supported.\")\n return sqrt(var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims))\n\n\n@_wraps(np.ptp)\ndef ptp(a, axis=None, out=None, keepdims=False):\n _check_arraylike(\"ptp\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.ptp is not supported.\")\n x = amax(a, axis=axis, keepdims=keepdims)\n y = amin(a, axis=axis, keepdims=keepdims)\n return lax.sub(x, y)\n\n\n@_wraps(np.allclose)\ndef allclose(a, b, rtol=1e-05, atol=1e-08):\n return all(isclose(a, b, rtol, atol))\n\n\n@_wraps(np.count_nonzero)\ndef count_nonzero(a, axis=None, keepdims=False):\n _check_arraylike(\"count_nonzero\", a)\n return sum(lax.ne(a, _constant_like(a, 0)), axis=axis,\n dtype=dtypes.canonicalize_dtype(np.int_), keepdims=keepdims)\n\n\n_NONZERO_DOC = \"\"\"\\\nAt present, JAX does not support JIT-compilation of :py:func:`jax.numpy.nonzero`\nbecause its output shape is data-dependent.\n\"\"\"\n\n@_wraps(np.nonzero, lax_description=_NONZERO_DOC)\ndef nonzero(a):\n # Note: this function cannot be jitted because its output has a dynamic\n # shape.\n a = core.concrete_or_error(atleast_1d, a, \"The error arose in jnp.nonzero\")\n dims = shape(a)\n ndims = len(dims)\n ds = [lax.broadcasted_iota(int_, dims + (1,), i) for i in range(ndims)]\n d = concatenate(ds, axis=-1)\n indexes = d[a != 0]\n return tuple(indexes[..., i] for i in range(ndims))\n\n\n@_wraps(np.flatnonzero)\ndef flatnonzero(a):\n return nonzero(ravel(a))[0]\n\n\ndef _make_nan_reduction(np_reduction, jnp_reduction, init_val, nan_if_all_nan):\n @_wraps(np_reduction)\n def nan_reduction(a, axis=None, out=None, keepdims=False, **kwargs):\n _check_arraylike(np_reduction.__name__, a)\n out = jnp_reduction(where(isnan(a), _reduction_init_val(a, init_val), a),\n axis=axis, out=out, keepdims=keepdims, **kwargs)\n if nan_if_all_nan:\n return where(all(isnan(a), axis=axis, keepdims=keepdims),\n _constant_like(a, nan), out)\n else:\n return out\n\n return nan_reduction\n\nnanmin = _make_nan_reduction(np.nanmin, min, inf, nan_if_all_nan=True)\nnanmax = _make_nan_reduction(np.nanmax, max, -inf, nan_if_all_nan=True)\nnansum = _make_nan_reduction(np.nansum, sum, 0, nan_if_all_nan=False)\nnanprod = _make_nan_reduction(np.nanprod, prod, 1, nan_if_all_nan=False)\n\n@_wraps(np.nanmean)\ndef nanmean(a, axis=None, dtype=None, out=None, keepdims=False):\n _check_arraylike(\"nanmean\", a)\n lax._check_user_dtype_supported(dtype, \"nanmean\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.nanmean is not supported.\")\n if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):\n return mean(a, axis, dtype, out, keepdims)\n if dtype is None:\n dtype = _dtype(a)\n nan_mask = logical_not(isnan(a))\n normalizer = sum(nan_mask, axis=axis, dtype=int32, keepdims=keepdims)\n normalizer = lax.convert_element_type(normalizer, dtype)\n td = lax.div(nansum(a, axis, 
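\n# --- Illustrative aside (not part of the original source): `nonzero` above has\n# a data-dependent output shape, so it only works on concrete values, while\n# `count_nonzero` has a static output shape and stays jittable. Sketch:\n#\n#   >>> import jax, jax.numpy as jnp\n#   >>> jnp.nonzero(jnp.array([0, 3, 0, 5]))\n#   (array([1, 3]),)\n#   >>> jax.jit(jnp.count_nonzero)(jnp.array([0, 3, 0, 5]))   # fine: static shape\n#   2\n#   >>> jax.jit(jnp.nonzero)(jnp.array([0, 3, 0, 5]))         # raises a concretization error\n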
dtype=dtype, keepdims=keepdims), normalizer)\n return td\n\n\n@_wraps(np.nanvar)\ndef nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n _check_arraylike(\"nanvar\", a)\n lax._check_user_dtype_supported(dtype, \"nanvar\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.nanvar is not supported.\")\n\n a_dtype, dtype = _var_promote_types(_dtype(a), dtype)\n a_mean = nanmean(a, axis, dtype=a_dtype, keepdims=True)\n centered = a - a_mean\n if issubdtype(centered.dtype, complexfloating):\n centered = lax.real(lax.mul(centered, lax.conj(centered)))\n else:\n centered = lax.square(centered)\n\n normalizer = sum(logical_not(isnan(a)), axis=axis, keepdims=keepdims)\n normalizer = normalizer - ddof\n if config.omnistaging_enabled:\n normalizer_mask = lax.le(normalizer, 0)\n else:\n zero = lax.full_like(normalizer, 0, shape=())\n normalizer_mask = lax.le(normalizer, zero)\n\n result = nansum(centered, axis, keepdims=keepdims)\n result = where(normalizer_mask, nan, result)\n divisor = where(normalizer_mask, 1, normalizer)\n out = lax.div(result, lax.convert_element_type(divisor, result.dtype))\n return lax.convert_element_type(out, dtype)\n\n\n@_wraps(np.nanstd)\ndef nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n _check_arraylike(\"nanstd\", a)\n lax._check_user_dtype_supported(dtype, \"nanstd\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.nanstd is not supported.\")\n return sqrt(nanvar(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims))\n\n\ndef _make_cumulative_reduction(np_reduction, reduction, fill_nan=False, fill_value=0):\n # We want to allow XLA to fuse the pad and reduce-window operators to\n # avoid materializing the padded output.\n # Consider removing `jit` once again if reduce-window is generalized to\n # support arbitrary padding.\n @partial(jit, static_argnums=(1, 2))\n def _cumulative_reduction(a, axis, dtype):\n if axis is None or isscalar(a):\n a = ravel(a)\n axis = 0\n\n a_shape = list(shape(a))\n num_dims = len(a_shape)\n axis = _canonicalize_axis(axis, num_dims)\n\n if fill_nan:\n a = where(isnan(a), _constant_like(a, fill_value), a)\n\n if not dtype and _dtype(a) == bool_:\n dtype = int_\n if dtype:\n a = lax.convert_element_type(a, dtype)\n\n return reduction(a, axis)\n\n @_wraps(np_reduction)\n def cumulative_reduction(a, axis=None, dtype=None, out=None):\n _check_arraylike(np_reduction.__name__, a)\n if out is not None:\n raise NotImplementedError(f\"The 'out' argument to jnp.{np_reduction.__name__} \"\n f\"is not supported.\")\n lax._check_user_dtype_supported(dtype, np_reduction.__name__)\n # jit doesn't support kwargs as static_args.\n return _cumulative_reduction(a, axis, dtype)\n return cumulative_reduction\n\n\ncumsum = _make_cumulative_reduction(np.cumsum, lax.cumsum, fill_nan=False)\ncumprod = _make_cumulative_reduction(np.cumprod, lax.cumprod, fill_nan=False)\ncumproduct = cumprod\nnancumsum = _make_cumulative_reduction(np.nancumsum, lax.cumsum,\n fill_nan=True, fill_value=0)\nnancumprod = _make_cumulative_reduction(np.nancumprod, lax.cumprod,\n fill_nan=True, fill_value=1)\n\n\n@_wraps(np.unwrap)\ndef unwrap(p, discont=pi, axis=-1):\n _check_arraylike(\"unwrap\", p)\n dd = diff(p, axis=axis)\n ddmod = mod(dd + pi, 2 * pi) - pi\n ddmod = where((ddmod == -pi) & (dd > 0), pi, ddmod)\n\n ph_correct = where(abs(dd) < discont, 0, ddmod - dd)\n\n up = concatenate((\n lax.slice_in_dim(p, 0, 1, axis=axis),\n lax.slice_in_dim(p, 1, None, axis=axis) + 
cumsum(ph_correct, axis=axis)\n ), axis=axis)\n\n return up\n\n\n### Array-creation functions\n\ndef _check_no_padding(axis_padding, mode):\n if (axis_padding[0] > 0 or axis_padding[1] > 0):\n msg = \"Cannot apply '{}' padding to empty axis\"\n raise ValueError(msg.format(mode))\n\n\ndef _pad_constant(array, pad_width, constant_values):\n nd = ndim(array)\n constant_values = broadcast_to(asarray(constant_values), (nd, 2))\n constant_values = lax.convert_element_type(constant_values, array.dtype)\n for i in range(nd):\n widths = [(0, 0, 0)] * nd\n widths[i] = (pad_width[i, 0], 0, 0)\n array = lax.pad(array, constant_values[i, 0], widths)\n widths[i] = (0, pad_width[i, 1], 0)\n array = lax.pad(array, constant_values[i, 1], widths)\n return array\n\n\ndef _pad_wrap(array, pad_width):\n for i in range(ndim(array)):\n if array.shape[i] == 0:\n _check_no_padding(pad_width[i], \"wrap\")\n continue\n size = array.shape[i]\n repeats, (left_remainder, right_remainder) = _divmod(pad_width[i], size)\n total_repeats = repeats.sum() + 1\n parts = []\n if left_remainder:\n parts += [lax.slice_in_dim(array, size - left_remainder, size, axis=i)]\n parts += total_repeats * [array]\n if right_remainder:\n parts += [lax.slice_in_dim(array, 0, right_remainder, axis=i)]\n array = lax.concatenate(parts, dimension=i)\n return array\n\n\ndef _pad_symmetric_or_reflect(array, pad_width, mode):\n assert mode in (\"symmetric\", \"reflect\")\n\n for i in range(ndim(array)):\n if array.shape[i] == 0:\n _check_no_padding(pad_width[i], mode)\n continue\n\n n = array.shape[i]\n rarray = lax.rev(array, dimensions=(i,))\n offset = 1 if (mode == \"reflect\" and n > 1) else 0\n\n def build_padding(padding, forward):\n xs = []\n delta = n - offset\n while padding > delta:\n padding -= delta\n p = array if forward else rarray\n xs.append(lax.slice_in_dim(p, offset, n, axis=i))\n forward = not forward\n if padding > 0:\n x = lax.slice_in_dim(array if forward else rarray, offset,\n padding + offset, axis=i)\n xs.append(x)\n return xs\n\n parts = reversed(build_padding(pad_width[i, 0], forward=True))\n parts = [lax.rev(x, dimensions=(i,)) for x in parts]\n parts += [array]\n parts += build_padding(pad_width[i, 1], forward=False)\n array = lax.concatenate(parts, dimension=i)\n return array\n\n\ndef _pad_edge(array, pad_width):\n nd = ndim(array)\n for i in range(nd):\n if array.shape[i] == 0:\n _check_no_padding(pad_width[i], \"edge\")\n continue\n\n n = array.shape[i]\n npad_before, npad_after = pad_width[i]\n\n edge_before = lax.slice_in_dim(array, 0, 1, axis=i)\n pad_before = repeat(edge_before, npad_before, axis=i)\n\n edge_after = lax.slice_in_dim(array, n-1, n, axis=i)\n pad_after = repeat(edge_after, npad_after, axis=i)\n\n array = lax.concatenate([pad_before, array, pad_after], dimension=i)\n return array\n\n\n@partial(jit, static_argnums=(1, 2))\ndef _pad(array, pad_width, mode, constant_values):\n array = asarray(array)\n nd = ndim(array)\n\n if nd == 0:\n return array\n\n pad_width_shape = np.shape(pad_width)\n if pad_width_shape == (nd, 2):\n # ((before_1, after_1), ..., (before_N, after_N))\n pass\n elif pad_width_shape == (1, 2):\n # ((before, after),)\n pad_width = pad_width * nd\n elif pad_width_shape == (2,):\n # (before, after) (not in the numpy docstring but works anyway)\n before, after = pad_width\n pad_width = (pad_width,) * nd\n elif pad_width_shape == (1,):\n # (pad,)\n pad_width, = pad_width\n pad_width = ((pad_width, pad_width),) * nd\n elif pad_width_shape == ():\n # pad\n pad_width = ((pad_width, 
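\n# --- Illustrative aside (not part of the original source): a sketch of the\n# padding modes implemented by the helpers above, using hypothetical values.\n#\n#   >>> import jax.numpy as jnp\n#   >>> jnp.pad(jnp.array([1, 2, 3]), (1, 2), mode='edge')\n#   [1, 1, 2, 3, 3, 3]\n#   >>> jnp.pad(jnp.array([1, 2, 3]), 2, mode='reflect')\n#   [3, 2, 1, 2, 3, 2, 1]\n#   >>> jnp.pad(jnp.array([1, 2, 3]), (0, 2), mode='constant', constant_values=9)\n#   [1, 2, 3, 9, 9]\n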
pad_width),) * nd\n else:\n raise ValueError(f\"pad_width given unexpected structure: {pad_width}. \"\n \"See docstring for valid pad_width formats.\")\n pad_width = np.array(pad_width)\n assert pad_width.shape == (nd, 2), pad_width\n\n if np.any(pad_width < 0):\n raise ValueError(\"index can't contain negative values\")\n\n if mode == \"constant\":\n return _pad_constant(array, pad_width, constant_values)\n\n elif mode == \"wrap\":\n return _pad_wrap(array, pad_width)\n\n elif mode in (\"symmetric\", \"reflect\"):\n return _pad_symmetric_or_reflect(array, pad_width, mode)\n\n elif mode == \"edge\":\n return _pad_edge(array, pad_width)\n\n else:\n msg = \"Unimplemented padding mode '{}' for np.pad.\"\n raise NotImplementedError(msg.format(mode))\n\n\n@_wraps(np.pad)\ndef pad(array, pad_width, mode=\"constant\", constant_values=0):\n if isinstance(pad_width, Iterable):\n pad_width = tuple(\n tuple(int(i) for i in x) if isinstance(x, Iterable) else x\n for x in pad_width)\n return _pad(array, pad_width, mode, constant_values)\n\n\n@_wraps(np.stack)\ndef stack(arrays, axis=0, out=None):\n if not len(arrays):\n raise ValueError(\"Need at least one array to stack.\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.stack is not supported.\")\n _check_arraylike(\"stack\", *arrays)\n shape0 = shape(arrays[0])\n axis = _canonicalize_axis(axis, len(shape0) + 1)\n new_arrays = []\n for a in arrays:\n if shape(a) != shape0:\n raise ValueError(\"All input arrays must have the same shape.\")\n new_arrays.append(expand_dims(a, axis))\n return concatenate(new_arrays, axis=axis)\n\n@_wraps(np.tile)\ndef tile(A, reps):\n _check_arraylike(\"tile\", A)\n if isinstance(reps, int):\n reps = (reps,)\n A_shape = (1,) * (len(reps) - ndim(A)) + shape(A)\n reps = (1,) * (len(A_shape) - len(reps)) + tuple(reps)\n result = broadcast_to(reshape(A, [j for i in A_shape for j in [1, i]]),\n [k for pair in zip(reps, A_shape) for k in pair])\n return reshape(result, tuple(np.multiply(A_shape, reps)))\n\n@_wraps(np.concatenate)\ndef concatenate(arrays, axis=0):\n _check_arraylike(\"concatenate\", *arrays)\n if not len(arrays):\n raise ValueError(\"Need at least one array to concatenate.\")\n if ndim(arrays[0]) == 0:\n raise ValueError(\"Zero-dimensional arrays cannot be concatenated.\")\n if axis is None:\n return concatenate([ravel(a) for a in arrays], axis=0)\n axis = _canonicalize_axis(axis, ndim(arrays[0]))\n arrays = _promote_dtypes(*arrays)\n # lax.concatenate can be slow to compile for wide concatenations, so form a\n # tree of concatenations as a workaround especially for op-by-op mode.\n # (https://github.com/google/jax/issues/653).\n k = 16\n if len(arrays) == 1:\n return array(arrays[0])\n else:\n while len(arrays) > 1:\n arrays = [lax.concatenate(arrays[i:i+k], axis)\n for i in range(0, len(arrays), k)]\n return arrays[0]\n\n\n@_wraps(np.vstack)\ndef vstack(tup):\n return concatenate([atleast_2d(m) for m in tup], axis=0)\nrow_stack = vstack\n\n\n@_wraps(np.hstack)\ndef hstack(tup):\n arrs = [atleast_1d(m) for m in tup]\n if arrs[0].ndim == 1:\n return concatenate(arrs, 0)\n return concatenate(arrs, 1)\n\n\n@_wraps(np.dstack)\ndef dstack(tup):\n return concatenate([atleast_3d(m) for m in tup], axis=2)\n\n\n@_wraps(np.column_stack)\ndef column_stack(tup):\n arrays = []\n for v in tup:\n arr = array(v)\n if arr.ndim < 2:\n arr = atleast_2d(arr).T\n arrays.append(arr)\n return concatenate(arrays, 1)\n\n\n@_wraps(np.choose)\ndef choose(a, choices, out=None, mode='raise'):\n if out is not 
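\n# --- Illustrative aside (not part of the original source): `concatenate` above\n# chunks wide concatenations into a tree of at most 16-way `lax.concatenate`\n# calls to keep compilation manageable. Basic usage sketch:\n#\n#   >>> import jax.numpy as jnp\n#   >>> jnp.stack([jnp.array([1, 2]), jnp.array([3, 4])], axis=1)\n#   [[1, 3], [2, 4]]\n#   >>> jnp.tile(jnp.array([1, 2]), (2, 2))\n#   [[1, 2, 1, 2], [1, 2, 1, 2]]\n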
None:\n raise NotImplementedError(\"The 'out' argument to jnp.choose is not supported.\")\n _check_arraylike('choose', a, *choices)\n if not issubdtype(_dtype(a), integer):\n raise ValueError(\"`a` array must be integer typed\")\n N = len(choices)\n\n if mode == 'raise':\n a = core.concrete_or_error(array, a,\n \"The error occurred because jnp.choose was jit-compiled\"\n \" with mode='raise'. Use mode='wrap' or mode='clip' instead.\")\n if any((a < 0) | (a >= N)):\n raise ValueError(\"invalid entry in choice array\")\n elif mode == 'wrap':\n a = a % N\n elif mode == 'clip':\n a = clip(a, 0, N - 1)\n else:\n raise ValueError(f\"mode={mode!r} not understood. Must be 'raise', 'wrap', or 'clip'\")\n\n a, *choices = broadcast_arrays(a, *choices)\n return array(choices)[(a,) + indices(a.shape, sparse=True)]\n\n\ndef _atleast_nd(x, n):\n m = ndim(x)\n return lax.broadcast(x, (1,) * (n - m)) if m < n else x\n\ndef _block(xs):\n if isinstance(xs, tuple):\n raise ValueError(\"jax.numpy.block does not allow tuples, got {}\"\n .format(xs))\n elif isinstance(xs, list):\n if len(xs) == 0:\n raise ValueError(\"jax.numpy.block does not allow empty list arguments\")\n xs, depths = unzip2([_block(x) for x in xs])\n if _any(d != depths[0] for d in depths[1:]):\n raise ValueError(\"Mismatched list depths in jax.numpy.block\")\n rank = _max(depths[0], _max(ndim(x) for x in xs))\n xs = [_atleast_nd(x, rank) for x in xs]\n return concatenate(xs, axis=-depths[0]), depths[0] + 1\n else:\n return asarray(xs), 1\n\n@_wraps(np.block)\n@jit\ndef block(arrays):\n out, _ = _block(arrays)\n return out\n\n\n@_wraps(np.atleast_1d, update_doc=False)\ndef atleast_1d(*arys):\n if len(arys) == 1:\n arr = array(arys[0])\n return arr if ndim(arr) >= 1 else reshape(arr, -1)\n else:\n return [atleast_1d(arr) for arr in arys]\n\n\n@_wraps(np.atleast_2d, update_doc=False)\ndef atleast_2d(*arys):\n if len(arys) == 1:\n arr = array(arys[0])\n if ndim(arr) >= 2:\n return arr\n elif ndim(arr) == 1:\n return expand_dims(arr, axis=0)\n else:\n return expand_dims(arr, axis=(0, 1))\n else:\n return [atleast_2d(arr) for arr in arys]\n\n\n@_wraps(np.atleast_3d, update_doc=False)\ndef atleast_3d(*arys):\n if len(arys) == 1:\n arr = array(arys[0])\n if ndim(arr) == 0:\n arr = expand_dims(arr, axis=(0, 1, 2))\n elif ndim(arr) == 1:\n arr = expand_dims(arr, axis=(0, 2))\n elif ndim(arr) == 2:\n arr = expand_dims(arr, axis=2)\n return arr\n else:\n return [atleast_3d(arr) for arr in arys]\n\n\n@_wraps(np.array)\ndef array(object, dtype=None, copy=True, order=\"K\", ndmin=0):\n if order is not None and order != \"K\":\n raise NotImplementedError(\"Only implemented for order='K'\")\n lax._check_user_dtype_supported(dtype, \"array\")\n dtype = dtype and dtypes.canonicalize_dtype(dtype)\n\n if _can_call_numpy_array(object):\n object = _np_array(object, dtype=dtype, ndmin=ndmin)\n assert type(object) not in dtypes.python_scalar_dtypes\n\n if type(object) is np.ndarray:\n out = _device_put_raw(object)\n if dtype: assert _dtype(out) == dtype\n elif isinstance(object, (DeviceArray, core.Tracer)):\n if isinstance(object, DeviceArray) and copy:\n # We perform a copy by bouncing back to the host\n # TODO(phawkins): add a device runtime function to copy a buffer\n out = _device_put_raw(_np_asarray(object))\n else:\n out = object\n elif isinstance(object, (list, tuple)):\n if object:\n out = stack([array(elt, dtype=dtype) for elt in object])\n else:\n out = _device_put_raw(_np_array([], dtype=dtype))\n else:\n try:\n view = memoryview(object)\n except 
TypeError:\n pass # `object` does not support the buffer interface.\n else:\n return array(_np_asarray(view), dtype, copy)\n\n raise TypeError(\"Unexpected input type for array: {}\".format(type(object)))\n\n if dtype and _dtype(out) != dtype:\n out = lax.convert_element_type(out, dtype)\n\n if ndmin > ndim(out):\n out = lax.broadcast(out, (1,) * (ndmin - ndim(out)))\n return out\n\ndef _can_call_numpy_array(x):\n return _all(not isinstance(l, (core.Tracer, DeviceArray))\n for l in tree_leaves(x))\n\n\n@_wraps(np.asarray)\ndef asarray(a, dtype=None, order=None):\n lax._check_user_dtype_supported(dtype, \"asarray\")\n dtype = dtypes.canonicalize_dtype(dtype) if dtype is not None else dtype\n return array(a, dtype=dtype, copy=False, order=order)\n\n\n@_wraps(np.zeros_like)\ndef zeros_like(a, dtype=None, shape=None):\n _check_arraylike(\"zeros_like\", a)\n lax._check_user_dtype_supported(dtype, \"zeros_like\")\n if np.isscalar(shape):\n shape = (shape,)\n return lax.full_like(a, 0, dtype, shape)\n\n\n@_wraps(np.ones_like)\ndef ones_like(a, dtype=None, shape=None):\n _check_arraylike(\"ones_like\", a)\n lax._check_user_dtype_supported(dtype, \"ones_like\")\n if np.isscalar(shape):\n shape = (shape,)\n return lax.full_like(a, 1, dtype, shape)\n\n\n@_wraps(np.full)\ndef full(shape, fill_value, dtype=None):\n lax._check_user_dtype_supported(dtype, \"full\")\n shape = (shape,) if ndim(shape) == 0 else shape\n return lax.full(shape, fill_value, dtype)\n\n\n@_wraps(np.full_like)\ndef full_like(a, fill_value, dtype=None, shape=None):\n _check_arraylike(\"full_like\", a)\n lax._check_user_dtype_supported(dtype, \"full_like\")\n if np.isscalar(shape):\n shape = (shape,)\n return lax.full_like(a, fill_value, dtype, shape)\n\n\n@_wraps(np.zeros)\ndef zeros(shape, dtype=None):\n if isinstance(shape, types.GeneratorType):\n raise TypeError(\"expected sequence object with len >= 0 or a single integer\")\n lax._check_user_dtype_supported(dtype, \"zeros\")\n dtype = float_ if dtype is None else dtype\n shape = (shape,) if ndim(shape) == 0 else shape\n return lax.full(shape, 0, dtype)\n\n@_wraps(np.ones)\ndef ones(shape, dtype=None):\n if isinstance(shape, types.GeneratorType):\n raise TypeError(\"expected sequence object with len >= 0 or a single integer\")\n lax._check_user_dtype_supported(dtype, \"ones\")\n dtype = float_ if dtype is None else dtype\n shape = (shape,) if ndim(shape) == 0 else shape\n return lax.full(shape, 1, dtype)\n\n\n@_wraps(np.array_equal)\ndef array_equal(a1, a2, equal_nan=False):\n try:\n a1, a2 = asarray(a1), asarray(a2)\n except Exception:\n return False\n if shape(a1) != shape(a2):\n return False\n eq = asarray(a1 == a2)\n if equal_nan:\n eq = logical_or(eq, logical_and(isnan(a1), isnan(a2)))\n return all(eq)\n\n\n@_wraps(np.array_equiv)\ndef array_equiv(a1, a2):\n try:\n a1, a2 = asarray(a1), asarray(a2)\n except Exception:\n return False\n try:\n eq = equal(a1, a2)\n except ValueError:\n # shapes are not broadcastable\n return False\n return all(eq)\n\n\n# We can't create uninitialized arrays in XLA; use zeros for empty.\nempty_like = zeros_like\nempty = zeros\n\n\n@_wraps(np.eye)\ndef eye(N, M=None, k=0, dtype=None):\n lax._check_user_dtype_supported(dtype, \"eye\")\n dtype = float_ if dtype is None else dtype\n M = N if M is None else M\n k = int(k)\n if N < 0 or M < 0:\n msg = \"negative dimensions are not allowed, got {} and {}\"\n raise ValueError(msg.format(N, M))\n if k is not None:\n k_dtype = _dtype(k)\n if not issubdtype(k_dtype, integer):\n msg = \"eye argument `k` 
must be of integer dtype, got {}\"\n raise TypeError(msg.format(k_dtype))\n return lax._eye(dtype, (N, M), k)\n\n\n@_wraps(np.identity)\ndef identity(n, dtype=None):\n lax._check_user_dtype_supported(dtype, \"identity\")\n return eye(n, dtype=dtype)\n\n\n@_wraps(np.arange)\ndef arange(start, stop=None, step=None, dtype=None):\n lax._check_user_dtype_supported(dtype, \"arange\")\n require = partial(core.concrete_or_error, _np_asarray)\n msg = \"It arose in jax.numpy.arange argument `{}`.\".format\n if stop is None and step is None:\n start = require(start, msg(\"stop\"))\n dtype = dtype or _dtype(start)\n return lax.iota(dtype, np.ceil(start)) # avoids materializing\n else:\n start = require(start, msg(\"start\"))\n stop = None if stop is None else require(stop, msg(\"stop\"))\n step = None if step is None else require(step, msg(\"step\"))\n if dtype is None:\n dtype = _dtype(start, *(x for x in [stop, step] if x is not None))\n return array(np.arange(start, stop=stop, step=step, dtype=dtype))\n\n\ndef _wrap_numpy_nullary_function(f):\n \"\"\"Adapts `f` to return a DeviceArray instead of an np.ndarray.\n\n `f` cannot have any non-static array arguments.\n \"\"\"\n @_wraps(f, update_doc=False)\n def wrapper(*args, **kwargs):\n return asarray(f(*args, **kwargs))\n return wrapper\n\n\n@_wraps(np.linspace)\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,\n axis=0):\n \"\"\"Implementation of linspace differentiable in start and stop args.\"\"\"\n lax._check_user_dtype_supported(dtype, \"linspace\")\n if num < 0:\n raise ValueError(\"Number of samples, %s, must be non-negative.\" % num)\n\n dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))\n computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))\n start = asarray(start, dtype=computation_dtype)\n stop = asarray(stop, dtype=computation_dtype)\n\n bounds_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))\n broadcast_start = broadcast_to(start, bounds_shape)\n broadcast_stop = broadcast_to(stop, bounds_shape)\n axis = len(bounds_shape) + axis + 1 if axis < 0 else axis\n bounds_shape.insert(axis, 1)\n iota_shape = [1,] * len(bounds_shape)\n iota_shape[axis] = num\n div = (num - 1) if endpoint else num\n if num > 1:\n delta = lax.convert_element_type(stop - start, computation_dtype) / div\n if issubdtype(dtype, integer):\n # This is similar to how numpy computes linspace, but it\n # can fail to recover the endpoints in float32 arithmetic.\n out = (reshape(broadcast_start, bounds_shape) +\n reshape(lax.iota(dtype, num), iota_shape) *\n reshape(delta, bounds_shape))\n else:\n # This approach recovers the endpoints with float32 arithmetic,\n # but can lead to rounding errors for integer outputs.\n step = reshape(lax.iota(computation_dtype, num), iota_shape) / div\n out = (reshape(broadcast_start, bounds_shape) * (1 - step) +\n reshape(broadcast_stop, bounds_shape) * step)\n elif num == 1:\n delta = nan if endpoint else stop - start\n out = reshape(broadcast_start, bounds_shape)\n else: # num == 0 degenerate case, match numpy behavior\n empty_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))\n empty_shape.insert(axis, 0)\n delta = nan\n out = reshape(array([], dtype=dtype), empty_shape)\n if retstep:\n return lax.convert_element_type(out, dtype), delta\n else:\n return lax.convert_element_type(out, dtype)\n\n\n@_wraps(np.logspace)\ndef logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):\n \"\"\"Implementation of logspace 
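\n# --- Illustrative aside (not part of the original source): `linspace` above\n# recovers both endpoints for inexact dtypes by interpolating with\n# start * (1 - step) + stop * step. Sketch with hypothetical values:\n#\n#   >>> import jax.numpy as jnp\n#   >>> jnp.linspace(0., 1., 5)\n#   [0., 0.25, 0.5, 0.75, 1.]\n#   >>> jnp.linspace(0., 1., 5, endpoint=False)\n#   [0., 0.2, 0.4, 0.6, 0.8]\n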
differentiable in start and stop args.\"\"\"\n lax._check_user_dtype_supported(dtype, \"logspace\")\n dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))\n computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))\n start = asarray(start, dtype=computation_dtype)\n stop = asarray(stop, dtype=computation_dtype)\n lin = linspace(start, stop, num,\n endpoint=endpoint, retstep=False, dtype=None, axis=axis)\n return lax.convert_element_type(power(base, lin), dtype)\n\n\n@_wraps(np.geomspace)\ndef geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):\n \"\"\"Implementation of geomspace differentiable in start and stop args.\"\"\"\n lax._check_user_dtype_supported(dtype, \"geomspace\")\n dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))\n computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))\n start = asarray(start, dtype=computation_dtype)\n stop = asarray(stop, dtype=computation_dtype)\n # follow the numpy geomspace convention for negative and complex endpoints\n signflip = 1 - (1 - sign(real(start))) * (1 - sign(real(stop))) // 2\n res = signflip * logspace(log10(signflip * start),\n log10(signflip * stop), num,\n endpoint=endpoint, base=10.0,\n dtype=computation_dtype, axis=0)\n if axis != 0:\n res = moveaxis(res, 0, axis)\n return lax.convert_element_type(res, dtype)\n\n\n@_wraps(np.meshgrid)\ndef meshgrid(*args, **kwargs):\n indexing = kwargs.get(\"indexing\", \"xy\")\n sparse = kwargs.get(\"sparse\", False)\n copy = kwargs.get(\"copy\", True)\n if not copy:\n raise ValueError(\"jax.numpy.meshgrid only supports copy=True\")\n\n args = list(args)\n if indexing == \"xy\":\n if len(args) >= 2:\n args[0], args[1] = args[1], args[0]\n elif indexing != \"ij\":\n raise ValueError(\"Valid values for indexing are 'xy' and 'ij', got {}\"\n .format(indexing))\n\n shape = []\n for i, a in enumerate(args):\n args[i] = a = asarray(a)\n if len(a.shape) != 1:\n msg = \"Arguments to jax.numpy.meshgrid must be 1D, got shape {}\"\n raise ValueError(msg.format(a.shape))\n shape.append(1 if sparse else a.shape[0])\n\n output = []\n for i, a in enumerate(args):\n a = asarray(a)\n s = shape\n if sparse:\n s = list(s)\n s[i] = a.shape[0]\n output.append(lax.broadcast_in_dim(a, s, (i,)))\n\n if indexing == \"xy\" and len(args) >= 2:\n output[0], output[1] = output[1], output[0]\n\n return output\n\n\n@_wraps(np.i0)\ndef i0(x):\n x = lax.abs(*_promote_args_inexact(\"i0\", x))\n return lax.mul(lax.exp(x), lax.bessel_i0e(x))\n\n\n@_wraps(np.ix_)\ndef ix_(*args):\n n = len(args)\n output = []\n for i, a in enumerate(args):\n a = asarray(a)\n if len(a.shape) != 1:\n msg = \"Arguments to jax.numpy.ix_ must be 1-dimensional, got shape {}\"\n raise ValueError(msg.format(a.shape))\n if _dtype(a) == bool_:\n raise NotImplementedError(\n \"Boolean arguments to jax.numpy.ix_ are not implemented\")\n shape = [1] * n\n shape[i] = a.shape[0]\n if a.size == 0:\n # Numpy uses an integer index type for empty arrays.\n output.append(lax.full(shape, np.zeros((), np.intp)))\n else:\n output.append(lax.broadcast_in_dim(a, shape, (i,)))\n return tuple(output)\n\n\n@_wraps(np.indices)\ndef indices(dimensions, dtype=int32, sparse=False):\n dimensions = tuple(\n core.concrete_or_error(int, d, \"dimensions argument of jnp.indices\")\n for d in dimensions)\n N = len(dimensions)\n output = []\n s = dimensions\n for i, dim in enumerate(dimensions):\n idx = lax.iota(dtype, dim)\n if sparse:\n s = (1,)*i + (dim,) + (1,)*(N - i - 1)\n 
output.append(lax.broadcast_in_dim(idx, s, (i,)))\n if sparse:\n return tuple(output)\n return stack(output, 0) if output else array([], dtype=dtype)\n\n\n_TOTAL_REPEAT_LENGTH_DOC = \"\"\"\\\nJax adds the optional `total_repeat_length` parameter which specifies the total\nnumber of repeats, and defaults to sum(repeats). It must be specified for repeat\nto be compilable. If `sum(repeats)` is larger than the specified\n`total_repeat_length` the remaining values will be discarded. In the case of\n`sum(repeats)` being smaller than the specified target length, the final value\nwill be repeated.\n\"\"\"\n\n\n@_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)\ndef repeat(a, repeats, axis=None, *, total_repeat_length=None):\n _check_arraylike(\"repeat\", a)\n\n if axis is None:\n a = ravel(a)\n axis = 0\n\n # If total_repeat_length is not given, can't compile, use a default.\n if total_repeat_length is None:\n repeats = core.concrete_or_error(np.array, repeats,\n \"When jit-compiling jnp.repeat, the total number of repeats must be static. \"\n \"To fix this, either specify a static value for `repeats`, or pass a static \"\n \"value to `total_repeat_length`.\")\n repeats = np.ravel(repeats)\n if ndim(a) != 0:\n repeats = np.broadcast_to(repeats, [a.shape[axis]])\n total_repeat_length = np.sum(repeats)\n else:\n repeats = ravel(repeats)\n if ndim(a) != 0:\n repeats = broadcast_to(repeats, [a.shape[axis]])\n\n # Special case when a is a scalar.\n if ndim(a) == 0:\n if repeats.shape == (1,):\n return full([total_repeat_length], a)\n else:\n raise ValueError('`repeat` with a scalar parameter `a` is only '\n 'implemented for scalar values of the parameter `repeats`.')\n\n # Special case if total_repeat_length is zero.\n if total_repeat_length == 0:\n result_shape = list(a.shape)\n result_shape[axis] = 0\n return reshape(array([], dtype=a.dtype), result_shape)\n\n # If repeats is on a zero sized axis, then return the array.\n if a.shape[axis] == 0:\n return a\n\n # This implementation of repeat avoids having to instantiate a large\n # intermediate tensor.\n\n # Modify repeats from e.g. [1,2,0,5] -> [0,1,2,0] for exclusive repeat.\n exclusive_repeats = roll(repeats, shift=1).at[0].set(0)\n # Cumsum to get indices of new number in repeated tensor, e.g. [0, 1, 3, 3]\n scatter_indices = cumsum(exclusive_repeats)\n # Scatter these onto a zero buffer, e.g. [1,1,0,2,0,0,0,0]\n block_split_indicators = ops.index_add(\n x=zeros([total_repeat_length], dtype=int32),\n idx=scatter_indices,\n y=1)\n # Cumsum again to get scatter indices for repeat, e.g. 
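\n# --- Illustrative aside (not part of the original source): how the static\n# `total_repeat_length` above interacts with `sum(repeats)`. Hypothetical values:\n#\n#   >>> import jax.numpy as jnp\n#   >>> a, r = jnp.array([3, 4]), jnp.array([2, 3])\n#   >>> jnp.repeat(a, r, total_repeat_length=5)   # equals sum(repeats)\n#   [3, 3, 4, 4, 4]\n#   >>> jnp.repeat(a, r, total_repeat_length=4)   # excess values discarded\n#   [3, 3, 4, 4]\n#   >>> jnp.repeat(a, r, total_repeat_length=6)   # final value repeated\n#   [3, 3, 4, 4, 4, 4]\n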
[0,1,1,3,3,3,3,3]\n gather_indices = cumsum(block_split_indicators) - 1\n return take(a, gather_indices, axis=axis)\n\n\n@_wraps(np.tri)\ndef tri(N, M=None, k=0, dtype=None):\n lax._check_user_dtype_supported(dtype, \"tri\")\n M = M if M is not None else N\n dtype = dtype or float32\n return lax._tri(dtype, (N, M), k)\n\n\n@_wraps(np.tril)\ndef tril(m, k=0):\n _check_arraylike(\"tril\", m)\n m_shape = shape(m)\n if len(m_shape) < 2:\n raise ValueError(\"Argument to jax.numpy.tril must be at least 2D\")\n mask = tri(*m_shape[-2:], k=k, dtype=bool)\n return lax.select(lax.broadcast(mask, m_shape[:-2]), m, zeros_like(m))\n\n\n@_wraps(np.triu, update_doc=False)\ndef triu(m, k=0):\n _check_arraylike(\"triu\", m)\n m_shape = shape(m)\n if len(m_shape) < 2:\n raise ValueError(\"Argument to jax.numpy.triu must be at least 2D\")\n mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)\n return lax.select(lax.broadcast(mask, m_shape[:-2]), zeros_like(m), m)\n\n\n@_wraps(np.trace)\ndef trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n _check_arraylike(\"trace\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.trace is not supported.\")\n lax._check_user_dtype_supported(dtype, \"trace\")\n\n axis1 = _canonicalize_axis(axis1, ndim(a))\n axis2 = _canonicalize_axis(axis2, ndim(a))\n\n a_shape = shape(a)\n if dtype is None:\n dtype = _dtype(a)\n if issubdtype(dtype, integer):\n default_int = dtypes.canonicalize_dtype(np.int_)\n if iinfo(dtype).bits < iinfo(default_int).bits:\n dtype = default_int\n\n # Move the axis1 and axis2 dimensions to the end.\n perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2]\n perm = perm + [axis1, axis2]\n a = lax.transpose(a, perm)\n\n # Mask out the diagonal and reduce.\n a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),\n a, zeros_like(a))\n return sum(a, axis=(-2, -1), dtype=dtype)\n\n\ndef _wrap_indices_function(f):\n @_wraps(f, update_doc=False)\n def wrapper(*args, **kwargs):\n return tuple(asarray(x) for x in f(*args, **kwargs))\n return wrapper\n\ntril_indices = _wrap_indices_function(np.tril_indices)\ntriu_indices = _wrap_indices_function(np.triu_indices)\nmask_indices = _wrap_indices_function(np.mask_indices)\n\n\n@_wraps(np.triu_indices_from)\ndef triu_indices_from(arr, k=0):\n return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])\n\n\n@_wraps(np.tril_indices_from)\ndef tril_indices_from(arr, k=0):\n return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])\n\n\n@_wraps(np.diag_indices)\ndef diag_indices(n, ndim=2):\n if n < 0:\n raise ValueError(\"n argument to diag_indices must be nonnegative, got {}\"\n .format(n))\n if ndim < 0:\n raise ValueError(\"ndim argument to diag_indices must be nonnegative, got {}\"\n .format(ndim))\n return (lax.iota(int_, n),) * ndim\n\n@_wraps(np.diag_indices_from)\ndef diag_indices_from(arr):\n _check_arraylike(\"diag_indices_from\", arr)\n if not arr.ndim >= 2:\n raise ValueError(\"input array must be at least 2-d\")\n\n if len(set(arr.shape)) != 1:\n raise ValueError(\"All dimensions of input must be of equal length\")\n\n return diag_indices(arr.shape[0], ndim=arr.ndim)\n\n@_wraps(np.diagonal)\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n _check_arraylike(\"diagonal\", a)\n a_shape = shape(a)\n a_ndims = len(a_shape)\n\n # Move the two dimensions to the end.\n axis1 = _canonicalize_axis(axis1, a_ndims)\n axis2 = _canonicalize_axis(axis2, a_ndims)\n perm = [i for i in range(a_ndims) if i != axis1 and i != axis2]\n perm = perm + [axis1, axis2]\n a = 
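\n# --- Illustrative aside (not part of the original source): `trace` above masks\n# everything but the (offset) diagonal with `eye` and then reduces. Sketch:\n#\n#   >>> import jax.numpy as jnp\n#   >>> m = jnp.array([[1, 2], [3, 4]])\n#   >>> jnp.trace(m)\n#   5\n#   >>> jnp.trace(m, offset=1)\n#   2\n#   >>> jnp.triu(m)\n#   [[1, 2], [0, 4]]\n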
lax.transpose(a, perm)\n\n # Mask out the diagonal and reduce over one of the axes\n a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),\n a, zeros_like(a))\n reduce_axis = -2 if offset < 0 else -1\n d = sum(a, axis=reduce_axis, dtype=_dtype(a))\n\n # Slice out the correct diagonal size.\n diag_size = _max(0, _min(a_shape[axis1] + _min(offset, 0),\n a_shape[axis2] - _max(offset, 0)))\n return lax.slice_in_dim(d, 0, diag_size, axis=-1)\n\n\n@_wraps(np.diag)\ndef diag(v, k=0):\n _check_arraylike(\"diag\", v)\n v_shape = shape(v)\n if len(v_shape) == 1:\n zero = lambda x: lax.full_like(x, shape=(), fill_value=0)\n n = v_shape[0] + _abs(k)\n v = lax.pad(v, zero(v), ((_max(0, k), _max(0, -k), 0),))\n return where(eye(n, k=k, dtype=bool), v, zeros_like(v))\n elif len(v_shape) == 2:\n return diagonal(v, offset=k)\n else:\n raise ValueError(\"diag input must be 1d or 2d\")\n\n_SCALAR_VALUE_DOC=\"\"\"\\\nThis differs from np.diagflat for some scalar values of v:\njax always returns a two-dimensional array, whereas numpy may\nreturn a scalar depending on the type of v.\n\"\"\"\n\n@_wraps(np.diagflat, lax_description=_SCALAR_VALUE_DOC)\ndef diagflat(v, k=0):\n _check_arraylike(\"diagflat\", v)\n v = ravel(v)\n v_length = len(v)\n adj_length = v_length + _abs(k)\n res = zeros(adj_length*adj_length, dtype=v.dtype)\n i = arange(0, adj_length-_abs(k))\n if (k >= 0):\n fi = i+k+i*adj_length\n else:\n fi = i+(i-k)*adj_length\n res = ops.index_update(res, ops.index[fi], v)\n res = res.reshape(adj_length,adj_length)\n return res\n\n\n@_wraps(np.polyval)\ndef polyval(p, x):\n if isinstance(p, np.poly1d):\n p = np.asarray(p)\n if isinstance(x, np.poly1d):\n y = 0\n else:\n y = zeros_like(x)\n for i in range(len(p)):\n y = y * x + p[i]\n return y\n\n@_wraps(np.polyadd)\ndef polyadd(a1, a2):\n a1 = asarray(a1)\n a2 = asarray(a2)\n\n if a2.shape[0] <= a1.shape[0]:\n return a1.at[-a2.shape[0]:].add(a2)\n else:\n return a2.at[-a1.shape[0]:].add(a1)\n\n\n@_wraps(np.polyder)\ndef polyder(p, m=1):\n p = asarray(p)\n if m < 0:\n raise ValueError(\"Order of derivative must be non-negative\")\n if m == 0:\n return p\n if m % 1:\n raise ValueError(\"m must be an integer\")\n coeff = (arange(len(p), m, -1) - 1 - arange(m)[:, newaxis]).prod(0)\n return p[:-m] * coeff\n\n@_wraps(np.trim_zeros)\ndef trim_zeros(filt, trim='fb'):\n filt = core.concrete_or_error(asarray, filt,\n \"Error arose in the `filt` argument of trim_zeros()\")\n nz = asarray(filt) == 0\n if all(nz):\n return empty(0, _dtype(filt))\n start = argmin(nz) if 'f' in trim.lower() else 0\n end = argmin(nz[::-1]) if 'b' in trim.lower() else 0\n return filt[start:len(filt) - end]\n\n_LEADING_ZEROS_DOC=\"\"\"\\\nSetting trim_leading_zeros=True makes the output match that of numpy,\nbut it prevents the function from being used in compiled code.\n\"\"\"\n\n@_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC)\ndef polymul(a1, a2, *, trim_leading_zeros=False):\n if isinstance(a1, np.poly1d):\n a1 = asarray(a1)\n if isinstance(a2, np.poly1d):\n a2 = asarray(a2)\n if trim_leading_zeros and (len(a1) > 1 or len(a2) > 1):\n a1, a2 = trim_zeros(a1, trim='f'), trim_zeros(a2, trim='f')\n if len(a1) == 0:\n a1 = asarray([0.])\n if len(a2) == 0:\n a2 = asarray([0.])\n val = convolve(a1, a2, mode='full')\n return val\n\n@_wraps(np.polysub)\ndef polysub(a1, a2):\n return polyadd(asarray(a1), -asarray(a2))\n\n\n@_wraps(np.append)\ndef append(arr, values, axis=None):\n if axis is None:\n return concatenate([ravel(arr), ravel(values)], 0)\n else:\n return 
concatenate([arr, values], axis=axis)\n\n\n@_wraps(np.apply_along_axis)\ndef apply_along_axis(func1d, axis, arr, *args, **kwargs):\n num_dims = ndim(arr)\n axis = _canonicalize_axis(axis, num_dims)\n func = lambda arr: func1d(arr, *args, **kwargs)\n for i in range(1, num_dims - axis):\n func = jax.vmap(func, in_axes=i, out_axes=-1)\n for i in range(axis):\n func = jax.vmap(func, in_axes=0, out_axes=0)\n return func(arr)\n\n\n@_wraps(np.apply_over_axes)\ndef apply_over_axes(func, a, axes):\n for axis in axes:\n b = func(a, axis=axis)\n if b.ndim == a.ndim:\n a = b\n elif b.ndim == a.ndim - 1:\n a = expand_dims(b, axis)\n else:\n raise ValueError(\"function is not returning an array of the correct shape\")\n return a\n\n\n### Tensor contraction operations\n\n\n@_wraps(np.dot, lax_description=_PRECISION_DOC)\ndef dot(a, b, *, precision=None): # pylint: disable=missing-docstring\n _check_arraylike(\"dot\", a, b)\n a, b = _promote_dtypes(a, b)\n a_ndim, b_ndim = ndim(a), ndim(b)\n if a_ndim == 0 or b_ndim == 0:\n return lax.mul(a, b)\n if _max(a_ndim, b_ndim) <= 2:\n return lax.dot(a, b, precision=precision)\n\n if b_ndim == 1:\n contract_dims = ((a_ndim - 1,), (0,))\n else:\n contract_dims = ((a_ndim - 1,), (b_ndim - 2,))\n batch_dims = ((), ())\n return lax.dot_general(a, b, (contract_dims, batch_dims), precision)\n\n\n@_wraps(np.matmul, lax_description=_PRECISION_DOC)\ndef matmul(a, b, *, precision=None): # pylint: disable=missing-docstring\n _check_arraylike(\"matmul\", a, b)\n for i, x in enumerate((a, b)):\n if ndim(x) < 1:\n msg = (f\"matmul input operand {i} must have ndim at least 1, \"\n f\"but it has ndim {ndim(x)}\")\n raise ValueError(msg)\n\n a, b = _promote_dtypes(a, b)\n\n a_is_mat, b_is_mat = (ndim(a) > 1), (ndim(b) > 1)\n a_batch_dims = shape(a)[:-2] if a_is_mat else ()\n b_batch_dims = shape(b)[:-2] if b_is_mat else ()\n num_batch_dims = _max(len(a_batch_dims), len(b_batch_dims))\n a_batch_dims = (None,) * (num_batch_dims - len(a_batch_dims)) + a_batch_dims\n b_batch_dims = (None,) * (num_batch_dims - len(b_batch_dims)) + b_batch_dims\n\n # Dimensions to squeeze from the inputs.\n a_squeeze = []\n b_squeeze = []\n\n # Positions of batch dimensions in squeezed inputs.\n a_batch = []\n b_batch = []\n\n # Desired index in final output of each kind of dimension, in the order that\n # lax.dot_general will emit them.\n idx_batch = []\n idx_a_other = [] # other = non-batch, non-contracting.\n idx_b_other = []\n for i, (ba, bb) in enumerate(zip(a_batch_dims, b_batch_dims)):\n if ba is None:\n idx_b_other.append(i)\n elif bb is None:\n idx_a_other.append(i)\n elif ba == 1:\n idx_b_other.append(i)\n a_squeeze.append(len(idx_batch) + len(idx_a_other) + len(a_squeeze))\n elif bb == 1:\n idx_a_other.append(i)\n b_squeeze.append(len(idx_batch) + len(idx_b_other) + len(b_squeeze))\n elif ba == bb:\n a_batch.append(len(idx_batch) + len(idx_a_other))\n b_batch.append(len(idx_batch) + len(idx_b_other))\n idx_batch.append(i)\n else:\n raise ValueError(\"Incompatible shapes for matmul arguments: {} and {}\"\n .format(shape(a), shape(b)))\n\n if a_is_mat: idx_a_other.append(num_batch_dims)\n if b_is_mat: idx_b_other.append(num_batch_dims + a_is_mat)\n perm = np.argsort(np.concatenate([idx_batch, idx_a_other, idx_b_other]))\n\n a = lax.squeeze(a, tuple(a_squeeze))\n b = lax.squeeze(b, tuple(b_squeeze))\n out = lax.dot_general(\n a, b, (((ndim(a) - 1,), (ndim(b) - 1 - b_is_mat,)), (a_batch, b_batch)),\n precision=precision)\n return lax.transpose(out, perm)\n\n\n@_wraps(np.vdot, 
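\n# --- Illustrative aside (not part of the original source): `matmul` above\n# broadcasts leading batch dimensions and feeds them to a single\n# `lax.dot_general`. Shape sketch with hypothetical operands:\n#\n#   >>> import jax.numpy as jnp\n#   >>> a = jnp.ones((2, 3, 4))\n#   >>> jnp.matmul(a, jnp.ones((4, 5))).shape        # rhs broadcast over batch\n#   (2, 3, 5)\n#   >>> jnp.matmul(a, jnp.ones((2, 4, 5))).shape     # matching batch dims\n#   (2, 3, 5)\n#   >>> jnp.matmul(jnp.ones(4), jnp.ones((4, 5))).shape   # 1-D lhs: no leading axis\n#   (5,)\n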
lax_description=_PRECISION_DOC)\ndef vdot(a, b, *, precision=None):\n _check_arraylike(\"vdot\", a, b)\n if issubdtype(_dtype(a), complexfloating):\n a = conj(a)\n return dot(a.ravel(), b.ravel(), precision=precision)\n\n\n@_wraps(np.tensordot, lax_description=_PRECISION_DOC)\ndef tensordot(a, b, axes=2, *, precision=None):\n _check_arraylike(\"tensordot\", a, b)\n a_ndim = ndim(a)\n b_ndim = ndim(b)\n\n a, b = _promote_dtypes(a, b)\n if type(axes) is int:\n if axes > _min(a_ndim, b_ndim):\n msg = \"Number of tensordot axes (axes {}) exceeds input ranks ({} and {})\"\n raise TypeError(msg.format(axes, a.shape, b.shape))\n contracting_dims = tuple(range(a_ndim - axes, a_ndim)), tuple(range(axes))\n elif type(axes) in (list, tuple) and len(axes) == 2:\n ax1, ax2 = axes\n if type(ax1) == type(ax2) == int:\n contracting_dims = ((_canonicalize_axis(ax1, a_ndim),),\n (_canonicalize_axis(ax2, b_ndim),))\n elif type(ax1) in (list, tuple) and type(ax2) in (list, tuple):\n if len(ax1) != len(ax2):\n msg = \"tensordot requires axes lists to have equal length, got {} and {}.\"\n raise TypeError(msg.format(ax1, ax2))\n contracting_dims = (tuple(_canonicalize_axis(i, a_ndim) for i in ax1),\n tuple(_canonicalize_axis(i, b_ndim) for i in ax2))\n else:\n msg = (\"tensordot requires both axes lists to be either ints, tuples or \"\n \"lists, got {} and {}\")\n raise TypeError(msg.format(ax1, ax2))\n else:\n msg = (\"tensordot axes argument must be an int, a pair of ints, or a pair \"\n \"of lists/tuples of ints.\")\n raise TypeError(msg)\n return lax.dot_general(a, b, (contracting_dims, ((), ())),\n precision=precision)\n\n\n@_wraps(np.einsum, lax_description=_PRECISION_DOC)\ndef einsum(*operands, out=None, optimize='greedy', precision=None):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.einsum is not supported.\")\n optimize = 'greedy' if optimize is True else optimize\n # using einsum_call=True here is an internal api for opt_einsum\n operands, contractions = opt_einsum.contract_path(\n *operands, einsum_call=True, use_blas=True, optimize=optimize)\n contractions = tuple((a, frozenset(b), c) for a, b, c, *_ in contractions)\n return _einsum(operands, contractions, precision)\n\n@_wraps(np.einsum_path)\ndef einsum_path(subscripts, *operands, optimize='greedy'):\n # using einsum_call=True here is an internal api for opt_einsum\n return opt_einsum.contract_path(subscripts, *operands, optimize=optimize)\n\ndef _removechars(s, chars):\n return s.translate(str.maketrans(dict.fromkeys(chars)))\n\n@partial(jit, static_argnums=(1, 2))\ndef _einsum(operands: Sequence,\n contractions: Sequence[Tuple[Tuple[int, ...], FrozenSet[str], str]],\n precision):\n operands = list(_promote_dtypes(*operands))\n def sum(x, axes):\n return lax.reduce(x, np.array(0, x.dtype),\n lax.add if x.dtype != bool_ else lax.bitwise_or, axes)\n\n def sum_uniques(operand, names, uniques):\n if uniques:\n axes = [names.index(name) for name in uniques]\n operand = sum(operand, axes)\n names = _removechars(names, uniques)\n return operand, names\n\n def sum_repeats(operand, names, counts, keep_names):\n for name, count in counts.items():\n if count > 1:\n axes = [i for i, n in enumerate(names) if n == name]\n eye = lax._delta(operand.dtype, operand.shape, axes)\n if name not in keep_names:\n operand = sum(operand * eye, axes)\n names = names.replace(name, '')\n else:\n operand = sum(operand * eye, axes[:-1])\n names = names.replace(name, '', count - 1)\n return operand, names\n\n def filter_singleton_dims(operand, 
names, other_shape, other_names):\n s = shape(operand)\n new_shape = []\n new_names = []\n for i, d in enumerate(names):\n other_i = other_names.find(d)\n if s[i] != 1 or other_i == -1 or other_shape[other_i] == 1:\n new_shape.append(s[i])\n new_names.append(d)\n return reshape(operand, tuple(new_shape)), \"\".join(new_names)\n\n for operand_indices, contracted_names_set, einstr in contractions:\n contracted_names = sorted(contracted_names_set)\n input_str, result_names = einstr.split('->')\n input_names = input_str.split(',')\n\n # switch on the number of operands to be processed in this loop iteration.\n # every case here sets 'operand' and 'names'.\n if len(operand_indices) == 1:\n operand = operands.pop(operand_indices[0])\n names, = input_names\n counts = collections.Counter(names)\n\n # sum out unique contracted indices with a single reduce-sum\n uniques = [name for name in contracted_names if counts[name] == 1]\n operand, names = sum_uniques(operand, names, uniques)\n\n # for every repeated index, do a contraction against an identity matrix\n operand, names = sum_repeats(operand, names, counts, result_names)\n\n elif len(operand_indices) == 2:\n lhs, rhs = map(operands.pop, operand_indices)\n lhs_names, rhs_names = input_names\n\n # handle cases where one side of a contracting or batch dimension is 1\n # but its counterpart is not.\n lhs, lhs_names = filter_singleton_dims(lhs, lhs_names, shape(rhs),\n rhs_names)\n rhs, rhs_names = filter_singleton_dims(rhs, rhs_names, shape(lhs),\n lhs_names)\n\n lhs_counts = collections.Counter(lhs_names)\n rhs_counts = collections.Counter(rhs_names)\n\n # sum out unique contracted indices in lhs and rhs\n lhs_uniques = [name for name in contracted_names\n if lhs_counts[name] == 1 and rhs_counts[name] == 0]\n lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques)\n\n rhs_uniques = [name for name in contracted_names\n if rhs_counts[name] == 1 and lhs_counts[name] == 0]\n rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques)\n\n # for every repeated index, contract against an identity matrix\n lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts,\n result_names + rhs_names)\n rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts,\n result_names + lhs_names)\n\n lhs_or_rhs_names = set(lhs_names) | set(rhs_names)\n contracted_names = [x for x in contracted_names if x in lhs_or_rhs_names]\n lhs_and_rhs_names = set(lhs_names) & set(rhs_names)\n batch_names = [x for x in result_names if x in lhs_and_rhs_names]\n\n lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))\n for n in batch_names)\n\n # NOTE(mattjj): this can fail non-deterministically in python3, maybe\n # due to opt_einsum\n assert _all(\n name in lhs_names and name in rhs_names and\n lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)]\n for name in contracted_names)\n\n # contract using lax.dot_general\n batch_names_str = ''.join(batch_names)\n lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n))\n for n in contracted_names)\n dimension_numbers = ((lhs_cont, rhs_cont), (lhs_batch, rhs_batch))\n operand = lax.dot_general(lhs, rhs, dimension_numbers, precision)\n deleted_names = batch_names_str + ''.join(contracted_names)\n names = (batch_names_str + _removechars(lhs_names, deleted_names)\n + _removechars(rhs_names, deleted_names))\n else:\n raise NotImplementedError # if this is actually reachable, open an issue!\n\n # the resulting 'operand' with axis labels 'names' should be a permutation\n # of the desired result\n assert 
len(names) == len(result_names) == len(set(names))\n assert set(names) == set(result_names)\n if names != result_names:\n perm = tuple([names.index(name) for name in result_names])\n operand = lax.transpose(operand, perm)\n operands.append(operand) # used in next iteration\n\n return operands[0]\n\n\ndef _movechars(s, src, dst):\n \"\"\"Helper for einsum string munging, like moveaxis on identifier strings.\"\"\"\n chars = [c for i, c in enumerate(s) if i not in src]\n for i, j in sorted(zip(dst, src)):\n chars.insert(i, s[j])\n return ''.join(chars)\n\n\n@_wraps(np.inner, lax_description=_PRECISION_DOC)\ndef inner(a, b, *, precision=None):\n if ndim(a) == 0 or ndim(b) == 0:\n return a * b\n return tensordot(a, b, (-1, -1), precision=precision)\n\n\n@_wraps(np.outer)\ndef outer(a, b, out=None):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.outer is not supported.\")\n a, b = _promote_dtypes(a, b)\n return ravel(a)[:, None] * ravel(b)[None, :]\n\n@partial(jit, static_argnums=(2, 3, 4))\ndef _cross(a, b, axisa, axisb, axisc):\n a = moveaxis(a, axisa, -1)\n b = moveaxis(b, axisb, -1)\n\n if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):\n raise ValueError(\"Dimension must be either 2 or 3 for cross product\")\n\n if a.shape[-1] == 2 and b.shape[-1] == 2:\n return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]\n\n a0 = a[..., 0]\n a1 = a[..., 1]\n a2 = a[..., 2] if a.shape[-1] == 3 else zeros_like(a0)\n b0 = b[..., 0]\n b1 = b[..., 1]\n b2 = b[..., 2] if b.shape[-1] == 3 else zeros_like(b0)\n c = array([a1 * b2 - a2 * b1, a2 * b0 - a0 * b2, a0 * b1 - a1 * b0])\n return moveaxis(c, 0, axisc)\n\n@_wraps(np.cross)\ndef cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):\n if axis is not None:\n axisa = axis\n axisb = axis\n axisc = axis\n return _cross(a, b, axisa, axisb, axisc)\n\n@_wraps(np.kron)\ndef kron(a, b):\n a, b = _promote_dtypes(a, b)\n if ndim(a) < ndim(b):\n a = reshape(a, (1,) * (ndim(b) - ndim(a)) + shape(a))\n elif ndim(b) < ndim(a):\n b = reshape(b, (1,) * (ndim(a) - ndim(b)) + shape(b))\n a_reshaped = reshape(a, [i for d in shape(a) for i in (d, 1)])\n b_reshaped = reshape(b, [i for d in shape(b) for i in (1, d)])\n out_shape = tuple(np.multiply(shape(a), shape(b)))\n return reshape(lax.mul(a_reshaped, b_reshaped), out_shape)\n\n\n@_wraps(np.vander)\ndef vander(x, N=None, increasing=False):\n x = asarray(x)\n dtype = _dtype(x)\n if ndim(x) != 1:\n raise ValueError(\"x must be a one-dimensional array\")\n x_shape = shape(x)\n N = N or x_shape[0]\n if N < 0:\n raise ValueError(\"N must be nonnegative\")\n\n iota = lax.iota(dtype, N)\n if not increasing:\n iota = lax.sub(lax._const(iota, N - 1), iota)\n\n return power(x[..., None], iota)\n\n\n### Misc\n\n\n@_wraps(np.argwhere)\ndef argwhere(a):\n result = transpose(vstack(nonzero(a)))\n if ndim(a) == 0:\n return result[:0].reshape(result.shape[0], 0)\n return result.reshape(result.shape[0], ndim(a))\n\n\n@_wraps(np.argmax)\ndef argmax(a, axis=None, out=None):\n _check_arraylike(\"argmax\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.argmax is not supported.\")\n if axis is None:\n a = ravel(a)\n axis = 0\n if a.shape[axis] == 0:\n raise ValueError(\"attempt to get argmax of an empty sequence\")\n return lax.argmax(a, _canonicalize_axis(axis, a.ndim), int64)\n\n@_wraps(np.argmin)\ndef argmin(a, axis=None, out=None):\n _check_arraylike(\"argmin\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.argmin is not supported.\")\n 
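# A minimal usage sketch (hypothetical values, comment only): for\n # x = jnp.array([[3., 1.], [2., 5.]]), argmin(x) ravels first and returns 1,\n # while argmin(x, axis=0) returns the per-column indices [1, 0].\n 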
if axis is None:\n a = ravel(a)\n axis = 0\n if a.shape[axis] == 0:\n raise ValueError(\"attempt to get argmin of an empty sequence\")\n return lax.argmin(a, _canonicalize_axis(axis, a.ndim), int64)\n\n\n_NANARG_DOC = \"\"\"\\\nWarning: jax.numpy.arg{} returns -1 for all-NaN slices and does not raise\nan error.\n\"\"\"\n\n@_wraps(np.nanargmax, lax_description=_NANARG_DOC.format(\"max\"))\ndef nanargmax(a, axis=None):\n _check_arraylike(\"nanargmax\", a)\n if not issubdtype(_dtype(a), inexact):\n return argmax(a, axis=axis)\n nan_mask = isnan(a)\n a = where(nan_mask, -inf, a)\n res = argmax(a, axis=axis)\n return where(all(nan_mask, axis=axis), -1, res)\n\n@_wraps(np.nanargmin, lax_description=_NANARG_DOC.format(\"min\"))\ndef nanargmin(a, axis=None):\n _check_arraylike(\"nanargmin\", a)\n if not issubdtype(_dtype(a), inexact):\n return argmin(a, axis=axis)\n nan_mask = isnan(a)\n a = where(nan_mask, inf, a)\n res = argmin(a, axis=axis)\n return where(all(nan_mask, axis=axis), -1, res)\n\n\n@_wraps(np.sort)\ndef sort(a, axis=-1, kind='quicksort', order=None):\n _check_arraylike(\"sort\", a)\n if kind != 'quicksort':\n warnings.warn(\"'kind' argument to sort is ignored.\")\n if order is not None:\n raise ValueError(\"'order' argument to sort is not supported.\")\n\n if axis is None:\n return lax.sort(a.ravel(), dimension=0)\n else:\n return lax.sort(a, dimension=_canonicalize_axis(axis, ndim(a)))\n\n@_wraps(np.sort_complex)\ndef sort_complex(a):\n _check_arraylike(\"sort_complex\", a)\n a = lax.sort(a, dimension=0)\n return lax.convert_element_type(a, result_type(a, dtypes.canonicalize_dtype(complex_)))\n\n@_wraps(np.lexsort)\ndef lexsort(keys, axis=-1):\n keys = tuple(keys)\n if len(keys) == 0:\n raise TypeError(\"need sequence of keys with len > 0 in lexsort\")\n if len({shape(key) for key in keys}) > 1:\n raise ValueError(\"all keys need to be the same shape\")\n if ndim(keys[0]) == 0:\n return np.int64(0)\n axis = _canonicalize_axis(axis, ndim(keys[0]))\n iota = lax.broadcasted_iota(np.int64, shape(keys[0]), axis)\n return lax.sort((*keys[::-1], iota), dimension=axis, num_keys=len(keys))[-1]\n\n\n@_wraps(np.argsort)\ndef argsort(a, axis=-1, kind='quicksort', order=None):\n _check_arraylike(\"argsort\", a)\n if kind != 'quicksort':\n warnings.warn(\"'kind' argument to argsort is ignored.\")\n if order is not None:\n raise ValueError(\"'order' argument to argsort is not supported.\")\n\n if axis is None:\n return argsort(a.ravel(), 0)\n else:\n axis = _canonicalize_axis(axis, ndim(a))\n iota = lax.broadcasted_iota(np.int64, shape(a), axis)\n _, perm = lax.sort_key_val(a, iota, dimension=axis)\n return perm\n\n\n@_wraps(np.msort)\ndef msort(a):\n return sort(a, axis=0)\n\n\n@partial(jit, static_argnums=(2,))\ndef _roll(a, shift, axis):\n a = asarray(a)\n a_shape = shape(a)\n if axis is None:\n return lax.reshape(roll(ravel(a), shift, axis=0), a_shape)\n\n a_ndim = len(a_shape)\n shift = asarray(shift)\n axis = np.asarray(axis)\n b_shape = lax.broadcast_shapes(shift.shape, axis.shape, (1,))\n if len(b_shape) != 1:\n msg = \"'shift' and 'axis' arguments to roll must be scalars or 1D arrays\"\n raise ValueError(msg)\n\n for x, i in zip(broadcast_to(shift, b_shape),\n np.broadcast_to(axis, b_shape)):\n i = _canonicalize_axis(i, a_ndim)\n x = remainder(x, (a_shape[i] or 1))\n a = lax.concatenate((a, a), i)\n a = lax.dynamic_slice_in_dim(a, a_shape[i] - x, a_shape[i], axis=i)\n return a\n\n\n@_wraps(np.roll)\ndef roll(a, shift, axis=None):\n if isinstance(axis, list):\n axis = tuple(axis)\n 
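# A minimal usage sketch (hypothetical values, comment only): roll(arange(5), 2)\n # gives [3, 4, 0, 1, 2]; with shift=(1, -1) and axis=(0, 1) on a 2D array,\n # rows are rolled down by one and columns are rolled left by one.\n 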
return _roll(a, shift, axis)\n\n\n@_wraps(np.rollaxis)\ndef rollaxis(a, axis, start=0):\n _check_arraylike(\"rollaxis\", a)\n a_ndim = ndim(a)\n axis = _canonicalize_axis(axis, a_ndim)\n if not (-a_ndim <= start <= a_ndim):\n raise ValueError(f\"start={start} must satisfy {-a_ndim}<=start<={a_ndim}\")\n if start < 0:\n start += a_ndim\n if start > axis:\n start -= 1\n return moveaxis(a, axis, start)\n\n\n@_wraps(np.packbits)\ndef packbits(a, axis=None, bitorder='big'):\n a = asarray(a)\n if not (issubdtype(dtype(a), integer) or issubdtype(dtype(a), bool_)):\n raise TypeError('Expected an input array of integer or boolean data type')\n if bitorder not in ['little', 'big']:\n raise ValueError(\"'order' must be either 'little' or 'big'\")\n a = (a > 0).astype('uint8')\n bits = arange(8, dtype='uint8')\n if bitorder == 'big':\n bits = bits[::-1]\n if axis is None:\n a = ravel(a)\n axis = 0\n a = swapaxes(a, axis, -1)\n\n remainder = a.shape[-1] % 8\n if remainder:\n a = pad(a, (a.ndim - 1) * [(0, 0)] + [(0, 8 - remainder)])\n\n a = a.reshape(a.shape[:-1] + (a.shape[-1] // 8, 8))\n packed = (a << bits).sum(-1).astype('uint8')\n return swapaxes(packed, axis, -1)\n\n\n@_wraps(np.unpackbits)\ndef unpackbits(a, axis=None, count=None, bitorder='big'):\n a = asarray(a)\n if dtype(a) != uint8:\n raise TypeError(\"Expected an input array of unsigned byte data type\")\n if bitorder not in ['little', 'big']:\n raise ValueError(\"'order' must be either 'little' or 'big'\")\n bits = asarray(1) << arange(8, dtype='uint8')\n if bitorder == 'big':\n bits = bits[::-1]\n if axis is None:\n a = a.ravel()\n axis = 0\n a = swapaxes(a, axis, -1)\n unpacked = ((a[..., None] & bits) > 0).astype('uint8')\n unpacked = unpacked.reshape(unpacked.shape[:-2] + (-1,))[..., :count]\n return swapaxes(unpacked, axis, -1)\n\n\n@_wraps(np.take)\ndef take(a, indices, axis=None, out=None, mode=None):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.take is not supported.\")\n\n a = asarray(a)\n indices = asarray(indices)\n\n if axis is None:\n a = ravel(a)\n axis = 0\n axis = _canonicalize_axis(axis, ndim(a))\n\n if mode == \"raise\":\n # TODO(phawkins): we have no way to report out of bounds errors yet.\n raise NotImplementedError(\"The 'raise' mode to jnp.take is not supported.\")\n elif mode == \"wrap\":\n indices = mod(indices, _constant_like(indices, a.shape[axis]))\n elif mode != \"clip\" and mode is not None:\n raise ValueError(\"Invalid mode '{}' for np.take\".format(mode))\n\n index_dims = len(shape(indices))\n slice_sizes = list(shape(a))\n slice_sizes[axis] = _min(indices.size, 1)\n dnums = lax.GatherDimensionNumbers(\n offset_dims=tuple(\n list(range(axis)) +\n list(range(axis + index_dims, len(a.shape) + index_dims - 1))),\n collapsed_slice_dims=(axis,),\n start_index_map=(axis,))\n return lax.gather(a, indices[..., None], dimension_numbers=dnums,\n slice_sizes=tuple(slice_sizes))\n\n\ndef _normalize_index(index, axis_size):\n \"\"\"Normalizes an index value in the range [-N, N) to the range [0, N).\"\"\"\n if type(axis_size) is Poly:\n return index + axis_size if index < 0 else index\n\n return lax.select(\n lax.lt(index, _constant_like(index, 0)),\n lax.add(index, _constant_like(index, axis_size)),\n index)\n\n@partial(jit, static_argnums=(2,))\ndef _take_along_axis(arr, indices, axis):\n if axis is None:\n if ndim(indices) != 1:\n msg = \"take_along_axis indices must be 1D if axis=None, got shape {}\"\n raise ValueError(msg.format(indices.shape))\n return take_along_axis(arr.ravel(), 
indices, 0)\n rank = ndim(arr)\n if rank != ndim(indices):\n msg = \"indices and arr must have the same number of dimensions; {} vs. {}\"\n raise ValueError(msg.format(ndim(indices), ndim(arr)))\n axis = _canonicalize_axis(axis, rank)\n\n def replace(tup, val):\n lst = list(tup)\n lst[axis] = val\n return tuple(lst)\n\n bcast_shape = lax.broadcast_shapes(replace(arr.shape, 1), replace(indices.shape, 1))\n indices = broadcast_to(indices, replace(bcast_shape, indices.shape[axis]))\n arr = broadcast_to(arr, replace(bcast_shape, arr.shape[axis]))\n\n axis_size = arr.shape[axis]\n arr_shape = replace(arr.shape, 1)\n idx_shape = indices.shape\n out_shape = lax.broadcast_shapes(idx_shape, arr_shape)\n\n index_dims = [i for i, idx in enumerate(idx_shape) if i == axis or idx != 1]\n\n gather_index_shape = tuple(np.array(out_shape)[index_dims]) + (1,)\n gather_indices = []\n slice_sizes = []\n offset_dims = []\n start_index_map = []\n collapsed_slice_dims = []\n j = 0\n for i in range(rank):\n if i == axis:\n indices = _normalize_index(indices, axis_size)\n gather_indices.append(lax.reshape(indices, gather_index_shape))\n slice_sizes.append(1)\n start_index_map.append(i)\n collapsed_slice_dims.append(i)\n j += 1\n elif idx_shape[i] != 1:\n iota = lax.iota(_dtype(indices), out_shape[i])\n if not config.omnistaging_enabled:\n iota = lax.tie_in(arr, iota)\n iota = lax.broadcast_in_dim(iota, gather_index_shape, (j,))\n gather_indices.append(iota)\n slice_sizes.append(1)\n start_index_map.append(i)\n collapsed_slice_dims.append(i)\n j += 1\n else:\n # If idx_shape[i] == 1, we can just take the entirety of the arr's axis\n # and avoid forming an iota index.\n offset_dims.append(i)\n slice_sizes.append(arr_shape[i])\n\n gather_indices = lax.concatenate(gather_indices, dimension=j)\n dnums = lax.GatherDimensionNumbers(\n offset_dims=tuple(offset_dims),\n collapsed_slice_dims=tuple(collapsed_slice_dims),\n start_index_map=tuple(start_index_map))\n return lax.gather(arr, gather_indices, dnums, tuple(slice_sizes))\n\n\n@_wraps(getattr(np, \"take_along_axis\", None), update_doc=False)\ndef take_along_axis(arr, indices, axis):\n _check_arraylike(\"take_along_axis\", arr)\n return _take_along_axis(arr, indices, axis)\n\n\n### SetOps\n\n@partial(jit, static_argnums=1)\ndef _unique1d_sorted_mask(ar, optional_indices=False):\n \"\"\"\n Helper function for unique which is jit-able\n \"\"\"\n\n ar = asarray(ar).flatten()\n\n if optional_indices:\n perm = ar.argsort()\n aux = ar[perm]\n else:\n aux = ar.sort()\n\n mask = empty(aux.shape, dtype=bool_)\n mask = ops.index_update(mask, ops.index[:1], True)\n mask = ops.index_update(mask, ops.index[1:], aux[1:] != aux[:-1])\n\n if optional_indices:\n return aux, mask, perm\n else:\n return aux, mask\n\ndef _unique1d(ar, return_index=False, return_inverse=False,\n return_counts=False):\n \"\"\"\n Find the unique elements of an array, ignoring shape.\n \"\"\"\n\n optional_indices = return_index or return_inverse\n\n if optional_indices:\n aux, mask, perm = _unique1d_sorted_mask(ar, optional_indices)\n else:\n aux, mask = _unique1d_sorted_mask(ar, optional_indices)\n\n ret = (aux[mask],)\n if return_index:\n ret += (perm[mask],)\n if return_inverse:\n imask = cumsum(mask) - 1\n inv_idx = zeros(mask.shape, dtype=dtypes.canonicalize_dtype(int_))\n inv_idx = ops.index_update(inv_idx, perm, imask)\n ret += (inv_idx,)\n if return_counts:\n idx = concatenate(nonzero(mask) + (array([mask.size]),))\n ret += (diff(idx),)\n return ret\n\n@_wraps(np.unique)\ndef unique(ar, 
return_index=False, return_inverse=False,\n return_counts=False, axis=None):\n ar = core.concrete_or_error(array, ar, \"The error arose in jnp.unique()\")\n\n if iscomplexobj(ar):\n raise NotImplementedError(\n \"np.unique is not implemented for complex valued arrays\")\n\n if axis is None:\n ret = _unique1d(ar, return_index, return_inverse, return_counts)\n if len(ret) == 1:\n return ret[0]\n else:\n return ret\n\n raise NotImplementedError(\n \"np.unique is not implemented for the axis argument\")\n\n### Indexing\n\ndef _rewriting_take(arr, idx):\n # Computes arr[idx].\n # All supported cases of indexing can be implemented as an XLA gather,\n # followed by an optional reverse and broadcast_in_dim.\n arr = asarray(arr)\n treedef, static_idx, dynamic_idx = _split_index_for_jit(idx)\n return _gather(arr, treedef, static_idx, dynamic_idx)\n\n# TODO(phawkins): re-enable jit after fixing excessive recompilation for\n# slice indexes (e.g., slice(0, 5, None), slice(10, 15, None), etc.).\n# @partial(jit, static_argnums=(1, 2))\ndef _gather(arr, treedef, static_idx, dynamic_idx):\n idx = _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx)\n indexer = _index_to_gather(shape(arr), idx) # shared with _scatter_update\n y = arr\n\n # Avoid calling gather if the slice shape is empty, both as a fast path and to\n # handle cases like zeros(0)[array([], int32)].\n if _prod(indexer.slice_shape) == 0:\n return zeros(indexer.slice_shape, dtype=y.dtype)\n\n # We avoid generating a gather when indexer.gather_indices.size is empty.\n if indexer.gather_indices.size:\n y = lax.gather(y, indexer.gather_indices, indexer.dnums,\n indexer.gather_slice_shape)\n\n # Reverses axes with negative strides.\n if indexer.reversed_y_dims:\n y = lax.rev(y, indexer.reversed_y_dims)\n\n # This adds np.newaxis/None dimensions.\n return expand_dims(y, indexer.newaxis_dims)\n\n_Indexer = collections.namedtuple(\"_Indexer\", [\n # The expected shape of the slice output.\n \"slice_shape\",\n\n # The slice shape to pass to lax.gather().\n \"gather_slice_shape\",\n\n # The gather indices to use.\n \"gather_indices\",\n\n # A GatherDimensionNumbers object describing the gather to perform.\n \"dnums\",\n\n # Slice dimensions that have negative strides, and so must be reversed after\n # the gather.\n \"reversed_y_dims\",\n\n # Keep track of any axes created by `newaxis`. These must be inserted for\n # gathers and eliminated for scatters.\n \"newaxis_dims\",\n])\n\ndef _split_index_for_jit(idx):\n \"\"\"Splits indices into necessarily-static and dynamic parts.\n\n Used to pass indices into `jit`-ted function.\n \"\"\"\n # Convert list indices to tuples in cases (deprecated by NumPy.)\n idx = _eliminate_deprecated_list_indexing(idx)\n\n # Expand any (concrete) boolean indices. 
We can then use advanced integer\n # indexing logic to handle them.\n idx = _expand_bool_indices(idx)\n\n leaves, treedef = tree_flatten(idx)\n dynamic = [None] * len(leaves)\n static = [None] * len(leaves)\n for i, x in enumerate(leaves):\n if x is Ellipsis:\n static[i] = x\n elif isinstance(x, slice):\n # slice objects aren't hashable.\n static[i] = (x.start, x.stop, x.step)\n else:\n dynamic[i] = x\n return treedef, tuple(static), dynamic\n\ndef _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx):\n \"\"\"Recombines indices that were split by _split_index_for_jit.\"\"\"\n idx = []\n for s, d in zip(static_idx, dynamic_idx):\n if d is not None:\n idx.append(d)\n elif isinstance(s, tuple):\n idx.append(slice(s[0], s[1], s[2]))\n else:\n idx.append(s)\n return treedef.unflatten(idx)\n\ndef _int(aval):\n return not aval.shape and issubdtype(aval.dtype, integer)\n\ndef _index_to_gather(x_shape, idx):\n # Remove ellipses and add trailing slice(None)s.\n idx = _canonicalize_tuple_index(len(x_shape), idx)\n\n # Check for advanced indexing:\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n\n # Do the advanced indexing axes appear contiguously? If not, NumPy semantics\n # move the advanced axes to the front.\n advanced_axes_are_contiguous = False\n\n advanced_indexes = None\n\n # The positions of the advanced indexing axes in `idx`.\n idx_advanced_axes = []\n\n # The positions of the advanced indexes in x's shape, after None axes have\n # been removed. See below.\n x_advanced_axes = None\n\n if _is_advanced_int_indexer(idx):\n idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]\n advanced_pairs = (\n (asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones)\n if isscalar(e) or isinstance(e, (Sequence, ndarray)))\n advanced_pairs = ((_normalize_index(e, x_shape[j]), i, j)\n for e, i, j in advanced_pairs)\n advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)\n advanced_axes_are_contiguous = np.all(np.diff(idx_advanced_axes) == 1)\n\n x_axis = 0 # Current axis in x.\n y_axis = 0 # Current axis in y, before collapsing. See below.\n collapsed_y_axis = 0 # Current axis in y, after collapsing.\n\n # Scatter dimension numbers.\n offset_dims = []\n collapsed_slice_dims = []\n start_index_map = []\n\n use_64bit_index = _any([type(d) is Poly or d >= (1 << 31) for d in x_shape])\n index_dtype = int64 if use_64bit_index else int32\n gather_indices = np.zeros((0,), dtype=index_dtype) # use np to save a compilation\n\n # We perform three transformations to y before the scatter op, in order:\n # First, y is broadcast to slice_shape. In general `y` only needs to be\n # broadcast to the right shape.\n slice_shape = []\n\n # Next, y is squeezed to remove newaxis_dims. 
This removes np.newaxis/`None`\n # indices, which the scatter cannot remove itself.\n newaxis_dims = []\n\n # Finally, we reverse reversed_y_dims to handle slices with negative strides.\n reversed_y_dims = []\n\n gather_slice_shape = []\n\n for idx_pos, i in enumerate(idx):\n # Handle the advanced indices here if:\n # * the advanced indices were not contiguous and we are the start.\n # * we are at the position of the first advanced index.\n if (advanced_indexes is not None and\n (advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or\n not advanced_axes_are_contiguous and idx_pos == 0)):\n advanced_indexes = broadcast_arrays(*advanced_indexes)\n shape = advanced_indexes[0].shape\n ndim = len(shape)\n advanced_indexes = [\n lax.convert_element_type(lax.reshape(a, shape + (1,)), index_dtype)\n for a in advanced_indexes]\n\n # Broadcast gather_indices from [..., k] to [..., 1, 1, ..., 1, k].\n gather_indices = lax.broadcast_in_dim(\n gather_indices, np.insert(gather_indices.shape, -1, shape),\n tuple(range(gather_indices.ndim - 1)) + (gather_indices.ndim + ndim - 1,))\n gather_indices = concatenate([gather_indices] + advanced_indexes, -1)\n start_index_map.extend(x_advanced_axes)\n collapsed_slice_dims.extend(x_advanced_axes)\n slice_shape.extend(shape)\n y_axis += ndim\n collapsed_y_axis += ndim\n\n # Per-index bookkeeping for advanced indexes.\n if idx_pos in idx_advanced_axes:\n x_axis += 1\n gather_slice_shape.append(1)\n continue\n\n try:\n abstract_i = core.get_aval(i)\n except TypeError:\n abstract_i = None\n # Handle basic int indexes.\n if isinstance(abstract_i, (ConcreteArray,ShapedArray)) and _int(abstract_i):\n if x_shape[x_axis] == 0:\n # XLA gives error when indexing into an axis of size 0\n raise IndexError(f\"index is out of bounds for axis {x_axis} with size 0\")\n i = _normalize_index(i, x_shape[x_axis])\n if type(i) is Poly:\n # dummy index if i is polynomial, doesn't matter for shape inference\n # TODO(mattjj,j-towns,juliuskunze): revise this logic\n i = 0\n i = lax.convert_element_type(i, index_dtype)\n i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))\n gather_indices = concatenate((gather_indices, i), -1)\n collapsed_slice_dims.append(x_axis)\n gather_slice_shape.append(1)\n start_index_map.append(x_axis)\n x_axis += 1\n # Handle np.newaxis (None)\n elif i is None:\n slice_shape.append(1)\n newaxis_dims.append(y_axis)\n y_axis += 1\n # Handle slice(None)\n elif _is_slice_none(i):\n slice_shape.append(x_shape[x_axis])\n gather_slice_shape.append(x_shape[x_axis])\n offset_dims.append(collapsed_y_axis)\n collapsed_y_axis += 1\n y_axis += 1\n x_axis += 1\n # Handle slice index (only static, otherwise an error is raised)\n elif isinstance(i, slice):\n if not _all(elt is None or type(elt) is Poly\n or type(core.get_aval(elt)) is ConcreteArray\n for elt in (i.start, i.stop, i.step)):\n msg = (\"Array slice indices must have static start/stop/step to be used \"\n \"with NumPy indexing syntax. 
To index a statically sized \"\n \"array at a dynamic position, try lax.dynamic_slice/\"\n \"dynamic_update_slice (JAX does not support dynamically sized \"\n \"arrays within JIT compiled functions).\")\n raise IndexError(msg)\n start, limit, stride, needs_rev = _static_idx(i, x_shape[x_axis])\n if needs_rev:\n reversed_y_dims.append(collapsed_y_axis)\n if stride == 1:\n i = lax.convert_element_type(start, index_dtype)\n i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))\n gather_indices = concatenate((gather_indices, i), -1)\n slice_shape.append(limit - start)\n gather_slice_shape.append(limit - start)\n offset_dims.append(collapsed_y_axis)\n start_index_map.append(x_axis)\n else:\n i = arange(start, limit, stride, dtype=index_dtype)\n size = i.shape[0]\n slice_shape.append(size)\n gather_slice_shape.append(1)\n gather_indices_shape = tuple(gather_indices.shape[:-1]) + (size,)\n i = lax.broadcast_in_dim(\n i, shape=gather_indices_shape + (1,),\n broadcast_dimensions=(len(gather_indices_shape) - 1,))\n gather_indices = lax.broadcast_in_dim(\n gather_indices,\n shape=gather_indices_shape + (len(start_index_map),),\n broadcast_dimensions=(\n tuple(range(len(gather_indices_shape) - 1)) +\n (len(gather_indices_shape),)))\n gather_indices = concatenate(\n (gather_indices, i), len(gather_indices_shape))\n start_index_map.append(x_axis)\n collapsed_slice_dims.append(x_axis)\n\n collapsed_y_axis += 1\n y_axis += 1\n x_axis += 1\n else:\n if (abstract_i is not None and\n not (issubdtype(abstract_i.dtype, integer) or issubdtype(abstract_i.dtype, bool_))):\n msg = (\"Indexer must have integer or boolean type, got indexer \"\n \"with type {} at position {}, indexer value {}\")\n raise TypeError(msg.format(abstract_i.dtype.name, idx_pos, i))\n\n msg = \"Indexing mode not yet supported. Open a feature request!\\n{}\"\n raise IndexError(msg.format(idx))\n\n dnums = lax.GatherDimensionNumbers(\n offset_dims = tuple(offset_dims),\n collapsed_slice_dims = tuple(sorted(collapsed_slice_dims)),\n start_index_map = tuple(start_index_map)\n )\n return _Indexer(\n slice_shape=slice_shape,\n newaxis_dims=tuple(newaxis_dims),\n gather_slice_shape=gather_slice_shape,\n reversed_y_dims=reversed_y_dims,\n dnums=dnums,\n gather_indices=gather_indices)\n\ndef _should_unpack_list_index(x):\n \"\"\"Helper for _eliminate_deprecated_list_indexing.\"\"\"\n return (isinstance(x, ndarray) and np.ndim(x) != 0\n or isinstance(x, (Sequence, slice))\n or x is Ellipsis or x is None)\n\ndef _eliminate_deprecated_list_indexing(idx):\n # \"Basic slicing is initiated if the selection object is a non-array,\n # non-tuple sequence containing slice objects, [Ellipses, or newaxis\n # objects]\". Detects this case and canonicalizes to a tuple. This case is\n # deprecated by NumPy and exists for backward compatibility.\n if not isinstance(idx, tuple):\n if isinstance(idx, Sequence) and not isinstance(idx, ndarray):\n # As of numpy 1.16, some non-tuple sequences of indices result in a warning, while\n # others are converted to arrays, based on a set of somewhat convoluted heuristics\n # (See https://github.com/numpy/numpy/blob/v1.19.2/numpy/core/src/multiarray/mapping.c#L179-L343)\n # In JAX, we raise a warning for *all* non-tuple sequences, and in the future will\n # *always* raise a TypeError here, rather than silently converting to an array or tuple\n # depending on the contents of the list as numpy will. 
\"Explicit is better than implicit\".\n # TODO(jakevdp): raise a TypeError here.\n if _any(_should_unpack_list_index(i) for i in idx):\n msg = (\"Using a non-tuple sequence for multidimensional indexing is deprecated; \"\n \"use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will \"\n \"result in a TypeError. See https://github.com/google/jax/issues/4564 \"\n \"for discussion of why this type of indexing is being deprecated.\")\n idx = tuple(idx)\n else:\n msg = (\"Using a non-tuple sequence for multidimensional indexing is deprecated; \"\n \"use `arr[array(seq)]` instead of `arr[seq]`. In the future this will \"\n \"result in a TypeError. See https://github.com/google/jax/issues/4564 \"\n \"for discussion of why this type of indexing is being deprecated.\")\n idx = (idx,)\n # TODO(jakevdp): this stacklevel is appropriate for x[idx]; for ops.index_update\n # we should use stacklevel=5; for x.at[idx].set() we should use stacklevel=6.\n warnings.warn(msg, FutureWarning, stacklevel=4)\n else:\n idx = (idx,)\n return idx\n\ndef _expand_bool_indices(idx):\n \"\"\"Converts concrete bool indexes into advanced integer indexes.\"\"\"\n out = []\n for i in idx:\n try:\n abstract_i = core.get_aval(i)\n except TypeError:\n abstract_i = None\n if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_)\n or isinstance(i, list) and _all(not _shape(e) and issubdtype(_dtype(e), bool_)\n for e in i)):\n if isinstance(i, list):\n i = array(i)\n abstract_i = core.get_aval(i)\n\n if not type(abstract_i) is ConcreteArray:\n # TODO(mattjj): improve this error by tracking _why_ the indices are not\n # concrete\n raise IndexError(\"Array boolean indices must be concrete.\")\n else:\n out.extend(np.where(i))\n else:\n out.append(i)\n return tuple(out)\n\ndef _is_slice_none(idx):\n \"\"\"Return True if idx is equal to slice(None), False otherwise.\"\"\"\n if isinstance(idx, slice):\n return idx.start is None and idx.stop is None and idx.step is None\n\n# TODO(mattjj): clean up this logic\ndef _is_advanced_int_indexer(idx):\n \"\"\"Returns True if idx should trigger int array indexing, False otherwise.\"\"\"\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n assert isinstance(idx, tuple)\n if _all(np.ndim(elt) == 0 for elt in idx):\n return False\n return _all(e is None or e is Ellipsis or isinstance(e, slice)\n or _is_int_arraylike(e) for e in idx)\n\ndef _is_int_arraylike(x):\n \"\"\"Returns True if x is array-like with integer dtype, False otherwise.\"\"\"\n return (isinstance(x, int) and not isinstance(x, bool)\n or issubdtype(getattr(x, \"dtype\", None), np.integer)\n or isinstance(x, (list, tuple)) and _all(_is_int_arraylike(e) for e in x))\n\n\ndef _canonicalize_tuple_index(arr_ndim, idx):\n \"\"\"Helper to remove Ellipsis and add in the implicit trailing slice(None).\"\"\"\n len_without_none = _sum(1 for e in idx if e is not None and e is not Ellipsis)\n if len_without_none > arr_ndim:\n msg = \"Too many indices for array: {} non-None/Ellipsis indices for dim {}.\"\n raise IndexError(msg.format(len_without_none, arr_ndim))\n ellipses = (i for i, elt in enumerate(idx) if elt is Ellipsis)\n ellipsis_index = next(ellipses, None)\n if ellipsis_index is not None:\n if next(ellipses, None) is not None:\n msg = \"Multiple ellipses (...) 
not supported: {}.\"\n raise IndexError(msg.format(list(map(type, idx))))\n colons = (slice(None),) * (arr_ndim - len_without_none)\n idx = idx[:ellipsis_index] + colons + idx[ellipsis_index + 1:]\n elif len_without_none < arr_ndim:\n colons = (slice(None),) * (arr_ndim - len_without_none)\n idx = tuple(idx) + colons\n return idx\n\ndef _polymorphic_slice_indices(idx: slice, size: Union[int, Poly]):\n # like idx.indices(size), but allows for polymorphic indices and size\n # see https://github.com/python/cpython/blob/6d6508765514c7c10719478a0430f5e47c9a96ac/Objects/sliceobject.c#L372\n assert isinstance(idx, slice)\n\n step = 1 if idx.step is None else idx.step\n step_is_negative = step < 0\n lower = -1 if step_is_negative else 0\n upper = size + lower\n\n def sanitize(index, default):\n if index is None:\n return default\n elif type(index) is Poly:\n return index\n elif index < 0:\n return _max(index + size, lower)\n else:\n return _min(index, upper)\n\n start = sanitize(idx.start, default=upper if step_is_negative else lower)\n stop = sanitize(idx.stop, default=lower if step_is_negative else upper)\n return start, stop, step\n\ndef _static_idx(idx: slice, size: Union[int, Poly]):\n \"\"\"Helper function to compute the static slice start/limit/stride values.\"\"\"\n if _any(type(s) is Poly for s in (idx.start, idx.stop, idx.step, size)):\n start, stop, step = _polymorphic_slice_indices(idx, size)\n elif isinstance(size, int):\n start, stop, step = idx.indices(size)\n else:\n raise TypeError(size)\n\n if type(start) is not Poly and type(stop) is not Poly:\n if (step < 0 and stop >= start) or (step > 0 and start >= stop):\n return 0, 0, 1, False # sliced to size zero\n\n if step > 0:\n return start, stop, step, False\n else:\n k = (start - stop - 1) % (-step)\n return stop + k + 1, start + 1, -step, True\n\n\nblackman = _wrap_numpy_nullary_function(np.blackman)\nbartlett = _wrap_numpy_nullary_function(np.bartlett)\nhamming = _wrap_numpy_nullary_function(np.hamming)\nhanning = _wrap_numpy_nullary_function(np.hanning)\n# TODO: lower `kaiser` via lax to allow non-constant beta values.\nkaiser = _wrap_numpy_nullary_function(np.kaiser)\n\ndef _gcd_cond_fn(xs):\n x1, x2 = xs\n return any(x2 != 0)\n\ndef _gcd_body_fn(xs):\n x1, x2 = xs\n x1, x2 = (where(x2 != 0, x2, x1),\n where(x2 != 0, lax.rem(x1, x2), lax._const(x2, 0)))\n return (where(x1 < x2, x2, x1), where(x1 < x2, x1, x2))\n\n@_wraps(getattr(np, \"gcd\", None))\ndef gcd(x1, x2):\n _check_arraylike(\"gcd\", x1, x2)\n if (not issubdtype(_dtype(x1), integer) or\n not issubdtype(_dtype(x2), integer)):\n raise ValueError(\"Arguments to jax.numpy.gcd must be integers.\")\n x1, x2 = _promote_dtypes(x1, x2)\n x1, x2 = broadcast_arrays(x1, x2)\n gcd, _ = lax.while_loop(_gcd_cond_fn, _gcd_body_fn, (abs(x1), abs(x2)))\n return gcd\n\n\n@_wraps(getattr(np, \"lcm\", None))\ndef lcm(x1, x2):\n _check_arraylike(\"lcm\", x1, x2)\n x1, x2 = _promote_dtypes(x1, x2)\n d = gcd(x1, x2)\n return where(d == 0, lax._const(d, 0),\n abs(multiply(x1, floor_divide(x2, d))))\n\n\n@_wraps(np.extract)\ndef extract(condition, arr):\n return compress(ravel(condition), ravel(arr))\n\n\n@_wraps(np.compress)\ndef compress(condition, a, axis=None, out=None):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.compress is not supported.\")\n if ndim(condition) != 1:\n raise ValueError(\"condition must be a 1D array\")\n condition = array(condition).astype(bool)\n a = array(a)\n if axis is None:\n axis = 0\n a = ravel(a)\n else:\n a = moveaxis(a, axis, 
0)\n condition, extra = condition[:a.shape[0]], condition[a.shape[0]:]\n if any(extra):\n raise ValueError(\"condition contains entries that are out of bounds\")\n a = a[:condition.shape[0]]\n return moveaxis(a[condition], 0, axis)\n\n\n@_wraps(np.cov)\ndef cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,\n aweights=None):\n if y is not None: raise NotImplementedError(\n \"jax.numpy.cov not implemented for nontrivial y. \"\n \"Open a feature request at https://github.com/google/jax/issues !\")\n\n m, = _promote_args_inexact(\"cov\", m)\n\n if m.ndim > 2:\n raise ValueError(\"m has more than 2 dimensions\") # same as numpy error\n X = atleast_2d(m)\n if not rowvar and X.shape[0] != 1:\n X = X.T\n if X.shape[0] == 0:\n return array([]).reshape(0, 0)\n if ddof is None:\n ddof = 1 if bias == 0 else 0\n\n w = None\n if fweights is not None:\n _check_arraylike(\"cov\", fweights)\n if ndim(fweights) > 1:\n raise RuntimeError(\"cannot handle multidimensional fweights\")\n if shape(fweights)[0] != X.shape[1]:\n raise RuntimeError(\"incompatible numbers of samples and fweights\")\n if not issubdtype(_dtype(fweights), integer):\n raise TypeError(\"fweights must be integer.\")\n # Ensure positive fweights; note that numpy raises an error on negative fweights.\n w = asarray(abs(fweights))\n if aweights is not None:\n _check_arraylike(\"cov\", aweights)\n if ndim(aweights) > 1:\n raise RuntimeError(\"cannot handle multidimensional aweights\")\n if shape(aweights)[0] != X.shape[1]:\n raise RuntimeError(\"incompatible numbers of samples and aweights\")\n # Ensure positive aweights: note that numpy raises an error for negative aweights.\n aweights = abs(aweights)\n w = aweights if w is None else w * aweights\n\n avg, w_sum = average(X, axis=1, weights=w, returned=True)\n w_sum = w_sum[0]\n\n if w is None:\n f = X.shape[1] - ddof\n elif ddof == 0:\n f = w_sum\n elif aweights is None:\n f = w_sum - ddof\n else:\n f = w_sum - ddof * sum(w * aweights) / w_sum\n\n X = X - avg[:, None]\n X_T = X.T if w is None else (X * w).T\n return true_divide(dot(X, X_T.conj()), f).squeeze()\n\n\n@_wraps(np.corrcoef)\ndef corrcoef(x, y=None, rowvar=True):\n _check_arraylike(\"corrcoef\", x)\n c = cov(x, y, rowvar)\n if len(shape(c)) == 0:\n # scalar - this should yield nan for values (nan/nan, inf/inf, 0/0), 1 otherwise\n return divide(c, c)\n d = diag(c)\n stddev = sqrt(real(d))\n c = divide(c, stddev[:,None])\n c = divide(c, stddev[None,:])\n\n real_part = clip(real(c), -1, 1)\n if iscomplexobj(c):\n complex_part = clip(imag(c), -1, 1)\n c = lax.complex(real_part, complex_part)\n else:\n c = real_part\n return c\n\n\n@_wraps(getattr(np, \"quantile\", None))\ndef quantile(a, q, axis=None, out=None, overwrite_input=False,\n interpolation=\"linear\", keepdims=False):\n _check_arraylike(\"quantile\", a, q)\n if overwrite_input or out is not None:\n msg = (\"jax.numpy.quantile does not support overwrite_input=True or \"\n \"out != None\")\n raise ValueError(msg)\n return _quantile(a, q, axis, interpolation, keepdims, False)\n\n@_wraps(getattr(np, \"nanquantile\", None))\ndef nanquantile(a, q, axis=None, out=None, overwrite_input=False,\n interpolation=\"linear\", keepdims=False):\n _check_arraylike(\"nanquantile\", a, q)\n if overwrite_input or out is not None:\n msg = (\"jax.numpy.nanquantile does not support overwrite_input=True or \"\n \"out != None\")\n raise ValueError(msg)\n return _quantile(a, q, axis, interpolation, keepdims, True)\n\n\n@partial(jit, static_argnums=(2, 3, 4, 5))\ndef _quantile(a, q, 
axis, interpolation, keepdims, squash_nans):\n if interpolation not in [\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"]:\n raise ValueError(\"interpolation can only be 'linear', 'lower', 'higher', \"\n \"'midpoint', or 'nearest'\")\n a = asarray(a, dtype=promote_types(_dtype(a), float32))\n q = asarray(q, dtype=promote_types(_dtype(q), float32))\n if axis is None:\n a = ravel(a)\n axis = 0\n elif isinstance(axis, tuple):\n raise NotImplementedError(\"Tuple values for axis are not implemented\")\n else:\n axis = _canonicalize_axis(axis, ndim(a))\n\n q_shape = shape(q)\n q_ndim = ndim(q)\n if q_ndim > 1:\n raise ValueError(\"q must have rank <= 1, got shape {}\".format(shape(q)))\n\n a_shape = shape(a)\n a = lax.sort(a, dimension=axis)\n\n if squash_nans:\n counts = sum(logical_not(isnan(a)), axis=axis, dtype=q.dtype,\n keepdims=keepdims)\n shape_after_reduction = counts.shape\n q = lax.expand_dims(\n q, tuple(range(q_ndim, len(shape_after_reduction) + q_ndim)))\n counts = lax.expand_dims(counts, tuple(range(q_ndim)))\n q = lax.mul(q, lax.sub(counts, _constant_like(q, 1)))\n low = lax.floor(q)\n high = lax.ceil(q)\n high_weight = lax.sub(q, low)\n low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)\n\n low = lax.max(_constant_like(low, 0), lax.min(low, counts - 1))\n high = lax.max(_constant_like(high, 0), lax.min(high, counts - 1))\n low = lax.convert_element_type(low, int64)\n high = lax.convert_element_type(high, int64)\n out_shape = q_shape + shape_after_reduction\n index = [lax.broadcasted_iota(int64, out_shape, dim + q_ndim)\n for dim in range(len(shape_after_reduction))]\n if keepdims:\n index[axis] = low\n else:\n index.insert(axis, low)\n low_value = a[tuple(index)]\n index[axis] = high\n high_value = a[tuple(index)]\n else:\n n = a_shape[axis]\n q = lax.mul(q, _constant_like(q, n - 1))\n low = lax.floor(q)\n high = lax.ceil(q)\n high_weight = lax.sub(q, low)\n low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)\n\n low = lax.clamp(_constant_like(low, 0), low, _constant_like(low, n - 1))\n high = lax.clamp(_constant_like(high, 0), high, _constant_like(high, n - 1))\n low = lax.convert_element_type(low, int64)\n high = lax.convert_element_type(high, int64)\n\n slice_sizes = list(a_shape)\n slice_sizes[axis] = 1\n dnums = lax.GatherDimensionNumbers(\n offset_dims=tuple(range(\n q_ndim,\n len(a_shape) + q_ndim if keepdims else len(a_shape) + q_ndim - 1)),\n collapsed_slice_dims=() if keepdims else (axis,),\n start_index_map=(axis,))\n low_value = lax.gather(a, low[..., None], dimension_numbers=dnums,\n slice_sizes=slice_sizes)\n high_value = lax.gather(a, high[..., None], dimension_numbers=dnums,\n slice_sizes=slice_sizes)\n if q_ndim == 1:\n low_weight = lax.broadcast_in_dim(low_weight, low_value.shape,\n broadcast_dimensions=(0,))\n high_weight = lax.broadcast_in_dim(high_weight, high_value.shape,\n broadcast_dimensions=(0,))\n\n if interpolation == \"linear\":\n result = lax.add(lax.mul(low_value.astype(q.dtype), low_weight),\n lax.mul(high_value.astype(q.dtype), high_weight))\n elif interpolation == \"lower\":\n result = low_value\n elif interpolation == \"higher\":\n result = high_value\n elif interpolation == \"nearest\":\n pred = lax.le(high_weight, _constant_like(high_weight, 0.5))\n result = lax.select(pred, low_value, high_value)\n elif interpolation == \"midpoint\":\n result = lax.mul(lax.add(low_value, high_value), _constant_like(low_value, 0.5))\n else:\n raise ValueError(f\"interpolation={interpolation!r} not recognized\")\n\n 
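# A worked instance of the linear rule above (values assumed, comment only):\n # for sorted a = [1., 2., 3., 4.] and q = 0.25, q*(n-1) = 0.75, so low = 0,\n # high = 1, high_weight = 0.75, and the result is 0.25*1. + 0.75*2. = 1.75,\n # matching np.quantile([1, 2, 3, 4], 0.25).\n 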
return lax.convert_element_type(result, a.dtype)\n\n\n@partial(jit, static_argnums=2)\n@partial(vectorize, excluded={0, 2})\ndef _searchsorted(a, v, side):\n if len(a) == 0:\n return 0\n op = operator.le if side == 'left' else operator.lt\n\n def body_fun(i, state):\n low, high = state\n mid = (low + high) // 2\n go_left = op(v, a[mid])\n return (where(go_left, low, mid), where(go_left, mid, high))\n\n n_levels = int(np.ceil(np.log2(len(a) + 1)))\n return lax.fori_loop(0, n_levels, body_fun, (0, len(a)))[1]\n\n\n@_wraps(np.searchsorted)\ndef searchsorted(a, v, side='left', sorter=None):\n if side not in ['left', 'right']:\n raise ValueError(f\"{side!r} is an invalid value for keyword 'side'\")\n if sorter is not None:\n raise NotImplementedError(\"sorter is not implemented\")\n a = asarray(a)\n v = asarray(v)\n if ndim(a) != 1:\n raise ValueError(\"a should be 1-dimensional\")\n return _searchsorted(a, v, side)\n\n\n@_wraps(np.digitize)\ndef digitize(x, bins, right=False):\n if len(bins) == 0:\n return zeros(shape(x), dtype=dtypes.canonicalize_dtype(int_))\n side = 'right' if not right else 'left'\n return where(\n bins[-1] >= bins[0],\n searchsorted(bins, x, side=side),\n len(bins) - searchsorted(bins[::-1], x, side=side)\n )\n\n_PIECEWISE_DOC = \"\"\"\\\nUnlike `np.piecewise`, :py:func:`jax.numpy.piecewise` requires functions in\n`funclist` to be traceable by JAX, as it is implemented via :func:`jax.lax.switch`.\nSee the :func:`jax.lax.switch` documentation for more information.\n\"\"\"\n\n@_wraps(np.piecewise, lax_description=_PIECEWISE_DOC)\ndef piecewise(x, condlist, funclist, *args, **kw):\n _check_arraylike(\"piecewise\", x)\n condlist = array(condlist, dtype=bool_)\n nc, nf = len(condlist), len(funclist)\n if nf == nc + 1:\n funclist = funclist[-1:] + funclist[:-1]\n elif nf == nc:\n funclist = [0] + list(funclist)\n else:\n raise ValueError(f\"with {nc} condition(s), either {nc} or {nc+1} functions are expected; got {nf}\")\n indices = argmax(cumsum(vstack([zeros_like(condlist[:1]), condlist]), 0), 0)\n dtype = _dtype(x)\n def _call(f):\n return lambda x: f(x, *args, **kw).astype(dtype)\n def _const(v):\n return lambda x: full_like(x, v)\n funclist = [_call(f) if callable(f) else _const(f) for f in funclist]\n return vectorize(lax.switch, excluded=(1,))(indices, funclist, x)\n\n\n@_wraps(np.percentile)\ndef percentile(a, q, axis=None, out=None, overwrite_input=False,\n interpolation=\"linear\", keepdims=False):\n _check_arraylike(\"percentile\", a)\n q = true_divide(asarray(q), float32(100.0))\n return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,\n interpolation=interpolation, keepdims=keepdims)\n\n@_wraps(np.nanpercentile)\ndef nanpercentile(a, q, axis=None, out=None, overwrite_input=False,\n interpolation=\"linear\", keepdims=False):\n _check_arraylike(\"nanpercentile\", a)\n q = true_divide(asarray(q), float32(100.0))\n return nanquantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,\n interpolation=interpolation, keepdims=keepdims)\n\n@_wraps(np.median)\ndef median(a, axis=None, out=None, overwrite_input=False, keepdims=False):\n _check_arraylike(\"median\", a)\n return quantile(a, 0.5, axis=axis, out=out, overwrite_input=overwrite_input,\n keepdims=keepdims, interpolation='midpoint')\n\n@_wraps(np.nanmedian)\ndef nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False):\n _check_arraylike(\"nanmedian\", a)\n return nanquantile(a, 0.5, axis=axis, out=out,\n overwrite_input=overwrite_input, keepdims=keepdims,\n 
interpolation='midpoint')\n\n\ndef _astype(arr, dtype):\n lax._check_user_dtype_supported(dtype, \"astype\")\n return lax.convert_element_type(arr, dtype)\n\n\ndef _nbytes(arr):\n return size(arr) * _dtype(arr).itemsize\n\n\ndef _view(arr, dtype=None, type=None):\n lax._check_user_dtype_supported(dtype, \"view\")\n if type is not None:\n raise NotImplementedError(\"`type` argument of array.view()\")\n if dtype is None:\n return arr\n arr_dtype = _dtype(arr)\n if arr_dtype == dtype:\n return arr\n # bool is implemented as lax:PRED, which is not compatible with lax.bitcast_convert_type.\n # We work around this by casting bool to uint8.\n if arr_dtype == bool_:\n arr = arr.astype(uint8)\n nbits_in = 8 * arr_dtype.itemsize\n nbits_out = 8 * _dtype(dtype).itemsize\n if nbits_in == nbits_out:\n if dtype == bool_:\n return lax.bitcast_convert_type(arr, uint8).astype(dtype)\n return lax.bitcast_convert_type(arr, dtype)\n if nbits_out > nbits_in and (shape(arr)[-1] * nbits_in) % nbits_out != 0:\n raise ValueError(\"When changing to a larger dtype, its size must be a divisor \"\n \"of the total size in bytes of the last axis of the array.\")\n byte_dtypes = {8: uint8, 16: uint16, 32: uint32, 64: uint64}\n if nbits_in not in byte_dtypes:\n raise NotImplementedError(f\"arr.view() for arr.dtype={arr_dtype}\")\n if nbits_out not in byte_dtypes:\n raise NotImplementedError(f\"arr.view(dtype) for dtype={dtype}\")\n dt_in = byte_dtypes[nbits_in]\n dt_out = byte_dtypes[nbits_out]\n arr_bytes = lax.bitcast_convert_type(arr, dt_in)\n if nbits_in < nbits_out:\n shifts = arange(0, nbits_out, nbits_in, dtype=dt_out)\n arr_bytes = arr_bytes.reshape(arr.shape[:-1] + (-1, nbits_out // nbits_in)).astype(dt_out)\n arr_bytes = (arr_bytes << shifts).sum(-1).astype(dt_out)\n else:\n shifts = arange(0, nbits_in, nbits_out, dtype=dt_in)\n arr_bytes = ((arr_bytes[..., newaxis] >> shifts) & iinfo(dt_out).max).astype(dt_out)\n arr_bytes = arr_bytes.reshape(arr_bytes.shape[:-2] + (-1,))\n if dtype == bool_:\n return lax.bitcast_convert_type(arr_bytes, uint8).astype(dtype)\n return lax.bitcast_convert_type(arr_bytes, dtype)\n\n### track unimplemented functions\n\n_NOT_IMPLEMENTED_DESC = \"\"\"\n*** This function is not yet implemented by jax.numpy, and will raise NotImplementedError ***\n\"\"\"\n\ndef _not_implemented(fun):\n @_wraps(fun, update_doc=False, lax_description=_NOT_IMPLEMENTED_DESC)\n def wrapped(*args, **kwargs):\n msg = \"Numpy function {} not yet implemented\"\n raise NotImplementedError(msg.format(fun))\n return wrapped\n\n\n### add method and operator overloads to arraylike classes\n\n# We add operator overloads to DeviceArray and ShapedArray. These method and\n# operator overloads mainly just forward calls to the corresponding lax_numpy\n# functions, which can themselves handle instances from any of these classes.\n\n_scalar_types = (int, float, complex, np.generic)\n\ndef _defer_to_unrecognized_arg(binary_op):\n # Ensure that other array types have the chance to override arithmetic.\n def deferring_binary_op(self, other):\n if not isinstance(other, _scalar_types + _arraylike_types + (core.Tracer,)):\n return NotImplemented\n return binary_op(self, other)\n return deferring_binary_op\n\ndef _swap_args(f):\n return lambda x, y: f(y, x)\n\ndef _unimplemented_setitem(self, i, x):\n msg = (\"'{}' object does not support item assignment. 
JAX arrays are \"\n \"immutable; perhaps you want jax.ops.index_update or \"\n \"jax.ops.index_add instead?\")\n raise TypeError(msg.format(type(self)))\n\ndef _operator_round(number, ndigits=None):\n out = round(number, decimals=ndigits or 0)\n # If `ndigits` is None, for a builtin float round(7.5) returns an integer.\n return out.astype(int_) if ndigits is None else out\n\n_operators = {\n \"getitem\": _rewriting_take,\n \"setitem\": _unimplemented_setitem,\n \"neg\": negative,\n \"pos\": positive,\n \"eq\": _defer_to_unrecognized_arg(equal),\n \"ne\": _defer_to_unrecognized_arg(not_equal),\n \"lt\": _defer_to_unrecognized_arg(less),\n \"le\": _defer_to_unrecognized_arg(less_equal),\n \"gt\": _defer_to_unrecognized_arg(greater),\n \"ge\": _defer_to_unrecognized_arg(greater_equal),\n \"abs\": abs,\n \"add\": _defer_to_unrecognized_arg(add),\n \"radd\": _defer_to_unrecognized_arg(add),\n \"sub\": _defer_to_unrecognized_arg(subtract),\n \"rsub\": _defer_to_unrecognized_arg(_swap_args(subtract)),\n \"mul\": _defer_to_unrecognized_arg(multiply),\n \"rmul\": _defer_to_unrecognized_arg(multiply),\n \"div\": _defer_to_unrecognized_arg(divide),\n \"rdiv\": _defer_to_unrecognized_arg(_swap_args(divide)),\n \"truediv\": _defer_to_unrecognized_arg(true_divide),\n \"rtruediv\": _defer_to_unrecognized_arg(_swap_args(true_divide)),\n \"floordiv\": _defer_to_unrecognized_arg(floor_divide),\n \"rfloordiv\": _defer_to_unrecognized_arg(_swap_args(floor_divide)),\n \"divmod\": _defer_to_unrecognized_arg(divmod),\n \"rdivmod\": _defer_to_unrecognized_arg(_swap_args(divmod)),\n \"mod\": _defer_to_unrecognized_arg(mod),\n \"rmod\": _defer_to_unrecognized_arg(_swap_args(mod)),\n \"pow\": _defer_to_unrecognized_arg(power),\n \"rpow\": _defer_to_unrecognized_arg(_swap_args(power)),\n \"matmul\": _defer_to_unrecognized_arg(matmul),\n \"rmatmul\": _defer_to_unrecognized_arg(_swap_args(matmul)),\n \"and\": _defer_to_unrecognized_arg(bitwise_and),\n \"rand\": _defer_to_unrecognized_arg(bitwise_and),\n \"or\": _defer_to_unrecognized_arg(bitwise_or),\n \"ror\": _defer_to_unrecognized_arg(bitwise_or),\n \"xor\": _defer_to_unrecognized_arg(bitwise_xor),\n \"rxor\": _defer_to_unrecognized_arg(bitwise_xor),\n \"invert\": bitwise_not,\n \"lshift\": _defer_to_unrecognized_arg(left_shift),\n \"rshift\": _defer_to_unrecognized_arg(right_shift),\n \"rlshift\": _defer_to_unrecognized_arg(_swap_args(left_shift)),\n \"rrshift\": _defer_to_unrecognized_arg(_swap_args(right_shift)),\n \"round\": _operator_round,\n}\n\n# These numpy.ndarray methods are just refs to an equivalent numpy function\n_nondiff_methods = [\"all\", \"any\", \"argmax\", \"argmin\", \"argpartition\", \"argsort\",\n \"nonzero\", \"searchsorted\", \"round\"]\n_diff_methods = [\"clip\", \"conj\", \"conjugate\", \"cumprod\", \"cumsum\",\n \"diagonal\", \"dot\", \"max\", \"mean\", \"min\", \"prod\", \"ptp\",\n \"ravel\", \"repeat\", \"sort\", \"squeeze\", \"std\", \"sum\",\n \"swapaxes\", \"take\", \"tile\", \"trace\", \"transpose\", \"var\"]\n\n# These methods are mentioned explicitly by nondiff_methods, so we create\n# _not_implemented implementations of them here rather than in __init__.py.\n# TODO(phawkins): implement these.\nargpartition = _not_implemented(np.argpartition)\n_NOT_IMPLEMENTED = ['argpartition']\n\n# Set up operator, method, and property forwarding on Tracer instances containing\n# ShapedArray avals by following the forwarding conventions for Tracer.\n# Forward operators using a single-underscore-prefix naming convention:\nfor operator_name, 
function in _operators.items():\n setattr(ShapedArray, \"_{}\".format(operator_name), staticmethod(function))\n# Forward methods and properties using core.aval_method and core.aval_property:\nfor method_name in _nondiff_methods + _diff_methods:\n setattr(ShapedArray, method_name, core.aval_method(globals()[method_name]))\nsetattr(ShapedArray, \"reshape\", core.aval_method(_reshape_method))\nsetattr(ShapedArray, \"flatten\", core.aval_method(ravel))\nsetattr(ShapedArray, \"T\", core.aval_property(transpose))\nsetattr(ShapedArray, \"real\", core.aval_property(real))\nsetattr(ShapedArray, \"imag\", core.aval_property(imag))\nsetattr(ShapedArray, \"astype\", core.aval_method(_astype))\nsetattr(ShapedArray, \"view\", core.aval_method(_view))\nsetattr(ShapedArray, \"nbytes\", core.aval_property(_nbytes))\n\n\n# Forward operators, methods, and properties on DeviceArray to lax_numpy\n# functions (with no Tracers involved; this forwarding is direct)\nfor operator_name, function in _operators.items():\n setattr(DeviceArray, \"__{}__\".format(operator_name), function)\nfor method_name in _nondiff_methods + _diff_methods:\n setattr(DeviceArray, method_name, globals()[method_name])\nsetattr(DeviceArray, \"reshape\", _reshape_method)\nsetattr(DeviceArray, \"flatten\", ravel)\nsetattr(DeviceArray, \"T\", property(transpose))\nsetattr(DeviceArray, \"real\", property(real))\nsetattr(DeviceArray, \"imag\", property(imag))\nsetattr(DeviceArray, \"astype\", _astype)\nsetattr(DeviceArray, \"view\", _view)\nsetattr(DeviceArray, \"nbytes\", property(_nbytes))\n\n\n# Experimental support for NumPy's module dispatch with NEP-37.\n# Currently requires https://github.com/seberg/numpy-dispatch\n_JAX_ARRAY_TYPES = (DeviceArray, core.Tracer)\n_HANDLED_ARRAY_TYPES = _JAX_ARRAY_TYPES + (np.ndarray,)\n\ndef __array_module__(self, types):\n if builtins.all(issubclass(t, _HANDLED_ARRAY_TYPES) for t in types):\n return jax.numpy\n else:\n return NotImplemented\n\nsetattr(ShapedArray, \"_array_module\", staticmethod(__array_module__))\nsetattr(DeviceArray, \"__array_module__\", __array_module__)\n\n\n# Extra methods that are handy\nsetattr(ShapedArray, \"broadcast\", core.aval_method(lax.broadcast))\nsetattr(ShapedArray, \"broadcast_in_dim\", core.aval_method(lax.broadcast_in_dim))\nsetattr(ShapedArray, \"split\", core.aval_method(split))\nsetattr(DeviceArray, \"broadcast\", lax.broadcast)\nsetattr(DeviceArray, \"broadcast_in_dim\", lax.broadcast_in_dim)\nsetattr(DeviceArray, \"split\", split)\n\ndef _compress_method(a, condition, axis=None, out=None):\n return compress(condition, a, axis, out)\n\nsetattr(ShapedArray, \"compress\", _compress_method)\nsetattr(DeviceArray, \"compress\", _compress_method)\n\n@partial(jit, static_argnums=(1,2,3))\ndef _multi_slice(arr: DeviceArray,\n start_indices: Tuple[Tuple[int, ...]],\n limit_indices: Tuple[Tuple[int, ...]],\n removed_dims: Tuple[Tuple[int, ...]]):\n \"\"\"Extracts multiple slices from `arr`.\n\n This is used to shard DeviceArray arguments to pmap. 
It's implemented as a\n DeviceArray method here to avoid circular imports.\n \"\"\"\n results = []\n for starts, limits, removed in safe_zip(start_indices, limit_indices, removed_dims):\n sliced = lax.slice(arr, starts, limits)\n if removed:\n sliced = sliced.reshape(np.delete(sliced.shape, removed))\n results.append(sliced)\n return results\nsetattr(DeviceArray, \"_multi_slice\", _multi_slice)\n\n\n# Syntactic sugar for scatter operations.\nclass _IndexUpdateHelper:\n # Note: this docstring will appear as the docstring for the `at` property.\n \"\"\"Indexable helper object to call indexed update functions.\n\n The `at` property is syntactic sugar for calling the indexed update functions\n defined in :mod:`jax.ops`, and acts as a pure equivalent of in-place\n modifications.\n\n In particular:\n - ``x = x.at[idx].set(y)`` is a pure equivalent of ``x[idx] = y``.\n - ``x = x.at[idx].add(y)`` is a pure equivalent of ``x[idx] += y``.\n - ``x = x.at[idx].mul(y)`` is a pure equivalent of ``x[idx] *= y``.\n - ``x = x.at[idx].min(y)`` is a pure equivalent of\n ``x[idx] = minimum(x[idx], y)``.\n - ``x = x.at[idx].max(y)`` is a pure equivalent of\n ``x[idx] = maximum(x[idx], y)``.\n \"\"\"\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n self.array = array\n\n def __getitem__(self, index):\n return _IndexUpdateRef(self.array, index)\n\n def __repr__(self):\n return f\"_IndexUpdateHelper({repr(self.array)})\"\n\n\nclass _IndexUpdateRef:\n \"\"\"Helper object to call indexed update functions for an (advanced) index.\n\n This object references a source array and a specific indexer into that array.\n Methods on this object return copies of the source array that have been\n modified at the positions specified by the indexer.\n \"\"\"\n __slots__ = (\"array\", \"index\")\n\n def __init__(self, array, index):\n self.array = array\n self.index = index\n\n def __repr__(self):\n return f\"_IndexUpdateRef({repr(self.array)}, {repr(self.index)})\"\n\n def set(self, values, indices_are_sorted=False, unique_indices=False):\n \"\"\"Pure equivalent of ``x[idx] = y``.\n\n ``x.at[idx].set(y)`` is syntactic sugar for\n ``jax.ops.index_update(x, jax.ops.index[idx], y)``, and\n returns the value of ``x`` that would result from the NumPy-style\n :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] = y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return ops.index_update(self.array, self.index, values,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices)\n\n def add(self, values, indices_are_sorted=False, unique_indices=False):\n \"\"\"Pure equivalent of ``x[idx] += y``.\n\n ``x.at[idx].add(y)`` is syntactic sugar for\n ``jax.ops.index_add(x, jax.ops.index[idx], y)``, and\n returns the value of ``x`` that would result from the NumPy-style\n :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] += y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return ops.index_add(self.array, self.index, values,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices)\n\n def mul(self, values, indices_are_sorted=False, unique_indices=False):\n \"\"\"Pure equivalent of ``x[idx] *= y``.\n\n ``x.at[idx].mul(y)`` is syntactic sugar for\n ``jax.ops.index_mul(x, jax.ops.index[idx], y)``, and\n returns the value of ``x`` that would result from the NumPy-style\n :mod:`indexed assignment <numpy.doc.indexing>` ``x[idx] *= y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return ops.index_mul(self.array, self.index, values,\n indices_are_sorted=indices_are_sorted,\n 
unique_indices=unique_indices)\n\n  def min(self, values, indices_are_sorted=False, unique_indices=False):\n    \"\"\"Pure equivalent of ``x[idx] = minimum(x[idx], y)``.\n\n    ``x.at[idx].min(y)`` is syntactic sugar for\n    ``jax.ops.index_min(x, jax.ops.index[idx], y)``, and\n    returns the value of ``x`` that would result from the NumPy-style\n    :mod:`indexed assignment <numpy.doc.indexing>`\n    ``x[idx] = minimum(x[idx], y)``.\n\n    See :mod:`jax.ops` for details.\n    \"\"\"\n    return ops.index_min(self.array, self.index, values,\n                         indices_are_sorted=indices_are_sorted,\n                         unique_indices=unique_indices)\n\n  def max(self, values, indices_are_sorted=False, unique_indices=False):\n    \"\"\"Pure equivalent of ``x[idx] = maximum(x[idx], y)``.\n\n    ``x.at[idx].max(y)`` is syntactic sugar for\n    ``jax.ops.index_max(x, jax.ops.index[idx], y)``, and\n    returns the value of ``x`` that would result from the NumPy-style\n    :mod:`indexed assignment <numpy.doc.indexing>`\n    ``x[idx] = maximum(x[idx], y)``.\n\n    See :mod:`jax.ops` for details.\n    \"\"\"\n    return ops.index_max(self.array, self.index, values,\n                         indices_are_sorted=indices_are_sorted,\n                         unique_indices=unique_indices)\n\nsetattr(DeviceArray, \"at\", property(_IndexUpdateHelper))\nsetattr(ShapedArray, \"at\", core.aval_property(_IndexUpdateHelper))\n" ]
[ [ "numpy.asarray", "numpy.issubdtype", "numpy.dtype", "numpy.concatenate", "numpy.all", "numpy.any", "numpy.where", "numpy.arange", "numpy.version.version.split", "numpy.ceil", "numpy.size", "numpy.diff", "numpy.insert", "numpy.ravel", "numpy.zeros", "numpy.log", "numpy.multiply", "numpy.ndim", "numpy.int64", "numpy.cumprod", "numpy.delete", "numpy.equal", "numpy.not_equal", "numpy.array", "numpy.sum", "numpy.ones", "numpy.sign", "numpy.shape", "numpy.broadcast_to", "numpy.isscalar", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PyWiFeS/tools
[ "f6f414134d1ce5c5a4c35add3400859900f3bca6" ]
[ "process_stellar.py" ]
[ "\"\"\"After analysis with WiFeS, this suite of routines can extract a star optimally\nand calculate its radial velocity.\n\nWARNING - this code is still not properly documented or complete. Any contributions\nwelcome!\n\nexample lines of code...\n\nExecuting from the code directory, e.g. with Margaret's output directory:\nrv_process_dir('PROCESSED_DATA_DIRECTORY', outdir =/priv/mulga1/mstream/wifes/wifes/tools')\n\nfn = 'T2m3wr-20140617.144009-0167.p11.fits'\nflux,sig,wave = read_and_find_star_p11(fn)\n\n\"\"\"\n\nfrom __future__ import print_function\ntry:\n import pyfits\nexcept:\n import astropy.io.fits as pyfits\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport scipy.optimize as op\nimport pdb\nimport glob\nimport pickle\n#from readcol import readcol\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom astropy.modeling import models, fitting\nfrom os.path import exists\nplt.ion()\n\ndef find_nearest(array,value):\n idx = (np.abs(array-value)).argmin()\n return array[idx]\n\ndef onclick(event):\n global ix, iy\n ix, iy = event.xdata, event.ydata\n \n # print 'x = %d, y = %d'%(\n # ix, iy)\n \n # assign global variable to access outside of function\n global coords\n coords.append((ix, iy))\n \n # Disconnect after 2 clicks\n if len(coords) == 2:\n fig.canvas.mpl_disconnect(cid)\n plt.close(1)\n return\n\ncoords = []\n\ndef read_and_find_star_p11(fn, manual_click=False, npix=7, subtract_sky=True,sky_rad=2):\n \"\"\"Read in a cube and find the star.\n Return a postage stamp around the star and the coordinates\n within the stamp\n \n NB This didn't really work as the details of flux calibration doesn't easily \n enable optimal extraction. \n \n This function should probably be REMOVED.\n \"\"\"\n a = pyfits.open(fn)\n #Assume Stellar mode if the flux is zero in any of the first columns\n if a[0].data[0,0,1]==0:\n flux = a[0].data[:,:,13:]\n sig = a[1].data[:,:,13:]\n else:\n flux = a[0].data\n sig = a[1].data\n image = np.median(flux,axis=0)\n maxpx = np.unravel_index(np.argmax(image[1:-1,1:-1]),image[1:-1,1:-1].shape)\n maxpx = (maxpx[0]+1,maxpx[1]+1)\n plt.clf()\n plt.imshow(image,interpolation='nearest', vmin=0)\n plt.plot(maxpx[1],maxpx[0],'rx')\n if subtract_sky:\n xy = np.meshgrid(range(image.shape[1]),range(image.shape[0]))\n dist = np.sqrt((xy[0]-maxpx[1])**2.0 + (xy[1]-maxpx[0])**2.0)\n sky = np.where( (xy[0] > 0) & (xy[1] > 0) & \n (xy[0] < image.shape[1]-1) & (xy[1] < image.shape[0]-1) &\n (dist > sky_rad) & (dist < image.shape[1]))\n for i in range(flux.shape[0]):\n flux[i,:,:] -= np.median(flux[i,sky[0],sky[1]])\n ymin = np.min([np.max([maxpx[0]-3,0]),image.shape[0]-npix])\n xmin = np.min([np.max([maxpx[1]-3,0]),image.shape[1]-npix])\n flux_stamp = flux[:,ymin:ymin+npix,xmin:xmin+npix]\n sig_stamp = sig[:,ymin:ymin+npix,xmin:xmin+npix]\n wave = a[0].header['CRVAL3'] + np.arange(flux.shape[0])*a[0].header['CDELT3']\n return flux_stamp,sig_stamp,wave\n \ndef read_and_find_star_p08(fn, manual_click=False, npix=7, subtract_sky=True, \n sky_rad=2, fig_fn='', fig_title=None, \n do_median_subtraction=False, arm='',min_slit_i=0,):\n \"\"\"Read in a cube and find the star.\n Return a postage stamp around the star and the wavelength scale\n \n NB This didn't really work as the details of flux calibration doesn't easily \n enable optimal extraction.\n\n Note: This may give unexpected results when more than a single star is \n within the IFU.\n \n Parameters\n ----------\n fn: string\n filename\n npix: int\n Number of pixels to 
extract\n \"\"\"\n a = pyfits.open(fn)\n Obj_name = a[0].header['OBJNAME']\n Obs_date = a[0].header['DATE-OBS'].split('T')[0]\n RA = a[0].header['RA']\n DEC = a[0].header['DEC']\n\n # Determine the spectrograph mode\n # ccd_sec has form [x_min:x_max, y_min:y_max]\n y_min = int(a[0].header[\"CCDSEC\"].split(\",\")[-1].split(\":\")[0])\n\n # Using Full Frame\n if y_min == 1:\n flux = np.array([a[i].data for i in range(1,26)])\n # Stellar mode (i.e. half frame)\n else:\n flux = np.array([a[i].data for i in range(1,13)])\n\n wave = a[1].header['CRVAL1'] + np.arange(flux.shape[2])*a[1].header['CDELT1']\n image = np.median(flux,axis=2)\n\n if do_median_subtraction:\n image = np.log10(image)\n image -= np.median(image)\n\n #!!! 1->7 is a HACK - because WiFeS seems to often fail on the edge pixels !!!\n plt.clf()\n global fig\n fig = plt.figure(1)\n plt.imshow(image,interpolation='nearest')\n\n # Set title\n if fig_title is not None:\n plt.title(fig_title)\n\n if manual_click == True:\n global coords\n\n # Call click func\n global cid\n cid = fig.canvas.mpl_connect('button_press_event', onclick)\n\n plt.show(1)\n maxpx = (int(round(np.min([coords[0][1], coords[1][1]]))), \n int(round(np.min([coords[0][0], coords[1][0]]))))\n coords = []\n else:\n maxpx = np.unravel_index(np.argmax(image[:,10:-10]),image[:,10:-10].shape)\n maxpx = (maxpx[0],maxpx[1]+10)\n\n # Plotting image\n plt.close(\"all\")\n fig, axes = plt.subplots(2,2)\n\n ax_im, ax_y, ax_x, _ = axes.flatten()\n _.set_visible(False)\n\n im_cmap = ax_im.imshow(image,interpolation='nearest')\n cb = fig.colorbar(im_cmap, ax=ax_im, fraction=0.0155, pad=0.0)\n ax_im.plot(maxpx[1],maxpx[0],'wx')\n fig.suptitle(str(Obj_name) + '_' + str(Obs_date) + '_(' + str(RA) + ',' \n + str(DEC) + ')_' + arm)\n\n # Plotting X and Y distributions\n ax_y.plot(np.log10(np.sum(image[:,min_slit_i:], axis=1)), \n np.arange(image.shape[0]), \"r.-\")\n ax_y.set_ylim(image.shape[0],0)\n #ax_y.set_xscale('log')\n\n ax_x.plot(np.arange(image.shape[1]), np.log10(np.sum(image, axis=0)), \".-\")\n #ax_x.set_yscale('log')\n \n # Set aspect the same\n asp_im = np.abs(float(np.diff(ax_im.get_xlim())[0]) / np.diff(ax_im.get_ylim())[0])\n asp_x = float(np.diff(ax_x.get_xlim())[0]) / np.diff(ax_x.get_ylim())[0]\n asp_y = float(np.diff(ax_y.get_xlim())[0]) / -np.diff(ax_y.get_ylim())[0]\n \n ax_x.set_aspect(asp_x/asp_im)\n ax_y.set_aspect(asp_y/asp_im)\n\n ax_x.set_xlabel('x pixel')\n ax_x.set_ylabel(r'$\\log_{10}$(x counts)')\n ax_im.set_ylabel('y pixel')\n ax_y.set_xlabel(r'$\\log_{10}$(y counts)')\n\n cb.ax.tick_params(labelsize=\"xx-small\")\n ax_im.tick_params(axis='both', which='major', labelsize=\"xx-small\")\n ax_x.tick_params(axis='both', which='major', labelsize=\"xx-small\")\n ax_y.tick_params(axis='both', which='major', labelsize=\"xx-small\")\n\n # Plot sum along y axis\n #ax_y.plot(np.sum(maxpx[0], axis=0), np.arange(maxpx.shape[0]), \".-\")\n\n #ax_x.plot(np.arange(maxpx.shape[1]), np.sum(maxpx[0], axis=0), \".-\")\n \n # Sky Subtraction\n if subtract_sky:\n xy = np.meshgrid(range(image.shape[1]),range(image.shape[0]))\n dist = np.sqrt((xy[0]-maxpx[1])**2.0 + (xy[1]-maxpx[0])**2.0)\n sky = np.where( (xy[0] > 0) & (xy[1] > 0) & \n (xy[0] < image.shape[1]-1) & (xy[1] < image.shape[0]-1) &\n (dist > sky_rad) & (dist < image.shape[1]))\n\n for i in range(flux.shape[2]):\n flux[:,:,i] -= np.median(flux[sky[0],sky[1],i])\n ymin = np.min([np.max([maxpx[0]-npix//2,0]),image.shape[0]-npix])\n xmin = np.min([np.max([maxpx[1]-npix//2,0]),image.shape[1]-npix])\n flux_stamp = 
flux[ymin:ymin+npix,xmin:xmin+npix,:]\n\n # Offset mins so plotted lines are at edge of pixels\n xminp = xmin - 0.5\n yminp = ymin - 0.5\n\n # Plot vertical bounds\n ax_im.plot([xminp, xminp], [yminp+npix, yminp], c=\"r\")\n ax_im.plot([xminp+npix, xminp+npix], [yminp+npix, yminp], c=\"r\")\n\n # Plot horizontal bounds\n ax_im.plot([xminp, xminp+npix], [yminp+npix, yminp+npix], c=\"r\")\n ax_im.plot([xminp, xminp+npix], [yminp, yminp], c=\"r\")\n\n if len(fig_fn)>0:\n #plt.gcf().set_size_inches(5*asp_im, 5/asp_im)\n plt.savefig(fig_fn, bbox_inches='tight')\n return flux_stamp,wave\n \ndef weighted_extract_spectrum(flux_stamp_in, readout_var=None):\n \"\"\"Optimally extract the spectrum based on a constant weighting\n \n Based on a p08 file axis ordering, but transposes axes\n as required. \n \n Readout variance is roughly 11 in the p08 extracted spectra\n \n Parameters\n ----------\n flux_stamp: numpy array\n nx x ny x nwave IFU image as a function of wavelength\n \n readout_var: float (optional)\n Readout variance in extracted spectrum in DN.\n \n TODO: \n 1) Look for and remove bad pix/cosmic rays.\n 2) Remove dodgy constant for readout_var.\n \"\"\"\n if flux_stamp_in.shape[0]>flux_stamp_in.shape[1]:\n flux_stamp = np.transpose(flux_stamp_in, (1,2,0))\n else:\n flux_stamp = flux_stamp_in\n \n #Find the readout variance roughly if it isn't given.\n if readout_var is None:\n rsdev = 1.4826/np.sqrt(2)*np.nanmedian(np.abs(flux_stamp[0,0,1:]-flux_stamp[0,0,:-1]))\n readout_var = rsdev**2\n \n #Find the median flux over all wavelengths, limiting to be >0\n flux_med = np.maximum(np.median(flux_stamp,axis=2),0)\n \n pixel_var = flux_med + readout_var\n weights = flux_med/pixel_var\n n_spaxels = np.prod(weights.shape)\n\n #Form a weighted average, then multiply by n_spaxels to get a sum\n spectrum = n_spaxels * np.array(\n [np.sum(flux_stamp[:,:,i]*weights)/np.sum(weights) for i in range(flux_stamp.shape[2])]) \n \n #Old calculation of sigma. 
Let's be a little more readable!\n    sig = np.array([np.sqrt(np.sum((np.maximum(flux_stamp[:,:,i],0)+readout_var)*weights**2)) for i in range(flux_stamp.shape[2])])\n\n    #The variance of each pixel is flux_stamp + readout_var, with flux_stamp being an estimate\n    #of flux per pixel, which should not be less than zero.\n    #var = [np.sum((np.maximum(flux_stamp[:,:,i],0)+readout_var)*weights**2)/np.sum(weights)**2 for i in range(flux_stamp.shape[2])]\n    #sig = n_spaxels * np.sqrt(np.array(var))\n\n    return spectrum,sig\n\ndef conv_ambre_spect(ambre_dir,ambre_conv_dir):\n    \"\"\"Take all the AMBRE spectra from a directory, convolve and re-sample\n    by a factor of 10, then save to a new directory\"\"\"\n    infns = glob.glob(ambre_dir + '/*fits')\n    for infn in infns:\n        data = pyfits.getdata(infn)\n        data = np.convolve(data,np.ones(10)/10., 'same')\n        conv_data = data[10*np.arange(90000,dtype='int')].astype('float32')\n        ix_start = infn.rfind('/') + 1\n        ix_end = infn.rfind('.')\n        outfn = infn[ix_start:ix_end] + 'conv.fits'\n        pyfits.writeto(ambre_conv_dir + '/' + outfn,conv_data, clobber=True)\n\ndef conv_phoenix_spect(pho_dir,pho_conv_dir):\n    \"\"\"\n    Take all phoenix spectra from a directory, convolve to 0.1A,\n    then save to a new directory\n    Currently resampling onto a wavelength grid of 0.1A also, from\n    3000 to 12000A to match AMBRE spectra\n    also mostly matching filenames\n    \"\"\"\n    infns = glob.glob(pho_dir + '/*.fits')\n    for ii,infn in enumerate(infns):\n        data = pyfits.getdata(infn)\n        wav = pyfits.getdata('WAVE_PHOENIX-ACES-AGSS-COND-2011.fits')\n        ##go from vacuum to air wavelengths\n        wav = wav/(1.0+2.735182E-4+131.4182/wav**2+2.76249e8/wav**4)\n        cdata = np.convolve(data,np.ones(10)/10.0,'same')\n        intwav = 0.1*np.arange(90000)+3000.0\n        icdata = np.interp(intwav,wav,cdata)\n        n1 = infn.split('/')[-1].split('lte')[1].split('-')\n        n2 = 'g'+n1[1]\n        n1 = 'p'+n1[0]\n        outname = pho_conv_dir+'/'+n1 + ':'+n2+':m0.0:t01:z+0.00:a+0.00.PHOENIXconv.fits'\n        pyfits.writeto(outname,icdata,clobber=True)\n        print('convolving '+ str(ii+1) +' out of ' + str(len(infns)))\n\n\ndef make_wifes_p08_template(fn, out_dir,rv=0.0):\n    \"\"\"From a p08 file, create a template spectrum for future cross-correlation.\n    The template is interpolated onto a 0.1 Angstrom grid (to match higher-resolution\n    templates).\n\n    Parameters\n    ----------\n    fn: string\n        p08 fits filename (including path)\n\n    out_dir: string\n        Output directory\n\n    rv: float (optional)\n        Radial velocity of the template star in km/s\n\n    \"\"\"\n    flux_stamp,wave = read_and_find_star_p08(fn)\n    heliocentric_correction = pyfits.getheader(fn)['RADVEL']\n    star = pyfits.getheader(fn)['OBJECT']\n    spectrum,sig = weighted_extract_spectrum(flux_stamp)\n    dell_template = 0.1\n    wave_template=np.arange(90000)*dell_template + 3000\n    spectrum_interp = np.interp(wave_template,wave*(1 - (rv - heliocentric_correction)/2.998e5),spectrum)\n    outfn = out_dir + star + ':' + fn.split('/')[-1]\n    pyfits.writeto(outfn,spectrum_interp,clobber=True)\n\n\ndef rv_fit_mlnlike(shift,modft,data,errors,gaussian_offset):\n    \"\"\"Return minus the logarithm of the likelihood of the model fitting the data\n\n    Parameters\n    ----------\n    shift: float\n        Shift in pixels\n    modft: array-like\n        Real numpy Fourier transform of the model spectrum.\n    data: array-like\n        spectral data.\n    errors: array-like\n        uncertainties in spectral data\n    gaussian_offset: float\n        Offset to Gaussian uncertainty distribution\n    \"\"\"\n    shifted_mod = np.fft.irfft(modft * np.exp(-2j * np.pi * np.arange(len(modft))/len(data) * shift))\n    return 
-np.sum(np.log(np.exp(-(data - shifted_mod)**2/2.0/errors**2) + gaussian_offset))\n\ndef rv_shift_binary(shift1, shift2, alpha, modft1, modft2):\n \"\"\"Shift two templates and add them, to model a binary star\"\"\"\n data_len = (len(modft1)-1)*2\n shifted_mod1 = np.fft.irfft(modft1 * np.exp(-2j * np.pi * np.arange(len(modft1))/data_len * shift1))\n shifted_mod2 = np.fft.irfft(modft2 * np.exp(-2j * np.pi * np.arange(len(modft2))/data_len * shift2))\n return (shifted_mod1 + alpha*shifted_mod2)/(1.0 + alpha)\n\n \ndef make_fake_binary(spect,wave,sig, template_fns, flux_ratio, rv0, rv1):\n \"\"\"Make a fake binary in order to test todcor etc!\"\"\"\n# (wave_log, spect_int, sig_int, template_ints) = \\\n# interpolate_spectra_onto_log_grid(spect,wave,sig, template_fns)\n \n wave_templates = []\n spect_templates = []\n for template_fn in template_fns:\n dd = np.loadtxt(template_fn)\n wave_templates.append(dd[:,0])\n spect_templates.append(dd[:,1])\n wave_templates = np.array(wave_templates)\n spect_templates = np.array(spect_templates)\n \n c_light = 3e5\n fake_binary = np.interp(wave_templates[0]*(1 - rv0/c_light),wave_templates[0], spect_templates[0]) + \\\n np.interp(wave_templates[0]*(1 - rv1/c_light),wave_templates[1], spect_templates[1])*flux_ratio\n \n #fake_binary = np.interp(wave_log*(1 - rv0/c_light),wave_log, template_ints[0]) + \\\n # np.interp(wave_log*(1 - rv1/c_light),wave_log, template_ints[1])*flux_ratio\n #Un-continuum-subtract\n #binspect = fake_binary + 1\n #return binspect, wave_log, np.ones(len(binspect))*0.01\n return fake_binary, wave_templates[0], np.ones(len(wave_templates[0]))*0.01\n\ndef interpolate_spectra_onto_log_grid(spect,wave,sig, template_dir,bad_intervals=[],\\\n smooth_distance=201,convolve_template=True, nwave_log=int(1e4), \\\n subtract_smoothed=True, interp_k=1):\n \"\"\"Interpolate both the target and template spectra onto a common wavelength grid\"\"\"\n \n #Create our logarithmic wavelength scale with the same min and max wavelengths as the\n #target spectrum, and nwave_log wavelengths.\n wave_log = np.min(wave)*np.exp( np.log(np.max(wave)/np.min(wave))/\\\n nwave_log*np.arange(nwave_log))\n \n #Interpolate the target spectrum onto this scale\n #spect_int = np.interp(wave_log,wave,spect)\n #sig_int = np.interp(wave_log,wave,sig)\n spl = InterpolatedUnivariateSpline(wave, spect, k=interp_k)\n spect_int = spl(wave_log)\n spl = InterpolatedUnivariateSpline(wave, sig, k=interp_k)\n sig_int = spl(wave_log)\n \n #Normalise \n sig_int /= np.median(spect_int)\n spect_int /= np.median(spect_int)\n \n #Remove bad intervals \n for interval in bad_intervals:\n wlo = np.where(wave_log > interval[0])[0]\n if len(wlo)==0: \n continue\n whi = np.where(wave_log > interval[1])[0]\n if len(whi)==0:\n whi = [len(wave_log)-1]\n whi = whi[0]\n wlo = wlo[0]\n spect_int[wlo:whi] = spect_int[wlo] + np.arange(whi-wlo,dtype='float')/(whi-wlo)*(spect_int[whi] - spect_int[wlo])\n sig_int[wlo:whi]=1\n \n if subtract_smoothed:\n #Subtract smoothed spectrum\n spect_int -= spect_int[0] + np.arange(len(spect_int))/(len(spect_int)-1.0)*(spect_int[-1]-spect_int[0])\n spect_int -= np.convolve(spect_int,np.ones(smooth_distance)/smooth_distance,'same')\n \n #Now we find the interpolated template spectra, template_ints\n template_fns = template_dir\n template_ints = np.zeros( (len(template_fns),len(wave_log)) )\n for i,template_fn in enumerate(template_fns):\n try:\n #Try loading a reduced WiFeS file first... 
\n            if template_fn.find(\"p08\") >= len(template_fn) - 8:\n                print('Using raw wifes p08 file')\n                flux,wave_template=read_and_find_star_p08(template_fn)\n                spect_template,dummy = weighted_extract_spectrum(flux)\n                dell_template = np.mean(wave_template[1:]-wave_template[:-1])\n            #Try loading pickled RV standards\n            elif template_fn.find('pkl') >= len(template_fn)-4:\n                print('Using pickled Standards')\n                template_file = open(template_fn, 'rb')\n                wave_template, spect_template = pickle.load(template_file)\n                dell_template = np.mean(wave_template[1:]-wave_template[:-1])\n            #Next try a template text file (wavelength and flux in 2 columns)\n            elif template_fn.find('txt') >= len(template_fn)-4:\n                print('Using text file input')\n                dd = np.loadtxt(template_fn)\n                dell_template = np.mean(dd[1:,0]-dd[:-1,0])\n                wave_template = dd[:,0]\n                spect_template = dd[:,1]\n            #Finally try the Ambre convolved spectral format.\n            elif template_fn.find('fit') >= len(template_fn)-4:\n                print('Using ambre models (fits with fixed wavelength grid)')\n                spect_template = pyfits.getdata(template_fn)\n                dell_template = 0.1\n                wave_template=np.arange(90000)*dell_template + 3000\n            else:\n                print('Invalid rv standard or model file: ' + template_fn)\n                raise UserWarning\n        except Exception:\n            print('Error loading model spectrum')\n            raise UserWarning\n\n        if convolve_template:\n            #Amount of subsampling in the template\n            template_subsamp = int((wave[1]-wave[0])/dell_template)\n\n            #Make sure it is an odd number to prevent shifting...\n            template_subsamp = np.maximum((template_subsamp//2)*2 - 1,1)\n            spect_template = np.convolve(np.convolve(spect_template,np.ones(template_subsamp)/template_subsamp,'same'),\\\n                np.ones(2*template_subsamp+1)/(2*template_subsamp+1),'same')\n\n        #Interpolate onto the log wavelength grid.\n        #template_int = np.interp(wave_log,wave_template,spect_template)\n        spl = InterpolatedUnivariateSpline(wave_template,spect_template, k=interp_k)\n        template_int = spl(wave_log)\n\n        #Normalise\n        template_int /= np.median(template_int)\n\n        #Remove bad intervals\n        for interval in bad_intervals:\n            wlo = np.where(wave_log > interval[0])[0]\n            if len(wlo)==0:\n                continue\n            whi = np.where(wave_log > interval[1])[0]\n            if len(whi)==0:\n                whi = [len(wave_log)-1]\n            whi = whi[0]\n            wlo = wlo[0]\n            template_int[wlo:whi] = template_int[wlo] + np.arange(whi-wlo, dtype='float')/(whi-wlo)*(template_int[whi] - template_int[wlo])\n        if subtract_smoothed:\n            #Subtract smoothed spectrum\n            template_int -= template_int[0] + np.arange(len(template_int))/(len(template_int)-1.0)*(template_int[-1]-template_int[0])\n            template_int -= np.convolve(template_int,np.ones(smooth_distance)/smooth_distance,'same')\n        template_ints[i,:] = template_int\n\n    return wave_log, spect_int, sig_int, template_ints\n\ndef calc_rv_template(spect,wave,sig, template_dir,bad_intervals,smooth_distance=101, \\\n        gaussian_offset=1e-4,nwave_log=1e4,oversamp=1,fig_fn='',convolve_template=True,\\\n        starnumber=0, plotit=False, save_figures=False, save_dir='./', heliocentric_correction=0.):\n    \"\"\"Compute a radial velocity based on a best-fitting template spectrum.\n    Teff is estimated at the same time.\n\n    Parameters\n    ----------\n    spect: array-like\n        The reduced WiFeS spectrum\n\n    wave: array-like\n        The wavelengths corresponding to the reduced WiFeS spectrum\n\n    template_dir: string\n        The directory containing template spectra convolved to 0.1 Angstrom resolution\n\n    bad_intervals:\n        List of wavelength intervals where e.g. 
telluric absorption is bad.\n\n    smooth_distance: float\n        Distance to smooth for \"continuum\" correction\n\n    oversamp: float\n        Oversampling of the input wavelength scale. The slit is assumed 2 pixels wide.\n\n    gaussian_offset: float\n        Offset for the likelihood function from a Gaussian normalised to 1.\n\n    Returns\n    -------\n    rv: float\n        Radial velocity in km/s\n    rv_sig: float\n        Uncertainty in radial velocity (NB assumes good model fit)\n    template: string\n        Filename of the best-fitting template spectrum used for cross-correlation.\n    \"\"\"\n    if isinstance(template_dir, list):\n        template_fns = template_dir\n    else:\n        template_fns = glob.glob(template_dir)\n\n    #ADD IN HELIOCENTRIC CORRECTION SOMEWHERE:\n    #Make the Heliocentric correction...\n    #rv += h['RADVEL']\n\n    #Interpolate the target and template spectra.\n    (wave_log, spect_int, sig_int, template_ints) = interpolate_spectra_onto_log_grid(spect,wave,sig, template_fns,bad_intervals=bad_intervals, smooth_distance=smooth_distance,convolve_template=convolve_template, nwave_log=nwave_log)\n\n    #Do a cross-correlation to the nearest \"spectral pixel\" for each template\n    drv = np.log(wave_log[1]/wave_log[0])*2.998e5\n    rvs = np.zeros(len(template_fns))\n    peaks = np.zeros(len(template_fns))\n    for i,template_fn in enumerate(template_fns):\n        template_int = template_ints[i]\n        if save_figures:\n            plt.clf()\n            plt.plot(wave_log, template_int, label='template')\n            plt.plot(wave_log, spect_int, label='spectrum')\n            plt.title('Template no.'+str(i+1))\n            plt.savefig(save_dir + 'spectrum_vs_template_' + template_fns[i].split('/')[-1].split('.fits')[0] + '.png')\n            plt.clf()\n        cor = np.correlate(spect_int,template_int,'same')\n        ##here it's a good idea to limit where the peak Xcorrelation can be, only search for a peak within 1000 of rv=0\n        ## that's an RV range of -778 to 778 for the default spacings in the code\n        peaks[i] = np.max(cor[int(nwave_log/2)-100:int(nwave_log/2)+100])/np.sqrt(np.sum(np.abs(template_int)**2))\n        rvs[i] = (np.argmax(cor[int(nwave_log/2)-100:int(nwave_log/2)+100])-100)*drv\n        if starnumber == 0: print('Correlating Template ' + str(i+1)+' out of ' + str(len(template_fns)))\n        if starnumber >0 : print('Correlating Template ' + str(i+1)+' out of ' + str(len(template_fns)) +' for star '+str(starnumber))\n        this_rvs = drv*(np.arange(2*smooth_distance)-smooth_distance)\n        correlation = cor[int(nwave_log/2)-100:int(nwave_log/2)+100]/np.sqrt(np.sum(np.abs(template_int)**2))\n        best_ind = np.argmax(correlation)\n        print(\"best RV for template \"+str(i+1)+\" is \"+str(this_rvs[best_ind+1] + heliocentric_correction))\n        if save_figures:\n            plt.clf()\n            plt.plot(this_rvs[1:-1], correlation/np.max(correlation))\n            plt.title('Correlation_with_template_no.'+str(i+1))\n            plt.savefig(save_dir + 'Correlation_with_template_no' + str(i+1) + '.png')\n            plt.clf()\n\n    #Find the best cross-correlation.\n    ix = np.argmax(peaks)\n    print(\"BEST TEMPLATE:\"+template_fns[ix].split('/')[-1])\n\n    #Recompute and plot the best cross-correlation\n    template_int = template_ints[ix,:]\n    cor = np.correlate(spect_int,template_int,'same')\n    plt.clf()\n    plt.plot(drv*(np.arange(2*smooth_distance)-smooth_distance),\n             cor[int(nwave_log/2)-smooth_distance:int(nwave_log/2)+smooth_distance])\n\n    ##store the figure data for later use\n    outsave = np.array([drv*(np.arange(2*smooth_distance)-smooth_distance),cor[int(nwave_log/2)-smooth_distance:int(nwave_log/2)+smooth_distance]])\n    saveoutname = fig_fn.split('.png')[0] + \"_figdat.pkl\"\n    pickle.dump(outsave,open(saveoutname,\"wb\"))\n\n    
plt.xlabel('Velocity (km/s)')\n plt.ylabel('X Correlation')\n #plt.show()\n fn_ix = template_fns[ix].rfind('/')\n #Dodgy! Need a better way to find a name for the template.\n fn_ix_delta = template_fns[ix][fn_ix:].find(':')\n if fn_ix_delta>0:\n name = template_fns[ix][fn_ix+1:fn_ix+fn_ix_delta]\n name_string=name\n #A little messy !!!\n if name[0]=='p':\n name = name[1:]\n name_string = 'T = ' + name + ' K'\n name_string = template_fns[ix][fn_ix+1:]\n \n #pdb.set_trace()\n #Fit for a precise RV... note that minimize (rather than minimize_scalar) failed more\n #often for spectra that were not good matches.\n modft = np.fft.rfft(template_int)\n #res = op.minimize(rv_fit_mlnlike,rvs[ix]/drv,args=(modft,spect_int,sig_int,gaussian_offset))\n #x = res.x[0]\n #res = op.minimize_scalar(rv_fit_mlnlike,args=(modft,spect_int,sig_int,gaussian_offset),bounds=((rvs[ix]-1)/drv,(rvs[ix]+1)/drv))\n #x = res.x\n #fval = res.fun\n x,fval,ierr,numfunc = op.fminbound(rv_fit_mlnlike,rvs[ix]/drv-5/drv,rvs[ix]/drv+5/drv,args=(modft,spect_int,sig_int,gaussian_offset),full_output=True)\n rv = x*drv\n rv += heliocentric_correction\n ##best model \n shifted_mod = np.fft.irfft(modft * np.exp(-2j * np.pi * np.arange(len(modft))/len(spect_int) * x))\n #pdb.set_trace()\n fplus = rv_fit_mlnlike(x+0.5,modft,spect_int,sig_int,gaussian_offset)\n fminus = rv_fit_mlnlike(x-0.5,modft,spect_int,sig_int,gaussian_offset)\n hess_inv = 0.5**2/(fplus + fminus - 2*fval)\n if (hess_inv < 0) | (fplus < fval) | (fminus < fval):\n #If you get here, then there is a problem with the input spectrum or fitting.\n #raise UserWarning\n print(\"WARNING: Radial velocity fit did not work - trying again with wider range for: \" + fig_fn)\n x,fval,ierr,numfunc = op.fminbound(rv_fit_mlnlike,rvs[ix]/drv-10/drv,rvs[ix]/drv+10/drv,args=(modft,spect_int,sig_int,gaussian_offset),full_output=True)\n rv = x*drv\n #print(\"RV =\"+str(rv)+\", fval =\"+str(fval))\n fplus = rv_fit_mlnlike(x+0.5,modft,spect_int,sig_int,gaussian_offset)\n #print(\"fplus =\"+str(fplus))\n fminus = rv_fit_mlnlike(x-0.5,modft,spect_int,sig_int,gaussian_offset)\n #print(\"fminus =\"+str(fminus))\n hess_inv = 0.5**2/(fplus + fminus - 2*fval)\n #print(\"hess_inv =\"+str(hess_inv))\n #import pdb\n #pdb.set_trace()\n \n if (hess_inv < 0) | (fplus < fval) | (fminus < fval):\n print(\"WARNING: Radial velocity fit did not work, giving up with NaN uncertainty\")\n \n rv_sig = np.sqrt(hess_inv*nwave_log/len(spect)/oversamp)*drv\n\n plt.title('RV, RV_sigma:' + str(rv) + ',' +str(rv_sig))\n plt.savefig(save_dir + 'Best_correlation_temp_' + template_fns[ix].split('/')[-1] + '.png')\n plt.title(name_string + ', RV = {0:4.1f}+/-{1:4.1f} km/s'.format(rv,rv_sig))\n if len(fig_fn) > 0:\n plt.savefig(fig_fn)\n plt.clf()\n plt.plot(wave_log,spect_int)\n plt.plot(wave_log,shifted_mod)\n plt.xlim([6400.0,6700.0])\n plt.title(name_string + ', RV = {0:4.1f}+/-{1:4.1f} km/s'.format(rv,rv_sig))\n if len(fig_fn) > 0:\n fig_fn_new = fig_fn.split('_xcor.png')[0] + 'fitplot.png' \n plt.savefig(fig_fn_new)\n #again save the figure data for use later in making nicer plots with IDL\n outsave = np.array([wave_log,spect_int,shifted_mod])\n saveoutname = fig_fn.split('_xcor.png')[0] + 'fitplot_figdat.pkl'\n pickle.dump(outsave,open(saveoutname,\"wb\"))\n # pdb.set_trace()\n return rv,rv_sig,template_fns[ix].split('/')[-1]\n \ndef calc_rv_todcor(spect,wave,sig, template_fns,bad_intervals=[],fig_fn='',\\\n smooth_distance=201,convolve_template=True, alpha=0.3,\\\n nwave_log=int(1e4),ncor=1000, 
return_fitted=False,jd=0.0,out_fn='',\\\n        heliocentric_correction=0, plotit=False, window_divisor=20):\n    \"\"\"Compute a radial velocity based on a best-fitting template spectrum.\n    Teff is estimated at the same time.\n\n    Parameters\n    ----------\n    spect: array-like\n        The reduced WiFeS spectrum\n\n    wave: array-like\n        The wavelengths corresponding to the reduced WiFeS spectrum\n\n    template_fns: list\n        Two spectral templates (for star 1 and star 2) that can be read in by np.loadtxt\n\n    bad_intervals:\n        List of wavelength intervals where e.g. telluric absorption is bad. For todcor,\n        these can only be smoothed over.\n\n    smooth_distance: float\n        Distance to smooth for \"continuum\" correction\n\n    Returns\n    -------\n    rv1: float\n        Radial velocity of star 1 in km/s\n    rv_sig1: float\n        Uncertainty in radial velocity (NB assumes good model fit)\n    rv2: float\n        Radial velocity of star 2 in km/s\n    rv_sig2: float\n        Uncertainty in radial velocity (NB assumes good model fit)\n    corpeak: float\n        Correlation peak\n    \"\"\"\n    (wave_log, spect_int, sig_int, template_ints) = \\\n        interpolate_spectra_onto_log_grid(spect,wave,sig, template_fns,\\\n            bad_intervals=bad_intervals, smooth_distance=smooth_distance, \\\n            convolve_template=convolve_template, nwave_log=nwave_log)\n\n    drv = np.log(wave_log[1]/wave_log[0])*2.998e5\n\n    #*** Next (hopefully with two templates only!) we continue and apply the TODCOR algorithm.\n\n    window_width = nwave_log//window_divisor\n    ramp = np.arange(1,window_width+1,dtype=float)/window_width\n    window = np.ones(nwave_log)\n    window[:window_width] *= ramp\n    window[-window_width:] *= ramp[::-1]\n\n    template_ints[0] *= window\n    template_ints[1] *= window\n    spect_int *= window\n\n    norm1 = np.sqrt(np.sum(template_ints[0]**2))\n    norm2 = np.sqrt(np.sum(template_ints[1]**2))\n    norm_tgt = np.sqrt(np.sum(spect_int**2))\n\n    #pdb.set_trace()\n    c1 = np.fft.irfft(np.conj(np.fft.rfft(template_ints[0]/norm1))*np.fft.rfft(spect_int/norm_tgt))\n    c1 = np.roll(c1,ncor//2)[:ncor]\n    c2 = np.fft.irfft(np.conj(np.fft.rfft(template_ints[1]/norm2))*np.fft.rfft(spect_int/norm_tgt))\n    c2 = np.roll(c2,ncor//2)[:ncor]\n\n    #Unclear which way around this line should be. ix_c12 sign was corrected in order to\n    #give the right result with simulated data.\n    c12 = np.fft.irfft(np.fft.rfft(template_ints[1]/norm2)*np.conj(np.fft.rfft(template_ints[0]/norm1)))\n    c12 = np.roll(c12,ncor//2)[:ncor]\n    ix = np.arange(ncor).astype(int)\n    xy = np.meshgrid(ix,ix)\n\n    #Correct the flux ratio for the RMS spectral variation. Is this needed???\n    alpha_norm = alpha * norm2/norm1\n    ix_c12 = np.minimum(np.maximum(xy[0]-xy[1]+ncor//2,0),ncor-1) #!!!This was the old line !!!\n    #ix_c12 = np.minimum(np.maximum(xy[1]-xy[0]+ncor//2,0),ncor-1) #XXX New (temporary?) 
line XXX\n    todcor = (c1[xy[0]] + alpha_norm*c2[xy[1]])/np.sqrt(1 + 2*alpha_norm*c12[ix_c12] + alpha_norm**2)\n\n    print(\"Max correlation: {0:5.2f}\".format(np.max(todcor)))\n    #print(alpha_norm)\n    #plt.plot(drv*(np.arange(nwave_log)-nwave_log//2),np.roll(c1,nwave_log//2))\n    #Figure like TODCOR paper:\n    #fig = plt.figure()\n    #ax = fig.gca(projection='3d')\n    #ax.plot_surface(xy[0],xy[1],todcor)\n\n    plt.clf()\n    plt.imshow(todcor, cmap=cm.gray,interpolation='nearest',extent=[-drv*ncor/2,drv*ncor/2,-drv*ncor/2,drv*ncor/2])\n\n    xym = np.unravel_index(np.argmax(todcor), todcor.shape)\n\n    old_fit = False\n    if (old_fit):\n        hw_fit = 1 #2\n\n        if (xym[0]< hw_fit) | (xym[1]< hw_fit) | (xym[0]>= ncor-hw_fit) | (xym[1]>= ncor-hw_fit):\n            print(\"Error: TODCOR peak too close to edge!\")\n            raise UserWarning\n\n        ix_fit = np.arange(-hw_fit, hw_fit + 1).astype(int)\n        xy_fit = np.meshgrid(ix_fit,ix_fit)\n        p_init = models.Gaussian2D(amplitude=np.max(todcor),x_mean=0, y_mean=0,\n                                   x_stddev = 50.0/drv, y_stddev = 50.0/drv)\n        fit_p = fitting.LevMarLSQFitter()\n\n        p = fit_p(p_init, xy_fit[0], xy_fit[1], todcor[xym[0]-hw_fit:xym[0]+hw_fit+1,\n                                                       xym[1]-hw_fit:xym[1]+hw_fit+1])\n\n        rv_x = drv*((p.parameters[1] + xym[1]) - ncor//2)\n        rv_y = drv*((p.parameters[2] + xym[0]) - ncor//2)\n    else:\n        pix = todcor[xym[0]-1:xym[0]+2, xym[1]]\n        xym_frac0 = (pix[2] - pix[0])/(2*pix[1] - pix[0] - pix[2])/2\n        pix = todcor[xym[0], xym[1]-1:xym[1]+2]\n        xym_frac1 = (pix[2] - pix[0])/(2*pix[1] - pix[0] - pix[2])/2\n        rv_x = drv*((xym_frac1 + xym[1]) - ncor//2)\n        rv_y = drv*((xym_frac0 + xym[0]) - ncor//2)\n\n    model_spect = rv_shift_binary(rv_x/drv, rv_y/drv, alpha, np.fft.rfft(template_ints[0]), np.fft.rfft(template_ints[1]))\n\n    if plotit:\n        (wave_log, spect_int_norm, sig_int, template_int_norm) = \\\n            interpolate_spectra_onto_log_grid(spect,wave,sig, template_fns,\\\n                bad_intervals=bad_intervals, smooth_distance=smooth_distance, \\\n                convolve_template=convolve_template, nwave_log=nwave_log, \\\n                subtract_smoothed=False)\n        model_spect_norm = rv_shift_binary(rv_x/drv, rv_y/drv, alpha, \\\n            np.fft.rfft(template_int_norm[0]), np.fft.rfft(template_int_norm[1]))\n        model_spect_prim = rv_shift_binary(rv_x/drv, rv_y/drv, 0, \\\n            np.fft.rfft(template_int_norm[0]), np.fft.rfft(template_int_norm[1]))\n        model_spect_sec = rv_shift_binary(rv_x/drv, rv_y/drv, 1e6, \\\n            np.fft.rfft(template_int_norm[0]), np.fft.rfft(template_int_norm[1]))\n\n        #--- Old divisors as a dodgy attempt to deal with non-normalised\n        #    data... 
---\n        #ss = np.ones(5e2)/5e2\n        #model_ss = np.convolve(model_spect_norm, ss, mode='same')\n        #spect_ss = np.convolve(spect_int_norm, ss, mode='same')\n        #plt.plot(wave_log, model_spect_norm/model_ss, label='Joint Model')\n        #plt.plot(wave_log, model_spect_prim/model_ss/(1+alpha), label='Primary')\n        #plt.plot(wave_log, model_spect_sec/model_ss*alpha/(1+alpha), label='Secondary')\n        #plt.plot(wave_log, spect_int_norm/spect_ss, label='Data')\n\n        plt.clf()\n        plt.plot(wave_log, model_spect_norm, label='Joint Model')\n        plt.plot(wave_log, model_spect_prim/(1+alpha), label='Primary')\n        plt.plot(wave_log, model_spect_sec*alpha/(1+alpha), label='Secondary')\n        plt.plot(wave_log, spect_int_norm, label='Data')\n        plt.legend()\n        plt.axis([3810, 5610, 0, 1.45])\n        plt.xlabel(r'Wavelength ($\\AA$)')\n        plt.ylabel('Flux (normalised)')\n        plt.draw()\n\n    #pdb.set_trace() #XXX\n\n    #Compute theoretical RV uncertainties from the \"Q\" factors...\n    errors = []\n    for i,template_int in enumerate(template_ints):\n        if (i==0):\n            ti = template_int/(1 + alpha)\n        else:\n            ti = template_int*alpha/(1 + alpha)\n        model_spect_deriv = (ti[1:]-ti[:-1])/(wave_log[1:]-wave_log[:-1])\n        wave2_on_s = (0.5*(wave_log[1:]+wave_log[:-1]))**2/(0.5*(ti[1:]+ti[:-1]+2))\n        q_factor = np.sqrt(np.mean(wave2_on_s*model_spect_deriv**2))\n        photon_rv_error = 3e5/q_factor*np.median(sig_int)/np.sqrt(len(spect))\n        errors.append(photon_rv_error)\n        print(\"Q factor: {:5.2f}\".format(q_factor))\n        #plt.clf()\n        #plt.plot(template_int)\n        #plt.pause(.01)\n        #import pdb; pdb.set_trace()\n\n    #ISSUES:\n    #1) Error (below) not computed.\n    #errors = np.sqrt(np.diag(fit_p.fit_info['cov_x']))\n\n    if len(out_fn)>0:\n        outfile = open(out_fn, 'a')\n        outfile.write('{0:12.4f}, {1:8.2f}, {2:8.2f}, {3:8.2f}, {4:8.2f}, {5:8.3f}\\n'.\n            format(jd, rv_x + heliocentric_correction, errors[0], rv_y + heliocentric_correction, errors[1], np.max(todcor)))\n        outfile.close()\n\n    if return_fitted:\n        return wave_log, spect_int, model_spect\n    else:\n        return rv_x, errors[0], rv_y, errors[1], np.max(todcor)\n\ndef rv_process_dir(ddir,template_conv_dir='./ambre_conv/',standards_dir='',outfn='rvs.txt',texfn='rvs.tex',outdir='',mask_ha_emission=False):\n    \"\"\"Process all files in a directory for radial velocities.\n\n    Parameters\n    ----------\n    ddir: string\n        Directory in which to process the WiFeS reduced spectra\n    template_conv_dir: string\n        Directory containing template spectra convolved to WiFeS resolution\n    outfn: string\n        Output filename\"\"\"\n\n    if len(standards_dir)>0:\n        print(\"WARNING: Feature not implemented yet\")\n        raise UserWarning\n    fns = glob.glob(ddir + '/*p08.fits' )\n    #Uncomment to test individual stars in a data-set\n    #pdb.set_trace()\n    #fns = fns[32:33]\n    # If an out directory isn't given, use the data directory.\n    if len(outdir)==0:\n        outdir=ddir\n    outfile = open(outdir + '/' + outfn,'w')\n    outfile.write('#name,filename,ra,dec,bmsplt,mjd,rv,sig_rv,teff \\n')\n    texfile = open(outdir + '/' + texfn,'w')\n    for iii,fn in enumerate(fns):\n        ##pdb.set_trace()\n        h = pyfits.getheader(fn)\n        flux,wave = read_and_find_star_p08(fn,fig_fn=outdir + '/'+ h['OBJNAME'] + '.' + h['OBSID'] + '_star.png')\n        if h['BEAMSPLT']=='RT560':\n            bad_intervals = ([0,5500],[6860,7020],)\n        else:\n            bad_intervals = ([6862,7020],)\n        ##Maybe here decide if the star is e.g. 
a young K/M dwarf with lots of H-alpha emission and\n        ##bad_interval out that section of spectrum; this also works to remove Ae/Be emission which causes issues\n        #pdb.set_trace()\n        if mask_ha_emission:\n            simple_spec = np.sum(np.sum(flux,axis=0),axis=0)\n            harng = np.where((wave > 6560.0) & (wave < 6565.0))[0]\n            crng = np.where((wave > 6500.0) & (wave < 6520.0))[0]\n            cmed = np.median(simple_spec[crng])\n            hamed = np.median(simple_spec[harng])\n            scmed = np.std(simple_spec[crng])*1.253/len(crng)\n            #pdb.set_trace()\n            if hamed > 5.0*scmed+cmed:\n                bad_intervals = bad_intervals+([6550,6580],)\n                print('Removing H-alpha line due to emission')\n            ##pdb.set_trace()\n\n        spectrum,sig = weighted_extract_spectrum(flux)\n        specfn = outdir + '/' + fn[fn.rfind('/')+1:] + '.spec.csv'\n        specfile = open(specfn,'w')\n        for i in range(len(spectrum)):\n            specfile.write('{0:6.2f},{1:6.1f},{2:6.1f}\\n'.format(wave[i],spectrum[i],sig[i]))\n        specfile.close()\n        rv,rv_sig,name = calc_rv_template(spectrum,wave,sig,template_conv_dir, bad_intervals,\\\n            fig_fn=outdir + '/' + h['OBJNAME'] + '.' + h['OBSID'] + '_xcor.png',starnumber=iii+1)\n        outfile.write(h['OBJNAME'] + ','+fn +','+ h['RA'] + ','+ h['DEC'] + ',' + h['BEAMSPLT'] + \\\n            ',{0:10.3f},{1:5.1f},{2:5.1f},'.format(h['MJD-OBS'],rv,rv_sig)+name + ' \\n')\n        texfile.write(h['OBJNAME'] + ' & '+ h['RA'] + ' & '+ h['DEC'] + ' & ' + h['BEAMSPLT'] + \\\n            ' & {0:10.3f} & {1:5.1f} $\\\\pm$ {2:5.1f} & '.format(h['MJD-OBS'],rv,rv_sig) + name + '\\\\\\\\ \\n')\n    outfile.close()\n    texfile.close()\n\nif __name__=='__main__':\n    fn = 'T2m3wb-20210913.142518-0050.fits'\n    if exists(fn):\n        plt.figure(1)\n        flux_stamp,sig_stamp,wave = read_and_find_star_p11(fn)\n        flux, sig = weighted_extract_spectrum(np.transpose(flux_stamp,(1,2,0)))\n        plt.figure(2)\n        plt.clf()\n        plt.plot(wave, flux)\n        plt.ylim([0,np.percentile(flux,99.5)])\n        plt.ylabel(r'Flux Density (erg/s/cm$^2/\\\\AA$)')\n        plt.xlabel(r'Wavelength ($\\\\AA$)')" ]
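The functions interpolate_spectra_onto_log_grid and calc_rv_template in the file above exploit a grid that is uniform in log(wavelength): a Doppler shift then moves every spectral feature by the same number of pixels, so a single cross-correlation lag maps to a velocity through drv = c * ln(wave_log[1]/wave_log[0]). The self-contained sketch below checks this with a synthetic Gaussian line; the numbers are illustrative and are not WiFeS data.

import numpy as np

c_kms = 2.998e5
nwave = 10000
# Logarithmic wavelength grid from 3500 to 7000 Angstroms, as in the file above.
wave_log = 3500.0 * np.exp(np.log(7000.0 / 3500.0) / nwave * np.arange(nwave))
drv = np.log(wave_log[1] / wave_log[0]) * c_kms  # km/s per pixel, ~21 here

def gauss_line(w, centre):
    # A fake emission feature of fixed 2 Angstrom width.
    return np.exp(-0.5 * ((w - centre) / 2.0) ** 2)

spect = gauss_line(wave_log, 5000.0)
# The same feature, red-shifted by 60 km/s.
shifted = gauss_line(wave_log, 5000.0 * (1 + 60.0 / c_kms))

lag = np.argmax(np.correlate(shifted, spect, 'same')) - nwave // 2
print('recovered RV ~ {:.1f} km/s (input 60.0)'.format(lag * drv))

The recovered value is quantised to the nearest pixel (about 21 km/s on this grid); calc_rv_template then refines it to sub-pixel precision by minimising rv_fit_mlnlike with op.fminbound.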
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.imshow", "numpy.sqrt", "matplotlib.pyplot.plot", "numpy.max", "numpy.mean", "numpy.exp", "numpy.where", "numpy.roll", "scipy.interpolate.InterpolatedUnivariateSpline", "numpy.arange", "numpy.std", "numpy.argmax", "numpy.interp", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure", "numpy.log", "matplotlib.pyplot.title", "numpy.min", "numpy.median", "matplotlib.pyplot.savefig", "numpy.log10", "scipy.optimize.fminbound", "numpy.transpose", "numpy.correlate", "numpy.array", "matplotlib.pyplot.ion", "numpy.meshgrid", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "numpy.sum", "numpy.maximum", "numpy.abs", "numpy.fft.rfft", "matplotlib.pyplot.subplots", "numpy.percentile", "numpy.ones", "matplotlib.pyplot.draw", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "numpy.prod", "matplotlib.pyplot.xlabel", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
masmansouri/pyscf
[ "15957003aa26b165a823e7e2d0846a8564b2a10c" ]
[ "setup.py" ]
[ "#!/usr/bin/env python\n# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport sysconfig\nfrom setuptools import setup, find_packages, Extension\n\n#if sys.version_info[0] >= 3: # from Cython 0.14\n# from setuptools.command.build_py import build_py_2to3 as build_py\n#else:\n# from setuptools.command.build_py import build_py\nfrom setuptools.command.install import install\nfrom setuptools.command.build_ext import build_ext\n\ntry:\n import numpy\nexcept ImportError as e:\n print('**************************************************')\n print('* numpy was not installed in your system. Please run')\n print('* pip install numpy')\n print('* before installing pyscf.')\n print('**************************************************')\n raise e\n\ntopdir = os.path.abspath(os.path.join(__file__, '..'))\n\nCLASSIFIERS = [\n'Development Status :: 5 - Production/Stable',\n'Intended Audience :: Science/Research',\n'Intended Audience :: Developers',\n'License :: OSI Approved :: Apache Software License',\n'Programming Language :: C',\n'Programming Language :: Python',\n'Programming Language :: Python :: 2.7',\n'Programming Language :: Python :: 3.4',\n'Programming Language :: Python :: 3.5',\n'Programming Language :: Python :: 3.6',\n'Programming Language :: Python :: 3.7',\n'Programming Language :: Python :: 3.8',\n'Topic :: Software Development',\n'Topic :: Scientific/Engineering',\n'Operating System :: POSIX',\n'Operating System :: Unix',\n'Operating System :: MacOS',\n]\n\nNAME = 'pyscf'\nMAINTAINER = 'Qiming Sun'\nMAINTAINER_EMAIL = '[email protected]'\nDESCRIPTION = 'PySCF: Python-based Simulations of Chemistry Framework'\n#LONG_DESCRIPTION = ''\nURL = 'http://www.pyscf.org'\nDOWNLOAD_URL = 'http://github.com/pyscf/pyscf'\nLICENSE = 'Apache License 2.0'\nAUTHOR = 'Qiming Sun'\nAUTHOR_EMAIL = '[email protected]'\nPLATFORMS = ['Linux', 'Mac OS-X', 'Unix']\ndef get_version():\n with open(os.path.join(topdir, 'pyscf', '__init__.py'), 'r') as f:\n for line in f.readlines():\n if line.startswith('__version__'):\n return eval(line.strip().split(' = ')[1])\n raise ValueError(\"Version string not found\")\nVERSION = get_version()\n\n\nif (sys.platform.startswith('linux') or\n sys.platform.startswith('cygwin') or\n sys.platform.startswith('gnukfreebsd')):\n ostype = 'linux'\n so_ext = '.so'\n LD_LIBRARY_PATH = 'LD_LIBRARY_PATH'\nelif sys.platform.startswith('darwin'):\n ostype = 'mac'\n so_ext = '.dylib'\n LD_LIBRARY_PATH = 'DYLD_LIBRARY_PATH'\n from distutils.sysconfig import get_config_vars\n conf_vars = get_config_vars()\n# setuptools/pip install by default generate \"bundled\" library. 
Bundled\n# library cannot be linked at compile time\n# https://stackoverflow.com/questions/24519863/what-are-the-g-flags-to-build-a-true-so-mh-bundle-shared-library-on-mac-osx\n# configs LDSHARED and CCSHARED and SO are hard coded in lib/pythonX.X/_sysconfigdata.py\n# In some Python versions, setuptools may correct these configs for OS X on the\n# fly by the _customize_compiler_for_shlib function or setup_shlib_compiler function\n# in lib/pythonX.X/site-packages/setuptools/command/build_ext.py.\n# The hacks below ensure that the OS X compiler does not generate bundle\n# libraries. Relevant code:\n#    lib/pythonX.X/_sysconfigdata.py\n#    lib/pythonX.X/distutils/command/build_ext.py\n#    lib/pythonX.X/distutils/sysconfig.py, get_config_vars()\n#    lib/pythonX.X/distutils/ccompiler.py, link_shared_object()\n#    lib/pythonX.X/distutils/unixcompiler.py, link()\n    conf_vars['LDSHARED'] = conf_vars['LDSHARED'].replace('-bundle', '-dynamiclib')\n    conf_vars['CCSHARED'] = \" -dynamiclib\"\n    # On Mac OS, sysconfig.get_config_vars()[\"EXT_SUFFIX\"] was set to\n    # '.cpython-3*m-darwin.so'. The numpy.ctypeslib module uses this parameter to\n    # determine the extension of an external library. The Python distutils module\n    # only generates the library with the extension '.so', which causes an import\n    # error at runtime. numpy.ctypeslib treats '.dylib' as the native\n    # python extension. Setting 'EXT_SUFFIX' to '.dylib' makes distutils\n    # generate the libraries with extension '.dylib', which can be loaded by\n    # numpy.ctypeslib\n    if sys.version_info[0] >= 3:  # python3\n        conf_vars['EXT_SUFFIX'] = '.dylib'\n    else:\n        conf_vars['SO'] = '.dylib'\nelif sys.platform.startswith('win'):\n    ostype = 'windows'\n    so_ext = '.dll'\nelif sys.platform.startswith('aix') or sys.platform.startswith('os400'):\n    ostype = 'aix'\n    so_ext = '.so'\n    LD_LIBRARY_PATH = 'LIBPATH'\n    if(os.environ.get('PYSCF_INC_DIR') is None):\n        os.environ['PYSCF_INC_DIR'] = '/QOpenSys/pkgs:/QOpenSys/usr:/usr:/usr/local'\nelse:\n    raise OSError('Unknown platform')\n\n#if 'CC' in os.environ:\n#    compiler = os.environ['CC'].split()[0]\n#else:\n#    compiler = sysconfig.get_config_var(\"CC\").split()[0]\n#if 'gcc' in compiler or 'g++' in compiler: # GNU compiler\n#    so_ext = '.so'\n\n#\n# default include and library path\n#\ndef check_version(version_to_test, version_min):\n    return version_to_test.split('.') >= version_min.split('.')\n\n# version : the lowest version\ndef search_lib_path(libname, extra_paths=None, version=None):\n    paths = os.environ.get(LD_LIBRARY_PATH, '').split(os.pathsep)\n    if 'PYSCF_INC_DIR' in os.environ:\n        PYSCF_INC_DIR = os.environ['PYSCF_INC_DIR'].split(os.pathsep)\n        for p in PYSCF_INC_DIR:\n            paths = [p, os.path.join(p, 'lib'), os.path.join(p, '..', 'lib')] + paths\n    if extra_paths is not None:\n        paths += extra_paths\n\n    len_libname = len(libname)\n    for path in paths:\n        full_libname = os.path.join(path, libname)\n        if os.path.isfile(full_libname):\n            if version is None or ostype == 'mac':\n                return os.path.abspath(path)\n            #elif ostype == 'mac':\n            #    for f in os.listdir(path):\n            #        f_name = f[:len_libname+1-len(so_ext)]\n            #        f_version = f[len_libname+1-len(so_ext):-len(so_ext)]\n            #        if (f_name == libname[:len_libname+1-len(so_ext)] and f_version and\n            #            check_version(f_version, version)):\n            #            return os.path.abspath(path)\n            else:\n                for f in os.listdir(path):\n                    f_name = f[:len_libname]\n                    f_version = f[len_libname+1:]\n                    if (f_name == libname and f_version and\n                        check_version(f_version, version)):\n                        return os.path.abspath(path)\n\ndef search_inc_path(incname, 
extra_paths=None):\n    paths = os.environ.get(LD_LIBRARY_PATH, '').split(os.pathsep)\n    if 'PYSCF_INC_DIR' in os.environ:\n        PYSCF_INC_DIR = os.environ['PYSCF_INC_DIR'].split(os.pathsep)\n        for p in PYSCF_INC_DIR:\n            paths = [p, os.path.join(p, 'include'), os.path.join(p, '..', 'include')] + paths\n    if extra_paths is not None:\n        paths += extra_paths\n    for path in paths:\n        full_incname = os.path.join(path, incname)\n        if os.path.exists(full_incname):\n            return os.path.abspath(path)\n\nif 'LDFLAGS' in os.environ:\n    blas_found = any(x in os.environ['LDFLAGS']\n                     for x in ('blas', 'atlas', 'openblas', 'mkl', 'Accelerate'))\nelse:\n    blas_found = False\n\nblas_include = []\nblas_lib_dir = []\nblas_libraries = []\nblas_extra_link_flags = []\nblas_extra_compile_flags = []\nif not blas_found:\n    np_blas = numpy.__config__.get_info('blas_opt')\n    blas_include = np_blas.get('include_dirs', [])\n    blas_lib_dir = np_blas.get('library_dirs', [])\n    blas_libraries = np_blas.get('libraries', [])\n    blas_path_guess = [search_lib_path('lib'+x+so_ext, blas_lib_dir)\n                       for x in blas_libraries]\n    blas_extra_link_flags = np_blas.get('extra_link_args', [])\n    blas_extra_compile_flags = np_blas.get('extra_compile_args', [])\n    if ostype == 'mac':\n        if blas_extra_link_flags:\n            blas_found = True\n    else:\n        if None not in blas_path_guess:\n            blas_found = True\n            blas_lib_dir = list(set(blas_path_guess))\n\nif not blas_found:  # for MKL\n    mkl_path_guess = search_lib_path('libmkl_rt'+so_ext, blas_lib_dir)\n    if mkl_path_guess is not None:\n        blas_libraries = ['mkl_rt']\n        blas_lib_dir = [mkl_path_guess]\n        blas_found = True\n        print(\"Using MKL library in %s\" % mkl_path_guess)\n\nif not blas_found:\n    possible_blas = ('blas', 'atlas', 'openblas')\n    for x in possible_blas:\n        blas_path_guess = search_lib_path('lib'+x+so_ext, blas_lib_dir)\n        if blas_path_guess is not None:\n            blas_libraries = [x]\n            blas_lib_dir = [blas_path_guess]\n            blas_found = True\n            print(\"Using BLAS library %s in %s\" % (x, blas_path_guess))\n            break\n\nif not blas_found:\n    print(\"****************************************************************\")\n    print(\"*** WARNING: BLAS library not found.\")\n    print(\"* You can include the BLAS library in the global environment LDFLAGS, eg\")\n    print(\"* export LDFLAGS='-L/path/to/blas/lib -lblas'\")\n    print(\"* or specify the BLAS library path in PYSCF_INC_DIR\")\n    print(\"* export PYSCF_INC_DIR=/path/to/blas/lib:/path/to/other/lib\")\n    print(\"****************************************************************\")\n    raise RuntimeError\n\ndistutils_lib_dir = 'lib.{platform}-{version[0]}.{version[1]}'.format(\n    platform=sysconfig.get_platform(),\n    version=sys.version_info)\n\npyscf_lib_dir = os.path.join(topdir, 'pyscf', 'lib')\nbuild_lib_dir = os.path.join('build', distutils_lib_dir, 'pyscf', 'lib')\ndefault_lib_dir = [build_lib_dir] + blas_lib_dir\ndefault_include = ['.', 'build', pyscf_lib_dir] + blas_include\n\nif not os.path.exists(os.path.join(topdir, 'build')):\n    os.mkdir(os.path.join(topdir, 'build'))\nwith open(os.path.join(topdir, 'build', 'config.h'), 'w') as f:\n    f.write('''\n#if defined _OPENMP\n#include <omp.h>\n#else\n#define omp_get_thread_num() 0\n#define omp_get_num_threads() 1\n#endif\n#define WITH_RANGE_COULOMB\n#define FINT int\n''')\n\ndef make_ext(pkg_name, relpath, srcs, libraries=[], library_dirs=default_lib_dir,\n             include_dirs=default_include, extra_compile_flags=[],\n             extra_link_flags=[], **kwargs):\n    if '/' in relpath:\n        relpath = os.path.join(*relpath.split('/'))\n    if (os.path.isfile(os.path.join(pyscf_lib_dir, 
'build', 'CMakeCache.txt')) and\n os.path.isfile(os.path.join(pyscf_lib_dir, *pkg_name.split('.')) + so_ext)):\n return None\n else:\n if sys.platform.startswith('darwin'):\n soname = pkg_name.split('.')[-1]\n extra_link_flags = extra_link_flags + ['-install_name', '@loader_path/'+soname+so_ext]\n runtime_library_dirs = []\n elif sys.platform.startswith('aix') or sys.platform.startswith('os400'):\n extra_compile_flags = extra_compile_flags + ['-fopenmp']\n extra_link_flags = extra_link_flags + ['-lblas', '-lgomp', '-Wl,-brtl']\n runtime_library_dirs = ['$ORIGIN', '.']\n else:\n extra_compile_flags = extra_compile_flags + ['-fopenmp']\n extra_link_flags = extra_link_flags + ['-fopenmp']\n runtime_library_dirs = ['$ORIGIN', '.']\n srcs = make_src(relpath, srcs)\n return Extension(pkg_name, srcs,\n libraries = libraries,\n library_dirs = library_dirs,\n include_dirs = include_dirs + [os.path.join(pyscf_lib_dir,relpath)],\n extra_compile_args = extra_compile_flags,\n extra_link_args = extra_link_flags,\n# Be careful with the ld flag \"-Wl,-R$ORIGIN\" in the shell.\n# When numpy.distutils is imported, the default CCompiler of distutils will be\n# overwritten. Compilation is executed in shell and $ORIGIN will be converted to ''\n runtime_library_dirs = runtime_library_dirs,\n **kwargs)\n\ndef make_src(relpath, srcs):\n srcpath = os.path.join(pyscf_lib_dir, relpath)\n abs_srcs = []\n for src in srcs.split():\n if '/' in src:\n abs_srcs.append(os.path.relpath(os.path.join(srcpath, *src.split('/'))))\n else:\n abs_srcs.append(os.path.relpath(os.path.join(srcpath, src)))\n return abs_srcs\n\n#\n# Check libcint\n#\nextensions = []\nif 1:\n libcint_lib_path = search_lib_path('libcint'+so_ext, [pyscf_lib_dir,\n os.path.join(pyscf_lib_dir, 'deps', 'lib'),\n os.path.join(pyscf_lib_dir, 'deps', 'lib64')],\n version='4')\n libcint_inc_path = search_inc_path('cint.h', [pyscf_lib_dir,\n os.path.join(pyscf_lib_dir, 'deps', 'include')])\n if libcint_lib_path and libcint_inc_path:\n print(\"****************************************************************\")\n print(\"* libcint found in %s.\" % libcint_lib_path)\n print(\"****************************************************************\")\n default_lib_dir += [libcint_lib_path]\n default_include += [libcint_inc_path]\n else:\n srcs = '''g3c2e.c breit.c fblas.c rys_roots.c g2e_coulerf.c misc.c\ncint3c1e_a.c cint2c2e.c cint1e.c cint1e_a.c g2e.c cint_bas.c g1e.c\ncart2sph.c cint2e_coulerf.c optimizer.c g2c2e.c c2f.c cint3c1e.c\ng3c1e.c cint3c2e.c g4c1e.c cint2e.c autocode/intor4.c\nautocode/int3c1e.c autocode/int3c2e.c autocode/dkb.c autocode/breit1.c\nautocode/gaunt1.c autocode/grad1.c autocode/intor2.c autocode/intor3.c\nautocode/hess.c autocode/intor1.c autocode/grad2.c'''\n if os.path.exists(os.path.join(pyscf_lib_dir, 'libcint')):\n extensions.append(\n make_ext('pyscf.lib.libcint', 'libcint/src', srcs, blas_libraries,\n extra_compile_flags=blas_extra_compile_flags,\n extra_link_flags=blas_extra_link_flags)\n )\n default_include.append(os.path.join(pyscf_lib_dir, 'libcint','src'))\n else:\n print(\"****************************************************************\")\n print(\"*** WARNING: libcint library not found.\")\n print(\"* You can download libcint library from http://github.com/sunqm/libcint\")\n print(\"* May need to set PYSCF_INC_DIR if libcint library was not installed in the\")\n print(\"* system standard install path (/usr, /usr/local, etc). 
Eg\")\n print(\"* export PYSCF_INC_DIR=/path/to/libcint:/path/to/other/lib\")\n print(\"****************************************************************\")\n raise RuntimeError\n\nextensions += [\n make_ext('pyscf.lib.libnp_helper', 'np_helper',\n 'condense.c npdot.c omp_reduce.c pack_tril.c transpose.c',\n blas_libraries,\n extra_compile_flags=blas_extra_compile_flags,\n extra_link_flags=blas_extra_link_flags),\n make_ext('pyscf.lib.libcgto', 'gto',\n '''fill_int2c.c fill_nr_3c.c fill_r_3c.c fill_int2e.c ft_ao.c\n grid_ao_drv.c fastexp.c deriv1.c deriv2.c nr_ecp.c nr_ecp_deriv.c\n autocode/auto_eval1.c ft_ao_deriv.c fill_r_4c.c''',\n ['cint', 'np_helper']),\n make_ext('pyscf.lib.libcvhf', 'vhf',\n '''fill_nr_s8.c nr_incore.c nr_direct.c optimizer.c nr_direct_dot.c\n time_rev.c r_direct_o1.c rkb_screen.c r_direct_dot.c\n rah_direct_dot.c rha_direct_dot.c hessian_screen.c''',\n ['cgto', 'np_helper', 'cint']),\n make_ext('pyscf.lib.libao2mo', 'ao2mo',\n 'restore_eri.c nr_ao2mo.c nr_incore.c r_ao2mo.c',\n ['cvhf', 'cint', 'np_helper']),\n make_ext('pyscf.lib.libcc', 'cc',\n 'ccsd_pack.c ccsd_grad.c ccsd_t.c uccsd_t.c',\n ['cvhf', 'ao2mo', 'np_helper']),\n make_ext('pyscf.lib.libfci', 'mcscf',\n '''fci_contract.c fci_contract_nosym.c fci_rdm.c fci_string.c\n fci_4pdm.c select_ci.c''',\n ['np_helper']),\n make_ext('pyscf.lib.libmcscf', 'mcscf', 'nevpt_contract.c',\n ['fci', 'cvhf', 'ao2mo']),\n make_ext('pyscf.lib.libri', 'ri', 'r_df_incore.c',\n ['cint', 'ao2mo', 'np_helper']),\n make_ext('pyscf.lib.libhci', 'hci', 'hci.c', ['np_helper']),\n make_ext('pyscf.lib.libpbc', 'pbc', 'ft_ao.c optimizer.c fill_ints.c grid_ao.c',\n ['cgto', 'cint']),\n make_ext('pyscf.lib.libmbd', os.path.join('extras', 'mbd'), 'dipole.c', []),\n make_ext('pyscf.lib.libdft', 'dft',\n '''CxLebedevGrid.c grid_basis.c nr_numint.c r_numint.c\n numint_uniform_grid.c''',\n ['cvhf', 'cgto', 'cint', 'np_helper']),\n]\n\n#\n# Check libxc\n#\nDFT_AVAILABLE = 0\nif 1:\n libxc_lib_path = search_lib_path('libxc'+so_ext, [pyscf_lib_dir,\n os.path.join(pyscf_lib_dir, 'deps', 'lib'),\n os.path.join(pyscf_lib_dir, 'deps', 'lib64')],\n version='5')\n libxc_inc_path = search_inc_path('xc.h', [pyscf_lib_dir,\n os.path.join(pyscf_lib_dir, 'deps', 'include')])\n if libxc_lib_path and libxc_inc_path:\n print(\"****************************************************************\")\n print(\"* libxc found in %s.\" % libxc_lib_path)\n print(\"****************************************************************\")\n default_lib_dir += [libxc_lib_path]\n default_include += [libxc_inc_path]\n extensions += [\n make_ext('pyscf.lib.libxc_itrf', 'dft', 'libxc_itrf.c', ['xc']),\n ]\n DFT_AVAILABLE = 1\n else:\n print(\"****************************************************************\")\n print(\"*** WARNING: libxc library not found.\")\n print(\"* You can download libxc library from http://www.tddft.org/programs/libxc/down.php?file=4.3.4/libxc-4.3.4.tar.gz\")\n print(\"* libxc library needs to be compiled with the flag --enable-shared\")\n print(\"* May need to set PYSCF_INC_DIR if libxc library was not installed in the\")\n print(\"* system standard install path (/usr, /usr/local, etc). 
Eg\")\n print(\"* export PYSCF_INC_DIR=/path/to/libxc:/path/to/other/lib\")\n print(\"****************************************************************\")\n\n#\n# Check xcfun\n#\nif 1:\n xcfun_lib_path = search_lib_path('libxcfun'+so_ext, [pyscf_lib_dir,\n os.path.join(pyscf_lib_dir, 'deps', 'lib'),\n os.path.join(pyscf_lib_dir, 'deps', 'lib64')])\n xcfun_inc_path = search_inc_path('xcfun.h', [pyscf_lib_dir,\n os.path.join(pyscf_lib_dir, 'deps', 'include', 'XCFun')])\n if xcfun_lib_path and xcfun_inc_path:\n print(\"****************************************************************\")\n print(\"* xcfun found in %s.\" % xcfun_lib_path)\n print(\"****************************************************************\")\n default_lib_dir += [xcfun_lib_path]\n default_include += [xcfun_inc_path]\n extensions += [\n make_ext('pyscf.lib.libxcfun_itrf', 'dft', 'xcfun_itrf.c', ['xcfun']),\n ]\n DFT_AVAILABLE = 1\n\nextensions = [x for x in extensions if x is not None]\n\nclass PostInstallCommand(install):\n \"\"\"Post-installation for installation mode.\"\"\"\n def run(self):\n install.run(self)\n if not DFT_AVAILABLE:\n print(\"****************************************************************\")\n print(\"*** WARNING: DFT is not available.\")\n print(\"****************************************************************\")\n\n# Python ABI updates since 3.5\n# https://www.python.org/dev/peps/pep-3149/\nclass BuildExtWithoutPlatformSuffix(build_ext):\n def get_ext_filename(self, ext_name):\n from distutils.sysconfig import get_config_var\n ext_path = ext_name.split('.')\n filename = build_ext.get_ext_filename(self, ext_name)\n name, ext_suffix = os.path.splitext(filename)\n return os.path.join(*ext_path) + ext_suffix\n\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n url=URL,\n download_url=DOWNLOAD_URL,\n license=LICENSE,\n classifiers=CLASSIFIERS,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n platforms=PLATFORMS,\n #package_dir={'pyscf': 'pyscf'}, # packages are under directory pyscf\n #include *.so *.dat files. They are now placed in MANIFEST.in\n #package_data={'': ['*.so', '*.dylib', '*.dll', '*.dat']},\n include_package_data=True, # include everything in source control\n packages=find_packages(exclude=['*dmrgscf*', '*fciqmcscf*', '*icmpspt*',\n '*shciscf*', '*xianci*', '*nao*',\n '*future*', '*test*', '*examples*',\n '*setup.py']),\n ext_modules=extensions,\n cmdclass={'build_ext': BuildExtWithoutPlatformSuffix,\n 'install': PostInstallCommand},\n install_requires=['numpy>1.8,!=1.16,!=1.17', 'scipy<1.5', 'h5py>2.2'],\n extras_require={\n 'geomopt': ['pyberny>=0.6.2', 'geometric>=0.9.7.2'],\n },\n setup_requires = ['numpy'],\n)\n\n" ]
[ [ "numpy.__config__.get_info" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LedioTerolli/DrawToMars
[ "e3e476a218ced9dd9839b49f193846651d4e4c97" ]
[ "image_process.py" ]
[ "from win32api import GetSystemMetrics\nfrom math import sqrt\nimport numpy as np\nimport imutils\nimport cv2\n\n\nclass Obj:\n\n def __init__(self, tup_coor, coor, area, peri, center):\n self.tup_coor = tup_coor\n self.coor = coor\n self.area = area\n self.peri = peri\n self.center = center\n\n\n# removing sides less than 10 px\ndef shape_eval(list, tolerance):\n if len(list) <= 3:\n return list\n\n distance_list = []\n\n for i in range(len(list) - 1):\n distance = int(sqrt(\n ((list.item((i, 0)) - list.item((i + 1, 0))) ** 2) + ((list.item((i, 1)) - list.item((i + 1, 1))) ** 2)))\n distance_list.append(distance)\n\n special_dis = int(sqrt(\n ((list.item((0, 0)) - list.item((-1, 0))) ** 2) + ((list.item((0, 1)) - list.item((-1, 1))) ** 2)))\n distance_list.append(special_dis)\n\n index_min = distance_list.index(min(distance_list))\n sorted_list = sorted(distance_list)\n\n while sorted_list[0] < tolerance:\n list = np.delete(list, index_min, 0)\n\n distance_list = []\n\n for i in range(len(list) - 1):\n distance = int(sqrt(\n ((list.item((i, 0)) - list.item((i + 1, 0))) ** 2) + (\n (list.item((i, 1)) - list.item((i + 1, 1))) ** 2)))\n distance_list.append(distance)\n\n special_dis = int(sqrt(\n ((list.item((0, 0)) - list.item((-1, 0))) ** 2) + ((list.item((0, 1)) - list.item((-1, 1))) ** 2)))\n distance_list.append(special_dis)\n\n index_min = distance_list.index(min(distance_list))\n sorted_list = sorted(distance_list)\n\n return list\n\n\ndef get_data(image):\n filename = image\n # filename = \"%s\" % filename\n img = cv2.imread(filename)\n screen_size_x = GetSystemMetrics(0)\n screen_size_y = GetSystemMetrics(1)\n bg = np.zeros((screen_size_y, screen_size_x, 3), np.uint8)\n\n hor_size = img.shape[1]\n ver_size = img.shape[0]\n\n # rotate & resize\n if ver_size > hor_size:\n img = imutils.rotate_bound(img, -90)\n hor_size = img.shape[1]\n ver_size = img.shape[0]\n max_dim = max(hor_size, ver_size)\n rule = 700\n r = rule / img.shape[1]\n dim = (int(rule), int(img.shape[0] * r))\n if max_dim > rule:\n img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n\n # thresh & edge\n img = img[0:screen_size_y, 0:screen_size_x]\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n blurred_frame = cv2.GaussianBlur(gray, (9, 9), 0)\n thresh = cv2.adaptiveThreshold(blurred_frame, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n edge = cv2.Canny(thresh, 100, 200)\n _, cnts, _ = cv2.findContours(edge, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n total = 0\n list_obj = []\n\n for c in cnts:\n area = int(cv2.contourArea(c))\n if area < 1000:\n i = 0.05\n else:\n i = 0.01\n\n if area > 100:\n perimeter = cv2.arcLength(c, True)\n perimeter = round(perimeter, 2)\n\n epsilon = i * cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, epsilon, True)\n apr = np.vstack(approx).squeeze()\n apr = shape_eval(apr, 10)\n\n if len(apr) < 3:\n continue\n\n data = str(len(apr))\n\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n center = [cX, cY]\n\n cv2.drawContours(img, [approx], 0, (0, 255, 0), 2)\n # cv2.rectangle(img, (cX + 5, cY - 20), (cX + 20 * len(data), cY - 5), (255, 255, 255), -1)\n # cv2.putText(img, data, (cX + 5, cY - 7), cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, (0, 0, 0), 2)\n cv2.circle(img, (cX, cY), 3, (0, 0, 255), -1)\n\n enlarge_rate_x = screen_size_x * 1.0 / max(img.shape[0], img.shape[1])\n enlarge_rate_y = screen_size_y * 1.0 / min(img.shape[0], img.shape[1])\n\n for i in range(len(apr)):\n apr[i, 0] = apr.item((i, 0)) * enlarge_rate_x\n 
apr[i, 1] = apr.item((i, 1)) * enlarge_rate_y\n\n center[0] = center[0] * enlarge_rate_x\n center[1] = center[1] * enlarge_rate_y\n\n xp = apr[:, 0]\n yp = apr[:, 1]\n tup_coor = list(zip(xp, yp))\n list_obj.append(Obj(tup_coor, apr, area, perimeter, center))\n total += 1\n\n return edge, img, list_obj\n\n# edge, new_img, list_poly = get_data(\"images/p15.jpg\")\n# cv2.imshow(\"edge\", edge)\n# cv2.imshow(\"final\", new_img)\n# cv2.waitKey(0)\n" ]
[ [ "numpy.delete", "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ilham-bintang/ScanSSD
[ "a7298bd3fb105b7d4fb2ea7054dcc87182be3667", "a7298bd3fb105b7d4fb2ea7054dcc87182be3667" ]
[ "layers/functions/prior_box.py", "gtdb/resize_gt.py" ]
[ "from __future__ import division\nfrom math import sqrt as sqrt\nfrom itertools import product as product\nimport torch\n\n\nclass PriorBox(object):\n \"\"\"Compute priorbox coordinates in center-offset form for each source\n feature map.\n \"\"\"\n def __init__(self, args, cfg):\n super(PriorBox, self).__init__()\n self.image_size = cfg['min_dim']\n # number of priors for feature map location (either 4 or 6)\n self.num_priors = len(cfg['aspect_ratios'])\n self.variance = cfg['variance'] or [0.1]\n self.feature_maps = cfg['feature_maps']\n self.min_sizes = cfg['min_sizes']\n self.max_sizes = cfg['max_sizes']\n self.steps = cfg['steps']\n self.aspect_ratios = cfg['aspect_ratios']\n self.clip = cfg['clip']\n self.version = cfg['name']\n self.is_vertical_prior_boxes_enabled = cfg['is_vertical_prior_boxes_enabled']\n self.args = args\n for v in self.variance:\n if v <= 0:\n raise ValueError('Variances must be greater than 0')\n\n def forward(self):\n mean = []\n\n for k, f in enumerate(self.feature_maps):\n for i, j in product(range(f), repeat=2):\n f_k = self.image_size / self.steps[k]\n\n # unit center x,y\n cx = (j + 0.5) / f_k\n cy = (i + 0.5) / f_k\n\n # aspect_ratio: 1\n # rel size: min_size\n s_k = self.min_sizes[k]/self.image_size\n mean += [cx, cy, s_k, s_k]\n\n # aspect_ratio: 1\n # rel size: sqrt(s_k * s_(k+1))\n s_k_prime = sqrt(s_k * (self.max_sizes[k]/self.image_size))\n mean += [cx, cy, s_k_prime, s_k_prime]\n\n # rest of aspect ratios\n for ar in self.aspect_ratios[k]:\n mean += [cx, cy, s_k*sqrt(ar), s_k/sqrt(ar)]\n\n if self.is_vertical_prior_boxes_enabled:\n mean += [cx, cy, s_k/sqrt(ar), s_k*sqrt(ar)]\n\n # back to torch land\n output = torch.Tensor(mean).view(-1, 4)\n if self.clip:\n output.clamp_(max=1, min=0)\n return output\n", "# Author: Parag Mali\n# This script resizes ground truth based on the given final width and height\n# NOTE: It is no longer needed, as ground truth modification is done in the data loader\n\n# read the image\nimport numpy as np\nimport cv2\nimport copy\nimport os\nimport sys\nfrom multiprocessing import Pool\n\n# Default parameters for thr GTDB dataset\nfinal_width = 512\nfinal_height = 512\n\nmath_dir='/home/psm2208/data/GTDB/annotations/'\nchar_dir='/home/psm2208/data/GTDB/char_annotations/'\nimage_dir='/home/psm2208/data/GTDB/images/'\noutput_image_dir='/home/psm2208/data/GTDB/processed_images_/'\noutput_math_dir='/home/psm2208/data/GTDB/processed_annotations_/'\noutput_char_dir='/home/psm2208/data/GTDB/processed_char_annotations_/'\n\n# This function generates resized gt\ndef resize_gt(pdf_name ='Alford94'):\n\n # find all the images\n image_filelist = [file for file in os.listdir(os.path.join(image_dir, pdf_name)) if file.endswith('.png')]\n\n # math annotations\n math_filepath = os.path.join(math_dir, pdf_name + \".math\")\n math_file_present = os.path.isfile(math_filepath)\n\n # char annotations\n char_filepath = os.path.join(char_dir, pdf_name + \".char\")\n char_file_present = os.path.isfile(char_filepath)\n\n if math_file_present:\n math_file = open(math_filepath, 'r')\n\n boxes = {}\n\n for line in math_file:\n box = line.split(\",\")\n idx = int(box[0]) + 1\n box = box[1:]\n\n box = list(map(int, box))\n\n if idx not in boxes:\n boxes[idx] = []\n\n boxes[idx].append(box)\n\n if char_file_present:\n char_file = open(char_filepath, 'r')\n\n char_boxes = {}\n\n for line in char_file:\n char_box = line.split(\",\")\n idx = int(char_box[0]) + 1\n char_box = char_box[2:]\n\n #box = list(map(int, box))\n\n if idx not in char_boxes:\n 
char_boxes[idx] = []\n\n char_boxes[idx].append(char_box)\n\n\n for image_filepath in image_filelist:\n\n #os.path.basename\n image = cv2.imread(os.path.join(image_dir, pdf_name, image_filepath))\n basename = os.path.basename(image_filepath)\n page_id = int(os.path.splitext(basename)[0])\n\n original_width = image.shape[1]\n original_height = image.shape[0]\n\n resized_image = cv2.imread(os.path.join(resized_image_dir, pdf_name, image_filepath))\n intermediate_width = resized_image.shape[1]\n intermediate_height = resized_image.shape[0]\n\n intermediate_width_ratio = intermediate_width / original_width\n intermediate_height_ratio = intermediate_height / original_height\n\n final_width_ratio = final_width * intermediate_width_ratio / intermediate_width\n final_height_ratio = final_height * intermediate_height_ratio / intermediate_height\n\n final_image = cv2.resize(resized_image, (final_height, final_width))\n\n if math_file_present:\n if page_id in boxes:\n current_boxes = boxes[page_id]\n else:\n current_boxes = []\n\n # preprocess the boxes\n for box in current_boxes:\n\n box[0] = int(np.round(box[0] * final_width_ratio))\n box[1] = int(np.round(box[1] * final_height_ratio))\n box[2] = int(np.round(box[2] * final_width_ratio))\n box[3] = int(np.round(box[3] * final_height_ratio))\n\n #cv2.rectangle(final_image, (box[0], box[1]), (box[2], box[3]), (255,0,0))\n\n #cv2.imwrite(\"/home/psm2208/test.png\", final_image)\n\n if char_file_present:\n if page_id in char_boxes:\n current_char_boxes = char_boxes[page_id]\n else:\n current_char_boxes = []\n\n # preprocess the boxes\n for box in current_char_boxes:\n\n box[0] = int(np.round(float(box[0]) * final_width_ratio))\n box[1] = int(np.round(float(box[1]) * final_height_ratio))\n box[2] = int(np.round(float(box[2]) * final_width_ratio))\n box[3] = int(np.round(float(box[3]) * final_height_ratio))\n\n\n # create required dirs\n if not os.path.exists(os.path.join(output_image_dir, pdf_name)):\n os.makedirs(os.path.join(output_image_dir, pdf_name))\n\n if not os.path.exists(os.path.join(output_math_dir, pdf_name)):\n os.makedirs(os.path.join(output_math_dir, pdf_name))\n\n if not os.path.exists(os.path.join(output_char_dir, pdf_name)):\n os.makedirs(os.path.join(output_char_dir, pdf_name))\n\n print('Processing image : ', pdf_name, \"/\", page_id)\n\n # save the final image\n cv2.imwrite(os.path.join(output_image_dir, pdf_name, str(page_id) + \".png\"), final_image)\n\n if math_file_present:\n out_math_file = os.path.join(output_math_dir, pdf_name, str(page_id) + \".pmath\")\n out_math = open(out_math_file, \"w\")\n\n for box in current_boxes:\n out_math.write(','.join(str(x) for x in box) + \"\\n\")\n\n out_math.close()\n\n if char_file_present:\n out_char_file = os.path.join(output_char_dir, pdf_name, str(page_id) + \".pchar\")\n out_char = open(out_char_file, \"w\")\n\n for box in current_char_boxes:\n out_char.write(','.join(str(x) for x in box) + \"\\n\")\n\n out_char.close()\n\n\n# check if two rectangles intersect\ndef intersects(first, other):\n return not (first[2] < other[0] or\n first[0] > other[2] or\n first[1] > other[3] or\n first[3] < other[1])\n\nif __name__ == '__main__':\n\n training_pdf_names = open(sys.argv[1], 'r') # train_pdf\n\n training_pdf_names_list = []\n\n # for each training image pdf file\n for pdf_name in training_pdf_names:\n pdf_name = pdf_name.strip()\n if pdf_name != '':\n training_pdf_names_list.append(pdf_name)\n\n if not os.path.exists(os.path.join(output_image_dir, pdf_name)):\n 
os.makedirs(os.path.join(output_image_dir, pdf_name))\n\n if not os.path.exists(os.path.join(output_math_dir, pdf_name)):\n os.makedirs(os.path.join(output_math_dir, pdf_name))\n\n if not os.path.exists(os.path.join(output_char_dir, pdf_name)):\n os.makedirs(os.path.join(output_char_dir, pdf_name))\n\n training_pdf_names.close()\n\n size = \"512\"\n dpi = \"150\"\n\n suffix = dpi\n\n char_dir = '/home/psm2208/data/GTDB/char_annotations/'\n math_dir = '/home/psm2208/data/GTDB/annotations/'\n image_dir = '/home/psm2208/data/GTDB/images/'\n\n resized_image_dir = \"/home/psm2208/data/GTDB/resized_images_\" + suffix\n output_image_dir = '/home/psm2208/data/GTDB/processed_images_' + suffix\n output_math_dir = '/home/psm2208/data/GTDB/processed_annotations_' + suffix\n output_char_dir = '/home/psm2208/data/GTDB/processed_char_annotations_' + suffix\n\n # create required dirs\n if not os.path.exists(output_image_dir):\n os.makedirs(output_image_dir)\n\n if not os.path.exists(output_math_dir):\n os.makedirs(output_math_dir)\n\n if not os.path.exists(output_char_dir):\n os.makedirs(output_char_dir)\n\n pool = Pool(processes=24)\n pool.map(resize_gt, training_pdf_names_list)\n pool.close()\n pool.join()\n" ]
[ [ "torch.Tensor" ], [ "numpy.round" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mdaquin/fca.js
[ "b565a593da314c3780e169c36cd4a5f0eaceb0f5" ]
[ "example_data/extract_attributes.py" ]
[ "import json\nimport requests\nimport numpy\n\nproperties = {}\n\nwith open(\"country_data.json\") as f:\n data = json.load(f)\n for c in data:\n for k in c[\"claims\"].keys():\n if k not in properties:\n properties[k] = {\"count\": 0, \"datatypes\": [], \"values\": [], \"all\": []}\n properties[k][\"count\"] += 1\n if len(c[\"claims\"][k]) > 0:\n v = c[\"claims\"][k][0]\n dt = v[\"mainsnak\"][\"datatype\"]\n if \"datavalue\" in v[\"mainsnak\"]:\n if not isinstance(v[\"mainsnak\"][\"datavalue\"][\"value\"],dict):\n a = v[\"mainsnak\"][\"datavalue\"][\"value\"]\n if a not in properties[k][\"values\"]:\n properties[k][\"values\"].append(a) \n elif \"amount\" in v[\"mainsnak\"][\"datavalue\"][\"value\"]:\n a = v[\"mainsnak\"][\"datavalue\"][\"value\"][\"amount\"]\n if a not in properties[k][\"values\"]:\n properties[k][\"values\"].append(a)\n properties[k][\"all\"].append(float(a))\n elif \"text\" in v[\"mainsnak\"][\"datavalue\"][\"value\"]:\n a = v[\"mainsnak\"][\"datavalue\"][\"value\"][\"text\"]\n if a not in properties[k][\"values\"]:\n properties[k][\"values\"].append(a)\n elif \"id\" in v[\"mainsnak\"][\"datavalue\"][\"value\"]:\n a = v[\"mainsnak\"][\"datavalue\"][\"value\"][\"id\"]\n if a not in properties[k][\"values\"]:\n properties[k][\"values\"].append(a)\n else:\n print(v[\"mainsnak\"])\n else:\n print(v) \n if dt not in properties[k][\"datatypes\"]:\n properties[k][\"datatypes\"].append(dt)\n\nprops = []\n \nfor k in properties:\n if float(properties[k][\"count\"])/float(len(data)) > 0.90 and (\"quantity\" in properties[k][\"datatypes\"] or (len(properties[k][\"values\"]) < 6 and len(properties[k][\"values\"]) != 0)):\n url = 'https://www.wikidata.org/wiki/Special:EntityData/'+k+'.json'\n r = requests.get(url)\n sp = {\"id\": k}\n label = \"nolabel\"\n try:\n d = json.loads(r.text)\n if \"entities\" in d and k in d[\"entities\"] and \"labels\" in d[\"entities\"][k] and \"en\" in d[\"entities\"][k][\"labels\"] and \"value\" in d[\"entities\"][k][\"labels\"][\"en\"]: \n label = d[\"entities\"][k][\"labels\"][\"en\"][\"value\"] \n except json.decoder.JSONDecodeError:\n print(\"nop \"+r.text)\n sp[\"label\"] = label\n if 'quantity' in properties[k][\"datatypes\"]:\n p = numpy.percentile(properties[k][\"all\"], [33.3, 50, 66.67])\n sp[\"percentiles\"] = p.tolist()\n else:\n sp[\"values\"] = []\n sp[\"id_values\"] = []\n # get labels of values\n for v in properties[k][\"values\"]:\n vl = v\n sp[\"id_values\"].append(v)\n url = 'https://www.wikidata.org/wiki/Special:EntityData/'+v+'.json'\n r = requests.get(url)\n try:\n d = json.loads(r.text)\n if \"entities\" in d and v in d[\"entities\"] and \"labels\" in d[\"entities\"][v] and \"en\" in d[\"entities\"][v][\"labels\"] and \"value\" in d[\"entities\"][v][\"labels\"][\"en\"]: \n vl = d[\"entities\"][v][\"labels\"][\"en\"][\"value\"]\n except json.decoder.JSONDecodeError:\n print(\"nop \"+r.text)\n sp[\"values\"].append(vl)\n props.append(sp) \n print(k+\" (\"+label+\"):\"+str(properties[k][\"count\"])+\" \"+str(float(properties[k][\"count\"])/float(len(data)))+\" \"+str(properties[k][\"datatypes\"])+\" \"+str(len(properties[k][\"values\"])))\n\nprint(props)\nwith open(\"attributes.json\", \"w\") as f:\n json.dump(props, f)\n\n" ]
[ [ "numpy.percentile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SuryaThiru/mljar-supervised
[ "e6b63d4fe6bd3ee0d9183728af8729dc85533c54" ]
[ "tests/tests_algorithms/test_decision_tree.py" ]
[ "import unittest\nimport tempfile\nimport json\nimport numpy as np\nimport pandas as pd\nimport os\nfrom numpy.testing import assert_almost_equal\nfrom sklearn import datasets\n\nfrom supervised.algorithms.decision_tree import (\n DecisionTreeAlgorithm,\n DecisionTreeRegressorAlgorithm,\n)\nfrom supervised.utils.metric import Metric\n\nimport tempfile\n\n\nclass DecisionTreeTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.X, cls.y = datasets.make_regression(\n n_samples=100,\n n_features=5,\n n_informative=4,\n n_targets=1,\n shuffle=False,\n random_state=0,\n )\n\n def test_reproduce_fit_regression(self):\n metric = Metric({\"name\": \"rmse\"})\n params = {\"max_depth\": 1, \"seed\": 1, \"ml_task\": \"regression\"}\n prev_loss = None\n for _ in range(3):\n model = DecisionTreeRegressorAlgorithm(params)\n model.fit(self.X, self.y)\n y_predicted = model.predict(self.X)\n loss = metric(self.y, y_predicted)\n if prev_loss is not None:\n assert_almost_equal(prev_loss, loss)\n prev_loss = loss\n\n def test_save_and_load(self):\n metric = Metric({\"name\": \"rmse\"})\n dt = DecisionTreeRegressorAlgorithm({\"ml_task\": \"regression\"})\n dt.fit(self.X, self.y)\n y_predicted = dt.predict(self.X)\n loss = metric(self.y, y_predicted)\n\n filename = os.path.join(tempfile.gettempdir(), os.urandom(12).hex())\n\n dt.save(filename)\n dt2 = DecisionTreeRegressorAlgorithm({\"ml_task\": \"regression\"})\n dt2.load(filename)\n\n y_predicted = dt2.predict(self.X)\n loss2 = metric(self.y, y_predicted)\n assert_almost_equal(loss, loss2)\n\n # Finished with temp file, delete it\n os.remove(filename)\n" ]
[ [ "sklearn.datasets.make_regression", "numpy.testing.assert_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
os-climate/sostrades-core
[ "bcaa9b5e393ffbd0963e75a9315b27caf8b0abd9", "bcaa9b5e393ffbd0963e75a9315b27caf8b0abd9", "bcaa9b5e393ffbd0963e75a9315b27caf8b0abd9", "bcaa9b5e393ffbd0963e75a9315b27caf8b0abd9" ]
[ "sos_trades_core/sos_processes/test/test_disc_hessian_doe_eval_from_proc/usecase.py", "sos_trades_core/execution_engine/sos_scenario.py", "sos_trades_core/tests/l0_test_42_newton_raphson_problem.py", "sos_trades_core/tools/grad_solvers/validgrad/FDValidGrad.py" ]
[ "'''\nCopyright 2022 Airbus SA\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n# mode: python; py-indent-offset: 4; tab-width: 8; coding:utf-8\nfrom sos_trades_core.study_manager.study_manager import StudyManager\nimport pandas as pd\n\n\nclass Study(StudyManager):\n '''This is an example of usecase study for\n the test_disc_hessian_doe_eval_from_disc process.\n This process instantiates a DOE on the Hessian Discipline directly from the discipline.\n It uses the 1 wrapped discipline : sos_trades_core.sos_wrapping.test_discs.disc_hessian.DiscHessian.\n '''\n\n def __init__(self, execution_engine=None):\n super().__init__(__file__, execution_engine=execution_engine)\n\n def setup_usecase(self):\n ######### Numerical values ####\n x = 2.0\n y = 3.0\n\n ax2 = 4.0\n by2 = 5.0\n cx = 6.0\n dy = 7.0\n exy = 12.0\n\n input_selection_xy = {'selected_input': [True, True, False, False, False, False, False],\n 'full_name': ['DoE_Eval.Hessian.x', 'DoE_Eval.Hessian.y', 'DoE_Eval.Hessian.ax2',\n 'DoE_Eval.Hessian.by2', 'DoE_Eval.Hessian.cx', 'DoE_Eval.Hessian.dy', 'DoE_Eval.Hessian.exy']}\n input_selection_xy = pd.DataFrame(input_selection_xy)\n\n input_selection_xy = {'selected_input': [True, True, False, False, False, False, False],\n 'full_name': ['DoE_Eval.Hessian.x', 'DoE_Eval.Hessian.y', 'DoE_Eval.Hessian.ax2',\n 'DoE_Eval.Hessian.by2', 'DoE_Eval.Hessian.cx', 'DoE_Eval.Hessian.dy', 'DoE_Eval.Hessian.exy']}\n input_selection_xy = pd.DataFrame(input_selection_xy)\n\n output_selection_z = {'selected_output': [True],\n 'full_name': ['DoE_Eval.Hessian.z']}\n output_selection_z = pd.DataFrame(output_selection_z)\n\n dspace_dict_xy = {'variable': ['x', 'y'],\n 'lower_bnd': [-5., -5.],\n 'upper_bnd': [+5., +5.],\n #'enable_variable': [True, True],\n #'activated_elem': [[True], [True]]\n }\n my_doe_algo = \"lhs\"\n n_samples = 4\n\n dspace_xy = pd.DataFrame(dspace_dict_xy)\n\n ######### Fill the dictionary for dm ####\n values_dict = {}\n\n values_dict[f'{self.study_name}.DoE_Eval.eval_inputs'] = input_selection_xy\n values_dict[f'{self.study_name}.DoE_Eval.eval_outputs'] = output_selection_z\n values_dict[f'{self.study_name}.DoE_Eval.design_space'] = dspace_xy\n\n values_dict[f'{self.study_name}.DoE_Eval.sampling_algo'] = my_doe_algo\n values_dict[f'{self.study_name}.DoE_Eval.algo_options'] = {\n 'n_samples': n_samples}\n\n values_dict[f'{self.study_name}.DoE_Eval.Hessian.x'] = x\n values_dict[f'{self.study_name}.DoE_Eval.Hessian.y'] = y\n\n values_dict[f'{self.study_name}.DoE_Eval.Hessian.ax2'] = ax2\n values_dict[f'{self.study_name}.DoE_Eval.Hessian.by2'] = by2\n values_dict[f'{self.study_name}.DoE_Eval.Hessian.cx'] = cx\n values_dict[f'{self.study_name}.DoE_Eval.Hessian.dy'] = dy\n values_dict[f'{self.study_name}.DoE_Eval.Hessian.exy'] = exy\n\n return [values_dict]\n\n\nif __name__ == '__main__':\n uc_cls = Study()\n uc_cls.load_data()\n uc_cls.run(for_test=True)\n", "'''\nCopyright 2022 Airbus SAS\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance 
with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n'''\nmode: python; py-indent-offset: 4; tab-width: 8; coding: utf-8\n'''\nfrom copy import deepcopy\nfrom multiprocessing import cpu_count\n\nimport pandas as pd\nfrom numpy import array, ndarray, delete\n\nfrom gemseo.algos.design_space import DesignSpace\nfrom gemseo.core.scenario import Scenario\nfrom gemseo.formulations.formulations_factory import MDOFormulationsFactory\nfrom sos_trades_core.api import get_sos_logger\nfrom sos_trades_core.execution_engine.data_manager import POSSIBLE_VALUES\nfrom sos_trades_core.execution_engine.ns_manager import NS_SEP, NamespaceManager\nfrom sos_trades_core.execution_engine.sos_discipline_builder import SoSDisciplineBuilder\n\n\nclass SoSScenario(SoSDisciplineBuilder, Scenario):\n '''\n Generic implementation of Scenario\n '''\n # Default values of algorithms\n\n # ontology information\n _ontology_data = {\n 'label': 'sos_trades_core.execution_engine.sos_scenario',\n 'type': 'Research',\n 'source': 'SoSTrades Project',\n 'validated': '',\n 'validated_by': 'SoSTrades Project',\n 'last_modification_date': '',\n 'category': '',\n 'definition': '',\n 'icon': '',\n 'version': '',\n }\n default_algo_options = {\"ftol_rel\": 3e-16,\n \"normalize_design_space\": True,\n \"maxls\": 100,\n \"maxcor\": 50,\n \"pg_tol\": 1.e-8,\n \"max_iter\": 500,\n \"disp\": 30}\n default_parallel_options = {'parallel': False,\n 'n_processes': cpu_count(),\n 'use_threading': False,\n 'wait_time_between_fork': 0}\n USER_GRAD = 'user'\n # Design space dataframe headers\n VARIABLES = \"variable\"\n VALUES = \"value\"\n UPPER_BOUND = \"upper_bnd\"\n LOWER_BOUND = \"lower_bnd\"\n TYPE = \"type\"\n ENABLE_VARIABLE_BOOL = \"enable_variable\"\n LIST_ACTIVATED_ELEM = \"activated_elem\"\n VARIABLE_TYPE = \"variable_type\"\n # To be defined in the heritage\n is_constraints = None\n INEQ_CONSTRAINTS = 'ineq_constraints'\n EQ_CONSTRAINTS = 'eq_constraints'\n # DESC_I/O\n PARALLEL_OPTIONS = 'parallel_options'\n # FD step\n FD_STEP = \"fd_step\"\n\n algo_dict = {}\n\n# 'dataframe_descriptor': {VARIABLES: ('string', None, True),\n# VALUES: ('array', None, True),\n# LOWER_BOUND: ('array', None, True),\n# UPPER_BOUND: ('array', None, True),\n# ENABLE_VARIABLE_BOOL: ('bool', None, True),\n# LIST_ACTIVATED_ELEM: ('list', None, True), },\n# 'dataframe_edition_locked': False,\n DESC_IN = {'algo': {'type': 'string', 'structuring': True},\n 'design_space': {'type': 'dataframe', 'structuring': True},\n 'formulation': {'type': 'string', 'structuring': True},\n 'maximize_objective': {'type': 'bool', 'structuring': True, 'default': False},\n 'objective_name': {'type': 'string', 'structuring': True},\n 'differentiation_method': {'type': 'string', 'default': Scenario.FINITE_DIFFERENCES,\n 'possible_values': [USER_GRAD, Scenario.FINITE_DIFFERENCES,\n Scenario.COMPLEX_STEP],\n 'structuring': True},\n 'fd_step': {'type': 'float', 'structuring': True, 'default': 1e-6}, \n 'algo_options': {'type': 'dict', 'dataframe_descriptor': {VARIABLES: ('string', None, False),\n VALUES: ('string', None, True)},\n 'dataframe_edition_locked': False,\n 'default': default_algo_options,\n 'structuring': 
True},\n PARALLEL_OPTIONS: {'type': 'dict', # SoSDisciplineBuilder.OPTIONAL: True,\n 'dataframe_descriptor': {VARIABLES: ('string', None, False), # bool\n VALUES: ('string', None, True)},\n # 'dataframe_descriptor': {'parallel': ('int', None, True), #bool\n # 'n_processes': ('int', None, True),\n # 'use_threading': ('int', None, True),#bool\n # 'wait_time_between_fork': ('int', None, True)},\n 'dataframe_edition_locked': False,\n 'default': default_parallel_options,\n 'structuring': True},\n 'eval_mode': {'type': 'bool', 'default': False, POSSIBLE_VALUES: [True, False], 'structuring': True},\n 'eval_jac': {'type': 'bool', 'default': False, POSSIBLE_VALUES: [True, False]},\n 'execute_at_xopt': {'type': 'bool', 'default': True}}\n\n DESC_OUT = {'design_space_out': {'type': 'dataframe'}\n }\n\n def __init__(self, sos_name, ee, cls_builder):\n \"\"\"\n Constructor\n \"\"\"\n self.__factory = ee.factory\n self.cls_builder = cls_builder\n self.formulation = None\n self.maximize_objective = None\n self.opt_problem = None\n self._maturity = None\n\n self._reload(sos_name, ee)\n self.logger = get_sos_logger(f'{self.ee.logger.name}.SoSScenario')\n\n self.DESIGN_SPACE = 'design_space'\n self.FORMULATION = 'formulation'\n self.MAXIMIZE_OBJECTIVE = 'maximize_objective'\n self.OBJECTIVE_NAME = 'objective_name'\n self.FORMULATION_OPTIONS = 'formulation_options'\n\n # self.SEARCH_PATHS = 'search_paths'\n self.SCENARIO_MANDATORY_FIELDS = [\n self.DESIGN_SPACE,\n self.FORMULATION,\n self.MAXIMIZE_OBJECTIVE,\n self.OBJECTIVE_NAME]\n # self.SEARCH_PATHS]\n self.OPTIMAL_OBJNAME_SUFFIX = \"opt\"\n self.dict_desactivated_elem = {}\n self.activated_variables = []\n\n def _reload(self, sos_name, ee):\n \"\"\"\n reload object\n \"\"\"\n SoSDisciplineBuilder._reload(self, sos_name, ee)\n\n def build(self):\n \"\"\"\n build of subdisciplines\n \"\"\"\n # build and set sos_disciplines (if any)\n if len(self.cls_builder) != 0:\n old_current_discipline = self.ee.factory.current_discipline\n self.ee.factory.current_discipline = self\n # get the list of builders\n builder_list = self.cls_builder\n if not isinstance(self.cls_builder, list):\n builder_list = [self.cls_builder]\n # build the disciplines if not already built\n for builder in builder_list:\n disc = builder.build()\n if disc not in self.sos_disciplines:\n self.ee.factory.add_discipline(disc)\n\n # append added disciplines to built_sos_disciplines for\n # disciplines cleaning\n if disc not in self.built_sos_disciplines:\n self.built_sos_disciplines.append(disc)\n\n self.ee.factory.current_discipline = old_current_discipline\n\n def configure(self):\n \"\"\"\n Configuration of SoSScenario, call to super Class and\n \"\"\"\n self.configure_io()\n\n self.configure_execution()\n\n # Extract variables for eval analysis\n if self.sos_disciplines is not None and len(self.sos_disciplines) > 0:\n self.set_eval_possible_values()\n\n # update MDA flag to flush residuals between each mda run\n self._set_flush_submdas_to_true()\n\n def is_configured(self):\n \"\"\"\n Return False if at least one sub discipline needs to be configured, True if not\n \"\"\"\n return self.get_configure_status() and not self.check_structuring_variables_changes() and (\n self.get_disciplines_to_configure() == [])\n\n def setup_sos_disciplines(self):\n \"\"\"\n Overload setup_sos_disciplines to create a dynamic desc_in\n \"\"\"\n if self.ALGO_OPTIONS in self._data_in:\n algo_name = self.get_sosdisc_inputs(self.ALGO)\n algo_options = self.get_sosdisc_inputs(self.ALGO_OPTIONS)\n if algo_name is not 
None:\n default_dict = self.get_algo_options(algo_name)\n self._data_in[self.ALGO_OPTIONS][self.DEFAULT] = default_dict\n if algo_options is not None:\n values_dict = deepcopy(default_dict)\n\n for k in algo_options.keys():\n if algo_options[k] != 'None' or not isinstance(algo_options[k], type(None)):\n values_dict.update({k: algo_options[k]})\n\n self._data_in[self.ALGO_OPTIONS][self.VALUE] = values_dict\n\n self.set_edition_inputs_if_eval_mode()\n\n def set_edition_inputs_if_eval_mode(self):\n '''\n if eval mode then algo and algo options will turn to not editable\n '''\n\n if 'eval_mode' in self._data_in:\n eval_mode = self.get_sosdisc_inputs('eval_mode')\n if eval_mode:\n self._data_in[self.ALGO][self.EDITABLE] = False\n self._data_in[self.ALGO_OPTIONS][self.EDITABLE] = False\n self._data_in[self.FORMULATION][self.EDITABLE] = False\n self._data_in[self.MAXIMIZE_OBJECTIVE][self.EDITABLE] = False\n self._data_in[self.PARALLEL_OPTIONS][self.EDITABLE] = False\n\n self._data_in[self.ALGO][self.OPTIONAL] = True\n self._data_in[self.ALGO_OPTIONS][self.OPTIONAL] = True\n self._data_in[self.FORMULATION][self.OPTIONAL] = True\n self._data_in[self.MAXIMIZE_OBJECTIVE][self.OPTIONAL] = True\n self._data_in[self.PARALLEL_OPTIONS][self.OPTIONAL] = True\n else:\n self._data_in['eval_jac'][self.VALUE] = False\n\n def pre_set_scenario(self):\n \"\"\"\n prepare the set GEMS set_scenario method\n \"\"\"\n design_space = None\n formulation = None\n obj_full_name = None\n maximize_objective = False\n\n dspace = self.get_sosdisc_inputs(self.DESIGN_SPACE)\n if dspace is not None:\n if any(type(design_variable).__name__ not in ['array', 'list'] for design_variable in\n dspace['value'].tolist()):\n raise ValueError(\n 'A design variable must obligatory be an array')\n\n # build design space\n design_space = self.set_design_space()\n if design_space.variables_names:\n _, formulation, maximize_objective, obj_name = self.get_sosdisc_inputs(\n self.SCENARIO_MANDATORY_FIELDS)\n\n # get full objective ids\n obj_name = self.get_sosdisc_inputs(self.OBJECTIVE_NAME)\n obj_full_name = self._update_names([obj_name])[0]\n\n return design_space, formulation, maximize_objective, obj_full_name\n\n def set_scenario(self):\n \"\"\"\n set GEMS scenario, to be overloaded with each type of scenario (MDO, DOE, ...)\n \"\"\"\n pass\n\n def set_design_space_for_complex_step(self):\n '''\n Set design space values to complex if the differentiation method is complex_step\n '''\n diff_method = self.get_sosdisc_inputs('differentiation_method')\n if diff_method == self.COMPLEX_STEP:\n dspace = deepcopy(self.opt_problem.design_space)\n curr_x = dspace._current_x\n for var in curr_x:\n curr_x[var] = curr_x[var].astype('complex128')\n self.opt_problem.design_space = dspace\n\n def update_default_coupling_inputs(self):\n '''\n Update default inputs of the couplings\n '''\n for disc in self.sos_disciplines:\n if disc.is_sos_coupling:\n self._set_default_inputs_from_dm(disc)\n\n def get_algo_options(self, algo_name):\n \"\"\"\n Create default dict for algo options\n :param algo_name: the name of the algorithm\n :returns: dictionary with algo options default values\n \"\"\"\n # TODO : add warning and log algo options\n\n default_dict = {}\n driver_lib = self._algo_factory.create(algo_name)\n driver_lib.init_options_grammar(algo_name)\n schema_dict = driver_lib.opt_grammar.schema.to_dict()\n properties = schema_dict.get(driver_lib.opt_grammar.PROPERTIES_FIELD)\n algo_options_keys = list(properties.keys())\n\n found_algo_names = [\n key for key in 
self.algo_dict.keys() if key in algo_name]\n if len(found_algo_names) == 1:\n key = found_algo_names[0]\n for algo_option in algo_options_keys:\n default_val = self.algo_dict[key].get(algo_option)\n if default_val is not None:\n default_dict[algo_option] = default_val\n else:\n for algo_option in algo_options_keys:\n if algo_option in self.default_algo_options:\n algo_default_val = self.default_algo_options.get(algo_option)\n if algo_default_val is not None:\n default_dict[algo_option] = algo_default_val\n\n return default_dict\n\n def run(self):\n '''\n Run method\n '''\n # TODO: to delete when MDA initialization is improved\n self.update_default_coupling_inputs()\n\n self.set_design_space_for_complex_step()\n\n eval_mode = self.get_sosdisc_inputs('eval_mode')\n if eval_mode:\n self.run_eval_mode()\n\n else:\n self.run_scenario()\n outputs = [discipline.get_output_data()\n for discipline in self.sos_disciplines]\n for data in outputs:\n self.local_data.update(data)\n # store local data in datamanager\n self.update_dm_with_local_data()\n\n def run_scenario(self):\n '''\n Run the scenario and store last design_space\n '''\n pass\n\n def run_eval_mode(self):\n '''\n Run evaluate functions with the initial x \n '''\n eval_jac = self.get_sosdisc_inputs('eval_jac')\n design_space = self.get_sosdisc_inputs('design_space')\n\n self.opt_problem.evaluate_functions(\n eval_jac=eval_jac, normalize=False)\n # if eval mode design space was not modified\n self.store_sos_outputs_values(\n {'design_space_out': design_space}, update_dm=True)\n\n def _run_algorithm(self):\n \"\"\"\n Runs the algo\n \"\"\"\n pass\n\n def _set_flush_submdas_to_true(self):\n # update MDA flag to flush residuals between each mda run\n for disc in self.sos_disciplines:\n if disc.is_sos_coupling:\n if len(disc.sub_mda_list) > 0:\n for sub_mda in disc.sub_mda_list:\n sub_mda.reset_history_each_run = True\n\n def _set_default_inputs_from_dm(self, disc):\n \"\"\"\n Based on dm values, default_inputs are set to mdachains,\n and default_inputs dtype is set to complex in case of complex_step gradient computation.\n \"\"\"\n input_data = {}\n input_data_names = disc.get_input_data_names()\n for data_name in input_data_names:\n val = self.ee.dm.get_value(data_name)\n # for cases of early configure steps\n if val is not None:\n input_data[data_name] = val\n\n # convert sostrades types into numpy arrays\n # no need to update DM since call by SoSTrades\n input_data = disc._convert_new_type_into_array(var_dict=input_data)\n disc.mdo_chain.default_inputs.update(input_data)\n\n for disc in disc.sos_disciplines:\n if disc.is_sos_coupling:\n self._set_default_inputs_from_dm(disc)\n\n def configure_io(self):\n \"\"\"\n Configure discipline and all sub-disciplines\n \"\"\"\n if self._data_in == {} or self.check_structuring_variables_changes():\n super().configure()\n\n disc_to_configure = self.get_disciplines_to_configure()\n\n if len(disc_to_configure) > 0:\n self.set_configure_status(False)\n else:\n self.set_children_cache_inputs()\n self.set_configure_status(True)\n\n for disc in disc_to_configure:\n disc.configure()\n\n def get_disciplines_to_configure(self):\n \"\"\"\n Get sub disciplines list to configure\n \"\"\"\n disc_to_configure = []\n for disc in self.sos_disciplines:\n if not disc.is_configured():\n disc_to_configure.append(disc)\n return disc_to_configure\n\n def configure_execution(self):\n \"\"\"\n - configure GEMS grammar\n - set scenario\n \"\"\"\n for disc in self.sos_disciplines:\n disc.update_gems_grammar_with_data_io()\n 
self.set_scenario()\n self.set_parallel_options()\n\n def _update_names(self, names):\n \"\"\"\n if no dot in the name, it looks for the full name in the dm\n else we suppose that this is a full name that needs to be updated with current\n study name\n |!| it will NOT work for names with a dot in data_io...\n \"\"\"\n local_names = []\n full_names = []\n for name in names:\n if NamespaceManager.NS_SEP not in name:\n local_names.append(name)\n else:\n full_names.append(name)\n return self.get_full_names(local_names) + \\\n self._update_study_ns_in_varname(full_names)\n\n def set_diff_method(self):\n \"\"\"\n Set differentiation method and send a WARNING\n if some linearization_mode are not coherent with diff_method\n \"\"\"\n diff_method = self.get_sosdisc_inputs('differentiation_method')\n\n if diff_method in self.APPROX_MODES:\n for disc in self.sos_disciplines:\n if disc.linearization_mode != diff_method:\n self.logger.warning(\n f'The differentiation method \"{diff_method}\" will overload the linearization mode \"{disc.linearization_mode}\" ')\n\n fd_step = self.get_sosdisc_inputs(self.FD_STEP)\n Scenario.set_differentiation_method(\n self, diff_method, fd_step)\n\n def set_parallel_options(self):\n \"\"\"\n sets parallel options for jacobian approximation\n \"\"\"\n\n # update default options with user options\n user_options = self.get_sosdisc_inputs(self.PARALLEL_OPTIONS)\n if user_options is None:\n user_options = {}\n options = deepcopy(self.default_parallel_options)\n options.update(user_options)\n parallel = options.pop(\"parallel\")\n # update problem options\n if self.formulation is not None:\n self.opt_problem = self.formulation.opt_problem\n self.opt_problem.parallel_differentiation = parallel\n self.opt_problem.parallel_differentiation_options = options\n\n def set_design_space(self):\n \"\"\"\n reads design space (set_design_space)\n \"\"\"\n\n dspace_df = self.get_sosdisc_inputs(self.DESIGN_SPACE)\n # update design space dv with full names\n dvs = list(dspace_df[self.VARIABLES])\n full_dvs = []\n dspace_dict_updated = {}\n\n for key in dvs:\n\n full_key_l = self.get_full_names([key])\n if len(full_key_l) > 0:\n full_key = full_key_l[0]\n full_dvs.append(full_key)\n # dspace_dict_updated[full_key] = dspace_df[key]\n else:\n self.logger.warning(f\" missing design variable in dm : {key}\")\n if len(full_dvs) == len(dvs):\n dspace_dict_updated = dspace_df.copy()\n dspace_dict_updated[self.VARIABLES] = full_dvs\n\n design_space = self.read_from_dataframe(dspace_dict_updated)\n\n else:\n\n design_space = DesignSpace()\n return design_space\n\n def read_from_dict(self, dp_dict):\n \"\"\"Parses a dictionary to read the DesignSpace\n\n :param dp_dict : design space dictionary\n :returns: the design space\n \"\"\"\n design_space = DesignSpace()\n for key in dp_dict:\n print(key)\n if type(dp_dict[key]['value']) != list and type(dp_dict[key]['value']) != ndarray:\n name = key\n var_type = ['float']\n\n size = 1\n l_b = array([dp_dict[key]['lower_bnd']])\n u_b = array([dp_dict[key]['upper_bnd']])\n value = array([dp_dict[key]['value']])\n else:\n size = len(dp_dict[key]['value'])\n var_type = ['float'] * size\n\n name = key\n l_b = array(dp_dict[key]['lower_bnd'])\n u_b = array(dp_dict[key]['upper_bnd'])\n value = array(dp_dict[key]['value'])\n\n design_space.add_variable(name, size, var_type, l_b, u_b, value)\n return design_space\n\n def read_from_dataframe(self, df):\n \"\"\"Parses a DataFrame to read the DesignSpace\n\n :param df : design space df\n :returns: the design space\n 
\"\"\"\n names = list(df[self.VARIABLES])\n values = list(df[self.VALUES])\n l_bounds = list(df[self.LOWER_BOUND])\n u_bounds = list(df[self.UPPER_BOUND])\n enabled_variable = list(df[self.ENABLE_VARIABLE_BOOL])\n list_activated_elem = list(df[self.LIST_ACTIVATED_ELEM])\n \n # looking for the optionnal variable type in the design space\n if self.VARIABLE_TYPE in df:\n var_types = df[self.VARIABLE_TYPE]\n else:\n # set to None for all variables if not exists\n var_types = [None] * len(names)\n \n design_space = DesignSpace()\n \n for dv, val, lb, ub, l_activated, enable_var, vtype in zip(names, values, l_bounds, u_bounds, list_activated_elem, enabled_variable, var_types):\n\n # check if variable is enabled to add it or not in the design var\n if enable_var:\n self.dict_desactivated_elem[dv] = {}\n \n if type(val) != list and type(val) != ndarray:\n size = 1\n var_type = ['float']\n l_b = array([lb])\n u_b = array([ub])\n value = array([val])\n else:\n # check if there is any False in l_activated\n if not all(l_activated):\n index_false = l_activated.index(False)\n self.dict_desactivated_elem[dv] = {\n 'value': val[index_false], 'position': index_false}\n\n val = delete(val, index_false)\n lb = delete(lb, index_false)\n ub = delete(ub, index_false)\n\n size = len(val)\n var_type = ['float'] * size\n l_b = array(lb)\n u_b = array(ub)\n value = array(val)\n \n # 'automatic' var_type values are overwritten if filled by the user\n if vtype is not None:\n var_type = vtype\n \n design_space.add_variable(\n dv, size, var_type, l_b, u_b, value)\n return design_space\n\n def read_from_dataframe_new(self, df):\n \"\"\"Parses a DataFrame to read the DesignSpace\n\n :param df : design space df\n :returns: the design space\n \"\"\"\n names = df[self.VARIABLES]\n values = df[self.VALUES]\n l_bounds = df[self.LOWER_BOUND]\n u_bounds = df[self.UPPER_BOUND]\n\n design_space = DesignSpace()\n for dv, val, lb, ub in zip(names, values, l_bounds, u_bounds):\n # if [type(val), type(lb), type(ub)] == [str] * 3:\n # val = eval(val)\n # lb = eval(lb)\n # ub = eval(ub)\n name = dv\n if type(val) != list and type(val) != ndarray:\n size = 1\n var_type = ['float']\n l_b = array([lb])\n u_b = array([ub])\n value = array([val])\n else:\n size = len(val)\n var_type = ['float'] * size\n l_b = array(lb)\n u_b = array(ub)\n value = array(val)\n design_space.add_variable(name, size, var_type, l_b, u_b, value)\n return design_space\n\n def update_design_space_out(self):\n \"\"\"\n Method to update design space with opt value\n \"\"\"\n design_space = deepcopy(self.get_sosdisc_inputs(self.DESIGN_SPACE))\n l_variables = design_space[self.VARIABLES]\n for var in l_variables:\n full_name_var = self.get_full_names([var])[0]\n if full_name_var in self.activated_variables:\n value_x_opt = list(self.formulation.design_space._current_x.get(\n full_name_var))\n if self.dict_desactivated_elem[full_name_var] != {}:\n # insert a desactivated element\n value_x_opt.insert(\n self.dict_desactivated_elem[full_name_var]['position'],\n self.dict_desactivated_elem[full_name_var]['value'])\n\n design_space.loc[design_space[self.VARIABLES] == var, self.VALUE] = pd.Series(\n [value_x_opt] * len(design_space))\n\n self.store_sos_outputs_values(\n {'design_space_out': design_space})\n\n # GEMSEO overload\n def _update_input_grammar(self):\n self.update_gems_grammar_with_data_io()\n Scenario._update_input_grammar(self)\n\n def get_maturity(self):\n ref_dict_maturity = deepcopy(self.dict_maturity_ref)\n for discipline in self.sos_disciplines:\n 
disc_maturity = discipline.get_maturity()\n\n if isinstance(disc_maturity, dict):\n for m_k, m_v in ref_dict_maturity.items():\n if m_v != disc_maturity[m_k]:\n ref_dict_maturity[m_k] += disc_maturity[m_k]\n elif disc_maturity in ref_dict_maturity:\n ref_dict_maturity[disc_maturity] += 1\n\n self._maturity = ref_dict_maturity\n return self._maturity\n\n def set_eval_possible_values(self):\n\n analyzed_disc = self.sos_disciplines\n possible_out_values = self.fill_possible_values(\n analyzed_disc) # possible_in_values\n\n possible_out_values = self.find_possible_values(\n analyzed_disc, possible_out_values) # possible_in_values\n\n # Take only unique values in the list\n possible_out_values = list(set(possible_out_values))\n\n # Fill the possible_values of obj and constraints\n self.dm.set_data(f'{self.get_disc_full_name()}.{self.OBJECTIVE_NAME}',\n self.POSSIBLE_VALUES, possible_out_values)\n\n if self.is_constraints:\n self.dm.set_data(f'{self.get_disc_full_name()}.{self.INEQ_CONSTRAINTS}',\n self.POSSIBLE_VALUES, possible_out_values)\n self.dm.set_data(f'{self.get_disc_full_name()}.{self.EQ_CONSTRAINTS}',\n self.POSSIBLE_VALUES, possible_out_values)\n # fill the possible values of algos\n self._init_algo_factory()\n avail_algos = self._algo_factory.algorithms\n self.dm.set_data(f'{self.get_disc_full_name()}.{self.ALGO}',\n self.POSSIBLE_VALUES, avail_algos)\n # fill the possible values of formulations\n self._form_factory = MDOFormulationsFactory()\n avail_formulations = self._form_factory.formulations\n self.dm.set_data(f'{self.get_disc_full_name()}.{self.FORMULATION}',\n self.POSSIBLE_VALUES, avail_formulations)\n # fill the possible values of maximize_objective\n self.dm.set_data(f'{self.get_disc_full_name()}.{self.MAXIMIZE_OBJECTIVE}',\n self.POSSIBLE_VALUES, [False, True])\n\n # -- Set possible design variables and objevtives\n # adapted from soseval\n # TODO: find a better way to select constraints and objectives\n\n def find_possible_values(\n self, sos_disciplines, possible_out_values): # possible_in_values\n \"\"\"\n Run through all disciplines and sublevels\n to find possible values for eval_inputs and eval_outputs\n \"\"\"\n if len(sos_disciplines) != 0:\n for disc in sos_disciplines:\n sub_out_values = self.fill_possible_values(\n [disc]) # sub_in_values\n # possible_in_values.extend(sub_in_values)\n possible_out_values.extend(sub_out_values)\n self.find_possible_values(\n disc.sos_disciplines, possible_out_values) # possible_in_values\n\n return possible_out_values # possible_in_values\n\n def fill_possible_values(self, sos_disciplines):\n \"\"\"\n Fill possible values lists for eval inputs and outputs\n an input variable must be a float coming from a data_in of a discipline in all the process\n and not a default variable\n an output variable must be any data from a data_out discipline\n \"\"\"\n # poss_in_values = []\n poss_out_values = []\n for disc in sos_disciplines:\n # for data_in_key in disc.get_input_data_names(): #disc._data_in.keys():\n # is_float = disc._data_in[data_in_key.split(NS_SEP)[-1]][self.TYPE] == 'float'\n # in_coupling_numerical = data_in_key in SoSCoupling.DEFAULT_NUMERICAL_PARAM\n # if not in_coupling_numerical: #is_float and\n # # Caution ! This won't work for variables with points in name\n # # as for ac_model\n # poss_in_values.append(data_in_key)\n for data_out_key in disc.get_output_data_names(): # disc._data_out.keys():\n # Caution ! 
This won't work for variables with points in name\n # as for ac_model\n data_out_key = data_out_key.split(NS_SEP)[-1]\n poss_out_values.append(data_out_key)\n\n return poss_out_values # poss_in_values\n\n def __str__(self):\n \"\"\"\n Summarize results for display\n\n :returns: string summarizing results\n \"\"\"\n msg = \"\"\n if hasattr(self, \"disciplines\"):\n msg = self.__class__.__name__ + \":\\nDisciplines: \"\n disc_names = [disc.name\n for disc in self.disciplines] # pylint: disable=E1101\n msg += \" \".join(disc_names)\n msg += \"\\nMDOFormulation: \" # We keep MDO here has is done in gemseo\n msg += self.formulation.__class__.__name__\n msg += \"\\nAlgorithm: \"\n msg += str(self.get_sosdisc_inputs(self.ALGO)) + \"\\n\"\n\n return msg\n\n def get_full_names(self, names):\n '''\n get full names of ineq_names and obj_names\n '''\n full_names = []\n for i_name in names:\n full_id_l = self.dm.get_all_namespaces_from_var_name(i_name)\n if full_id_l != []:\n if len(full_id_l) > 1:\n # full_id = full_id_l[0]\n full_id = self.get_scenario_lagr(full_id_l)\n else:\n full_id = full_id_l[0]\n full_names.append(full_id)\n\n return full_names\n\n def get_algo_options_dict(self):\n \"\"\"\n Method to get the dictionnary of algo options from dataframe so that it is in GEMS format\n \"\"\"\n algo_options_df = self.get_sosdisc_inputs('algo_options')\n return algo_options_df.to_dict('records')[0]\n", "'''\nCopyright 2022 Airbus SAS\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n# -*-mode: python; py-indent-offset: 4; tab-width: 8; coding: iso-8859-1 -*-\nimport unittest\nfrom numpy import zeros, array\nfrom os import remove\nfrom sos_trades_core.tools.grad_solvers.solvers.newton_raphson_problem import NewtonRaphsonProblem\n\n\nclass TestNewtonRaphsonProblem(unittest.TestCase):\n \"\"\"\n NewtonRaphsonProblem test class\n \"\"\"\n # Simple example to test the Newton Raphson method\n # Try to solve:\n # R[0] = x**2+y**2\n # R[1] = y**2+z**2\n # R[2] = x**2+z**2\n #\n # solution expected [0.,0.,0.]\n\n def __comp_R(self, W):\n R = zeros(3)\n R[0] = W[0]**2 + W[1]**2\n R[1] = W[1]**2 + W[2]**2 + 10. * W[0] * W[1] * W[2]\n R[2] = W[0]**2 + W[2]**2\n return R\n\n def __comp_dRdW(self, W):\n dRdW = zeros((3, 3))\n\n dRdW[0, 0] = 2. * W[0]\n dRdW[0, 1] = 2. * W[1]\n dRdW[0, 2] = 0.\n\n dRdW[1, 0] = 10. * W[1] * W[2]\n dRdW[1, 1] = 2. * W[1] + 10. * W[0] * W[2]\n dRdW[1, 2] = 2. * W[2] + 10. * W[0] * W[1]\n\n dRdW[2, 0] = 2. * W[0]\n dRdW[2, 1] = 0.\n dRdW[2, 2] = 2. * W[2]\n return dRdW\n\n def __comp_wrong_dRdW(self, W):\n dRdW = zeros((3, 3))\n\n dRdW[0, 0] = 2. * W[0]\n dRdW[0, 1] = 2. * W[1]\n dRdW[0, 2] = 0.\n\n dRdW[1, 0] = 50. * W[1] * W[2] # should be 10.*W[1]*W[2]\n dRdW[1, 1] = 2. * W[1] + 10. * W[0] * W[2]\n dRdW[1, 2] = 2. * W[2] + 10. * W[0] * W[1]\n\n dRdW[2, 0] = 2. * W[0]\n dRdW[2, 1] = 0.\n dRdW[2, 2] = 2. 
* W[2]\n return dRdW\n\n def test_01_NewtonRaphsonProblem_instantiation(self):\n \"\"\"\n test class instantiation\n \"\"\"\n W0 = array([1., 1., 1.])\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n assert(NRPb is not None)\n\n def test_02_NewtonRaphsonProblem_W0_list(self):\n \"\"\"\n test set of starting point in python list format\n \"\"\"\n W0 = [1., 1., 1.]\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n assert(NRPb is not None)\n\n def test_03_NewtonRaphsonProblem_set_relax_factor(self):\n \"\"\"\n test relax_factor attribute overload\n \"\"\"\n W0 = array([1., 1., 1.])\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n NRPb.set_relax_factor(0.80)\n assert(NRPb.get_relax_factor() == 0.80)\n\n def test_04_NewtonRaphsonProblem_set_stop_residual(self):\n \"\"\"\n test stop_residual attribute overload\n \"\"\"\n W0 = array([1., 1., 1.])\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n NRPb.set_stop_residual(1.e-9)\n assert(NRPb.get_stop_residual() == 1.e-9)\n\n def test_05_NewtonRaphsonProblem_set_max_iterations(self):\n \"\"\"\n test max_iterations attribute overload\n \"\"\"\n W0 = array([1., 1., 1.])\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n NRPb.set_max_iterations(200)\n assert(NRPb.get_max_iterations() == 200)\n\n def test_06_NewtonRaphsonProblem_defaults_parameters(self):\n \"\"\"\n test defaults parameters sfor relax_factor, stop_residual and max_iterations\n \"\"\"\n W0 = array([1., 1., 1.])\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n assert(NRPb.get_relax_factor() == 0.99)\n assert(NRPb.get_stop_residual() == 1.e-6)\n assert(NRPb.get_max_iterations() == 100)\n\n def test_07_NewtonRaphsonProblem_jacobian_validation(self):\n \"\"\"\n test Hessian validation when Hessian is valid\n \"\"\"\n W0 = array([1., 1., 1.])\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n ok = NRPb.valid_jacobian(W0, iprint=True)\n assert(ok)\n\n remove('gradient_file0.dat')\n remove('gradient_file1.dat')\n remove('gradient_file2.dat')\n\n def test_08_NewtonRaphsonProblem_jacobian_non_validation(self):\n \"\"\"\n test Hessian validation when Hessian is not valid\n \"\"\"\n W0 = array([1., 1., 1.])\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_wrong_dRdW)\n ok, df_fd, df = NRPb.valid_jacobian(W0, iprint=False)\n assert(not ok)\n\n def test_09_NewtonRaphsonProblem_solving(self):\n W0 = array([1., 1., 1.])\n sol = array([0., 0., 0.])\n print('')\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n NRPb.set_relax_factor(0.99)\n NRPb.set_stop_residual(1.e-15)\n NRPb.set_max_iterations(100)\n W = NRPb.solve()\n self.assertAlmostEqual(W[0], sol[0])\n self.assertAlmostEqual(W[1], sol[1])\n self.assertAlmostEqual(W[2], sol[2])\n\n def test_10_NewtonRaphsonProblem_solving_limit_max_iterations(self):\n W0 = array([1., 1., 1.])\n sol = array([0., 0., 0.])\n print('')\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n NRPb.set_relax_factor(0.99)\n NRPb.set_stop_residual(1.e-15)\n NRPb.set_max_iterations(10)\n W = NRPb.solve()\n res_hist = NRPb.get_residual_hist()\n assert(len(res_hist) == 10)\n\n def test_11_NewtonRaphsonProblem_solving_limit_residual(self):\n W0 = array([1., 1., 1.])\n sol = array([0., 0., 0.])\n print('')\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n NRPb.set_relax_factor(0.99)\n NRPb.set_stop_residual(1.e-6)\n NRPb.set_max_iterations(100)\n W = NRPb.solve()\n res = 
NRPb.get_residual()\n assert(1.e-7 < res < 1.e-6)\n\n def test_12_NewtonRaphsonProblem_solving_parallel(self):\n W0 = array([1., 1., 1.])\n sol = array([0., 0., 0.])\n print('')\n NRPb = NewtonRaphsonProblem(W0, self.__comp_R, self.__comp_dRdW)\n NRPb.set_relax_factor(0.99)\n NRPb.set_stop_residual(1.e-15)\n NRPb.set_max_iterations(100)\n NRPb.set_method = 'inhouse'\n NRPb.multi_proc = True\n W = NRPb.solve()\n self.assertAlmostEqual(W[0], sol[0])\n self.assertAlmostEqual(W[1], sol[1])\n self.assertAlmostEqual(W[2], sol[2])\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestNewtonRaphsonProblem)\n unittest.TextTestRunner(verbosity=2).run(suite)\n", "'''\nCopyright 2022 Airbus SAS\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n# -*-mode: python; py-indent-offset: 4; tab-width: 8; coding: iso-8859-1 -*-\n\nfrom numpy.linalg import norm\nfrom numpy import savetxt\nfrom .FDGradient import FDGradient\n\n\nclass FDValidGrad(object):\n \"\"\"\n Finite differences gradient calculation and validation.\n \"\"\"\n\n def __init__(self, scheme_order, f_pointer, df_pointer, fd_step=1e-6, bounds=None):\n \"\"\"\n Constructor\n Args:\n scheme_order : order of the numerical scheme : 1, 1j, 2,\n f_pointer : pointer to the function to be derived\n df_pointer : pointer to the function gradient to be checked\n fd_step : finite differences step\n \"\"\"\n self.__fpointer = f_pointer\n self.__df_pointer = df_pointer\n self.__fd_grad = FDGradient(\n scheme_order, f_pointer, fd_step=fd_step, bounds=bounds)\n\n self.__multi_proc = False\n self.set_multi_proc(False)\n\n def set_bounds(self, bounds):\n self.__fd_grad.set_bounds(bounds)\n\n def set_multi_proc(self, multi):\n self.__multi_proc = multi\n self.__fd_grad.set_multi_proc(multi)\n\n def compute_fd_grad(self, x, args=None):\n \"\"\"\n Computes the gradient by finite differences\n Args :\n x : variables where the function is derived\n Returns:\n The gradient vector\n \"\"\"\n if args is not None:\n return self.__fd_grad.grad_f(x, args)\n return self.__fd_grad.grad_f(x)\n\n def compare(self, x, treshold=1e-4, args=None, force_print=False, split_out=False, iprint=True, return_all=False, write_to_files=False, grad_files_prefix=\"\"):\n \"\"\"\n Comparison of provided gradient and finite differences gradient.\n Args :\n x : variables where the function is derived\n treshold : tolerance between analytical and finite differences gradient\n args : function additional args\n force_print : if True, error is printed\n file names of the exported gradient values\n split_out: split checking of vectorial outputs\n iprint : allows printing of messages\n return_all : instead of returning status only, returns status, finite differences gradient and analytical gradients\n write_to_files: write gradients into files\n grad_files_prefix : if write_to_files and gradient is written to disc,\n\n Returns:\n ok : True if gradient is valid\n df_fd : optional finite differences gradient output\n df: optional analytical gradient output\n \"\"\"\n df_fd = 
self.compute_fd_grad(x, args)\n\n if args is None:\n df = self.__df_pointer(x)\n else:\n df = self.__df_pointer(x, args)\n\n ok, msg = self.__compute_error_and_check(\n df_fd, df, treshold, split_out=split_out)\n\n if (not ok or force_print) and iprint:\n print(msg)\n\n if write_to_files:\n for i in range(len(x)):\n savetxt(grad_files_prefix + 'df_analytic_' +\n str(i) + '.txt', df[:, :, i].T)\n savetxt(grad_files_prefix + 'df_FD_' +\n str(i) + '.txt', df_fd[:, :, i].T)\n\n if return_all:\n return ok, df_fd, df\n else:\n return ok\n\n def __compute_error_and_check(self, df_fd, df, treshold, split_out=False):\n \"\"\"\n Computes the relative error between finite differences gradient\n and analytical gradient.\n Args :\n df_fd : the gradient obtained by finite differences\n df : the analytical gradient\n treshold : the numerical tolerance for the comparison\n split_out : option to check each output from a vectorial output\n\n Returns:\n ok : the status \n msg : message about the error\n \"\"\"\n if len(df.shape) == 1 or not split_out:\n nfd = norm(df_fd)\n if nfd < treshold: # In case df = 0\n err = norm(df_fd - df)\n else:\n err = norm(df_fd - df) / nfd\n if err < treshold:\n ok = True\n msg = 'Gradients are valid.'\n else:\n ok = False\n msg = 'Gradient not in bounds, error = ' + str(err) + '\\n'\n msg += 'df =\\n' + str(df) + '\\n'\n msg += 'df finite diff =\\n' + str(df_fd) + '\\n'\n msg += 'df-df_fd =\\n' + str(df - df_fd) + '\\n'\n else:\n ok = True\n err = 0.\n dim_out = df.shape[0]\n err_msg = 'Gradients are not valid due to an error in the output vector\\n'\n for n in range(dim_out):\n ndv = len(df_fd[n, :])\n nfd = norm(df_fd[n, :]) / ndv\n if nfd < treshold: # In case df = 0\n lerr = norm(df_fd[n, :] - df[n, :])\n else:\n lerr = norm(df_fd[n, :] - df[n, :]) / nfd\n err += lerr\n if lerr > treshold:\n ok = False\n err_msg += 'Error may come from output ' + \\\n str(n) + ' error = ' + str(lerr) + '\\n'\n if ok:\n msg = 'Gradients are valid.'\n else:\n msg = err_msg\n err = err / dim_out\n return ok, msg\n" ]
[ [ "pandas.DataFrame" ], [ "numpy.delete", "numpy.array" ], [ "numpy.array", "numpy.zeros" ], [ "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
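A minimal usage sketch for the FDValidGrad class in the row above. The test function, its analytical gradient, and the flat import path are illustrative assumptions (the row does not show the package layout); the constructor and compare() signatures follow the class definition itself:

import numpy as np
from FDValidGrad import FDValidGrad  # assumed import path; the repository layout is not shown above

def f(x):
    return np.dot(x, x)   # hypothetical test function: f(x) = ||x||^2

def df(x):
    return 2.0 * x        # its analytical gradient

grad_check = FDValidGrad(2, f, df, fd_step=1e-6)  # second-order finite-difference scheme
ok, df_fd, df_an = grad_check.compare(np.ones(3), treshold=1e-4, return_all=True)
assert ok  # finite-difference and analytical gradients agree within 'treshold'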
RamiroIsaJ/TR_KF_Interface
[ "306896920cbf563d2d96e64d3ca6c3c9a20e6610" ]
[ "Trk_def.py" ]
[ "import numpy as np\nimport cv2\nimport glob\nimport math\n\n\ndef f_sorted(files_, id_sys):\n symbol = '\\\\' if id_sys == 0 else '/'\n ids = []\n for f in files_:\n parts = f.split(symbol)\n name_i = parts[len(parts) - 1]\n ids.append(name_i.split('.')[0].split('_')[-1])\n ids_1 = list(map(int, ids))\n idx_1 = int(np.where(np.array(ids_1) == 1)[0])\n if len(ids[idx_1]) >= 2:\n ids = list(map(str, ids))\n ids.sort(key=str)\n else:\n ids = list(map(int, ids))\n ids.sort(key=int)\n file_r = []\n for i in range(len(files_)):\n parts = files_[i].split(symbol)\n name = parts[len(parts) - 1].split('.')\n exp = name[0].split('_')\n if len(exp) >= 2:\n n_exp = exp[0]\n for j in range(1, len(exp)-1):\n n_exp += '_' + exp[j]\n n_name = n_exp + '_' + str(ids[i]) + '.' + name[1]\n else:\n n_name = str(ids[i]) + '.' + name[1]\n\n if id_sys == 0:\n n_file = parts[0]\n else:\n n_file = (symbol + parts[0])\n for j in range(1, len(parts)-1):\n n_file += (parts[j] + symbol)\n n_file += n_name\n file_r.append(n_file)\n\n return file_r\n\n\ndef load_image_i(orig, i, type_, filenames, exp, id_sys):\n symbol = '\\\\' if id_sys == 0 else '/'\n if len(filenames) == 0:\n filenames = [img for img in glob.glob(orig+type_)]\n # filenames.sort()\n filenames = f_sorted(filenames, id_sys)\n \n if i < len(filenames):\n name = filenames[i]\n parts = name.split(symbol)\n exp, name_i = parts[len(parts)-2], parts[len(parts)-1]\n # read image\n image_ = cv2.imread(name)\n else:\n image_, name_i = [], []\n\n return filenames, image_, exp, name_i\n\n\ndef update_dir(path):\n path_s = path.split('/')\n cad, path_f = len(path_s), path_s[0]\n for p in range(1, cad):\n path_f += '\\\\' + path_s[p]\n return path_f\n\n\ndef bytes_(img, m, n):\n ima = cv2.resize(img, (m, n))\n return cv2.imencode('.png', ima)[1].tobytes()\n\n\ndef preprocessing(img):\n image_gray_ = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n clh = cv2.createCLAHE(clipLimit=5)\n clh_img = clh.apply(image_gray_)\n\n blurred = cv2.GaussianBlur(clh_img, (5, 5), 0)\n\n return clh_img, blurred\n\n\ndef show_features(img, features_):\n for i in features_:\n x, y = i.ravel()\n cv2.circle(img, (x, y), 3, (0, 0, 255), -1)\n\n return img\n\n\ndef features_img(img, v_th):\n ima_gray, final_ima = preprocessing(img)\n thresh = cv2.threshold(final_ima, v_th, 255, cv2.THRESH_TOZERO_INV)[1]\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\n binary = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=3)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n binary = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel, iterations=3)\n contours = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n features_ = []\n for c in contours:\n mts = cv2.moments(c)\n cx = int(mts[\"m10\"] / mts[\"m00\"])\n cy = int(mts[\"m01\"] / mts[\"m00\"])\n features_.append((cx, cy))\n\n features_ = np.asarray(sorted(features_, key=lambda k: [k[0], k[1]]))\n frame = show_features(img, features_)\n return features_, frame\n\n\ndef distance(x, y):\n return math.hypot(y[0] - x[0], y[1] - x[1])\n\n\ndef sort_features(last_f, curr_f, max_v, min_v):\n for i in range(len(last_f)):\n xy = last_f[i, :2]\n idx = None\n for kd in range(curr_f.shape[0]):\n dist = distance(xy, curr_f[int(kd), :])\n if max_v > dist > min_v:\n idx = kd\n break\n if idx is not None:\n last_f[i, 2:4] = curr_f[idx, :]\n last_f[i, 4], last_f[i, 5] = 1, 0\n else:\n last_f[i, 4], last_f[i, 5] = 0, last_f[i, 5] + 1\n\n return 
np.array(last_f)\n\n\ndef find_seq_feat(k, features_, tab_feat, max_v, min_v):\n if k == 0:\n tab_feat = np.append(features_, features_, axis=1)\n tab_feat = np.append(tab_feat, np.ones((len(features_), 2)), axis=1)\n else:\n tab_feat = sort_features(tab_feat, features_, max_v, min_v)\n\n idx = np.where((tab_feat[:, 4] == 1) & (tab_feat[:, 5] < 5))\n f_track = np.array(tab_feat[idx, :])\n f_track = f_track.reshape((f_track.shape[0] * f_track.shape[1]), f_track.shape[2])\n\n return tab_feat, f_track\n\n\ndef find_track_feat(k, features_, tab_feat, max_v, min_v):\n if k == 0:\n tab_feat = np.append(features_, features_, axis=1)\n tab_feat = np.append(tab_feat, np.ones((len(features_), 2)), axis=1)\n elif k == 10:\n tab_feat = sort_features(tab_feat, features_, max_v, min_v)\n idx = np.where((tab_feat[:, 4] == 0) & (tab_feat[:, 5] > 5))\n tab_feat = np.delete(tab_feat, idx, 0)\n else:\n tab_feat = sort_features(tab_feat, features_, max_v, min_v)\n\n return tab_feat\n\n\ndef tracking_feat(frame, tracker, f_track, delta):\n # update tracker\n tracker.update(f_track, delta)\n\n errors, move_dist = [], [0]\n n = len(tracker.tracks)\n for j in range(n):\n if len(tracker.tracks[j].trace) > 1:\n x = int(tracker.tracks[j].trace[-1][0, 0])\n y = int(tracker.tracks[j].trace[-1][0, 1])\n # compute error\n errors.append(distance(f_track[j, :], tracker.tracks[j].trace[-1][0, :]))\n # compute distances\n n1 = len(tracker.tracks[j].trace)\n move_dist.append(distance(tracker.tracks[j].trace[n1 - 2][0, :], tracker.tracks[j].trace[n1 - 1][0, :]))\n # graphics\n cv2.circle(frame, (x, y), 6, (255, 20, 25), -1)\n cv2.rectangle(frame, (x - 15, y - 15), (x + 15, y + 15), (0, 255, 0), 2)\n cv2.putText(frame, str(tracker.tracks[j].track_id), (x - 10, y - 20), 0, 0.5, (0, 55, 255), 2)\n cv2.circle(frame, (int(f_track[j, 0]), int(f_track[j, 1])), 6, (0, 0, 0), -1)\n r_mse = np.round(np.sqrt(np.sum(np.array(errors)) / n), 4)\n r_mse = 100 if r_mse == 0 else r_mse\n mean_d = np.round(np.mean(np.array(move_dist)), 4)\n\n return frame, r_mse, move_dist, mean_d\n" ]
[ [ "numpy.append", "numpy.array", "numpy.where", "numpy.delete" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WISDEM/wake-exchange
[ "1e9bb1266799517afeca0358c3237f9250bacfa4" ]
[ "src/wakeexchange/gauss.py" ]
[ "\"\"\"\ngauss.py\nCreated by Jared J. Thomas, Jul. 2016.\nBrigham Young University\n\"\"\"\n\nfrom openmdao.api import IndepVarComp, Group\nfrom gaussianwake.gaussianwake import GaussianWake\nimport numpy as np\n\n\ndef add_gauss_params_IndepVarComps(openmdao_object, nRotorPoints=1):\n\n # openmdao_object.add('bp0', IndepVarComp('model_params:ke', 0.052, pass_by_object=True))\n # openmdao_object.add('bp1', IndepVarComp('model_params:rotation_offset_angle', val=1.56, units='deg',\n # pass_by_object=True))\n # openmdao_object.add('bp2', IndepVarComp('model_params:spread_angle', val=5.84, units='deg', pass_by_object=True))\n\n # params for Bastankhah model with yaw\n openmdao_object.add('bp3', IndepVarComp('model_params:ky', 0.022, pass_by_object=True), promotes=['*'])\n openmdao_object.add('bp4', IndepVarComp('model_params:kz', 0.022, pass_by_object=True), promotes=['*'])\n openmdao_object.add('bp5', IndepVarComp('model_params:alpha', 2.32, pass_by_object=True), promotes=['*'])\n openmdao_object.add('bp6', IndepVarComp('model_params:beta', 0.154, pass_by_object=True), promotes=['*'])\n openmdao_object.add('bp7', IndepVarComp('model_params:I', 0.075, pass_by_object=True), promotes=['*'])\n openmdao_object.add('bp8', IndepVarComp('model_params:wake_combination_method', 0, pass_by_object=True),\n promotes=['*'])\n openmdao_object.add('bp9', IndepVarComp('model_params:ti_calculation_method', 0, pass_by_object=True),\n promotes=['*'])\n openmdao_object.add('bp10', IndepVarComp('model_params:calc_k_star', False, pass_by_object=True), promotes=['*'])\n openmdao_object.add('bp11', IndepVarComp('model_params:sort', True, pass_by_object=True), promotes=['*'])\n openmdao_object.add('bp12', IndepVarComp('model_params:RotorPointsY', val=np.zeros(nRotorPoints), pass_by_object=True,\n desc='rotor swept area sampling Y points centered at (y,z)=(0,0) normalized by rotor radius'),\n promotes=['*'])\n openmdao_object.add('bp13', IndepVarComp('model_params:RotorPointsZ', val=np.zeros(nRotorPoints), pass_by_object=True,\n desc='rotor swept area sampling Z points centered at (y,z)=(0,0) normalized by rotor radius'),\n promotes=['*'])\n openmdao_object.add('bp14', IndepVarComp('model_params:z_ref', val=80.0, pass_by_object=True,\n desc='wind speed measurement height'), promotes=['*'])\n openmdao_object.add('bp15', IndepVarComp('model_params:z_0', val=0.0, pass_by_object=True,\n desc='ground height'), promotes=['*'])\n openmdao_object.add('bp16', IndepVarComp('model_params:shear_exp', val=0.15, pass_by_object=True,\n desc='wind shear calculation exponent'), promotes=['*'])\n\n openmdao_object.add('bp17', IndepVarComp('model_params:opt_exp_fac', val=1.0, pass_by_object=True,\n desc='opt_exp_fac'), promotes=['*'])\n openmdao_object.add('bp18', IndepVarComp('model_params:print_ti', val=False, pass_by_object=True,\n desc='print TI values to a file for use in plotting etc'), promotes=['*'])\n openmdao_object.add('bp19', IndepVarComp('model_params:wake_model_version', val=2016, pass_by_object=True,\n desc='choose whether to use Bastankhah 2014 or 2016'), promotes=['*'])\n\n # openmdao_object.add('bp8', IndepVarComp('model_params:yaw_mode', val='bastankhah', pass_by_object=True))\n # openmdao_object.add('bp9', IndepVarComp('model_params:spread_mode', val='bastankhah', pass_by_object=True))\n\nclass gauss_wrapper(Group):\n\n def __init__(self, nTurbs, direction_id=0, wake_model_options=None):\n super(gauss_wrapper, self).__init__()\n\n self.add('f_1', GaussianWake(nTurbines=nTurbs, direction_id=direction_id, 
options=wake_model_options),\n promotes=['*'])\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hemantghuge/TensorFlow
[ "09f5609f0fd282943defd4608ee90bb6883a394b" ]
[ "tensorflow/python/ops/numpy_ops/np_interop_test.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for interop between TF ops, numpy_ops, and numpy methods.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as onp\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.ops import numpy_ops as np\n\n\n# Tests for code snippet put in README.md\nclass ReadmeTest(tf.test.TestCase):\n\n def testBroadcastAdd(self):\n x_np = np.ones([2, 1]) + np.ones([1, 2])\n x_onp = onp.ones([2, 1]) + onp.ones([1, 2])\n self.assertAllClose(x_onp, x_np)\n\n def testTypePromotion(self):\n x_np = np.ones([1, 2], dtype=np.int16) + np.ones([2, 1], dtype=np.uint8)\n x_onp = np.ones([1, 2], dtype=np.int16) + np.ones([2, 1], dtype=np.uint8)\n self.assertEqual(x_onp.dtype, x_np.dtype)\n self.assertAllClose(x_onp, x_np)\n\n def testTFInterop(self):\n x_np = np.sum(np.ones([1, 2]) + tf.ones([2, 1]))\n x_onp = onp.sum(onp.ones([1, 2]) + onp.ones([2, 1]))\n self.assertAllClose(x_onp, x_np)\n\n def testOnpInterop(self):\n x_np = onp.sum(np.ones([1, 2]) + onp.ones([2, 1]))\n x_onp = onp.sum(onp.ones([1, 2]) + onp.ones([2, 1]))\n self.assertAllClose(x_onp, x_np)\n\n def testDevice(self):\n if tf.test.is_gpu_available():\n with tf.device('GPU:0'):\n x = np.ones([1, 2])\n self.assertIn('GPU', tf.convert_to_tensor(x).device)\n with tf.device('CPU:0'):\n x = np.ones([1, 2])\n self.assertIn('CPU', tf.convert_to_tensor(x).device)\n\n def testFunction(self):\n\n @tf.function\n def f(x, y):\n return np.sum(x + y)\n\n x_np = f(np.ones([1, 2]), tf.ones([2, 1]))\n x_onp = onp.sum(onp.ones([1, 2]) + onp.ones([2, 1]))\n self.assertAllClose(x_onp, x_np)\n\n\nclass InteropTest(tf.test.TestCase):\n\n def setUp(self):\n super(InteropTest, self).setUp()\n physical_devices = tf.config.list_physical_devices('CPU')\n configs = tf.config.get_logical_device_configuration(physical_devices[0])\n if configs is None:\n logical_devices = [\n tf.config.LogicalDeviceConfiguration() for _ in range(3)\n ]\n tf.config.set_logical_device_configuration(physical_devices[0],\n logical_devices)\n\n def testGradientTapeInterop(self):\n with tf.GradientTape() as t:\n x = np.asarray(3.0)\n y = np.asarray(2.0)\n\n t.watch([x, y])\n\n xx = 2 * x\n yy = 3 * y\n\n dx, dy = t.gradient([xx, yy], [x, y])\n\n self.assertIsInstance(dx, np.ndarray)\n self.assertIsInstance(dy, np.ndarray)\n self.assertAllClose(dx, 2.0)\n self.assertAllClose(dy, 3.0)\n\n def testGradientTapeNoneGradients(self):\n y = np.asarray(2.0)\n\n with tf.GradientTape() as t:\n x = np.asarray(3.0)\n t.watch([x])\n z = 2 * x\n\n dz = t.gradient(z, y)\n\n self.assertIsNone(dz)\n\n def testCondInterop(self):\n x = np.asarray(3.0)\n\n def fn(x):\n x_plus_1 = tf.cond(x > 0, lambda: x + 1, lambda: x + 2)\n x_plus_2 = tf.cond(x < 0, lambda: x + 1, lambda: x + 2)\n\n return x_plus_1, x_plus_2\n\n raw_x_plus_1, 
raw_x_plus_2 = fn(x)\n fn_x_plus_1, fn_x_plus_2 = tf.function(fn)(x)\n\n self.assertAllClose(raw_x_plus_1, x + 1)\n self.assertAllClose(raw_x_plus_2, x + 2)\n\n self.assertAllClose(fn_x_plus_1, x + 1)\n self.assertAllClose(fn_x_plus_2, x + 2)\n\n def testWhileInterop(self):\n\n def fn():\n x = np.asarray(0)\n c = lambda x: x < 10000\n b = lambda x: [x + 1]\n return tf.while_loop(c, b, [x], parallel_iterations=20)\n\n self.assertEqual(10000, fn()[0])\n self.assertEqual(10000, tf.function(fn)()[0])\n\n def testTensorTFNPArrayInterop(self):\n arr = np.asarray(0.)\n t = tf.constant(10.)\n\n arr_plus_t = arr + t\n t_plus_arr = t + arr\n\n self.assertIsInstance(arr_plus_t, tf.Tensor)\n self.assertIsInstance(t_plus_arr, tf.Tensor)\n self.assertEqual(10., arr_plus_t.numpy())\n self.assertEqual(10., t_plus_arr.numpy())\n\n def testTensorTFNPOp(self):\n t = tf.constant(10.)\n\n sq = np.square(t)\n self.assertIsInstance(sq, np.ndarray)\n self.assertEqual(100., sq)\n\n def testTFNPArrayTFOpInterop(self):\n arr = np.asarray(10.)\n\n # TODO(nareshmodi): Test more ops.\n sq = tf.square(arr)\n self.assertIsInstance(sq, tf.Tensor)\n self.assertEqual(100., sq.numpy())\n\n def testTFNPArrayNPOpInterop(self):\n arr = np.asarray([10.])\n\n # TODO(nareshmodi): Test more ops.\n sq = onp.square(arr)\n self.assertIsInstance(sq, onp.ndarray)\n self.assertEqual(100., sq[0])\n\n # TODO(nareshmodi): Fails since the autopacking code doesn't use\n # nest.flatten.\n\n\n# def testAutopacking(self):\n# arr1 = np.asarray(1.)\n# arr2 = np.asarray(2.)\n# arr3 = np.asarray(3.)\n# t = ops.convert_to_tensor_v2([arr1, arr2, arr3])\n\n# self.assertEqual(t.numpy(), [1., 2., 3.])\n\n def testDistStratInterop(self):\n strategy = tf.distribute.MirroredStrategy(\n devices=['CPU:0', 'CPU:1', 'CPU:2'])\n\n multiplier = np.asarray(5.)\n\n @tf.function\n def run():\n ctx = tf.distribute.get_replica_context()\n val = np.asarray(ctx.replica_id_in_sync_group)\n return val * multiplier\n\n distributed_values = strategy.run(run)\n reduced = strategy.reduce(\n tf.distribute.ReduceOp.SUM, distributed_values, axis=None)\n\n values = strategy.experimental_local_results(distributed_values)\n\n # Note that this should match the number of virtual CPUs.\n self.assertLen(values, 3)\n self.assertIsInstance(values[0], np.ndarray)\n self.assertIsInstance(values[1], np.ndarray)\n self.assertIsInstance(values[2], np.ndarray)\n self.assertAllClose(values[0], 0)\n self.assertAllClose(values[1], 5)\n self.assertAllClose(values[2], 10)\n\n # \"strategy.reduce\" doesn't rewrap in ndarray.\n # self.assertIsInstance(reduced, np.ndarray)\n self.assertAllClose(reduced, 15)\n\n def testPyFuncInterop(self):\n def py_func_fn(a, b):\n return a + b\n\n @tf.function\n def fn(a, b):\n result = tf.py_function(py_func_fn, [a, b], a.dtype)\n return np.asarray(result)\n\n a = np.asarray(1.)\n b = np.asarray(2.)\n\n result = fn(a, b)\n self.assertIsInstance(result, np.ndarray)\n self.assertAllClose(result, 3.)\n\n def testDatasetInterop(self):\n values = [1, 2, 3, 4, 5, 6]\n values_as_array = np.asarray(values)\n\n # Tensor dataset\n dataset = tf.data.Dataset.from_tensors(values_as_array)\n\n for value, value_from_dataset in zip([values_as_array], dataset):\n self.assertIsInstance(value_from_dataset, np.ndarray)\n self.assertAllEqual(value_from_dataset, value)\n\n # Tensor slice dataset\n dataset = tf.data.Dataset.from_tensor_slices(values_as_array)\n\n for value, value_from_dataset in zip(values, dataset):\n self.assertIsInstance(value_from_dataset, np.ndarray)\n 
self.assertAllEqual(value_from_dataset, value)\n\n # # TODO(nareshmodi): as_numpy_iterator() doesn't work.\n # items = list(dataset.as_numpy_iterator())\n\n # Map over a dataset.\n dataset = dataset.map(lambda x: np.add(x, 1))\n\n for value, value_from_dataset in zip(values, dataset):\n self.assertIsInstance(value_from_dataset, np.ndarray)\n self.assertAllEqual(value_from_dataset, value + 1)\n\n # Batch a dataset.\n dataset = tf.data.Dataset.from_tensor_slices(values_as_array).batch(2)\n\n for value, value_from_dataset in zip([[1, 2], [3, 4], [5, 6]], dataset):\n self.assertIsInstance(value_from_dataset, np.ndarray)\n self.assertAllEqual(value_from_dataset, value)\n\n def testKerasInterop(self):\n # Return an ndarray from the model.\n inputs = tf.keras.layers.Input(shape=(10,))\n output_layer = tf.keras.layers.Lambda(np.square)(inputs)\n model = tf.keras.Model([inputs], output_layer)\n\n values = onp.arange(10, dtype=onp.float32)\n values_as_array = np.asarray(values)\n\n result = model(values)\n self.assertIsInstance(result, np.ndarray)\n self.assertAllClose(result, onp.square(values))\n\n result = model(values_as_array)\n self.assertIsInstance(result, np.ndarray)\n self.assertAllClose(result, onp.square(values))\n\n def testKerasInteropSequential(self):\n class ProjectionLayer(tf.keras.layers.Layer):\n \"\"\"Linear projection layer using TF NumPy.\"\"\"\n\n def __init__(self, units):\n super(ProjectionLayer, self).__init__()\n self._units = units\n\n def build(self, input_shape):\n stddev = np.sqrt(self._units).astype(np.float32)\n initial_value = np.random.randn(input_shape[1], self._units).astype(\n np.float32) / stddev\n # Note that TF NumPy can interoperate with tf.Variable.\n self.w = tf.Variable(initial_value, trainable=True)\n\n def call(self, inputs):\n return np.matmul(inputs, self.w)\n\n model = tf.keras.Sequential(\n [tf.keras.layers.Dense(100), ProjectionLayer(2)])\n output = model.call(np.random.randn(10, 100).astype(np.float32))\n\n self.assertIsInstance(output, np.ndarray)\n\n dense_layer = tf.keras.layers.Dense(100)\n output = dense_layer(np.random.randn(10, 100).astype(np.float32))\n\n def testPForInterop(self):\n def outer_product(a):\n return np.tensordot(a, a, 0)\n\n batch_size = 100\n a = np.ones((batch_size, 32, 32))\n c = tf.vectorized_map(outer_product, a)\n\n self.assertIsInstance(c, np.ndarray)\n self.assertEqual(c.shape, (batch_size, 32, 32, 32, 32))\n\n c = tf.vectorized_map(lambda x: x.T, a)\n\n self.assertIsInstance(c, np.ndarray)\n self.assertEqual(c.shape, (batch_size, 32, 32))\n\n def testJacobian(self):\n with tf.GradientTape() as g:\n x = np.asarray([1., 2.])\n y = np.asarray([3., 4.])\n g.watch(x)\n g.watch(y)\n z = x * x * y\n\n jacobian = g.jacobian(z, [x, y])\n answer = [tf.linalg.diag(2 * x * y), tf.linalg.diag(x * x)]\n\n self.assertIsInstance(jacobian[0], np.ndarray)\n self.assertIsInstance(jacobian[1], np.ndarray)\n self.assertAllClose(jacobian, answer)\n\n def testBatchJacobian(self):\n with tf.GradientTape() as g:\n x = np.asarray([[1., 2.], [3., 4.]])\n y = np.asarray([[3., 4.], [5., 6.]])\n g.watch(x)\n g.watch(y)\n z = x * x * y\n\n batch_jacobian = g.batch_jacobian(z, x)\n answer = tf.stack(\n [tf.linalg.diag(2 * x[0] * y[0]),\n tf.linalg.diag(2 * x[1] * y[1])])\n\n self.assertIsInstance(batch_jacobian, np.ndarray)\n self.assertAllClose(batch_jacobian, answer)\n\n def testForwardprop(self):\n x = np.asarray([1., 2.])\n xt = np.asarray([3., 4.])\n with tf.autodiff.ForwardAccumulator(x, xt) as acc:\n y = x * 2.\n yt = acc.jvp(y)\n 
self.assertIsInstance(yt, np.ndarray)\n self.assertAllClose([6., 8.], yt)\n z = np.asarray([1.])\n self.assertIsNone(acc.jvp(z))\n\n def testMapFn(self):\n x = np.asarray([1., 2.])\n mapped_x = tf.map_fn(lambda x: (x[0]+1, x[1]+1), (x, x))\n\n self.assertIsInstance(mapped_x[0], np.ndarray)\n self.assertIsInstance(mapped_x[1], np.ndarray)\n self.assertAllClose(mapped_x[0], [2., 3.])\n self.assertAllClose(mapped_x[1], [2., 3.])\n\n\nclass FunctionTest(InteropTest):\n\n def testFunctionInterop(self):\n x = np.asarray(3.0)\n y = np.asarray(2.0)\n\n add = lambda x, y: x + y\n add_fn = tf.function(add)\n\n raw_result = add(x, y)\n fn_result = add_fn(x, y)\n\n self.assertIsInstance(raw_result, np.ndarray)\n self.assertIsInstance(fn_result, np.ndarray)\n self.assertAllClose(raw_result, fn_result)\n\n def testLen(self):\n\n @tf.function\n def f(x):\n # Note that shape of input to len is data dependent.\n return len(np.where(x)[0])\n\n t = np.asarray([True, False, True])\n with self.assertRaises(TypeError):\n f(t)\n\n def testIter(self):\n\n @tf.function\n def f(x):\n y, z = x\n return y, z\n\n with self.assertRaises(TypeError):\n f(np.asarray([3, 4]))\n\n def testIndex(self):\n\n @tf.function\n def f(x):\n return [0, 1][x]\n\n with self.assertRaises(TypeError):\n f(np.asarray([1]))\n\n\nclass VariableTest(InteropTest):\n\n def test(self):\n tf_var = tf.Variable(2.0)\n value = np.square(tf_var)\n self.assertIsInstance(value, np.ndarray)\n self.assertAllClose(4.0, value)\n with tf.control_dependencies([tf_var.assign_add(value)]):\n tf_var_value = tf_var.read_value()\n self.assertAllClose(6.0, tf_var_value)\n\n\nif __name__ == '__main__':\n tf.compat.v1.enable_eager_execution()\n tf.test.main()\n" ]
[ [ "tensorflow.python.ops.numpy_ops.random.randn", "tensorflow.compat.v2.cond", "tensorflow.python.ops.numpy_ops.sum", "tensorflow.python.ops.numpy_ops.sqrt", "tensorflow.compat.v2.linalg.diag", "tensorflow.compat.v2.autodiff.ForwardAccumulator", "tensorflow.python.ops.numpy_ops.square", "tensorflow.compat.v2.keras.Model", "tensorflow.compat.v2.convert_to_tensor", "tensorflow.python.ops.numpy_ops.asarray", "tensorflow.compat.v2.ones", "tensorflow.compat.v2.map_fn", "tensorflow.compat.v2.while_loop", "numpy.square", "tensorflow.python.ops.numpy_ops.matmul", "tensorflow.compat.v2.data.Dataset.from_tensor_slices", "tensorflow.compat.v2.py_function", "numpy.arange", "tensorflow.python.ops.numpy_ops.add", "tensorflow.compat.v2.vectorized_map", "tensorflow.compat.v2.distribute.MirroredStrategy", "tensorflow.python.ops.numpy_ops.where", "tensorflow.compat.v2.data.Dataset.from_tensors", "tensorflow.compat.v2.distribute.get_replica_context", "tensorflow.compat.v2.config.list_physical_devices", "tensorflow.compat.v2.Variable", "tensorflow.compat.v2.function", "tensorflow.python.ops.numpy_ops.tensordot", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.square", "tensorflow.python.ops.numpy_ops.ones", "tensorflow.compat.v2.config.get_logical_device_configuration", "tensorflow.compat.v2.constant", "tensorflow.compat.v2.test.is_gpu_available", "tensorflow.compat.v2.compat.v1.enable_eager_execution", "tensorflow.compat.v2.device", "tensorflow.compat.v2.GradientTape", "tensorflow.compat.v2.keras.layers.Dense", "numpy.ones", "tensorflow.compat.v2.keras.layers.Lambda", "tensorflow.compat.v2.config.LogicalDeviceConfiguration", "tensorflow.compat.v2.keras.layers.Input", "tensorflow.compat.v2.config.set_logical_device_configuration" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iliasprc/IDPMetagenome
[ "519cec77bb55eb91dbb7b243a2d80999742c033d", "519cec77bb55eb91dbb7b243a2d80999742c033d" ]
[ "idp_comparison/test_idp_predictors.py", "tests/ssltrain.py" ]
[ "import argparse\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport torch\nimport torch.nn as nn\nfrom models.rnn import IDP_test_rnn\nfrom models.transformer import IDP_compare_Transformer\n\nfrom idp_methods.utils import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-b', '--batch_size', type=int, default=4, help='batch size for training')\nparser.add_argument('--dataset', type=str, default=\"d723\", help='dataset name')\nparser.add_argument('--epochs', type=int, default=50, help='total number of epochs')\nparser.add_argument('--test-predictor', type=str, default='seg',\n                    choices=['prediction-disorder-iupl', 'prediction-disorder-iups',\n                             'prediction-disorder-espN', 'prediction-disorder-espX', 'prediction-disorder-espD',\n                             'prediction-disorder-glo', 'cast', 'seg'])\n\nargs = parser.parse_args()\n\nSEED = 42\ntorch.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nnp.random.seed(SEED)\ntorch.cuda.manual_seed(SEED)\ndataset = args.dataset\n\nif dataset == 'd723':\n    train_dataset = np.load('./results/mobidb/d723_train2.npy',\n                            allow_pickle=True).item()\n\n    val_dataset = np.load('./results/mobidb/d723_test2.npy', allow_pickle=True).item()\n    print(val_dataset['0'].keys())\n    predictors = ['prediction-disorder-iupl', 'prediction-disorder-iups',\n                  'prediction-disorder-espN', 'prediction-disorder-espX', 'prediction-disorder-espD',\n                  'prediction-disorder-glo', 'cast', 'seg']\nelif dataset == 'mxd494':\n\n    train_dataset = np.load('./results/mobidb/mxd494_train_pred3.npy',\n                            allow_pickle=True).item()\n\n    val_dataset = np.load('./results/mobidb/mxd494_val_pred3.npy', allow_pickle=True).item()\n    print(val_dataset['0'].keys())\n    predictors = ['prediction-disorder-iupl', 'prediction-disorder-iups',\n                  'prediction-disorder-espN', 'prediction-disorder-espX', 'prediction-disorder-espD',\n                  'prediction-disorder-glo', 'cast', 'seg', 'fldpnn']\n\ntest_predictor = args.test_predictor\npredictors.remove(test_predictor)\ntrain_predictors = predictors\nassert len(train_predictors) == len(predictors)\n\n\ndef next_number(x, N=20):\n    # round x up to the next multiple of N\n    if x % N:\n        return x + (N - x % N)\n    else:\n        return x\n\n\nm = IDP_test_rnn(input_channels=len(train_predictors))\nm = IDP_compare_Transformer(input_channels=len(train_predictors))\ntrain_X = []\ntrainY = []\nfor sample in train_dataset:\n    # print(train_dataset[sample].keys())\n    sample_data = torch.tensor([])\n    for preds_ in train_predictors:\n        data = torch.from_numpy(train_dataset[sample][preds_]).unsqueeze(0).float()\n        sample_data = torch.cat([sample_data, data])\n\n    train_X.append(sample_data.transpose(0, 1).float())\n    trainY.append(torch.from_numpy(train_dataset[sample][test_predictor]).unsqueeze(0).transpose(0, 1).float())\n    # print(torch.from_numpy(train_dataset[sample][test_predictor]).unsqueeze(0).shape,sample_data.shape)\nval_X = []\nvalY = []\nfor sample in val_dataset:\n    # print(val_dataset[sample].keys())\n    sample_data = torch.tensor([])\n    for preds_ in train_predictors:\n        data = torch.from_numpy(val_dataset[sample][preds_]).unsqueeze(0).float()\n        sample_data = torch.cat([sample_data, data])\n\n    val_X.append(sample_data.transpose(0, 1).float())\n    valY.append(torch.from_numpy(val_dataset[sample][test_predictor]).unsqueeze(0).transpose(0, 1).float())\n\nEPOCHS = 50\noptimizer = torch.optim.Adam(m.parameters(), lr=0.5e-3)\nuse_cuda = torch.cuda.is_available()\n\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\nm = m.to(device)\nloss = 
nn.CrossEntropyLoss()\nfor i in range(EPOCHS):\n    train_loss = 0.0\n    val_loss = 0.0\n    yhat = []\n    y = []\n    for batchidx in range(len(train_X)):\n        sample = train_X[batchidx].to(device)\n        out = m(sample.unsqueeze(0)).squeeze(0)\n        target = trainY[batchidx].squeeze(-1).to(device)\n        # print(target.shape,out.shape)\n        loss_sclar = loss(out, target.long())\n        loss_sclar.backward()\n        optimizer.step()\n        optimizer.zero_grad()\n        train_loss += loss_sclar.item()\n        output = torch.softmax(out, dim=-1)  # .squeeze()\n        # print(output.shape)\n        _, output = torch.max(output, 1)\n        # print(output.shape)\n        y += target.squeeze().detach().cpu().numpy().tolist()\n        yhat += output.tolist()\n    metrics_, _ = dataset_metrics(yhat, y)\n\n    print(f'EPOCH {i} Train Loss {train_loss / (batchidx + 1):.4f}')\n    print(f'EPOCH TRAIN METRICS {i}\\n{metrics_}')\n    train_loss = 0.0\n    val_loss = 0.0\n    yhat = []\n    y = []\n    for batchidx in range(len(val_X)):\n        sample = val_X[batchidx].to(device)\n        # evaluation only: no backward pass or optimizer update on validation data\n        with torch.no_grad():\n            out = m(sample.unsqueeze(0)).squeeze(0)\n            target = valY[batchidx].squeeze(-1).to(device)\n            loss_sclar = loss(out, target.long())\n        val_loss += loss_sclar.item()\n        output = torch.softmax(out, dim=-1)  # .squeeze()\n        _, output = torch.max(output, 1)\n        y += target.squeeze().detach().cpu().numpy().tolist()\n        yhat += output.tolist()\n    metrics_, _ = dataset_metrics(yhat, y)\n\n    print(f'EPOCH {i} Val Loss {val_loss / (batchidx + 1):.4f}')\n    print(f'EPOCH VALIDATION METRICS {i}\\n{metrics_}')\n    # print(out.shape,sample.shape)\n", "import datetime\nimport os\nimport shutil\nimport torch.nn as nn\nimport torch\nfrom omegaconf import OmegaConf\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom dataloaders.dataset import loaders\nfrom trainer.logger import Logger\nfrom trainer.util import reproducibility, select_model, select_optimizer, load_checkpoint, get_arguments\n\n\ndef main():\n    args = get_arguments()\n    myargs = []  # getopts(sys.argv)\n    now = datetime.datetime.now()\n    cwd = os.getcwd()\n    if len(myargs) > 0:\n        if 'c' in myargs:\n            config_file = myargs['c']\n    else:\n        config_file = '../config/sslconfig.yaml'\n\n    config = OmegaConf.load(os.path.join(cwd, config_file))['trainer']\n    config.cwd = str(cwd)\n    reproducibility(config)\n    dt_string = now.strftime(\"%d_%m_%Y_%H.%M.%S\")\n    cpkt_fol_name = os.path.join(config.cwd,\n                                 f'checkpoints/SSL/dataset_{config.dataset.name}/model_{config.model.name}/date_'\n                                 f'{dt_string}')\n\n    log = Logger(path=cpkt_fol_name, name='LOG').get_logger()\n\n    log.info(f\"Checkpoint folder {cpkt_fol_name}\")\n    log.info(f\"date and time = {dt_string}\")\n\n    log.info(f'pyTorch VERSION:{torch.__version__}', )\n    log.info(f'CUDA VERSION')\n\n    log.info(f'CUDNN VERSION:{torch.backends.cudnn.version()}')\n    log.info(f'Number CUDA Devices: {torch.cuda.device_count()}')\n\n    if args.tensorboard:\n\n        # writer_path = os.path.join(config.save,\n        #                            'checkpoints/model_' + config.model.name + '/dataset_' + config.dataset.name +\n        #                            '/date_' + dt_string + '/runs/')\n\n        writer = SummaryWriter(cpkt_fol_name + '/runs/')\n    else:\n        writer = None\n\n    use_cuda = torch.cuda.is_available()\n\n    device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n    log.info(f'device: {device}')\n\n    training_generator, val_generator, classes = loaders(args=config, dataset_name=config.dataset.name)\n    n_classes = len(classes)\n\n    model = select_model(config, 
n_classes)\n\n log.info(f\"{model}\")\n print(n_classes)\n if (config.load):\n # model.head = torch.nn.Linear(128, 256)\n # model.embed = nn.Embedding(256, 128)\n # dim=256\n # model.embed = nn.Sequential(nn.Embedding(256, dim), TextTokenizer(word_embedding_dim=dim,\n # embedding_dim=dim,\n # n_output_channels=dim,\n # kernel_size=1,\n # stride=1,\n # padding=0))\n pth_file, _ = load_checkpoint( config.pretrained_cpkt, model, strict=True, load_seperate_layers=False)\n model.head = torch.nn.Linear(128, 22)\n model.embed = nn.Embedding(22, 128)\n print(model)\n\n # embed_w = torch.load('/home/iliask/PycharmProjects/MScThesis/checkpoints/SSL/dataset_DM/_embed.pth')\n # print(embed_w.shape)\n # model.embed.from_pretrained(embed_w)\n # model.embed.requires_grad = False\n #exit()\n\n # model.embed = nn.Sequential(nn.Embedding(22, dim), TextTokenizer(word_embedding_dim=dim,\n # embedding_dim=dim,\n # n_output_channels=dim,\n # kernel_size=1,\n # stride=1,\n # padding=0))\n\n\n else:\n pth_file = None\n if (config.cuda and use_cuda):\n if torch.cuda.device_count() > 1:\n log.info(f\"Let's use {torch.cuda.device_count()} GPUs!\")\n\n model = torch.nn.DataParallel(model)\n model.to(device)\n\n optimizer, scheduler = select_optimizer(model, config['model'], None)\n\n # log.info(f'{model}')\n log.info(f\"Checkpoint Folder {cpkt_fol_name} \")\n shutil.copy(os.path.join(config.cwd, config_file), cpkt_fol_name)\n\n from trainer.ssltrainer import SSLTrainer\n trainer = SSLTrainer(config, model=model, optimizer=optimizer,\n data_loader=training_generator, writer=writer, logger=log,\n valid_data_loader=val_generator, class_dict=classes,\n lr_scheduler=scheduler,\n checkpoint_dir=cpkt_fol_name)\n trainer.train()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.softmax", "torch.max", "torch.cuda.manual_seed", "torch.cat", "torch.manual_seed", "torch.from_numpy", "torch.tensor", "torch.cuda.is_available", "torch.device" ], [ "torch.backends.cudnn.version", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.DataParallel", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.is_available", "torch.device", "torch.cuda.device_count" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vm6502q/ProjectQ
[ "1eac4b1f529551dfc1668443eba0c68dee54120b" ]
[ "projectq/setups/decompositions/rz2rx_test.py" ]
[ "# Copyright 2017 ProjectQ-Framework (www.projectq.ch)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"Tests for projectq.setups.decompositions.rz2rx.py\"\n\nimport math\nimport numpy as np\nimport pytest\n\nfrom projectq import MainEngine\nfrom projectq.backends import Simulator\nfrom projectq.cengines import (AutoReplacer, DecompositionRuleSet, DummyEngine,\n InstructionFilter)\nfrom projectq.meta import Control\nfrom projectq.ops import Measure, Rz\n\nfrom . import rz2rx\n\ntolerance = 1e-6\n\n\ndef test_recognize_correct_gates():\n \"\"\" Test that recognize_RzNoCtrl recognizes ctrl qubits \"\"\"\n saving_backend = DummyEngine(save_commands=True)\n eng = MainEngine(backend=saving_backend)\n qubit = eng.allocate_qubit()\n ctrl_qubit = eng.allocate_qubit()\n eng.flush()\n Rz(0.3) | qubit\n with Control(eng, ctrl_qubit):\n Rz(0.4) | qubit\n eng.flush(deallocate_qubits=True)\n assert rz2rx._recognize_RzNoCtrl(saving_backend.received_commands[3])\n assert not rz2rx._recognize_RzNoCtrl(saving_backend.received_commands[4])\n\n\ndef rz_decomp_gates(eng, cmd):\n \"\"\" Test that cmd.gate is the gate Rz \"\"\"\n g = cmd.gate\n if isinstance(g, Rz):\n return False\n else:\n return True\n\n\n# ------------test_decomposition function-------------#\n# Creates two engines, correct_eng and test_eng.\n# correct_eng implements Rz(angle) gate.\n# test_eng implements the decomposition of the Rz(angle) gate.\n# correct_qb and test_qb represent results of these two engines, respectively.\n#\n# The decomposition only needs to produce the same state in a qubit up to a\n# global phase.\n# test_vector and correct_vector represent the final wave states of correct_qb\n# and test_qb.\n#\n# The dot product of correct_vector and test_vector should have absolute value\n# 1, if the two vectors are the same up to a global phase.\n\n\[email protected](\"angle\", [0, math.pi, 2 * math.pi, 4 * math.pi, 0.5])\ndef test_decomposition(angle):\n \"\"\"\n Test that this decomposition of Rz produces correct amplitudes\n\n Note that this function tests each DecompositionRule in\n rz2rx.all_defined_decomposition_rules\n \"\"\"\n decomposition_rule_list = rz2rx.all_defined_decomposition_rules\n for rule in decomposition_rule_list:\n for basis_state in ([1, 0], [0, 1]):\n correct_dummy_eng = DummyEngine(save_commands=True)\n correct_eng = MainEngine(backend=Simulator(),\n engine_list=[correct_dummy_eng])\n\n rule_set = DecompositionRuleSet(rules=[rule])\n test_dummy_eng = DummyEngine(save_commands=True)\n test_eng = MainEngine(backend=Simulator(),\n engine_list=[\n AutoReplacer(rule_set),\n InstructionFilter(rz_decomp_gates),\n test_dummy_eng\n ])\n\n correct_qb = correct_eng.allocate_qubit()\n Rz(angle) | correct_qb\n correct_eng.flush()\n\n test_qb = test_eng.allocate_qubit()\n Rz(angle) | test_qb\n test_eng.flush()\n\n # Create empty vectors for the wave vectors for the correct and\n # test qubits\n correct_vector = np.zeros((2, 1), dtype=np.complex_)\n test_vector = np.zeros((2, 1), dtype=np.complex_)\n\n i = 0\n for 
fstate in ['0', '1']:\n test = test_eng.backend.get_amplitude(fstate, test_qb)\n correct = correct_eng.backend.get_amplitude(fstate, correct_qb)\n correct_vector[i] = correct\n test_vector[i] = test\n i += 1\n\n # Necessary to transpose vector to use matrix dot product\n test_vector = test_vector.transpose()\n # Remember that transposed vector should come first in product\n vector_dot_product = np.dot(test_vector, correct_vector)\n\n assert np.absolute(vector_dot_product) == pytest.approx(1,\n rel=tolerance,\n abs=tolerance)\n\n Measure | test_qb\n Measure | correct_qb\n" ]
[ [ "numpy.dot", "numpy.absolute", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
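The comment block above test_decomposition in the row above explains that two states count as equal when their wave vectors differ only by a global phase, detected by an inner product of absolute value 1. A standalone numpy illustration of that criterion (not part of the repository's code; it uses the conjugated inner product np.vdot, whereas the test builds its check with an explicit transpose and np.dot):

import numpy as np

v = np.array([1.0, 1.0j]) / np.sqrt(2)      # a reference single-qubit state
w = np.exp(1j * 0.7) * v                    # the same state up to a global phase
assert np.isclose(abs(np.vdot(w, v)), 1.0)  # |<w|v>| == 1, so w and v differ only by a phase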
SemihAkkoc/arz_cazibesi
[ "0a72da0d5c9e0f104f51cb375d858a7b9ac96eb5" ]
[ "arz_mk1_TR.py" ]
[ "\"\"\"\r\nTODO:\r\nmake choosable graphics NOT gravity[0], time[0]\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport array, math\r\nfrom tkinter import *\r\n\r\nsensitivity = 200\r\nheight = 2.0 # meters\r\ntime = [0.61, 0.622, 0.6423, 0.6323]\r\npressure = [1, 0.9, 0.81, 0.3]\r\nexnum = ['Deney 1:', 'Deney 2:', 'Deney 3:', 'Deney 4:']\r\ngravity = array.array('f')\r\n\r\nfor i in range(0, 4):\r\n gravity.insert(i, 2 * height / time[i] ** 2)\r\n\r\n\r\n# position-time graphic\r\ndef ptG():\r\n currentHeight = array.array('f')\r\n currentTime = array.array('f')\r\n for i in range(0, int(time[0] * sensitivity)): # this is not correct\r\n currentTime.insert(i, math.sqrt(i * time[0] / sensitivity))\r\n currentHeight.insert(i, (0.5 * gravity[0] * currentTime[i] ** 2))\r\n print(currentHeight[i])\r\n plt.plot(currentTime, currentHeight)\r\n plt.ylabel('Konum (m)')\r\n plt.xlabel('Zaman (s)')\r\n plt.show()\r\n\r\n\r\n# speed-time graphic\r\ndef stG():\r\n currentTime = array.array('f')\r\n currentSpeed = array.array('f')\r\n for i in range(0, int(time[0] * sensitivity)):\r\n currentTime.insert(i, math.sqrt(i * time[0] / sensitivity))\r\n currentSpeed.insert(i, gravity[0] * currentTime[i])\r\n print(currentSpeed[i])\r\n plt.plot(currentTime, currentSpeed)\r\n plt.ylabel('Hiz (m/s)')\r\n plt.xlabel('Zaman (s)')\r\n plt.show()\r\n\r\n\r\n# acceleration-time graphic\r\ndef atG():\r\n currentTime = array.array('f')\r\n currentGravity = array.array('f')\r\n for i in range(0, int(time[0] * sensitivity)):\r\n currentTime.insert(i, math.sqrt(i * time[0] / sensitivity))\r\n currentGravity.insert(i, gravity[0])\r\n plt.plot(currentTime, currentGravity)\r\n plt.ylabel('Ivme (m/s^2)')\r\n plt.xlabel('Zaman (s)')\r\n plt.show()\r\n\r\ndef writeFile(expnum):\r\n file = open(\"gravityTR.txt\", \"a\")\r\n for i in range(50):\r\n file.write('-')\r\n file.write(\"\\nDeney Numarasi{0:4s}Yer Cekimi{0:4s}Basinc{0:4s}Zaman\\n\".format(' '))\r\n for i in range(expnum):\r\n file.write('{4:4s}Deney {0:d}:{1:18.6f}{2:10.2f}{3:10.4f}\\n'.format(i, gravity[i], pressure[i], time[i], ' '))\r\n\r\n\r\ndef printVal():\r\n g = pd.Series(gravity)\r\n t = pd.Series(time)\r\n Data = {'Deney Numarasi': exnum, 'Yer Cekimi': g, 'Basinc': pressure, 'Zaman': t}\r\n df = pd.DataFrame(Data)\r\n print(df)\r\n writeFile(4)\r\n\r\n\r\nroot = Tk()\r\nroot.geometry(\"400x200\")\r\nroot.title('Arz Cazibesi')\r\n\r\nlabel= Label(root, text=\"Arz Cazibesi\")\r\n\r\nbutton1 = Button(root, text = \"Konum - Zaman\", command = ptG)\r\nbutton2 = Button(root, text = \"Hiz - Zaman\", command = stG)\r\nbutton3 = Button(root, text = \"Ivme - Zaman\", command = atG)\r\nbutton4 = Button(root, text = \"Konsola Yazdir\", command = printVal)\r\n\r\nbutton1.config(font=(\"Courier 10 bold\"))\r\nbutton2.config(font=(\"Courier 10 bold\"))\r\nbutton3.config(font=(\"Courier 10 bold\"))\r\nbutton4.config(font=(\"Courier 10 bold\"))\r\nlabel.config(font=(\"Courier 22 bold\"))\r\n\r\nlabel.place(x=80, y=30)\r\nbutton1.place(x=20,y=95)\r\nbutton2.place(x=220,y=95)\r\nbutton3.place(x=20,y=150)\r\nbutton4.place(x=220,y=150)\r\n\r\nroot.mainloop()\r\n" ]
[ [ "pandas.Series", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
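arz_mk1_TR.py in the row above computes gravity from the free-fall relation h = g*t^2/2, rearranged as g = 2h/t^2; with the script's first measurement (height 2.0 m, time 0.61 s) this gives about 10.75 m/s^2, the value stored in gravity[0]:

height, t = 2.0, 0.61    # metres and seconds, the first data point in the script
g = 2 * height / t ** 2  # ~= 10.7498, i.e. gravity[0] as computed above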
eladj/ScrabbleOCR
[ "4470eca7dc476914ffaed71a6688c411fb958bdc" ]
[ "Python/findboard.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 26 15:20:14 2015\n\n@author: elad\n\"\"\"\nfrom scipy.misc import imread\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nfrom PIL import Image\nimport pytesseract\n\ndef four_point_transform(image, pts, dst=None):\n # obtain a consistent order of the points and unpack them\n # individually\n rect = order_points(pts)\n (tl, tr, br, bl) = rect\n\n if dst == None:\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n \n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n else:\n maxWidth, maxHeight = dst\n\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order \n dst = np.array([ [0, 0], [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype = \"float32\") \n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect.astype(\"float32\"), dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n\n # return the warped image\n return warped\n\nif __name__ == \"__main__\":\n #img_fn = r\"C:\\Users\\elad\\Documents\\code\\DigitalScrabble\\board_OnePlus (1).jpg\"\n #img_fn = r\"C:\\Users\\elad\\Documents\\code\\DigitalScrabble\\board_letters (3).jpg\"\n img_fn = r\"C:\\Users\\elad\\Desktop\\IMG_BOARD.jpg\"\n #img_fn = r\"C:\\Users\\elad\\Documents\\code\\DigitalScrabble\\board_nexus3 (3).jpg\"\n \n im_size = 8e6 #in total pixels. The size to set the image (larger will shrink and smaller will enlarge)\n blur_size = (5,5)\n blur_std = 5\n open_close_kernel_size = (10, 10)\n curve_approx_eps = 15 # maximum distance between the original curve and its approximation\n warped_shape = (1024, 1024) # to which shape wrap the board\n grid_size = (8,8) # x,y\n border_shift = 55 #pixels. 
from outer border to inner\n tile_std_th = 10 # STD of each tile Hue, to decide if it is occupied or not\n letter_bw_th = 150 # threshold to seperate tile's letter from background\n \n #%%\n bgr = cv2.imread(img_fn)\n # Bring all images to the same size\n factor = np.round(np.sqrt(im_size/(bgr.shape[0]*bgr.shape[1])),2)\n if factor < 1: interpolation = cv2.INTER_AREA #shrink\n else: interpolation = cv2.INTER_LINEAR #enlarge\n bgr = cv2.resize(bgr,None, fx=factor, fy=factor) \n rgb = cv2.cvtColor(bgr.copy(), cv2.COLOR_BGR2RGB)\n rgb = cv2.GaussianBlur(rgb, blur_size, blur_std)\n \n rgbPyrDown = cv2.pyrDown(rgb)\n rgbPyrDown = cv2.pyrDown(rgbPyrDown) # Downsample image by 4\n r,g,b = rgbPyrDown[:,:,0],rgbPyrDown[:,:,1],rgbPyrDown[:,:,2]\n hsv = cv2.cvtColor(rgbPyrDown.copy(), cv2.COLOR_RGB2HSV)\n h,s,v = hsv[:,:,0],hsv[:,:,1],hsv[:,:,2]\n \n #%% Thresholding \n lower_red = (0, 50, 50)\n upper_red = (9, 230, 235)\n bw = cv2.inRange(hsv, lower_red, upper_red)\n lower_red = (170, 50, 50)\n upper_red = (180, 230, 235)\n bw2 = cv2.inRange(hsv, lower_red, upper_red)\n bw = np.uint8(np.logical_or(bw,bw2))\n kernel = np.ones(open_close_kernel_size ,np.uint8)\n bw = cv2.morphologyEx(bw, cv2.MORPH_OPEN, kernel) # opening (remove small objects from the foreground)\n bw = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel) # closing (fill small holes in the foreground)\n \n #%% Find Contour and 4 Corners\n bwCanny = cv2.Canny(bw, 1, 1)\n\n \n #%%\n image, contours, hierarchy = cv2.findContours(bw.copy(), cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_SIMPLE)\n rgb_contours = rgb.copy()\n rgb_contours_approx = rgb.copy()\n rgb_warped = None\n if contours != []:\n for contour in contours:\n if np.abs(cv2.contourArea(contour)) < 15000: \n continue\n #minRect = cv2.minAreaRect(contour) \n #rectPoints = cv2.boxPoints(minRect).astype(np.int32)\n # TODO - check distance from center\n contour = contour*4 # Upsample back to original image size\n points = contour.reshape((-1,2))\n topLeft_ind = np.argmin(points[:,0] + points[:,1])\n bottomRight_ind = np.argmin(- points[:,0] - points[:,1])\n topRight_ind = np.argmin(- points[:,0] + points[:,1])\n bottomLeft_ind = np.argmin(points[:,0] - points[:,1])\n corners = np.vstack((points[topLeft_ind,:],\n points[topRight_ind,:],\n points[bottomRight_ind,:],\n points[bottomLeft_ind,:]))\n \n rgb_contours_approx = rgb.copy()\n cv2.drawContours(rgb_contours, contour, 0, (255,255,0), 5)\n #cv2.drawContours(rgb_contours_approx, rectPoints.reshape((4,-1,2)), 0, (255,255,0), 5) \n colors = ((255,0,0), (0,255,0), (0,0,255), (255,255,255))\n for n in range(4):\n cv2.circle(rgb_contours_approx, tuple(corners[n,:].tolist()), 35, colors[n],-1)\n # Apply the perspective transformation\n rgb_warped = four_point_transform(rgb.copy(), corners, warped_shape)\n\n #%% find accurate corners of warped board\n TEMPLATE_SIZE = 32\n template = np.zeros((TEMPLATE_SIZE,TEMPLATE_SIZE,3), dtype=np.uint8)\n template[0:TEMPLATE_SIZE/2-2, :, :] = (255, 0, 0) #red\n template[:, 0:TEMPLATE_SIZE/2-2, :] = (255, 0, 0)\n template[TEMPLATE_SIZE/2+2:, TEMPLATE_SIZE/2+2:, :] = (189, 215, 238) #light blue\n roi_img_size_x = rgb_warped.shape[1] / 8\n roi_img_size_y = rgb_warped.shape[0] / 8\n corr_result = cv2.matchTemplate(rgb_warped[0:roi_img_size_y, 0:roi_img_size_x],\n template, cv2.TM_CCOEFF_NORMED)\n vmin, vmax, minLoc, maxLoc = cv2.minMaxLoc(corr_result)\n topLeft = (maxLoc[0] + TEMPLATE_SIZE /2, maxLoc[1] + TEMPLATE_SIZE /2)\n template = cv2.flip(template, -1)\n roi_col = rgb_warped.shape[1] - roi_img_size_x\n roi_row = 
rgb_warped.shape[0] - roi_img_size_y\n corr_result = cv2.matchTemplate(rgb_warped[roi_col:, roi_row:], template, cv2.TM_CCOEFF_NORMED) \n vmin, vmax, minLoc, maxLoc = cv2.minMaxLoc(corr_result)\n bottomRight = (roi_col + maxLoc[0] + TEMPLATE_SIZE /2, roi_row + maxLoc[1] + TEMPLATE_SIZE /2) \n \n # find two other corners by calculation\n xc = (topLeft[0] + bottomRight[0])/2\n yc = (topLeft[1] + bottomRight[1])/2 # Center point\n xd = (topLeft[0] - bottomRight[0])/2\n yd = (topLeft[1] - bottomRight[1])/2 # Half-diagonal\n topRight = (xc - yd, yc + xd)\n bottomLeft = (xc + yd, yc - xd)\n corners = np.array([topLeft, topRight, bottomRight, bottomLeft])\n \n \n \n #%% Build Tiles grid\n rgb_warped_plot = rgb_warped.copy()\n vr_x = (corners[1,0] - corners[0,0]) / grid_size[0]; # one unit of vector right\n vr_y = (corners[1,1] - corners[0,1]) / grid_size[1]; # one unit of vector right\n vd_x = (corners[3,0] - corners[0,0]) / grid_size[0]; # one unit of vector down\n vd_y = (corners[3,1] - corners[0,1]) / grid_size[1]; # one unit of vector down\n tiles = []\n for row in range(grid_size[1]):\n for col in range(grid_size[0]): \n p1 = np.array([corners[0,0] + col*vr_x + row*vd_x,\n corners[0,1] + col*vr_y + row*vd_y])\n p2 = np.array([corners[0,0] + (col+1)*vr_x + (row+1)*vd_x,\n corners[0,1] + (col+1)*vr_y + (row+1)*vd_y])\n tiles.append({'row':row, 'col': col, 'p1':p1, 'p2': p2 })\n \n for tile in tiles:\n cv2.rectangle(rgb_warped_plot, tuple(tile['p1'].tolist()),tuple(tile['p2'].tolist()), (0,255,255), 5)\n\n #%% Check if grid occupied\n hsv2 = cv2.cvtColor(rgb_warped.copy(), cv2.COLOR_RGB2HSV)\n h2,s2,v2 = hsv2[:,:,0],hsv2[:,:,1],hsv2[:,:,2]\n occupied_tiles = []\n for i in range(grid_size[1]):\n for j in range(grid_size[0]): \n x,y = grid[i,j,:]\n tile_roi = h2[y-tile_height/2+20:y+tile_height/2-20,\n x-tile_width/2+20:x+tile_width/2-20]\n tile_std = np.std(tile_roi)\n #print(\"i=%d, j=%d, std=%.2f\" % (i,j,tile_std))\n if tile_std > tile_std_th:\n occupied_tiles.append((i,j))\n cv2.circle(rgb_warped_plot, tuple(grid[i,j,:].tolist()), 30, (255,255,0),-1) \n #%% Build Lettes Dict\n rgb_letters_plots = rgb_warped.copy()\n letters = [] \n for tile_ij in occupied_tiles:\n letter = {}\n i,j = tile_ij \n x,y = grid[i,j,:]\n tile_roi = v2[y-tile_height/2+25:y+tile_height/2-25,\n x-tile_width/2+25:x+tile_width/2-25]\n \n tile_bw = tile_roi > letter_bw_th\n pil_img = Image.fromarray(np.uint8(tile_bw))\n tile_ocr = pytesseract.image_to_string(pil_img, config=\"-psm 10\")\n letter['i'], letter['j'] = i,j \n letter['bw'] = tile_bw\n letter['ocr'] = tile_ocr\n letters.append(letter)\n print(\"i=%d, j=%d, OCR=%s\" % (i,j, tile_ocr))\n cv2.putText(rgb_letters_plots, \"%s\" % tile_ocr, tuple((grid[i,j,:]-4).tolist()),\n cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255,255,255), 3 ,2) \n cv2.putText(rgb_letters_plots, \"%s\" % tile_ocr, tuple(grid[i,j,:].tolist()),\n cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0,0,0), 3 ,2) \n \n \n # \n #minLineLength = 100\n #maxLineGap = 1\n #lines = cv2.HoughLinesP(bw.copy(), 1, np.pi/180, 100, minLineLength, maxLineGap)\n #rgb_hough_lines = rgb.copy()\n #for x1,y1,x2,y2 in lines[:,0,:]:\n # cv2.line(rgb_hough_lines,(x1,y1),(x2,y2),(0,255,0),2)\n \n #%% Plot\n # Plot RGB and HSV\n fig = plt.figure()\n ax1 = fig.add_subplot(2,3,1)\n ax1.imshow(r, cmap='gray')\n ax1.set_title(\"Red\")\n ax1.format_coord = lambda x,y: \"x=%.1f, y=%.1f, Red=%1.f\" % (x, y, r[int(y),int(x)])\n ax2 = fig.add_subplot(2,3,2)\n ax2.imshow(g, cmap='gray')\n ax2.set_title(\"Green\")\n ax2.format_coord = lambda x,y: \"x=%.1f, 
y=%.1f, Green=%1.f\" % (x, y, g[int(y),int(x)])\n ax3 = fig.add_subplot(2,3,3)\n ax3.imshow(b, cmap='gray')\n ax3.set_title(\"Blue\")\n ax3.format_coord = lambda x,y: \"x=%.1f, y=%.1f, Blue=%1.f\" % (x, y, b[int(y),int(x)])\n ax4 = fig.add_subplot(2,3,4)\n ax4.imshow(h, cmap='gray')\n ax4.set_title(\"Hue\")\n ax4.format_coord = lambda x,y: \"x=%.1f, y=%.1f, Hue=%1.f\" % (x, y, h[int(y),int(x)])\n ax5 = fig.add_subplot(2,3,5)\n ax5.imshow(s, cmap='gray')\n ax5.set_title(\"Saturation\")\n ax5.format_coord = lambda x,y: \"x=%.1f, y=%.1f, Saturation=%1.f\" % (x, y, s[int(y),int(x)])\n ax6 = fig.add_subplot(2,3,6)\n ax6.imshow(v, cmap='gray')\n ax6.set_title(\"Value\")\n ax6.format_coord = lambda x,y: \"x=%.1f, y=%.1f, Value=%1.f\" % (x, y, v[int(y),int(x)])\n # Plot Threshold\n fig2 = plt.figure()\n ax1_2 = fig2.add_subplot(2,2,1)\n ax1_2.imshow(rgb)\n ax1_2.set_title(\"RGB\")\n ax2_2 = fig2.add_subplot(2,2,2)\n ax2_2.imshow(bw, cmap='gray')\n ax2_2.set_title(\"BW\")\n ax3_2 = fig2.add_subplot(2,2,3)\n ax3_2.imshow(rgb_contours_approx)\n ax3_2.set_title(\"4 Corners detction\")\n ax4_2 = fig2.add_subplot(2,2,4)\n ax4_2.imshow(rgb_warped)\n ax4_2.set_title(\"RGB Warped\") \n # Plot Grid\n fig3 = plt.figure()\n ax1_3 = fig3.add_subplot(2,2,1)\n ax1_3.imshow(rgb_warped_plot)\n ax1_3.set_title(\"Grid Detection\")\n ax2_3 = fig3.add_subplot(2,2,2)\n ax2_3.imshow(rgb_letters_plots)\n ax2_3.set_title(\"Letters OCR\")\n \n \"\"\"\n HSV color space is also consists of 3 matrices, HUE, SATURATION and VALUE.\n In OpenCV, value range for HUE, SATURATION and VALUE are \n respectively 0-179, 0-255 and 0-255.\n HUE represents the color, SATURATION represents the amount to which that\n respective color is mixed with white and VALUE represents the amount to \n which that respective color is mixed with black.\n \n red object has HUE, SATURATION and VALUE in between 170-180, 160-255, 60-255 \n \n Hue values of basic colors\n Orange 0-22\n Yellow 22- 38\n Green 38-75\n Blue 75-130\n Violet 130-160\n Red 160-179\n \"\"\"" ]
[ [ "numpy.sqrt", "numpy.uint8", "numpy.ones", "numpy.logical_or", "numpy.std", "numpy.argmin", "numpy.array", "numpy.zeros", "numpy.vstack", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
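findboard.py in the row above calls order_points(pts) inside four_point_transform without defining or importing it. Sketched below is a common implementation consistent with the (tl, tr, br, bl) unpacking used there; it is an assumption about the missing helper, not code recovered from the repository:

import numpy as np

def order_points(pts):
    # Order four corner points as top-left, top-right, bottom-right, bottom-left,
    # matching the unpacking in four_point_transform above.
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]   # top-left has the smallest x + y
    rect[2] = pts[np.argmax(s)]   # bottom-right has the largest x + y
    d = np.diff(pts, axis=1)      # y - x for each point
    rect[1] = pts[np.argmin(d)]   # top-right has the smallest y - x
    rect[3] = pts[np.argmax(d)]   # bottom-left has the largest y - x
    return rect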
luisggc/FConcrete
[ "0f8d003da965b359900acb084c0f8762aef1799e" ]
[ "fconcrete/Structural/Load.py" ]
[ "import numpy as np\r\nfrom fconcrete.helpers import to_unit\r\n\r\nclass Load:\r\n \"\"\"\r\n Class that defines a load.\r\n \"\"\"\r\n def __init__(self, force, momentum, x_begin, x_end, q=0, order=0, displacement=0):\r\n force = to_unit(force, \"kN\")\r\n momentum = to_unit(momentum, \"kNcm\")\r\n x_begin = to_unit(x_begin, \"cm\")\r\n x_end = to_unit(x_end, \"cm\")\r\n q = to_unit(q, \"kN/cm\")\r\n \r\n self.x = x_begin + (x_end-x_begin)/2\r\n self.x_begin = x_begin\r\n self.x_end = x_end\r\n self.force = force\r\n self.momentum = momentum\r\n self.q = q\r\n self.order = order\r\n self.displacement = displacement\r\n \r\n @classmethod\r\n def PontualLoad(cls, load, x):\r\n \"\"\"\r\n Define a pontual load.\r\n\r\n Call signatures:\r\n\r\n fc.PontualLoad(load, x)\r\n\r\n >>> pontual_load_1 = fc.Load.PontualLoad(-10.0, 200)\r\n >>> pontual_load_2 = fc.Load.PontualLoad('-10.0kN', '2m')\r\n >>> repr(pontual_load_1) == repr(pontual_load_2)\r\n True\r\n\r\n Parameters\r\n ----------\r\n load : number or str\r\n Represent the load measure. If it is a number, default unit is kN, but also [force] unit can be given. Example:\r\n '20kN', '10N', etc\r\n \r\n x : number or str\r\n Where the load is going to end. If it is a number, default unit is cm, but also [length] unit can be given. Example:\r\n '20cm', '10dm', etc\r\n \r\n \"\"\" \r\n return cls(load, 0, x, x, q=0, order=0)\r\n \r\n @classmethod\r\n def UniformDistributedLoad(cls, q, x_begin, x_end):\r\n \"\"\"\r\n Define a uniform and distributed load.\r\n\r\n Call signatures:\r\n\r\n fc.UniformDistributedLoad(q, x_begin, x_end)\r\n\r\n >>> uniform_load_1 = fc.Load.UniformDistributedLoad(0.1, 0, 2000)\r\n >>> uniform_load_2 = fc.Load.UniformDistributedLoad('10.0kN/m', '0m', '20m')\r\n >>> repr(uniform_load_1) == repr(uniform_load_2)\r\n True\r\n\r\n Parameters\r\n ----------\r\n q : number or str\r\n Represent the load by length measure. If it is a number, default unit is kN/cm, but also [force]/[length] unit can be given. Example:\r\n '20kN/m', '10N/m', etc\r\n \r\n x_begin : number or str\r\n Where the load is going to start. If it is a number, default unit is cm, but also [length] unit can be given. Example:\r\n '20cm', '10dm', etc\r\n \r\n x_end : number or str\r\n Where the load is going to end. If it is a number, default unit is cm, but also [length] unit can be given. 
Example:\r\n '20cm', '10dm', etc\r\n \"\"\" \r\n \r\n q = to_unit(q, \"kN/cm\")\r\n x_begin = to_unit(x_begin, \"cm\")\r\n x_end = to_unit(x_end, \"cm\")\r\n force = q*(x_end-x_begin)\r\n \r\n return cls(force, 0, x_begin, x_end, q=q, order=1)\r\n \r\n #@classmethod\r\n # def DisplacementLoad(cls, x, displacement):\r\n # return cls(0, 0, x, x, displacement=displacement)\r\n \r\n def __repr__(self):\r\n return str(self.__dict__)+'\\n'\r\n\r\n\r\nclass Loads:\r\n \"\"\"\r\n Class that defines a load list with convenient properties and methods.\r\n \"\"\"\r\n def __init__(self, loads):\r\n loads = np.array(loads)\r\n self.loads = loads\r\n self.x = np.array([ load.x for load in loads ])\r\n self.x_begin = np.array([ load.x_begin for load in loads ])\r\n self.x_end = np.array([ load.x_end for load in loads ])\r\n self.force = np.array([ load.force for load in loads ])\r\n self.momentum = np.array([ load.momentum for load in loads ])\r\n self.q = np.array([ load.q for load in loads ])\r\n self.order = np.array([ load.order for load in loads ])\r\n \r\n @classmethod\r\n def create(cls, loads):\r\n \"\"\"\r\n Creates an instance of Loads from an array of Load.\r\n \"\"\"\r\n loads = np.array(loads)\r\n x_start = np.array([ load.x_begin for load in loads ])\r\n load_sort_position = np.argsort(x_start)\r\n return cls(loads[load_sort_position])\r\n \r\n \r\n def add(self, loads):\r\n \"\"\"\r\n Adds an array of Load to the Loads instance.\r\n \"\"\"\r\n loads = np.concatenate((self.loads,loads))\r\n return self.create(loads)\r\n \r\n def __repr__(self):\r\n return str(self.loads)\r\n \r\n def __getitem__(self, key):\r\n return self.loads[key]\r\n \r\n def __len__(self):\r\n return len(self.loads)\r\n \r\n" ]
[ [ "numpy.concatenate", "numpy.argsort", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CompRhys/botorch
[ "6965426853b7c2d61244f6874eff3317b3588554" ]
[ "botorch/models/multitask.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nr\"\"\"\nMulti-Task GP models.\n\nReferences\n\n.. [Doucet2010sampl]\n A. Doucet. A Note on Efficient Conditional Simulation of Gaussian Distributions.\n http://www.stats.ox.ac.uk/~doucet/doucet_simulationconditionalgaussian.pdf,\n Apr 2010.\n\n.. [Maddox2021bohdo]\n W. Maddox, M. Balandat, A. Wilson, and E. Bakshy. Bayesian Optimization with\n High-Dimensional Outputs. https://arxiv.org/abs/2106.12997, Jun 2021.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport math\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom botorch.acquisition.objective import PosteriorTransform\nfrom botorch.models.gp_regression import MIN_INFERRED_NOISE_LEVEL\nfrom botorch.models.gpytorch import GPyTorchModel, MultiTaskGPyTorchModel\nfrom botorch.models.transforms.input import InputTransform\nfrom botorch.models.transforms.outcome import OutcomeTransform\nfrom botorch.posteriors.multitask import MultitaskGPPosterior\nfrom botorch.utils.containers import TrainingData\nfrom gpytorch.constraints import GreaterThan\nfrom gpytorch.distributions.multitask_multivariate_normal import (\n MultitaskMultivariateNormal,\n)\nfrom gpytorch.distributions.multivariate_normal import MultivariateNormal\nfrom gpytorch.kernels.index_kernel import IndexKernel\nfrom gpytorch.kernels.matern_kernel import MaternKernel\nfrom gpytorch.kernels.multitask_kernel import MultitaskKernel\nfrom gpytorch.kernels.scale_kernel import ScaleKernel\nfrom gpytorch.lazy import (\n BatchRepeatLazyTensor,\n CatLazyTensor,\n DiagLazyTensor,\n KroneckerProductDiagLazyTensor,\n KroneckerProductLazyTensor,\n lazify,\n RootLazyTensor,\n)\nfrom gpytorch.likelihoods.gaussian_likelihood import (\n FixedNoiseGaussianLikelihood,\n GaussianLikelihood,\n)\nfrom gpytorch.likelihoods.multitask_gaussian_likelihood import (\n MultitaskGaussianLikelihood,\n)\nfrom gpytorch.means import MultitaskMean\nfrom gpytorch.means.constant_mean import ConstantMean\nfrom gpytorch.models.exact_gp import ExactGP\nfrom gpytorch.module import Module\nfrom gpytorch.priors.lkj_prior import LKJCovariancePrior\nfrom gpytorch.priors.prior import Prior\nfrom gpytorch.priors.smoothed_box_prior import SmoothedBoxPrior\nfrom gpytorch.priors.torch_priors import GammaPrior\nfrom gpytorch.settings import detach_test_caches\nfrom gpytorch.utils.errors import CachingError\nfrom gpytorch.utils.memoize import cached, pop_from_cache\nfrom torch import Tensor\n\n\nclass MultiTaskGP(ExactGP, MultiTaskGPyTorchModel):\n r\"\"\"Multi-Task GP model using an ICM kernel, inferring observation noise.\n\n Multi-task exact GP that uses a simple ICM kernel. Can be single-output or\n multi-output. This model uses relatively strong priors on the base Kernel\n hyperparameters, which work best when covariates are normalized to the unit\n cube and outcomes are standardized (zero mean, unit variance).\n\n This model infers the noise level. WARNING: It currently does not support\n different noise levels for the different tasks. 
If you have known observation\n noise, please use `FixedNoiseMultiTaskGP` instead.\n \"\"\"\n\n def __init__(\n self,\n train_X: Tensor,\n train_Y: Tensor,\n task_feature: int,\n covar_module: Optional[Module] = None,\n task_covar_prior: Optional[Prior] = None,\n output_tasks: Optional[List[int]] = None,\n rank: Optional[int] = None,\n input_transform: Optional[InputTransform] = None,\n outcome_transform: Optional[OutcomeTransform] = None,\n ) -> None:\n r\"\"\"Multi-Task GP model using an ICM kernel, inferring observation noise.\n\n Args:\n train_X: A `n x (d + 1)` or `b x n x (d + 1)` (batch mode) tensor\n of training data. One of the columns should contain the task\n features (see `task_feature` argument).\n train_Y: A `n x 1` or `b x n x 1` (batch mode) tensor of training\n observations.\n task_feature: The index of the task feature (`-d <= task_feature <= d`).\n output_tasks: A list of task indices for which to compute model\n outputs. If omitted, return outputs for all task indices.\n rank: The rank to be used for the index kernel. If omitted, use a\n full rank (i.e. number of tasks) kernel.\n task_covar_prior: A Prior on the task covariance matrix. Must operate\n on p.s.d. matrices. A common prior for this is the `LKJ` prior.\n input_transform: An input transform that is applied in the model's\n forward pass.\n\n Example:\n >>> X1, X2 = torch.rand(10, 2), torch.rand(20, 2)\n >>> i1, i2 = torch.zeros(10, 1), torch.ones(20, 1)\n >>> train_X = torch.cat([\n >>> torch.cat([X1, i1], -1), torch.cat([X2, i2], -1),\n >>> ])\n >>> train_Y = torch.cat([f1(X1), f2(X2)]).unsqueeze(-1)\n >>> model = MultiTaskGP(train_X, train_Y, task_feature=-1)\n \"\"\"\n with torch.no_grad():\n transformed_X = self.transform_inputs(\n X=train_X, input_transform=input_transform\n )\n self._validate_tensor_args(X=transformed_X, Y=train_Y)\n all_tasks, task_feature, d = self.get_all_tasks(\n transformed_X, task_feature, output_tasks\n )\n if outcome_transform is not None:\n train_Y, _ = outcome_transform(train_Y)\n\n # squeeze output dim\n train_Y = train_Y.squeeze(-1)\n if output_tasks is None:\n output_tasks = all_tasks\n else:\n if set(output_tasks) - set(all_tasks):\n raise RuntimeError(\"All output tasks must be present in input data.\")\n self._output_tasks = output_tasks\n self._num_outputs = len(output_tasks)\n\n # TODO (T41270962): Support task-specific noise levels in likelihood\n likelihood = GaussianLikelihood(noise_prior=GammaPrior(1.1, 0.05))\n\n # construct indexer to be used in forward\n self._task_feature = task_feature\n self._base_idxr = torch.arange(d)\n self._base_idxr[task_feature:] += 1 # exclude task feature\n\n super().__init__(\n train_inputs=train_X, train_targets=train_Y, likelihood=likelihood\n )\n self.mean_module = ConstantMean()\n if covar_module is None:\n self.covar_module = ScaleKernel(\n base_kernel=MaternKernel(\n nu=2.5, ard_num_dims=d, lengthscale_prior=GammaPrior(3.0, 6.0)\n ),\n outputscale_prior=GammaPrior(2.0, 0.15),\n )\n else:\n self.covar_module = covar_module\n\n num_tasks = len(all_tasks)\n self._rank = rank if rank is not None else num_tasks\n\n self.task_covar_module = IndexKernel(\n num_tasks=num_tasks, rank=self._rank, prior=task_covar_prior\n )\n if input_transform is not None:\n self.input_transform = input_transform\n if outcome_transform is not None:\n self.outcome_transform = outcome_transform\n self.to(train_X)\n\n def _split_inputs(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n r\"\"\"Extracts base features and task indices from input data.\n\n Args:\n 
x: The full input tensor with trailing dimension of size `d + 1`.\n Should be of float/double data type.\n\n Returns:\n 2-element tuple containing\n\n - A `q x d` or `b x q x d` (batch mode) tensor with trailing\n dimension made up of the `d` non-task-index columns of `x`, arranged\n in the order as specified by the indexer generated during model\n instantiation.\n - A `q` or `b x q` (batch mode) tensor of long data type containing\n the task indices.\n \"\"\"\n batch_shape, d = x.shape[:-2], x.shape[-1]\n x_basic = x[..., self._base_idxr].view(batch_shape + torch.Size([-1, d - 1]))\n task_idcs = (\n x[..., self._task_feature]\n .view(batch_shape + torch.Size([-1, 1]))\n .to(dtype=torch.long)\n )\n return x_basic, task_idcs\n\n def forward(self, x: Tensor) -> MultivariateNormal:\n if self.training:\n x = self.transform_inputs(x)\n x_basic, task_idcs = self._split_inputs(x)\n # Compute base mean and covariance\n mean_x = self.mean_module(x_basic)\n covar_x = self.covar_module(x_basic)\n # Compute task covariances\n covar_i = self.task_covar_module(task_idcs)\n # Combine the two in an ICM fashion\n covar = covar_x.mul(covar_i)\n return MultivariateNormal(mean_x, covar)\n\n @classmethod\n def get_all_tasks(\n cls,\n train_X: Tensor,\n task_feature: int,\n output_tasks: Optional[List[int]] = None,\n ) -> Tuple[List[int], int, int]:\n if train_X.ndim != 2:\n # Currently, batch mode MTGPs are blocked upstream in GPyTorch\n raise ValueError(f\"Unsupported shape {train_X.shape} for train_X.\")\n d = train_X.shape[-1] - 1\n if not (-d <= task_feature <= d):\n raise ValueError(f\"Must have that -{d} <= task_feature <= {d}\")\n task_feature = task_feature % (d + 1)\n all_tasks = train_X[:, task_feature].unique().to(dtype=torch.long).tolist()\n return all_tasks, task_feature, d\n\n @classmethod\n def construct_inputs(cls, training_data: TrainingData, **kwargs) -> Dict[str, Any]:\n r\"\"\"Construct kwargs for the `Model` from `TrainingData` and other options.\n\n Args:\n training_data: `TrainingData` container with data for single outcome\n or for multiple outcomes for batched multi-output case.\n **kwargs: Additional options for the model that pertain to the\n training data, including:\n\n - `task_features`: Indices of the input columns containing the task\n features (expected list of length 1),\n - `task_covar_prior`: A GPyTorch `Prior` object to use as prior on\n the cross-task covariance matrix,\n - `prior_config`: A dict representing a prior config, should only be\n used if `prior` is not passed directly. 
Should contain:\n `use_LKJ_prior` (whether to use LKJ prior) and `eta` (eta value,\n float),\n - `rank`: The rank of the cross-task covariance matrix.\n \"\"\"\n\n task_features = kwargs.pop(\"task_features\", None)\n if task_features is None:\n raise ValueError(f\"`task_features` required for {cls.__name__}.\")\n task_feature = task_features[0]\n inputs = {\n \"train_X\": training_data.X,\n \"train_Y\": training_data.Y,\n \"task_feature\": task_feature,\n \"rank\": kwargs.get(\"rank\"),\n }\n\n prior = kwargs.get(\"task_covar_prior\")\n prior_config = kwargs.get(\"prior_config\")\n if prior and prior_config:\n raise ValueError(\n \"Only one of `prior` and `prior_config` arguments expected.\"\n )\n\n if prior_config:\n if not prior_config.get(\"use_LKJ_prior\"):\n raise ValueError(\"Currently only config for LKJ prior is supported.\")\n all_tasks, _, _ = MultiTaskGP.get_all_tasks(training_data.X, task_feature)\n num_tasks = len(all_tasks)\n sd_prior = GammaPrior(1.0, 0.15)\n sd_prior._event_shape = torch.Size([num_tasks])\n eta = prior_config.get(\"eta\", 0.5)\n if not isinstance(eta, float) and not isinstance(eta, int):\n raise ValueError(f\"eta must be a real number, your eta was {eta}.\")\n prior = LKJCovariancePrior(num_tasks, eta, sd_prior)\n\n inputs[\"task_covar_prior\"] = prior\n return inputs\n\n\nclass FixedNoiseMultiTaskGP(MultiTaskGP):\n r\"\"\"Multi-Task GP model using an ICM kernel, with known observation noise.\n\n Multi-task exact GP that uses a simple ICM kernel. Can be single-output or\n multi-output. This model uses relatively strong priors on the base Kernel\n hyperparameters, which work best when covariates are normalized to the unit\n cube and outcomes are standardized (zero mean, unit variance).\n\n This model requires observation noise data (specified in `train_Yvar`).\n \"\"\"\n\n def __init__(\n self,\n train_X: Tensor,\n train_Y: Tensor,\n train_Yvar: Tensor,\n task_feature: int,\n covar_module: Optional[Module] = None,\n task_covar_prior: Optional[Prior] = None,\n output_tasks: Optional[List[int]] = None,\n rank: Optional[int] = None,\n input_transform: Optional[InputTransform] = None,\n ) -> None:\n r\"\"\"Multi-Task GP model using an ICM kernel and known observation noise.\n\n Args:\n train_X: A `n x (d + 1)` or `b x n x (d + 1)` (batch mode) tensor\n of training data. One of the columns should contain the task\n features (see `task_feature` argument).\n train_Y: A `n x 1` or `b x n x 1` (batch mode) tensor of training\n observations.\n train_Yvar: A `n` or `b x n` (batch mode) tensor of observation\n noise standard errors.\n task_feature: The index of the task feature (`-d <= task_feature <= d`).\n task_covar_prior: A Prior on the task covariance matrix. Must operate\n on p.s.d. matrices. A common prior for this is the `LKJ` prior.\n output_tasks: A list of task indices for which to compute model\n outputs. If omitted, return outputs for all task indices.\n rank: The rank to be used for the index kernel. If omitted, use a\n full rank (i.e. 
number of tasks) kernel.\n input_transform: An input transform that is applied in the model's\n forward pass.\n\n Example:\n >>> X1, X2 = torch.rand(10, 2), torch.rand(20, 2)\n >>> i1, i2 = torch.zeros(10, 1), torch.ones(20, 1)\n >>> train_X = torch.cat([\n >>> torch.cat([X1, i1], -1), torch.cat([X2, i2], -1),\n >>> ], dim=0)\n >>> train_Y = torch.cat([f1(X1), f2(X2)])\n >>> train_Yvar = 0.1 + 0.1 * torch.rand_like(train_Y)\n >>> model = FixedNoiseMultiTaskGP(train_X, train_Y, train_Yvar, -1)\n \"\"\"\n with torch.no_grad():\n transformed_X = self.transform_inputs(\n X=train_X, input_transform=input_transform\n )\n self._validate_tensor_args(X=transformed_X, Y=train_Y, Yvar=train_Yvar)\n # We'll instantiate a MultiTaskGP and simply override the likelihood\n super().__init__(\n train_X=train_X,\n train_Y=train_Y,\n covar_module=covar_module,\n task_feature=task_feature,\n output_tasks=output_tasks,\n rank=rank,\n task_covar_prior=task_covar_prior,\n input_transform=input_transform,\n )\n self.likelihood = FixedNoiseGaussianLikelihood(noise=train_Yvar.squeeze(-1))\n self.to(train_X)\n\n @classmethod\n def construct_inputs(cls, training_data: TrainingData, **kwargs) -> Dict[str, Any]:\n r\"\"\"Construct kwargs for the `Model` from `TrainingData` and other options.\n\n Args:\n training_data: `TrainingData` container with data for single outcome\n or for multiple outcomes for batched multi-output case.\n **kwargs: Additional options for the model that pertain to the\n training data, including:\n\n - `task_features`: Indices of the input columns containing the task\n features (expected list of length 1),\n - `task_covar_prior`: A GPyTorch `Prior` object to use as prior on\n the cross-task covariance matrix,\n - `prior_config`: A dict representing a prior config, should only be\n used if `prior` is not passed directly. Should contain:\n `use_LKJ_prior` (whether to use LKJ prior) and `eta` (eta value,\n float),\n - `rank`: The rank of the cross-task covariance matrix.\n \"\"\"\n if training_data.Yvar is None:\n raise ValueError(f\"Yvar required for {cls.__name__}.\")\n\n inputs = super().construct_inputs(training_data=training_data, **kwargs)\n inputs[\"train_Yvar\"] = training_data.Yvar\n return inputs\n\n\nclass KroneckerMultiTaskGP(ExactGP, GPyTorchModel):\n \"\"\"Multi-task GP with Kronecker structure, using an ICM kernel.\n\n This model assumes the \"block design\" case, i.e., it requires that all tasks\n are observed at all data points.\n\n For posterior sampling, this model uses Matheron's rule [Doucet2010sampl] to compute\n the posterior over all tasks as in [Maddox2021bohdo] by exploiting Kronecker\n structure.\n \"\"\"\n\n def __init__(\n self,\n train_X: Tensor,\n train_Y: Tensor,\n likelihood: Optional[MultitaskGaussianLikelihood] = None,\n data_covar_module: Optional[Module] = None,\n task_covar_prior: Optional[Prior] = None,\n rank: Optional[int] = None,\n input_transform: Optional[InputTransform] = None,\n outcome_transform: Optional[OutcomeTransform] = None,\n **kwargs: Any,\n ) -> None:\n r\"\"\"Multi-task GP with Kronecker structure, using a simple ICM kernel.\n\n Args:\n train_X: A `batch_shape x n x d` tensor of training features.\n train_Y: A `batch_shape x n x m` tensor of training observations.\n likelihood: A `MultitaskGaussianLikelihood`. If omitted, uses a\n `MultitaskGaussianLikelihood` with a `GammaPrior(1.1, 0.05)`\n noise prior.\n data_covar_module: The module computing the covariance (Kernel) matrix\n in data space. 
If omitted, use a `MaternKernel`.\n task_covar_prior : A Prior on the task covariance matrix. Must operate\n on p.s.d. matrices. A common prior for this is the `LKJ` prior. If\n omitted, uses `LKJCovariancePrior` with `eta` parameter as specified\n in the keyword arguments (if not specified, use `eta=1.5`).\n rank: The rank of the ICM kernel. If omitted, use a full rank kernel.\n kwargs: Additional arguments to override default settings of priors,\n including:\n - eta: The eta parameter on the default LKJ task_covar_prior.\n A value of 1.0 is uninformative, values <1.0 favor stronger\n correlations (in magnitude), correlations vanish as eta -> inf.\n - sd_prior: A scalar prior over nonnegative numbers, which is used\n for the default LKJCovariancePrior task_covar_prior.\n - likelihood_rank: The rank of the task covariance matrix to fit.\n Defaults to 0 (which corresponds to a diagonal covariance matrix).\n\n Example:\n >>> train_X = torch.rand(10, 2)\n >>> train_Y = torch.cat([f_1(X), f_2(X)], dim=-1)\n >>> model = KroneckerMultiTaskGP(train_X, train_Y)\n \"\"\"\n with torch.no_grad():\n transformed_X = self.transform_inputs(\n X=train_X, input_transform=input_transform\n )\n if outcome_transform is not None:\n train_Y, _ = outcome_transform(train_Y)\n\n self._validate_tensor_args(X=transformed_X, Y=train_Y)\n self._num_outputs = train_Y.shape[-1]\n batch_shape, ard_num_dims = train_X.shape[:-2], train_X.shape[-1]\n num_tasks = train_Y.shape[-1]\n\n if rank is None:\n rank = num_tasks\n if likelihood is None:\n noise_prior = GammaPrior(1.1, 0.05)\n noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate\n likelihood = MultitaskGaussianLikelihood(\n num_tasks=num_tasks,\n batch_shape=batch_shape,\n noise_prior=noise_prior,\n noise_constraint=GreaterThan(\n MIN_INFERRED_NOISE_LEVEL,\n transform=None,\n initial_value=noise_prior_mode,\n ),\n rank=kwargs.get(\"likelihood_rank\", 0),\n )\n if task_covar_prior is None:\n task_covar_prior = LKJCovariancePrior(\n n=num_tasks,\n eta=kwargs.get(\"eta\", 1.5),\n sd_prior=kwargs.get(\n \"sd_prior\",\n SmoothedBoxPrior(math.exp(-6), math.exp(1.25), 0.05),\n ),\n )\n super().__init__(train_X, train_Y, likelihood)\n self.mean_module = MultitaskMean(\n base_means=ConstantMean(batch_shape=batch_shape), num_tasks=num_tasks\n )\n if data_covar_module is None:\n data_covar_module = MaternKernel(\n nu=2.5,\n ard_num_dims=ard_num_dims,\n lengthscale_prior=GammaPrior(3.0, 6.0),\n batch_shape=batch_shape,\n )\n else:\n data_covar_module = data_covar_module\n\n self.covar_module = MultitaskKernel(\n data_covar_module=data_covar_module,\n num_tasks=num_tasks,\n rank=rank,\n batch_shape=batch_shape,\n task_covar_prior=task_covar_prior,\n )\n\n if outcome_transform is not None:\n self.outcome_transform = outcome_transform\n if input_transform is not None:\n self.input_transform = input_transform\n self.to(train_X)\n\n def forward(self, X: Tensor) -> MultitaskMultivariateNormal:\n if self.training:\n X = self.transform_inputs(X)\n\n mean_x = self.mean_module(X)\n covar_x = self.covar_module(X)\n return MultitaskMultivariateNormal(mean_x, covar_x)\n\n @property\n def _task_covar_matrix(self):\n res = self.covar_module.task_covar_module.covar_matrix\n if detach_test_caches.on():\n res = res.detach()\n return res\n\n @property\n @cached(name=\"train_full_covar\")\n def train_full_covar(self):\n train_x = self.transform_inputs(self.train_inputs[0])\n\n # construct Kxx \\otimes Ktt\n train_full_covar = self.covar_module(train_x).evaluate_kernel()\n if 
detach_test_caches.on():\n train_full_covar = train_full_covar.detach()\n return train_full_covar\n\n @property\n @cached(name=\"predictive_mean_cache\")\n def predictive_mean_cache(self):\n train_x = self.transform_inputs(self.train_inputs[0])\n train_noise = self.likelihood._shaped_noise_covar(train_x.shape)\n if detach_test_caches.on():\n train_noise = train_noise.detach()\n\n train_diff = self.train_targets - self.mean_module(train_x)\n train_solve = (self.train_full_covar + train_noise).inv_matmul(\n train_diff.reshape(*train_diff.shape[:-2], -1)\n )\n if detach_test_caches.on():\n train_solve = train_solve.detach()\n\n return train_solve\n\n def posterior(\n self,\n X: Tensor,\n output_indices: Optional[List[int]] = None,\n observation_noise: Union[bool, Tensor] = False,\n posterior_transform: Optional[PosteriorTransform] = None,\n **kwargs: Any,\n ) -> MultitaskGPPosterior:\n self.eval()\n\n if posterior_transform is not None:\n # this could be very costly, disallow for now\n raise NotImplementedError(\n \"Posterior transforms currently not supported for \"\n f\"{self.__class__.__name__}\"\n )\n\n X = self.transform_inputs(X)\n train_x = self.transform_inputs(self.train_inputs[0])\n\n # construct Ktt\n task_covar = self._task_covar_matrix\n task_rootlt = self._task_covar_matrix.root_decomposition(\n method=\"diagonalization\"\n )\n task_root = task_rootlt.root\n if task_covar.batch_shape != X.shape[:-2]:\n task_covar = BatchRepeatLazyTensor(task_covar, batch_repeat=X.shape[:-2])\n task_root = BatchRepeatLazyTensor(\n lazify(task_root), batch_repeat=X.shape[:-2]\n )\n\n task_covar_rootlt = RootLazyTensor(task_root)\n\n # construct RR' \\approx Kxx\n data_data_covar = self.train_full_covar.lazy_tensors[0]\n # populate the diagonalization caches for the root and inverse root\n # decomposition\n data_data_evals, data_data_evecs = data_data_covar.diagonalization()\n\n # pad the eigenvalue and eigenvectors with zeros if we are using lanczos\n if data_data_evecs.shape[-1] < data_data_evecs.shape[-2]:\n cols_to_add = data_data_evecs.shape[-2] - data_data_evecs.shape[-1]\n zero_evecs = torch.zeros(\n *data_data_evecs.shape[:-1],\n cols_to_add,\n dtype=data_data_evals.dtype,\n device=data_data_evals.device,\n )\n zero_evals = torch.zeros(\n *data_data_evecs.shape[:-2],\n cols_to_add,\n dtype=data_data_evals.dtype,\n device=data_data_evals.device,\n )\n data_data_evecs = CatLazyTensor(\n data_data_evecs,\n lazify(zero_evecs),\n dim=-1,\n output_device=data_data_evals.device,\n )\n data_data_evals = torch.cat((data_data_evals, zero_evals), dim=-1)\n\n # construct K_{xt, x}\n test_data_covar = self.covar_module.data_covar_module(X, train_x)\n # construct K_{xt, xt}\n test_test_covar = self.covar_module.data_covar_module(X)\n\n # now update root so that \\tilde{R}\\tilde{R}' \\approx K_{(x,xt), (x,xt)}\n # cloning preserves the gradient history\n updated_lazy_tensor = data_data_covar.cat_rows(\n cross_mat=test_data_covar.clone(),\n new_mat=test_test_covar,\n method=\"diagonalization\",\n )\n updated_root = updated_lazy_tensor.root_decomposition().root\n # occasionally, there are device errors, so enforce this comes out right\n updated_root = updated_root.to(data_data_covar.device)\n\n # build a root decomposition of the joint train/test covariance matrix\n # construct (\\tilde{R} \\otimes M)(\\tilde{R} \\otimes M)' \\approx\n # (K_{(x,xt), (x,xt)} \\otimes Ktt)\n joint_covar = RootLazyTensor(\n KroneckerProductLazyTensor(updated_root, task_covar_rootlt.root.detach())\n )\n\n # construct K_{xt, x} 
\\otimes Ktt\n test_obs_kernel = KroneckerProductLazyTensor(test_data_covar, task_covar)\n\n # collect y - \\mu(x) and \\mu(X)\n train_diff = self.train_targets - self.mean_module(train_x)\n if detach_test_caches.on():\n train_diff = train_diff.detach()\n test_mean = self.mean_module(X)\n\n train_noise = self.likelihood._shaped_noise_covar(train_x.shape)\n diagonal_noise = isinstance(train_noise, DiagLazyTensor)\n if detach_test_caches.on():\n train_noise = train_noise.detach()\n test_noise = (\n self.likelihood._shaped_noise_covar(X.shape) if observation_noise else None\n )\n\n # predictive mean and variance for the mvn\n # first the predictive mean\n pred_mean = (\n test_obs_kernel.matmul(self.predictive_mean_cache).reshape_as(test_mean)\n + test_mean\n )\n # next the predictive variance, assume diagonal noise\n test_var_term = KroneckerProductLazyTensor(test_test_covar, task_covar).diag()\n\n if diagonal_noise:\n task_evals, task_evecs = self._task_covar_matrix.diagonalization()\n # TODO: make this be the default KPMatmulLT diagonal method in gpytorch\n full_data_inv_evals = (\n KroneckerProductDiagLazyTensor(\n DiagLazyTensor(data_data_evals), DiagLazyTensor(task_evals)\n )\n + train_noise\n ).inverse()\n test_train_hadamard = KroneckerProductLazyTensor(\n test_data_covar.matmul(data_data_evecs).evaluate() ** 2,\n task_covar.matmul(task_evecs).evaluate() ** 2,\n )\n data_var_term = test_train_hadamard.matmul(full_data_inv_evals).sum(dim=-1)\n else:\n # if non-diagonal noise (but still kronecker structured), we have to pull\n # across the noise because the inverse is not closed form\n # should be a kronecker lt, R = \\Sigma_X^{-1/2} \\kron \\Sigma_T^{-1/2}\n # TODO: enforce the diagonalization to return a KPLT for all shapes in\n # gpytorch or dense linear algebra for small shapes\n data_noise, task_noise = train_noise.lazy_tensors\n data_noise_root = data_noise.root_inv_decomposition(\n method=\"diagonalization\"\n )\n task_noise_root = task_noise.root_inv_decomposition(\n method=\"diagonalization\"\n )\n\n # ultimately we need to compute the diagonal of\n # (K_{x* X} \\kron K_T)(K_{XX} \\kron K_T + \\Sigma_X \\kron \\Sigma_T)^{-1}\n # (K_{x* X} \\kron K_T)^T\n # = (K_{x* X} \\Sigma_X^{-1/2} Q_R)(\\Lambda_R + I)^{-1}\n # (K_{x* X} \\Sigma_X^{-1/2} Q_R)^T\n # where R = (\\Sigma_X^{-1/2T}K_{XX}\\Sigma_X^{-1/2} \\kron\n # \\Sigma_T^{-1/2T}K_{T}\\Sigma_T^{-1/2})\n # first we construct the components of R's eigen-decomposition\n # TODO: make this be the default KPMatmulLT diagonal method in gpytorch\n whitened_data_covar = (\n data_noise_root.transpose(-1, -2)\n .matmul(data_data_covar)\n .matmul(data_noise_root)\n )\n w_data_evals, w_data_evecs = whitened_data_covar.diagonalization()\n whitened_task_covar = (\n task_noise_root.transpose(-1, -2)\n .matmul(self._task_covar_matrix)\n .matmul(task_noise_root)\n )\n w_task_evals, w_task_evecs = whitened_task_covar.diagonalization()\n\n # we add one to the eigenvalues as above (not just for stability)\n full_data_inv_evals = (\n KroneckerProductDiagLazyTensor(\n DiagLazyTensor(w_data_evals), DiagLazyTensor(w_task_evals)\n )\n .add_jitter(1.0)\n .inverse()\n )\n\n test_data_comp = (\n test_data_covar.matmul(data_noise_root).matmul(w_data_evecs).evaluate()\n ** 2\n )\n task_comp = (\n task_covar.matmul(task_noise_root).matmul(w_task_evecs).evaluate() ** 2\n )\n\n test_train_hadamard = KroneckerProductLazyTensor(test_data_comp, task_comp)\n data_var_term = test_train_hadamard.matmul(full_data_inv_evals).sum(dim=-1)\n\n pred_variance = test_var_term 
- data_var_term\n specialized_mvn = MultitaskMultivariateNormal(\n pred_mean, DiagLazyTensor(pred_variance)\n )\n if observation_noise:\n specialized_mvn = self.likelihood(specialized_mvn)\n\n posterior = MultitaskGPPosterior(\n mvn=specialized_mvn,\n joint_covariance_matrix=joint_covar,\n test_train_covar=test_obs_kernel,\n train_diff=train_diff,\n test_mean=test_mean,\n train_train_covar=self.train_full_covar,\n train_noise=train_noise,\n test_noise=test_noise,\n )\n\n if hasattr(self, \"outcome_transform\"):\n posterior = self.outcome_transform.untransform_posterior(posterior)\n return posterior\n\n def train(self, val=True, *args, **kwargs):\n if val:\n fixed_cache_names = [\"data_data_roots\", \"train_full_covar\", \"task_root\"]\n for name in fixed_cache_names:\n try:\n pop_from_cache(self, name)\n except CachingError:\n pass\n\n return super().train(val, *args, **kwargs)\n" ]
[ [ "torch.Size", "torch.zeros", "torch.cat", "torch.no_grad", "torch.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Siviuze/CL4P-TP
[ "8e0afced80cac7699764654719043ecb2d948bc0" ]
[ "simulation/src/claptrap_simu/core/claptrap.py" ]
[ "# Claptrap class\n# This class runs a simulation of Claptrap using pinocchio, given a controller as input.\n\n# A controller is defined by the following signature:\n# def controller(t, q, v):\n# return tau\nimport pinocchio as pnc\nimport numpy as np\nimport scipy.integrate\nimport tqdm\nimport pkg_resources\nimport os\n\nfrom ..log_handling.logger import Logger\n\n\nclass Claptrap():\n def __init__(self, urdf_path = None):\n '''\n Init the robot object.\n @param urdf_path Path to URDF (default is to load URDF supplied with this python package)\n '''\n if urdf_path is None:\n urdf_path = pkg_resources.resource_filename('claptrap_simu', 'data/claptrap.urdf')\n\n self.robot = pnc.RobotWrapper.BuildFromURDF(urdf_path, [os.path.dirname(urdf_path)], root_joint=None)\n\n # Compute wheel radius vector.\n self.wheel_radius = self.robot.model.jointPlacements[self.robot.model.getJointId(\"BodyJoint\")].translation[2]\n\n\n def _dynamics(self, t, x):\n ''' Forward dynamics of the robot, to integrate\n '''\n # Split input as (q, v) pair\n q = x[:self.robot.nq]\n v = x[self.robot.nq:]\n\n # Run forward dynamic computation\n # Compute H and g + coloriolis effects\n H = self.robot.mass(q)\n g = self.robot.nle(q, v)\n\n # Compute contact jacobian and derivative (drift).\n # Since q = (x y gamma beta alpha theta) where (alpha beta gamma) are RPY angles of the base,\n # the contact implies that in the YawLink frame (after rotation by gamma), we have vx = R (dalpha + dtheta)\n # and vy = 0\n gamma = q[2]\n J = np.array([[np.cos(gamma), np.sin(gamma), 0, 0, -self.wheel_radius, -self.wheel_radius],\n [-np.sin(gamma), np.cos(gamma), 0, 0, 0, 0]])\n dJ = np.array([[-np.sin(gamma), np.cos(gamma), 0, 0, 0, 0],\n [-np.cos(gamma), -np.sin(gamma), 0, 0, 0, 0]]) * v[2]\n drift = - dJ @ v\n\n # Compute controller torque\n torque = np.zeros(self.robot.model.nv)\n\n torque[5] = self.controller(t, q, v)\n # Write full equation\n A = np.block([[H, J.T], [J, np.zeros((2, 2))]])\n b = np.concatenate((torque - g, drift))\n # Find dv, we don't care about the contact forces for now.\n dv = np.linalg.solve(A, b)[:-2]\n\n return np.concatenate((v, dv))\n\n\n def simulate(self, x0, simulation_duration, dt, motor_control_law, output_name = \"/tmp/claptrap.csv\", verbose = True):\n ''' Run a simulation of given controller motor_control_law, log the results, and return the state.\n @param x0 Initial state (position + velocity)\n @param simulation_duration Length of the simulation\n @param dt Timestep for logger - note that the controller is simulated in a continuous manner\n @param motor_control_law Motor callback law, with signature motor_control_law(t, q, v) -> torque\n @param output_name Optional, name of the output log file.\n @param verbose Optional, whether or not to display a progress bar during simulation.\n '''\n self.controller = motor_control_law\n\n # Create integrator.\n solver = scipy.integrate.ode(self._dynamics)\n solver.set_integrator('dopri5')\n solver.set_initial_value(x0)\n\n # Create logger\n logged_values = [\"Claptrap.q\" + str(i) for i in range(self.robot.model.nq)] + \\\n [\"Claptrap.v\" + str(i) for i in range(self.robot.model.nv)] + \\\n [\"Claptrap.energy\"]\n\n logger = Logger(logged_values)\n\n if verbose:\n pbar = tqdm.tqdm(total=simulation_duration, bar_format=\"{percentage:3.0f}%|{bar}| {n:.2f}/{total_fmt} [{elapsed}<{remaining}]\")\n\n t = 0\n result_x = []\n while solver.successful() and t < simulation_duration:\n # Integrate, skip first iteration, we only want to log in this case\n if t > 0:\n 
solver.integrate(t)\n if verbose:\n pbar.update(dt)\n\n result_x.append(solver.y)\n q = solver.y[:self.robot.nq]\n v = solver.y[self.robot.nq:]\n\n # Log\n for i in range(self.robot.model.nq):\n logger.set(\"Claptrap.q\" + str(i), q[i])\n for i in range(self.robot.model.nv):\n logger.set(\"Claptrap.v\" + str(i), v[i])\n pnc.computeAllTerms(self.robot.model, self.robot.data, q, v)\n energy = self.robot.data.kinetic_energy + self.robot.data.potential_energy\n logger.set(\"Claptrap.energy\", energy)\n logger.set(\"time\", t)\n logger.new_line()\n\n t += dt\n\n\n logger.save(output_name)\n # Return time and x\n return logger.data[\"time\"][:-1], np.array(result_x)\n\n\n # ~ def log_state(self, logger, prefix):\n # ~ '''Log current state: the values logged are defined in CLAPTRAP_STATE_SUFFIXES\n # ~ @param logger Logger object\n # ~ @param prefix Prefix to add before each suffix.\n # ~ '''\n # ~ logger.set(prefix + \"roll\", self.q[3, 0])\n # ~ logger.set(prefix + \"pitch\", self.q[4,0])\n # ~ logger.set(prefix + \"yaw\", self.q[2, 0])\n # ~ # TODO\n # ~ logger.set(prefix + \"wheelVelocity\", self.v[self.robot.model.joints[self.robot.model.getJointId(\"WheelJoint\")].idx_v, 0])\n\n # ~ pnc.computeAllTerms(self.robot.model, self.robot.data, self.q, self.v)\n # ~ energy = self.robot.data.kinetic_energy + self.robot.data.potential_energy\n # ~ logger.set(prefix + \"energy\", energy)\n\n # ~ logger.set(prefix + \"wheelTorque\", self.tau[0, 0])\n\n # ~ w_M_base = self.robot.framePosition(self.q, self.robot.model.getFrameId(\"Body\"), False)\n # ~ logger.set_vector(prefix + \"base\", w_M_base.translation)\n\n" ]
[ [ "numpy.linalg.solve", "numpy.cos", "numpy.sin", "numpy.concatenate", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shapelets/shapelets-compute
[ "1dffe62d4eab9b1115b95bda5aaa7a3392024d72" ]
[ "modules/test/test_array_manipulation.py" ]
[ "# Copyright (c) 2021 Grumpy Cat Software S.L.\n#\n# This Source Code is licensed under the MIT 2.0 license.\n# the terms can be found in LICENSE.md at the root of\n# this project, or at http://mozilla.org/MPL/2.0/.\n\nimport numpy as np\nimport shapelets.compute as sc\n\n\ndef test_explicit_list_creation():\n sa = sc.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=\"float64\")\n na = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=\"float64\")\n assert sa.same_as(na)\n\n\ndef test_explicit_tuple_creation():\n sa = sc.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)), dtype=\"float64\")\n na = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)), dtype=\"float64\")\n assert sa.same_as(na)\n\n\ndef test_create_iota():\n a = sc.iota((5, 3), dtype=\"float32\")\n assert a.same_as([[0., 5, 10], [1, 6, 11], [2, 7, 12], [3, 8, 13], [4, 9, 14]])\n b = sc.iota((5, 3), (1, 2), dtype=\"float32\")\n assert b.same_as([\n [0., 5, 10, 0, 5, 10],\n [1, 6, 11, 1, 6, 11],\n [2, 7, 12, 2, 7, 12],\n [3, 8, 13, 3, 8, 13],\n [4, 9, 14, 4, 9, 14]\n ])\n\n\ndef test_create_constant():\n a = sc.ones((5, 4, 3), dtype=\"int32\")\n b = np.ones((5, 4, 3), dtype=\"int32\")\n a.same_as(b)\n\n\ndef test_dimension_conversion():\n a = np.random.randn(5, 4, 3, 2)\n b = sc.array(a)\n assert a.ndim == b.ndim\n assert a.dtype == b.dtype\n assert a.shape == b.shape\n b.same_as(a)\n\n\ndef test_diagonal_creation():\n ones = sc.ones(4, dtype=\"float32\")\n diag_zero = sc.diag(ones, 0, False)\n assert diag_zero.same_as([\n [1., 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n diag_plus_one = sc.diag(ones, 1, False)\n assert diag_plus_one.same_as([\n [0., 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0]])\n diag_minus_one = sc.diag(ones, -1, False)\n assert diag_minus_one.same_as(\n [[0., 0, 0, 0, 0],\n [1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0],\n [0, 0, 0, 1, 0]])\n\n\ndef test_diagonal_extraction():\n a = sc.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n d = sc.diag(a, 0, True)\n assert d.same_as([1, 7])\n\n\ndef test_identity_creation():\n i = sc.identity((5, 3), dtype=\"int16\")\n assert i.same_as([\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [0, 0, 0],\n [0, 0, 0]\n ])\n\n\ndef test_range_creation():\n # Generates an array of [0, 4] along first dimension\n r1 = sc.range(5)\n assert r1.same_as([0, 1, 2, 3, 4])\n # Generates an array of [0, 4] along first dimension, tiled along second dimension\n r2 = sc.range((5, 2))\n assert r2.same_as([\n [0, 0],\n [1, 1],\n [2, 2],\n [3, 3],\n [4, 4]\n ])\n # Generates an array of [0, 2] along second dimension, tiled along first dimension\n r3 = sc.range((5, 3), 1)\n assert r3.same_as([\n [0, 1, 2],\n [0, 1, 2],\n [0, 1, 2],\n [0, 1, 2],\n [0, 1, 2]\n ])\n\n\ndef test_lower_upper():\n a = sc.random.random((5, 5), dtype=\"float32\")\n lower = sc.lower(a, True)\n upper = sc.upper(a, False)\n assert (lower + upper - sc.identity((5, 5))).same_as(a)\n\n\ndef test_pad_creation():\n a = sc.iota((3, 2), dtype=\"float32\") + 10.\n # add one extra row at the beginning (1, 0, ...) and one at the\n # end (1, 0, 0, 0). 
Values are zero.\n pz = sc.pad(a, (1, 0, 0, 0), (1, 0, 0, 0), 'zero')\n assert pz.same_as([\n [0., 0],\n [10, 13],\n [11, 14],\n [12, 15],\n [0, 0]\n ])\n\n # add one extra column at the beginning, one extra row at the end\n # and out of bound values are clamped to the edge\n zce = sc.pad(a, (0, 1, 0, 0), (1, 0, 0, 0), 'clampedge')\n assert zce.same_as([\n [10., 10, 13],\n [11, 11, 14],\n [12, 12, 15],\n [12, 12, 15]\n ])\n\n # same as before but cycle out of bound values are mapped to range\n # of the dimension in cyclic fashion\n zcc = sc.pad(a, (0, 1, 0, 0), (1, 0, 0, 0), 'periodic')\n assert zcc.same_as([\n [13., 10, 13],\n [14, 11, 14],\n [15, 12, 15],\n [13, 10, 13]\n ])\n\n # Out of bound values are symmetric over the edge\n zcs = sc.pad(a, (1, 1, 0, 0), (1, 1, 0, 0), 'symmetric')\n assert zcs.same_as([\n [10, 10, 13, 13],\n [10, 10, 13, 13],\n [11, 11, 14, 14],\n [12, 12, 15, 15],\n [12, 12, 15, 15]\n ])\n\n\ndef test_moddims():\n # start with a single column, 12 rows\n a = sc.iota(12, 1)\n # in order, organise it as 2 rows, 6 columns\n b = sc.reshape(a, (2, 6))\n assert b.same_as([[0, 2, 4, 6, 8, 10], [1, 3, 5, 7, 9, 11]])\n # now 3 rows, 4 columns\n c = sc.reshape(a, (3, 4))\n assert c.same_as([[0, 3, 6, 9], [1, 4, 7, 10], [2, 5, 8, 11]])\n # etc...\n d = sc.reshape(a, (4, 3))\n assert d.same_as([[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]])\n e = sc.reshape(a, (6, 2))\n assert e.same_as([[0, 6], [1, 7], [2, 8], [3, 9], [4, 10], [5, 11]])\n f = sc.reshape(a, (1, 12))\n assert f.same_as(a.T)\n\n\ndef test_flat():\n a = sc.iota((5, 5, 5), 1)\n assert sc.flat(a).same_as(sc.iota(5 * 5 * 5, 1))\n\n\ndef test_flip():\n a = sc.iota((3, 2), 1)\n assert a.same_as([[0, 3], [1, 4], [2, 5]])\n # flip columns\n b = sc.flip(a, 1)\n assert b.same_as([[3, 0], [4, 1], [5, 2]])\n # flip rows\n c = sc.flip(a, 0)\n assert c.same_as([[2, 5], [1, 4], [0, 3]])\n\n\ndef test_reorder():\n a = sc.iota((3, 3), 1)\n # rows per columns, columns per rows\n b = sc.reorder(a, 1, 0)\n b.same_as([[0, 1, 2], [3, 4, 5], [6, 7, 8]])\n # one row, 3 columns, 3 depth where\n # depth is now what it used to be columns, columns are rows\n c = sc.reorder(a, 2, 0, 1)\n assert c.ndim == 3\n assert c.shape == (1, 3, 3)\n assert c.same_as([[[0, 3, 6], [1, 4, 7], [2, 5, 8]]])\n\n\ndef test_replace():\n a = sc.iota(10)\n sc.where(a < 5, a, 10 * a).same_as([0., 1, 2, 3, 4, 50, 60, 70, 80, 90])\n\n a = sc.iota((3, 3), 1)\n a = sc.where(a < 4, a, sc.full((3, 3), -1))\n assert a.same_as([[0., 3, -1], [1, -1, -1], [2, -1, -1]])\n\n a = sc.iota((3, 3), 1)\n assert (a < 4).same_as([[True, True, False], [True, False, False], [True, False, False]])\n\n\ndef test_shift():\n a = sc.iota((3, 3), 1)\n # get columns as a whole and shift them to the right in circular manner\n b = sc.shift(a, 0, 1)\n assert b.same_as([[6, 0, 3], [7, 1, 4], [8, 2, 5]])\n # same, but to the left\n c = sc.shift(a, 0, -1)\n assert c.same_as([[3, 6, 0], [4, 7, 1], [5, 8, 2]])\n\n\ndef test_tile():\n # one simple column vector\n a = sc.iota(4, 1)\n # tile the number of rows twice\n b = sc.tile(a, 2)\n assert b.same_as([0, 1, 2, 3, 0, 1, 2, 3])\n # tile b by two columns\n c = sc.tile(b, 1, 2)\n assert c.same_as([[0, 0], [1, 1], [2, 2], [3, 3], [0, 0], [1, 1], [2, 2], [3, 3]])\n # same in one operation\n d = sc.tile(a, 2, 2)\n assert d.same_as(c)\n\n\ndef test_transpose():\n a = sc.iota((3, 3), 1)\n b = sc.transpose(a)\n assert b.same_as(a.T)\n assert b.same_as([[0, 1, 2], [3, 4, 5], [6, 7, 8]])\n\n\ndef test_numpy_interface_s_n_s():\n a = sc.iota((3, 5, 7, 11), 1)\n 
n = np.array(a)\n assert n.shape == a.shape\n assert n.dtype == a.dtype\n assert n.ndim == a.ndim\n c = sc.array(n)\n assert c.shape == a.shape\n assert c.dtype == a.dtype\n assert c.ndim == a.ndim\n assert c.same_as(a)\n\n\ndef test_numpy_interface_n_s_n():\n a = np.linspace(0., 100, 100, False, dtype=\"float32\")\n a.shape = (4, 25)\n b = sc.array(a)\n assert a.shape == b.shape\n assert a.dtype == b.dtype\n assert a.ndim == b.ndim\n c = np.array(b)\n assert np.allclose(a, c)\n\n\ndef test_memory_view():\n a = sc.iota((3, 5, 7, 11), 1, dtype=\"float32\")\n b = memoryview(a)\n assert b.shape == a.shape\n assert b.f_contiguous\n assert not b.readonly\n assert b.ndim == a.ndim\n assert b.contiguous\n assert not b.c_contiguous\n\n # data is shared for CPU backend.\n sc.set_backend('cpu')\n assert sc.get_backend() == 'cpu'\n\n # memoryview doesn't have indexing\n # implemented\n a = sc.iota(10)\n b = memoryview(a)\n assert b.shape[0] == a.shape[0]\n assert b.f_contiguous\n assert b.c_contiguous\n assert not b.readonly\n assert b.ndim == a.ndim\n assert b.contiguous\n b[3] = -3\n assert a.same_as([0, 1, 2, -3, 4, 5, 6, 7, 8, 9])\n\n\ndef test_join():\n a = sc.array([1, 2, 3, 4])\n b = sc.array([5, 6, 7, 8])\n c = sc.array([9, 10, 11, 12])\n # join as rows\n assert sc.join([a, b, c], 0).same_as([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n # join on columns\n assert sc.join([a, b, c], 1).same_as([[1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12]])\n\n\ndef test_linspace():\n sc.linspace(1, 100, num=10).same_as(np.linspace(1, 100, num=10))\n sc.linspace(1, 100, num=10, endpoint=False).same_as(np.linspace(1, 100, num=10, endpoint=False))\n sc.linspace(1, 100, num=10, endpoint=True).same_as(np.linspace(1, 100, num=10, endpoint=True))\n sc.geomspace(1, 100, num=10).same_as(np.geomspace(1, 100, num=10))\n sc.geomspace(1, 100, num=10, endpoint=False).same_as(np.geomspace(1, 100, num=10, endpoint=False))\n sc.geomspace(1, 100, num=10, endpoint=True).same_as(np.geomspace(1, 100, num=10, endpoint=True))\n sc.logspace(1, 100, num=10).same_as(np.logspace(1, 100, num=10))\n sc.logspace(1, 100, num=10, endpoint=False).same_as(np.logspace(1, 100, num=10, endpoint=False))\n sc.logspace(1, 100, num=10, endpoint=True).same_as(np.logspace(1, 100, num=10, endpoint=True))\n" ]
[ [ "numpy.allclose", "numpy.linspace", "numpy.logspace", "numpy.ones", "numpy.geomspace", "numpy.random.randn", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
krmurtha/aslprep
[ "5c00c2c9ad1daf93b056907596b7798ae8059efb", "5c00c2c9ad1daf93b056907596b7798ae8059efb" ]
[ "aslprep/niworkflows/conftest.py", "aslprep/pybids/analysis/transformations/base.py" ]
[ "\"\"\"py.test configuration\"\"\"\nimport os\nfrom sys import version_info\nfrom pathlib import Path\nimport numpy as np\nimport nibabel as nb\nimport pandas as pd\nimport pytest\nimport tempfile\nimport pkg_resources\n\nfrom .utils.bids import collect_data\n\ntest_data_env = os.getenv(\n \"TEST_DATA_HOME\", str(Path.home() / \".cache\" / \"stanford-crn\")\n)\ndata_dir = Path(test_data_env) / \"BIDS-examples-1-enh-ds054\"\n\n\[email protected](autouse=True)\ndef add_np(doctest_namespace):\n doctest_namespace[\"PY_VERSION\"] = version_info\n doctest_namespace[\"np\"] = np\n doctest_namespace[\"nb\"] = nb\n doctest_namespace[\"pd\"] = pd\n doctest_namespace[\"os\"] = os\n doctest_namespace[\"pytest\"] = pytest\n doctest_namespace[\"Path\"] = Path\n doctest_namespace[\"datadir\"] = data_dir\n doctest_namespace[\"bids_collect_data\"] = collect_data\n doctest_namespace[\"test_data\"] = pkg_resources.resource_filename(\n \"niworkflows\", \"tests/data\"\n )\n\n tmpdir = tempfile.TemporaryDirectory()\n\n doctest_namespace[\"tmpdir\"] = tmpdir.name\n\n nifti_fname = str(Path(tmpdir.name) / \"test.nii.gz\")\n nii = nb.Nifti1Image(np.random.random((5, 5)).astype(\"f4\"), np.eye(4))\n nii.header.set_qform(np.diag([1, 1, 1, 1]), code=1)\n nii.header.set_sform(np.diag([-1, 1, 1, 1]), code=1)\n nii.to_filename(nifti_fname)\n doctest_namespace[\"nifti_fname\"] = nifti_fname\n\n cwd = os.getcwd()\n os.chdir(tmpdir.name)\n yield\n os.chdir(cwd)\n tmpdir.cleanup()\n\n\[email protected]\ndef testdata_dir():\n return data_dir\n", "\"\"\"Base Transformation class and associated utilities. \"\"\"\n\nimport re\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom copy import deepcopy\nimport itertools\nimport inspect\n\nimport numpy as np\nimport pandas as pd\n\nfrom bids.utils import listify\nfrom bids.variables import SparseRunVariable\n\n\nclass Transformation(metaclass=ABCMeta):\n\n ### Class-level settings ###\n # The following settings govern the way Transformations are applied to the\n # data. The default settings can be overridden within subclasses.\n\n # List all argument names that specify variables used by Transformation.\n # This is necessary in order to ensure that all and only variables touched\n # by the transformation are cloned before any manipulation occurs.\n # variables in 'variables' are always cloned, so only additional arguments\n # should be specified here.\n _variables_used = ()\n\n # What data type to pass onto the core _transform() logic. Must be one\n # of 'variable' (the entire BIDSVariable object), 'pandas' (the extracted\n # pandas DF stored in .values), or 'numpy' (just the numpy array inside\n # the .values property of the pandas DF). To minimize overhead and\n # simplify code, it is recommended to avoid using 'variable' if possible.\n _input_type = 'pandas'\n\n # The data type the internal _transform() method is expected to return.\n # Must be one of 'variable', 'pandas', 'numpy', or 'none'. In the last\n # case, all desired changes must be made in-place within _transform(), as\n # no further changes will be committed.\n _return_type = 'pandas'\n\n # A tuple indicating which arguments give the names of variables that must\n # all be aligned with one another (i.e., onsets and durations match\n # perfectly) before processing. Defaults to None.\n _align = None\n\n # Boolean indicating whether the Transformation should be applied to each\n # variable in the input list in turn. 
When True (default), Transformation\n # is applied once per element in the variable list, with all arguments\n # being passed repeatedly. When False, all data (i.e., variables or their\n # pandas DFs or ndarrays, as specified in _input_type) are passed to the\n # Transformation simultaneously.\n _loopable = True\n\n # Boolean indicating whether the Transformation can handle groupby\n # operations. When True, a 'groupby' argument is made implicitly available,\n # and if passed, the Transformation will be applied separately to each\n # subset of the data, as defined by the variables named in groupby. When\n # False, the Transformation does not allow grouping, and will raise an\n # exception if groupby is passed. Transformations should set this to False\n # if the groupby argument cannot possibly change the returned result.\n _groupable = True\n\n # Boolean indicating whether the output argument is mandatory. When False\n # (default), transformations will be applied in-place unless output is set.\n # When True, the user must explicitly specify the output, or an exception\n # is raised.\n _output_required = False\n\n # An implicit 'dense' argument is always available, and indicates whether\n # or not to operate on dense variables. When True, the arguments listed in\n # _densify control which variables will be densified. Defaults to variables\n # named in the 'variables' argument. Note that if this value is overridden,\n # 'variables' will need to be explicitly included (i.e., the subclass's\n # _densify tuple replaces the base class rather than appending to it).\n _densify = ('variables',)\n\n # Allow categorical variables in the input arguments? When None (default),\n # any categorical variables encountered as inputs will raise an exception.\n # Otherwise, a tuple giving the names of the arguments whose variables will\n # be passed through as-is even if categorical.\n _allow_categorical = None\n\n def __new__(cls, collection, variables, *args, **kwargs):\n t = super(Transformation, cls).__new__(cls)\n t._setup(collection, variables, *args, **kwargs)\n return t.transform()\n\n def _setup(self, collection, variables, *args, **kwargs):\n \"\"\"Replaces __init__ to set instance attributes because on Python\n >= 3.3, we can't override both new and init. \"\"\"\n self.collection = collection\n self.variables = listify(variables)\n self.groupby = kwargs.pop('groupby', None)\n self.output = listify(kwargs.pop('output', None))\n self.output_prefix = kwargs.pop('output_prefix', None)\n self.output_suffix = kwargs.pop('output_suffix', None)\n self.dense = kwargs.pop('dense', False)\n\n # Convert any args to named keyword arguments in order to make sure\n # that operations like densification, alignment, etc. 
correctly detect\n # all named arguments.\n if args:\n arg_spec = inspect.getfullargspec(self._transform)\n for i, arg_val in enumerate(args):\n # Skip first two argnames--they're always 'self' and\n # 'variables'\n kwargs[arg_spec.args[2 + i]] = arg_val\n\n self.kwargs = kwargs\n\n # Expand any detected variable group names or wild cards\n self._expand_variable_groups()\n self._expand_variable_names()\n\n def _expand_variable_groups(self):\n \"\"\" Replace any detected variable groups with the associated lists of\n variable names.\n \"\"\"\n groups = self.collection.groups\n variables = [groups[v] if v in groups else [v] for v in self.variables]\n self.variables = list(itertools.chain(*variables))\n\n def _expand_variable_names(self):\n \"\"\"Filter all available arguments against collection's variables using\n unix-style pattern matching.\"\"\"\n def _replace_arg_values(values):\n is_iter = isinstance(values, (list, tuple))\n values = listify(values)\n result = []\n # Only try to match strings containing a relevant special character\n for v in values:\n if isinstance(v, str) and re.search('[\\*\\?\\[\\]]', v):\n result.append(self.collection.match_variables(v))\n else:\n result.append([v])\n\n result = list(itertools.chain(*result))\n # Don't return a list unless we have to\n if is_iter or len(result) > 1:\n return result\n return result[0]\n\n # 'variables' is stored separately, so handle it separately\n self.variables = _replace_arg_values(self.variables)\n\n for k, arg in self.kwargs.items():\n self.kwargs[k] = _replace_arg_values(arg)\n\n def _clone_variables(self):\n \"\"\"Deep copy all variables the transformation touches. This prevents us\n from unnecessarily overwriting existing variables. \"\"\"\n\n # Always clone the target variables\n self._variables = {v: self.collection[v].clone()\n for v in self.variables}\n\n if not self._variables_used:\n return\n\n # Loop over argument names and clone all variable names in each one\n for var in self._variables_used:\n for v in listify(self.kwargs.get(var, [])):\n # Kludge: we need to allow entity variables to be passed as\n # names even though they don't exist as separate variables\n if (v not in self.collection.variables and\n v in ['task', 'run', 'session', 'subject']):\n continue\n self._variables[v] = deepcopy(self.collection[v])\n\n def _check_categorical_variables(self):\n \"\"\"Convert categorical variables to dummy-coded indicators. \"\"\"\n\n # Collect variable names to pass through\n pass_thru = []\n if self._allow_categorical is not None:\n for arg in self._allow_categorical:\n keys = self.variables if arg == 'variables' \\\n else self.kwargs.get(arg, [])\n pass_thru.extend(listify(keys))\n pass_thru = list(set(pass_thru))\n\n for name, col in self._variables.items():\n if name not in pass_thru:\n if col.values.values.dtype.kind not in 'bifc':\n msg = (\"The %s transformation does not allow variable '%s'\"\n \" to be categorical. 
Either pass a different \"\n \"variable or explicitly convert to a set of binary \"\n \"indicators via the 'factor' transformation.\")\n raise ValueError(msg % (self.__class__.__name__, name))\n\n def _densify_variables(self):\n\n variables = []\n\n for var in self._densify:\n\n if var == 'variables':\n variables.extend(self.variables)\n else:\n variables.extend(listify(self.kwargs.get(var, [])))\n\n for v in variables:\n var = self._variables[v]\n if isinstance(var, SparseRunVariable):\n sr = self.collection.sampling_rate\n self._variables[v] = var.to_dense(sr)\n\n def transform(self):\n\n output_passed = not (self.output is None and self.output_prefix is None\n and self.output_suffix is None)\n\n if not output_passed and self._output_required:\n raise ValueError(\"Transformation '%s' requires output names to be \"\n \"provided. Please set at least one of 'output',\"\n \"'output_prefix', or 'output_suffix'.\" %\n self.__class__.__name__)\n\n kwargs = self.kwargs\n\n # Deep copy all variables we expect to touch\n self._clone_variables()\n\n # Make sure we don't have categorical variables we can't handle\n self._check_categorical_variables()\n\n # Densify variables if needed\n if self.dense:\n self._densify_variables()\n\n # Set variables we plan to operate on directly\n variables = [self._variables[c] for c in self.variables]\n\n # Align variables if needed\n self._align_variables(variables)\n\n # Pass desired type--variable, DataFrame, or NDArray\n def select_type(col):\n return {'variable': col, 'pandas': col.values,\n 'numpy': col.values.values}[self._input_type]\n\n data = [select_type(c) for c in variables]\n\n if not self._loopable:\n variables = [variables]\n\n for i, col in enumerate(variables):\n\n # If we still have a list, pass all variables in one block\n if isinstance(col, (list, tuple)):\n result = self._transform(data, **kwargs)\n if self._return_type not in ['none', None]:\n col = col[0].clone(data=result, name=self.output[0])\n # Otherwise loop over variables individually\n else:\n if self._groupable and self.groupby is not None:\n result = col.apply(self._transform, groupby=self.groupby,\n **kwargs)\n else:\n result = self._transform(data[i], **kwargs)\n\n if self._return_type in ['none', None]:\n continue\n elif self._return_type == 'numpy':\n col.values = pd.DataFrame(result)\n elif self._return_type == 'pandas':\n col.values = result\n elif self._return_type == 'variable':\n col = result\n\n # Overwrite existing variable\n if not output_passed:\n # If multiple variables were returned, add each one separately\n if isinstance(result, (list, tuple)):\n for r in result:\n self.collection[r.name] = r\n else:\n self.collection[col.name] = col\n\n # Set as a new variable\n else:\n # Either assign new name in order, or re-use existing one\n if self.output is not None:\n if len(self.variables) == len(self.output) or not \\\n self._loopable:\n _output = self.output[i]\n elif len(self.output) == 1:\n _output = str(self.output) + '_' + col.name\n else:\n _output = col.name\n\n # Add prefix and suffix if provided\n if self.output_prefix is not None:\n _output = self.output_prefix + _output\n if self.output_suffix is not None:\n _output += self.output_suffix\n\n col.name = _output\n self.collection[_output] = col\n\n @abstractmethod\n def _transform(self, **kwargs):\n pass\n\n def _preprocess(self, col):\n return col\n\n def _postprocess(self, col):\n return col\n\n def _align_variables(self, variables, force=True):\n \"\"\"Checks whether the specified variables have aligned 
indexes. This\n implies either that all variables are dense, or that all variables are\n sparse and have exactly the same onsets and durations. If variables are\n not aligned and force = True, all variables will be forced to dense\n format in order to ensure alignment.\n \"\"\"\n\n if self._align is None or self._align == 'none':\n return\n\n def _align(variables):\n # If any variable is dense, all variables must be dense\n sparse = [c for c in variables\n if isinstance(c, SparseRunVariable)]\n if len(sparse) < len(variables):\n if sparse:\n sparse_names = [s.name for s in sparse]\n msg = (\"Found a mix of dense and sparse variables. May \"\n \"cause problems for some transformations.\")\n if force:\n msg += (\" Sparse variables %s were converted to dense \"\n \"form to ensure proper alignment.\" %\n sparse_names)\n sr = self.collection.sampling_rate\n sparse = [s.to_dense(sr) for s in sparse]\n warnings.warn(msg)\n # If all are sparse, durations, onsets, and index must match\n # perfectly for all\n else:\n def get_col_data(col):\n return np.c_[col.values.index, col.duration, col.onset]\n\n def compare_variables(a, b):\n return len(a) == len(b) and np.allclose(a, b)\n\n # Compare 1st col with each of the others\n fc = get_col_data(variables[0])\n if not all([compare_variables(fc, get_col_data(c))\n for c in variables[1:]]):\n msg = \"Misaligned sparse variables found.\"\n if force:\n msg += (\" Forcing all sparse variables to dense in \"\n \"order to ensure proper alignment.\")\n sr = self.collection.sampling_rate\n variables = [c.to_dense(sr) for c in variables]\n warnings.warn(msg)\n\n align_variables = [listify(self.kwargs[v])\n for v in listify(self._align) if v in self.kwargs]\n align_variables = list(itertools.chain(*align_variables))\n align_variables = [self.collection[c] for c in align_variables if c]\n\n if align_variables and self._loopable:\n for c in variables:\n # TODO: should clone all variables in align_variables before\n # alignment to prevent conversion to dense in any given\n # iteration having side effects. This could be an issue if,\n # e.g., some vars in 'variables' are dense and some are sparse.\n _align([c] + align_variables)\n else:\n _align(listify(variables) + align_variables)\n" ]
[ [ "numpy.diag", "numpy.eye", "numpy.random.random" ], [ "numpy.allclose", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
epintilii/deep-learning-coursera
[ "974794c446386e524446a10b8949db5cc547f936" ]
[ "Neural Networks and Deep Learning/planar_utils.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\nimport sklearn.linear_model\n\ndef plot_decision_boundary(model, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(X[0, :], X[1, :], c=y.ravel(), cmap=plt.cm.Spectral)\n #plt.show()\n\ndef sigmoid(x):\n \"\"\"\n Compute the sigmoid of x\n\n Arguments:\n x -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(x)\n \"\"\"\n s = 1/(1+np.exp(-x))\n return s\n\ndef load_planar_dataset():\n np.random.seed(1)\n m = 400 # number of examples\n N = int(m/2) # number of points per class\n D = 2 # dimensionality\n X = np.zeros((m,D)) # data matrix where each row is a single example\n Y = np.zeros((m,1), dtype='uint8') # labels vector (0 for red, 1 for blue)\n a = 4 # maximum ray of the flower\n\n for j in range(2):\n ix = range(N*j,N*(j+1))\n t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2 # theta\n r = a*np.sin(4*t) + np.random.randn(N)*0.2 # radius\n X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]\n Y[ix] = j\n \n X = X.T\n Y = Y.T\n\n return X, Y\n\ndef load_extra_datasets(): \n N = 200\n noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)\n noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)\n blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)\n gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None)\n no_structure = np.random.rand(N, 2), np.random.rand(N, 2)\n \n return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure" ]
[ [ "matplotlib.pyplot.contourf", "sklearn.datasets.make_gaussian_quantiles", "numpy.random.seed", "numpy.linspace", "sklearn.datasets.make_moons", "numpy.arange", "numpy.cos", "numpy.sin", "sklearn.datasets.make_circles", "numpy.random.rand", "numpy.random.randn", "matplotlib.pyplot.xlabel", "numpy.exp", "numpy.zeros", "sklearn.datasets.make_blobs", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
3lectrologos/comet
[ "97c945a5f3a355028de302667cc2cb785b13bc8e" ]
[ "comet/setup.py" ]
[ "#!/usr/bin/python\n\n\"\"\"Compiles the C and Fortran modules used by CoMEt.\"\"\"\n\n############################################################################\n# First compile the C code\n\n# Load required modules\nfrom distutils.core import setup, Extension\nimport subprocess, numpy, os\n\nthisDir = os.path.dirname(os.path.realpath(__file__))\n\ndef subprocessOutput(args):\n proc = subprocess.Popen(args, stdout=subprocess.PIPE)\n out, err = proc.communicate()\n try: return out\n except: print(\"Error: \" + err)\n\ncompile_args = ['-g', '-O0']\n\nsrcs = ['/src/c/utils/cephes/polevl.c','/src/c/utils/cephes/gamma.c',\n '/src/c/utils/cephes/incbet.c', '/src/c/utils/utilities.c',\n '/src/c/weights.c', '/src/c/mutation_data.c', '/src/c/cometmodule.c',\n '/src/c/comet_mcmc.c', '/src/c/comet_exhaustive.c']\nmodule = Extension('cComet', include_dirs=[numpy.get_include()],\n\tsources = [ thisDir + s for s in srcs ], extra_compile_args = compile_args)\nsetup(name='CoMEt', version='1.0', description='C module for running CoMEt.',\n ext_modules=[module])\n\n############################################################################\n# Second compile the Fortran code\n\n# Load required modules\nfrom numpy.distutils.core import Extension, setup\n\n# Compile the bipartite edge swap code\next = Extension(name='permute_matrix', sources=[thisDir + '/src/fortran/permute_matrix.f95'])\nsetup(name='permute_matrix', ext_modules=[ext])\n" ]
[ [ "numpy.get_include", "numpy.distutils.core.Extension", "numpy.distutils.core.setup" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexxfernandez13/bienes_inmuebles
[ "6ee2761f4e1e8092d5ec814228e7d5bd393067c4" ]
[ "tests/test_csv.py" ]
[ "import os\nimport copy\nimport pandas as pd\nimport numpy as np\nfrom bienes_inmuebles.dataset.csv_preprocesamiento import CSV, PATH4\n\n\ndef test_cvs_to_dataframe():\n objeto_csv = CSV(os.path.join(PATH4, \"data/csv_barcelona.csv\"))\n assert objeto_csv.df.columns[1] == \"listing_url\" # funcion de comprobacion -> como un if para test\n\n\n\"\"\"test de que plot sale bien, guardar grafico en png, ver que la file existe \n\ndef test_plot():\n csv.plot(save=True)\n assert file_existe? \"\"\"\n\ncsv = CSV(os.path.join(PATH4, \"data/csv_barcelona.csv\"))\ndf1 = pd.DataFrame(data={'col1': [1, 1, \"hola\", np.NaN], 'col2': [2, 2, \"hola\", np.NaN], 'col3': [1, 1, 1, 1]})\ncsv.df = df1\n\ncsv2 = copy.deepcopy(csv)\ndf2 = pd.DataFrame(data={'col1': [1, 1, 3, 5], 'col2': [2, 2, 6, 7]})\ncsv2.df = df2\n\n\ndef test_plot(csv=csv):\n output = os.path.join(PATH4, \"data/col3.png\")\n delete_test_output(output=output)\n csv.guardar_plot(save=True)\n assert os.path.exists(output) # convierte path relativo en absoluto\n delete_test_output(output=output)\n\n\n\"\"\"test funciones nuevas\"\"\"\n\n\ndef test_vistazo():\n csv = CSV(os.path.join(PATH4, \"data/csv_barcelona.csv\"))\n cabecera, final, columnas, faltantes, forma = csv.vistazo()\n assert \"neighborhood_overview\" in columnas\n assert \"https://www.airbnb.com/rooms/21974\" in cabecera.values[0]\n\n\ndef test_duplicates(csv=csv):\n csv_dup = csv.duplicados()\n assert csv.df.shape != csv_dup.df.shape\n\n\ndef test_dropna(csv=csv):\n csv_dup = csv.dropna(axis=0, number=0)\n assert csv.df.shape != csv_dup.df.shape\n\n\ndef test_int(csv=csv):\n csv_int = csv.ints()\n assert csv.df.shape != csv_int.df.shape\n\n\ndef delete_test_output(output=\"file.png\"):\n try:\n os.remove(output)\n except FileNotFoundError:\n pass\n\n\ndef test_histograma(csv=csv2, output=\"file_histograma.png\"):\n delete_test_output(output)\n csv.plot_histograma(df=csv.df, output=output)\n assert os.path.exists(output)\n delete_test_output(output)\n\n\ndef test_densidad(csv=csv2, output=\"file_densidad.png\"):\n delete_test_output(output)\n csv.plot_densidad(df=csv.df, output=output)\n assert os.path.exists(output)\n delete_test_output(output)\n\n\ndef test_bigotes(csv=csv2, output=\"file_bigotes.png\"):\n delete_test_output(output)\n csv.plot_bigotes(df=csv.df, output=output)\n assert os.path.exists(output)\n delete_test_output(output)\n\n\ndef test_correlacion(csv=csv2, output=\"file_correlacion.png\"):\n delete_test_output(output)\n csv.plot_correlacion(df=csv.df, output=output)\n assert os.path.exists(output)\n delete_test_output(output)\n\n\ndef test_dispersion(csv=csv2, output=\"file_dispersion.png\"):\n delete_test_output(output)\n csv.plot_dispersion(df=csv.df, output=output)\n assert os.path.exists(output)\n delete_test_output(output)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
guanzhchen/PETuning
[ "eb36327713e237ea95a8982ceabb71de5ca4b09d", "eb36327713e237ea95a8982ceabb71de5ca4b09d" ]
[ "model/roberta/modeling_roberta.py", "tasks/glue/glue.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch RoBERTa model. \"\"\"\n\nimport math\n\nimport torch\nimport torch.utils.checkpoint\nimport model.lora as lora\nfrom packaging import version\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom transformers.activations import ACT2FN, gelu\nfrom transformers.adapters.model_mixin import ModelWithHeadsAdaptersMixin\nfrom transformers.adapters.models.bert import (\n BertEncoderAdaptersMixin,\n BertLayerAdaptersMixin,\n BertModelAdaptersMixin,\n BertModelHeadsMixin,\n BertOutputAdaptersMixin,\n BertSelfOutputAdaptersMixin,\n)\nfrom transformers.file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom transformers.modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom transformers.modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom transformers.utils import logging\nfrom .configuration_roberta import RobertaConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"roberta-base\"\n_CONFIG_FOR_DOC = \"RobertaConfig\"\n_TOKENIZER_FOR_DOC = \"RobertaTokenizer\"\n\nROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"roberta-base\",\n \"roberta-large\",\n \"roberta-large-mnli\",\n \"distilroberta-base\",\n \"roberta-base-openai-detector\",\n \"roberta-large-openai-detector\",\n # See all RoBERTa models at https://huggingface.co/models?filter=roberta\n]\n\n\nclass RobertaEmbeddings(nn.Module):\n \"\"\"\n Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.\n \"\"\"\n\n # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n 
self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n if version.parse(torch.__version__) > version.parse(\"1.6.0\"):\n self.register_buffer(\n \"token_type_ids\",\n torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),\n persistent=False,\n )\n\n # End copy\n self.padding_idx = config.pad_token_id\n self.position_embeddings = nn.Embedding(\n config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx\n )\n\n def forward(\n self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0\n ):\n if position_ids is None:\n if input_ids is not None:\n # Create the position ids from the input token ids. Any padded tokens remain padded.\n position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)\n else:\n position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)\n\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs\n # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves\n # issue #5664\n if token_type_ids is None:\n if hasattr(self, \"token_type_ids\"):\n buffered_token_type_ids = self.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n if self.position_embedding_type == \"absolute\":\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n \"\"\"\n We are provided embeddings directly. 
We cannot infer which are padded so just generate sequential position ids.\n\n Args:\n inputs_embeds: torch.Tensor\n\n Returns: torch.Tensor\n \"\"\"\n input_shape = inputs_embeds.size()[:-1]\n sequence_length = input_shape[1]\n\n position_ids = torch.arange(\n self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device\n )\n return position_ids.unsqueeze(0).expand(input_shape)\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta\nclass RobertaSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n # self.query = nn.Linear(config.hidden_size, self.all_head_size)\n # self.key = nn.Linear(config.hidden_size, self.all_head_size)\n\n if config.lora:\n self.query = lora.Linear(config.hidden_size, self.all_head_size, config.lora_r, lora_alpha=config.lora_alpha)\n # print(config.lora_r)\n else:\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n \n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n\n if config.lora:\n self.value = lora.Linear(config.hidden_size, self.all_head_size, config.lora_r, lora_alpha=config.lora_alpha)\n else:\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n\n # self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n self.position_embedding_type = getattr(config, \"position_embedding_type\", \"absolute\")\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n self.max_position_embeddings = config.max_position_embeddings\n self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = 
self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n if self.position_embedding_type == \"relative_key\" or self.position_embedding_type == \"relative_key_query\":\n seq_length = hidden_states.size()[1]\n position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)\n position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)\n distance = position_ids_l - position_ids_r\n positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)\n positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility\n\n if self.position_embedding_type == \"relative_key\":\n relative_position_scores = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores\n elif self.position_embedding_type == \"relative_key_query\":\n relative_position_scores_query = torch.einsum(\"bhld,lrd->bhlr\", query_layer, positional_embedding)\n relative_position_scores_key = torch.einsum(\"bhrd,lrd->bhlr\", key_layer, positional_embedding)\n attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\n# Copied from 
transformers.models.modeling_bert.BertSelfOutput\nclass RobertaSelfOutput(BertSelfOutputAdaptersMixin, nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self._init_adapter_modules()\n\n def forward(self, hidden_states, input_tensor, **kwargs):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.adapters_forward(hidden_states, input_tensor, **kwargs)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta\nclass RobertaAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = RobertaSelfAttention(config)\n self.output = RobertaSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n **kwargs\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states, **kwargs)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate\nclass RobertaIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput\nclass RobertaOutput(BertOutputAdaptersMixin, nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self._init_adapter_modules()\n\n def forward(self, hidden_states, input_tensor, **kwargs):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.adapters_forward(hidden_states, input_tensor, **kwargs)\n return hidden_states\n\n\n# Copied from 
transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta\nclass RobertaLayer(BertLayerAdaptersMixin, nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = RobertaAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n assert self.is_decoder, f\"{self} should be used as a decoder model if cross attention is added\"\n self.crossattention = RobertaAttention(config)\n self.intermediate = RobertaIntermediate(config)\n self.output = RobertaOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n **kwargs\n ):\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n past_key_value=self_attn_past_key_value,\n **kwargs,\n )\n attention_output = self_attention_outputs[0]\n\n # if decoder, the last output is tuple of self-attn cache\n if self.is_decoder:\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n else:\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n cross_attn_present_key_value = None\n if self.is_decoder and encoder_hidden_states is not None:\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`\"\n\n # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n cross_attn_past_key_value,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n\n # add cross-attn cache to positions 3,4 of present_key_value tuple\n cross_attn_present_key_value = cross_attention_outputs[-1]\n present_key_value = present_key_value + cross_attn_present_key_value\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, **kwargs\n )\n outputs = (layer_output,) + outputs\n\n # if decoder, return the attn key/values as the last output\n if self.is_decoder:\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n def feed_forward_chunk(self, attention_output, **kwargs):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output, **kwargs)\n return layer_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta\nclass RobertaEncoder(BertEncoderAdaptersMixin, nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n self.drop_layer = -1\n\n def forward(\n 
self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n **kwargs\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n\n next_decoder_cache = () if use_cache else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n # rmlayers = [9,10,11]\n past_key_value = past_key_values[i] if past_key_values is not None else None\n \n # for j in range(i):\n # past_key_value = past_key_value + past_key_values[j]\n # if i > 0:\n # past_key_value = past_key_value + past_key_values[i-1]\n # if i in rmlayers:\n # # print(attention_mask)\n # attention_mask_new = attention_mask[:, :, :, self.config.pre_seq_len:]\n # else:\n # attention_mask_new = attention_mask[:]\n if self.gradient_checkpointing and self.training:\n\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n **kwargs,\n )\n\n hidden_states = layer_outputs[0]\n attention_mask = self.adjust_attention_mask_for_parallel(hidden_states, attention_mask)\n\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler\nclass RobertaPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass RobertaPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading 
pretrained\n models.\n \"\"\"\n\n config_class = RobertaConfig\n base_model_prefix = \"roberta\"\n supports_gradient_checkpointing = True\n\n # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, RobertaEncoder):\n module.gradient_checkpointing = value\n\n def update_keys_to_ignore(self, config, del_keys_to_ignore):\n \"\"\"Remove some keys from ignore list\"\"\"\n if not config.tie_word_embeddings:\n # must make a new list, or the class variable gets modified!\n self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]\n self._keys_to_ignore_on_load_missing = [\n k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore\n ]\n\n\nROBERTA_START_DOCSTRING = r\"\"\"\n\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nROBERTA_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? 
<../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaModel(BertModelAdaptersMixin, RobertaPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in `Attention is\n all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz\n Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration\n set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`\n argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an\n input to the forward pass.\n\n .. _`Attention is all you need`: https://arxiv.org/abs/1706.03762\n\n \"\"\"\n\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = RobertaEmbeddings(config)\n self.encoder = RobertaEncoder(config)\n\n self.pooler = RobertaPooler(config) if add_pooling_layer else None\n\n self._init_adapter_modules()\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=BaseModelOutputWithPoolingAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n # Copied from transformers.models.bert.modeling_bert.BertModel.forward\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n self.pre_transformer_forward(**kwargs)\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n batch_size, seq_length = input_shape\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)\n\n if token_type_ids is None:\n if hasattr(self.embeddings, \"token_type_ids\"):\n buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]\n buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)\n token_type_ids = buffered_token_type_ids_expanded\n else:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n 
position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n embedding_output = self.invertible_adapters_forward(embedding_output)\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Roberta Model transformer with the option to add multiple flexible heads on top.\"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaModelWithHeads(BertModelHeadsMixin, RobertaPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.roberta = RobertaModel(config)\n\n self._init_head_modules()\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"roberta-base\",\n output_type=ModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n adapter_names=None,\n head=None,\n **kwargs\n ):\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n adapter_names=adapter_names,\n )\n # BERT & RoBERTa return the pooled output as second item, we don't need that in these heads\n if not return_dict:\n head_inputs = (outputs[0],) + outputs[2:]\n else:\n head_inputs = outputs\n pooled_output = outputs[1]\n\n if head or self.active_head:\n head_outputs = self.forward_head(\n head_inputs,\n head_name=head,\n attention_mask=attention_mask,\n return_dict=return_dict,\n pooled_output=pooled_output,\n **kwargs,\n )\n return head_outputs\n else:\n # in case no head is used just return 
the output of the base model (including pooler output)\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. \"\"\", ROBERTA_START_DOCSTRING\n)\nclass RobertaForCausalLM(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):\n _keys_to_ignore_on_save = [r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`\")\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n # The LM head weights require special treatment only when they are tied with the word embeddings\n self.update_keys_to_ignore(config, [\"lm_head.decoder.weight\"])\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n adapter_names=None,\n ):\n r\"\"\"\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n Returns:\n\n Example::\n\n >>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig\n >>> import torch\n\n >>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n >>> config = RobertaConfig.from_pretrained(\"roberta-base\")\n >>> config.is_decoder = True\n >>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n adapter_names=adapter_names,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(\n sequence_output,\n inv_lang_adapter=self.roberta.get_invertible_adapter(),\n )\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"past_key_values\": past}\n\n def _reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n\n\n@add_start_docstrings(\"\"\"RoBERTa Model with a `language modeling` head on top. 
\"\"\", ROBERTA_START_DOCSTRING)\nclass RobertaForMaskedLM(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):\n _keys_to_ignore_on_save = [r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"lm_head.decoder.weight\", r\"lm_head.decoder.bias\"]\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.lm_head = RobertaLMHead(config)\n\n # The LM head weights require special treatment only when they are tied with the word embeddings\n self.update_keys_to_ignore(config, [\"lm_head.decoder.weight\"])\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n mask=\"<mask>\",\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n adapter_names=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n adapter_names=adapter_names,\n )\n sequence_output = outputs[0]\n prediction_scores = self.lm_head(\n sequence_output,\n inv_lang_adapter=self.roberta.get_invertible_adapter(),\n )\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaLMHead(nn.Module):\n \"\"\"Roberta Head for masked language modeling.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n self.decoder.bias = self.bias\n\n def forward(self, features, inv_lang_adapter=None, **kwargs):\n x = self.dense(features)\n x = gelu(x)\n x = self.layer_norm(x)\n\n if inv_lang_adapter:\n x = inv_lang_adapter(x, rev=True)\n\n # project back to size of vocabulary with bias\n x = self.decoder(x)\n\n return x\n\n def _tie_weights(self):\n # To tie those two weights if they get disconnected (on TPU or when the bias is resized)\n self.bias = self.decoder.bias\n\n\n@add_start_docstrings(\n \"\"\"\n RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. 
for GLUE tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForSequenceClassification(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.config = config\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.classifier = RobertaClassificationHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n adapter_names=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n adapter_names=adapter_names,\n )\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. 
for RocStories/SWAG tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForMultipleChoice(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.roberta = RobertaModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n token_type_ids=None,\n attention_mask=None,\n labels=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n adapter_names=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See\n :obj:`input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n flat_inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.roberta(\n flat_input_ids,\n position_ids=flat_position_ids,\n token_type_ids=flat_token_type_ids,\n attention_mask=flat_attention_mask,\n head_mask=head_mask,\n inputs_embeds=flat_inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n adapter_names=adapter_names,\n )\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForTokenClassification(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n adapter_names=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -\n 1]``.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n adapter_names=adapter_names,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass RobertaClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n classifier_dropout = (\n config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob\n )\n self.dropout = nn.Dropout(classifier_dropout)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, features, **kwargs):\n x = features[:, 0, :] # take <s> token (equiv. 
to [CLS])\n x = self.dropout(x)\n x = self.dense(x)\n x = torch.tanh(x)\n x = self.dropout(x)\n x = self.out_proj(x)\n return x\n\n\n@add_start_docstrings(\n \"\"\"\n Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n ROBERTA_START_DOCSTRING,\n)\nclass RobertaForQuestionAnswering(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):\n _keys_to_ignore_on_load_unexpected = [r\"pooler\"]\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.roberta = RobertaModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n adapter_names=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the\n sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (:obj:`sequence_length`). 
Position outside of the\n sequence are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.roberta(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1).contiguous()\n end_logits = end_logits.squeeze(-1).contiguous()\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions = start_positions.clamp(0, ignored_index)\n end_positions = end_positions.clamp(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\ndef create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n \"\"\"\n Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\n are ignored. This is modified from fairseq's `utils.make_positions`.\n\n Args:\n x: torch.Tensor x:\n\n Returns: torch.Tensor\n \"\"\"\n # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\n mask = input_ids.ne(padding_idx).int()\n incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n return incremental_indices.long() + padding_idx\n", "# coding=utf-8\n# Copyright 2020 The HuggingFace Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" GLUE benchmark metric. 
\"\"\"\n\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.metrics import f1_score, matthews_corrcoef\n\nimport datasets\n\n\n_CITATION = \"\"\"\\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\"\"\"\n\n_DESCRIPTION = \"\"\"\\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\"\"\"\n\n_KWARGS_DESCRIPTION = \"\"\"\nCompute the GLUE evaluation metric associated with each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each prediction should be a label index (or a float for stsb).\n references: list of reference labels, one for each prediction.\n Each reference should be a label index (or a float for stsb).\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthews Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n\"\"\"\n\n\ndef simple_accuracy(preds, labels):\n return float((preds == labels).mean())\n\n\ndef acc_and_f1(preds, labels):\n acc = simple_accuracy(preds, labels)\n f1 = float(f1_score(y_true=labels, y_pred=preds))\n return {\n \"accuracy\": acc,\n \"f1\": f1,\n }\n\n\ndef pearson_and_spearman(preds, labels):\n pearson_corr = float(pearsonr(preds, labels)[0])\n spearman_corr = float(spearmanr(preds, labels)[0])\n return {\n \"pearson\": pearson_corr,\n \"spearmanr\": spearman_corr,\n }\n\n\[email protected]_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)\nclass Glue(datasets.Metric):\n def _info(self):\n if self.config_name not in [\n \"sst2\",\n \"mnli\",\n \"mnli_mismatched\",\n \"mnli_matched\",\n \"cola\",\n \"stsb\",\n \"mrpc\",\n \"qqp\",\n \"qnli\",\n \"rte\",\n \"wnli\",\n \"hans\",\n ]:\n raise KeyError(\n \"You should supply a configuration name selected in \"\n '[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", '\n '\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]'\n )\n return datasets.MetricInfo(\n description=_DESCRIPTION,\n citation=_CITATION,\n inputs_description=_KWARGS_DESCRIPTION,\n features=datasets.Features(\n {\n \"predictions\": datasets.Value(\"int64\" if self.config_name != \"stsb\" else \"float32\"),\n \"references\": datasets.Value(\"int64\" if self.config_name != \"stsb\" else \"float32\"),\n }\n ),\n codebase_urls=[],\n reference_urls=[],\n format=\"numpy\",\n )\n\n def _compute(self, predictions, references):\n if self.config_name == \"cola\":\n return {\"matthews_correlation\": matthews_corrcoef(references, predictions)}\n elif self.config_name == \"stsb\":\n return pearson_and_spearman(predictions, references)\n elif self.config_name in [\"mrpc\", \"qqp\"]:\n return acc_and_f1(predictions, references)\n elif self.config_name in [\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]:\n return {\"accuracy\": simple_accuracy(predictions, references)}\n else:\n raise KeyError(\n \"You should supply a configuration name selected in \"\n '[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", '\n '\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]'\n )"
]
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.zeros", "torch.cat", "torch.einsum", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.nn.Tanh", "torch.nn.Linear", "torch.matmul", "torch.tanh", "torch.nn.BCEWithLogitsLoss", "torch.tensor", "torch.arange", "torch.cumsum", "torch.nn.MSELoss" ], [ "scipy.stats.spearmanr", "sklearn.metrics.f1_score", "sklearn.metrics.matthews_corrcoef", "scipy.stats.pearsonr" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
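The causal-LM head in the RoBERTa record above shifts logits and labels by one position so each token predicts its successor, while the masked-LM head relies on CrossEntropyLoss ignoring the -100 label index. A minimal sketch of both conventions on toy tensors (no pretrained weights assumed):

import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, vocab = 2, 5, 11
prediction_scores = torch.randn(batch, seq_len, vocab)
labels = torch.randint(0, vocab, (batch, seq_len))

# Causal LM: drop the last prediction and the first label so that
# position t is scored against the token at position t + 1.
shifted_scores = prediction_scores[:, :-1, :].contiguous()
shifted_labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_scores.view(-1, vocab), shifted_labels.view(-1))

# Masked LM: CrossEntropyLoss ignores index -100 by default, so only
# positions that were actually masked contribute to the loss.
mlm_labels = labels.clone()
mlm_labels[:, ::2] = -100  # pretend only the odd positions were masked
mlm_loss = loss_fct(prediction_scores.view(-1, vocab), mlm_labels.view(-1))
print(float(lm_loss), float(mlm_loss))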
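create_position_ids_from_input_ids at the end of the same modeling file numbers only the non-padding tokens, starting at padding_idx + 1, while padding tokens keep padding_idx itself. A small standalone check of that arithmetic (padding_idx=1, RoBERTa's pad token id):

import torch

def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    # Same arithmetic as the function above: mask out padding, cumulative-sum
    # the mask to number the real tokens, then offset by padding_idx.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx

input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # trailing 1s are padding
print(create_position_ids_from_input_ids(input_ids, padding_idx=1))
# tensor([[2, 3, 4, 5, 1, 1]]) -- real tokens count up from padding_idx + 1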
HaaLeo/ant-colony-optimization
[ "044b10be5694359900495403cc9f0e84d38a9e88" ]
[ "swarmlib/woa/woa_problem.py" ]
[ "# ------------------------------------------------------------------------------------------------------\n# Copyright (c) Leo Hanisch. All rights reserved.\n# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.\n# ------------------------------------------------------------------------------------------------------\n\n# pylint: disable=too-many-instance-attributes\n\nimport logging\nfrom copy import deepcopy\n\nimport numpy as np\n\nfrom .whale import Whale\nfrom ..util.base_visualizer import BaseVisualizer\nfrom ..util.problem_base import ProblemBase\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass WOAProblem(ProblemBase):\n def __init__(self, **kwargs):\n \"\"\"\n Initialize a new whale optimization algorithm problem.\n \"\"\"\n super().__init__(**kwargs)\n self.__iteration_number = kwargs['iteration_number']\n self.__whales = [\n Whale(**kwargs, bit_generator=self._random)\n for _ in range(kwargs['whales'])\n ]\n\n self._visualizer = BaseVisualizer(**kwargs)\n # Initialize visualizer for plotting\n positions = [whale.position for whale in self.__whales]\n self._visualizer.add_data(positions=positions)\n\n def solve(self) -> Whale:\n global_best_whale = None\n\n # Each iteration re-evaluates global_best_whale before the whales move\n for _ in range(self.__iteration_number):\n\n # Update global best\n global_best_whale = np.amin(self.__whales)\n\n random_whales = deepcopy(self._random.choice(self.__whales, size=len(self.__whales)))\n for whale, random_whale in zip(self.__whales, random_whales):\n whale.step(global_best_whale, random_whale)\n\n # Add data for plot\n self._visualizer.add_data(positions=[whale.position for whale in self.__whales])\n\n global_best_whale = np.amin(self.__whales)\n LOGGER.info('Last best solution=\"%s\" at position=\"%s\"', global_best_whale.value, global_best_whale.position)\n return global_best_whale\n" ]
[ [ "numpy.amin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
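solve() in the record above works because Whale instances are mutually comparable, letting np.amin pick the fittest whale, and because each whale steps relative to both the global best and a randomly drawn partner. A stripped-down sketch of that control flow, with a hypothetical Particle class standing in for swarmlib's Whale (whose real step() implements the WOA spiral/encircling update):

from functools import total_ordering

import numpy as np

@total_ordering
class Particle:
    """Toy stand-in for Whale: comparable by fitness value."""

    def __init__(self, rng):
        self.position = rng.uniform(-5, 5, size=2)

    @property
    def value(self):
        return float(np.sum(self.position ** 2))  # sphere test function

    def __eq__(self, other):
        return self.value == other.value

    def __lt__(self, other):
        return self.value < other.value

rng = np.random.default_rng(42)
swarm = [Particle(rng) for _ in range(10)]
for _ in range(3):
    best = np.amin(swarm)  # valid because Particle defines __lt__
    partners = rng.choice(swarm, size=len(swarm))
    for particle, partner in zip(swarm, partners):
        # the real Whale.step() spirals around `best` or explores around a
        # random `partner`; here we only average a pull toward each
        particle.position += 0.5 * (best.position - particle.position)
        particle.position += 0.1 * (partner.position - particle.position)
print('best value:', np.amin(swarm).value)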
erisyon/plaster
[ "20af32aed2365c6351fe3c26293308960099152b", "20af32aed2365c6351fe3c26293308960099152b" ]
[ "plaster/run/sigproc_v3/c/sigproc_v3.py", "plaster/main.py" ]
[ "import ctypes as c\nimport pathlib\nfrom contextlib import contextmanager, redirect_stdout\nfrom io import StringIO\n\nimport numpy as np\nfrom plaster.run.sigproc_v3.c.build import build_dev\nfrom plaster.tools.c_common import c_common_tools\nfrom plaster.tools.c_common.c_common_tools import (\n CException,\n F64Arr,\n U16Arr,\n U64Arr,\n U64Arr,\n Tab,\n)\nfrom plaster.tools.schema import check\nfrom plaster.tools.utils import utils\nfrom plaster.tools.zlog.zlog import spy\nfrom plumbum import local\n\n\nclass SigprocV3Context(c_common_tools.FixupStructure):\n # Operates on a single field stack so that we don't have\n # to realize all fields in to memory simultaneously\n\n # fmt: off\n _fixup_fields = [\n (\"fl_i\", \"Index\"),\n (\"cy_i\", \"Index\"),\n (\"n_channels\", \"Size\"),\n (\"ch_ims\", U16Arr), # Raw stack of all channels for this field, cycle\n (\"ch_bal_ims\", U16Arr), # Balance ims per channel\n (\"ch_thresh\", U64Arr), # Threshold per channel\n (\"ch_bias\", U64Arr), # Bias per channel\n (\"max_n_locs\", \"Size\"),\n (\"im_mea\", \"Size\"),\n\n (\"n_regs\", \"Size\"),\n (\"reg_mea\", \"Size\"),\n (\"f_cy0_reg_real\", F64Arr), # (n_regs, reg_mea, reg_mea)\n (\"f_cy0_reg_imag\", F64Arr), # (n_regs, reg_mea, reg_mea)\n\n (\"sub_locs_tab\", Tab, \"SubLoc\"),\n\n (\"out_reg_aln_y\", F64Arr), # (n_regs,)\n (\"out_reg_aln_x\", F64Arr), # (n_regs,)\n\n # Outputs\n (\"out_n_locs\", U64Arr), # Size == 1\n (\"out_locs\", F64Arr), # (out_n_peaks, 2), where 2 is: (y, x)\n (\"out_warning_n_locs_overflow\", U64Arr), # Size == 1\n (\"out_debug\", U64Arr),\n ]\n # fmt: on\n\n\nclass SubLoc(c_common_tools.FixupStructure):\n # fmt: off\n _fixup_fields = [\n (\"reg_i\", \"Index\"),\n (\"sub_y\", \"Float64\"),\n (\"sub_x\", \"Float64\"),\n ]\n # fmt: on\n\n\nc_sigproc_v3_path = local.path(\"/erisyon/plaster/plaster/run/sigproc_v3/c\")\n\n\ndef _init():\n \"\"\"\n This must be called once before any work\n \"\"\"\n SigprocV3Context.struct_fixup()\n SubLoc.struct_fixup()\n\n if local.env.get(\"PLASTER_C_COMPILE\"):\n with local.cwd(c_sigproc_v3_path):\n fp = StringIO()\n with redirect_stdout(fp):\n print(\n f\"// This file was code-generated by sigproc_v3.c.sigproc_v3.load_lib and should be version controlled\"\n )\n print()\n print(\"#ifndef SIGPROC_V3_H\")\n print(\"#define SIGPROC_V3_H\")\n print()\n print('#include \"stdint.h\"')\n print('#include \"c_common.h\"')\n print()\n SigprocV3Context.struct_emit_header(fp)\n SubLoc.struct_emit_header(fp)\n print(\"#endif\")\n\n header_file_path = \"./_sigproc_v3.h\"\n existing_h = utils.load(header_file_path, return_on_non_existing=\"\")\n\n if existing_h != fp.getvalue():\n utils.save(header_file_path, fp.getvalue())\n\n build_dev()\n\n\n# Note, this is executed at LOAD TIME!\n_init()\n\n_lib = None\n\nMODULE_DIR = pathlib.Path(__file__).parent\n\n\nclass Radmat:\n # The columns of a radmat\n sig = 0\n noi = 1\n bg_med = 2\n bg_std = 3\n\n\ndef load_lib():\n global _lib\n if _lib is not None:\n return _lib\n\n lib = c.CDLL(MODULE_DIR / \"_sigproc_v3.so\")\n lib.context_init.argtypes = [\n c.POINTER(SigprocV3Context),\n ]\n lib.context_init.restype = c.c_char_p\n\n lib.context_free.argtypes = [\n c.POINTER(SigprocV3Context),\n ]\n\n lib.peak_find_multichannel.argtypes = [\n c.POINTER(SigprocV3Context),\n c_common_tools.typedef_to_ctype(\"Index\"), # Index reg_i,\n ]\n lib.peak_find_multichannel.restype = c.c_char_p\n\n lib.fft2.argtypes = [\n c.POINTER(c.c_double), # data_real\n c.POINTER(c.c_double), # data_imag\n c.c_uint64, # im_mea\n 
c.c_uint64, # direction\n c.c_uint64, # sub_y\n c.c_uint64, # sub_x\n c.c_uint64, # sub_mea\n c.POINTER(c.c_double), # out_real\n c.POINTER(c.c_double), # out_imag\n ]\n lib.fft2.restype = None\n\n lib.fft2_uint16.argtypes = [\n c.POINTER(c.c_uint16), # data_real\n c.c_uint64, # im_mea\n c.c_uint64, # direction\n c.c_uint64, # sub_y\n c.c_uint64, # sub_x\n c.c_uint64, # sub_mea\n c.POINTER(c.c_double), # out_real\n c.POINTER(c.c_double), # out_imag\n ]\n lib.fft2.restype = None\n\n _lib = lib\n return lib\n\n\n@contextmanager\ndef context(\n ch_ims, ch_bal_ims, ch_thresh, ch_bias, field_i, cycle_i, n_regs=0, reg_mea=0,\n):\n lib = load_lib()\n\n check.array_t(ch_ims, ndim=3, dtype=np.uint16, c_contiguous=True)\n n_channels, height, width = ch_ims.shape\n assert width == height and utils.is_power_of_2(width)\n im_mea = width\n\n # The following was an early sketch for the necessary context\n\n # TODO: store ch_bal_ims in the correct format to avoid this rescaling\n bal_precision = 1024\n ch_bal_ims = ((1.0 / ch_bal_ims) * bal_precision).astype(np.uint16)\n check.array_t(\n ch_bal_ims,\n shape=(n_channels, im_mea, im_mea),\n dtype=np.uint16,\n c_contiguous=True,\n )\n\n check.array_t(ch_thresh, shape=(n_channels,))\n assert np.all(ch_thresh > 0)\n ch_thresh = (ch_thresh * bal_precision).astype(np.uint64)\n\n check.array_t(ch_bias, shape=(n_channels,))\n assert np.all(ch_bias > 0)\n ch_bias = ch_bias.astype(np.uint64)\n\n max_n_locs = 8196\n # out_locs = np.zeros((max_n_locs, 2), dtype=np.float64)\n # out_n_locs = np.zeros((1,), dtype=np.uint64)\n out_debug = np.zeros((ch_ims.shape[-2:]), dtype=np.uint64)\n out_warning_n_locs_overflow = np.zeros((1,), dtype=np.uint64)\n\n f_cy0_reg_real = np.zeros((n_regs, reg_mea, reg_mea))\n f_cy0_reg_imag = np.zeros((n_regs, reg_mea, reg_mea))\n\n sub_locs_tab = Tab.allocate(SubLoc, max_n_locs)\n\n ctx = SigprocV3Context(\n fl_i=field_i,\n cy_i=cycle_i,\n n_channels=n_channels,\n ch_ims=U16Arr.from_ndarray(ch_ims),\n ch_bal_ims=U16Arr.from_ndarray(ch_bal_ims),\n ch_thresh=U64Arr.from_ndarray(ch_thresh),\n ch_bias=U64Arr.from_ndarray(ch_bias),\n im_mea=im_mea,\n max_n_locs=max_n_locs,\n out_debug=U64Arr.from_ndarray(out_debug),\n _out_debug=out_debug,\n out_warning_n_locs_overflow=U64Arr.from_ndarray(out_warning_n_locs_overflow),\n _out_warning_n_locs_overflow=out_warning_n_locs_overflow,\n f_cy0_reg_real=F64Arr.from_ndarray(f_cy0_reg_real),\n f_cy0_reg_imag=F64Arr.from_ndarray(f_cy0_reg_imag),\n _f_cy0_reg_real=f_cy0_reg_real,\n _f_cy0_reg_imag=f_cy0_reg_imag,\n reg_mea=reg_mea,\n sub_locs_tab=sub_locs_tab,\n _sub_locs_tab=sub_locs_tab,\n )\n\n error = lib.context_init(ctx)\n if error is not None:\n raise CException(error)\n\n try:\n yield ctx\n finally:\n lib.context_free(ctx)\n\n\ndef peak_find_multichannel(**kws):\n \"\"\"\n For testing purposes, typically called internally by the analyze pipeline\n \"\"\"\n lib = load_lib()\n\n with context(**kws) as ctx:\n reg_i = 0\n error = lib.peak_find_multichannel(ctx, reg_i)\n if error is not None:\n raise CException(error)\n\n n_rows = ctx.sub_locs_tab.n_rows\n return np.array(\n [\n (ctx._sub_locs_tab._arr[i].sub_y, ctx._sub_locs_tab._arr[i].sub_x)\n for i in range(n_rows)\n ]\n )\n\n\ndef fft2(im, sub_y, sub_x, sub_mea):\n check.array_t(im, ndim=2, is_square=True)\n im_mea = im.shape[0]\n assert im.shape[0] == im.shape[1]\n\n assert 0 <= sub_y <= im_mea - sub_mea\n assert 0 <= sub_x <= im_mea - sub_mea\n\n lib = load_lib()\n\n data_real = np.ascontiguousarray(im, dtype=np.double)\n data_imag = 
np.zeros_like(data_real)\n data_real_ptr = data_real.ctypes.data_as(c.POINTER(c.c_double))\n data_imag_ptr = data_imag.ctypes.data_as(c.POINTER(c.c_double))\n\n out_real = np.zeros((sub_mea * sub_mea,), dtype=np.double)\n out_imag = np.zeros((sub_mea * sub_mea,), dtype=np.double)\n out_real_ptr = out_real.ctypes.data_as(c.POINTER(c.c_double))\n out_imag_ptr = out_imag.ctypes.data_as(c.POINTER(c.c_double))\n\n direction = 1\n lib.fft2(\n data_real_ptr,\n data_imag_ptr,\n im_mea,\n direction,\n sub_y,\n sub_x,\n sub_mea,\n out_real_ptr,\n out_imag_ptr,\n )\n\n return (out_real + 1j * out_imag).reshape((sub_mea, sub_mea))\n\n\ndef ifft2(f_im):\n check.array_t(f_im, ndim=2, is_square=True, dtype=np.cdouble)\n assert f_im.shape[0] == f_im.shape[1]\n\n lib = load_lib()\n\n im_mea = f_im.shape[0]\n\n f_im_real = np.ascontiguousarray(f_im.real, dtype=np.double)\n f_im_imag = np.ascontiguousarray(f_im.imag, dtype=np.double)\n f_im_real_ptr = f_im_real.ctypes.data_as(c.POINTER(c.c_double))\n f_im_imag_ptr = f_im_imag.ctypes.data_as(c.POINTER(c.c_double))\n\n out_real = np.zeros((im_mea * im_mea,), dtype=np.double)\n out_imag = np.zeros((im_mea * im_mea,), dtype=np.double)\n out_real_ptr = out_real.ctypes.data_as(c.POINTER(c.c_double))\n out_imag_ptr = out_imag.ctypes.data_as(c.POINTER(c.c_double))\n\n direction = -1\n lib.fft2(\n f_im_real_ptr,\n f_im_imag_ptr,\n im_mea,\n direction,\n 0,\n 0,\n im_mea,\n out_real_ptr,\n out_imag_ptr,\n )\n\n return out_real.reshape((im_mea, im_mea))\n", "#!/usr/bin/env python -u\n\"\"\"\nAll commands that can be run in this project are available through this unified interface.\nThis should be run with the ./plaster.sh helper to get into the correct context.\n\"\"\"\nimport tempfile\nimport numpy as np\nimport time\nimport os\nimport sys\nimport pandas as pd\nimport json\nfrom pathlib import Path\nfrom munch import Munch\nfrom plumbum import colors\nfrom plumbum import FG, TF, cli, local\nfrom plaster.tools.zlog.zlog import important\nfrom plaster.run.sigproc_v2 import synth\nfrom plaster.tools.zlog.profile import prof, profile_from_file, profile_dump\nfrom plaster.tools.utils.tmp import tmp_file\nfrom plaster.tools.assets import assets\nfrom plaster.tools.test_tools.test_tools import run_p\nfrom plaster.run.run import RunResult\nfrom plaster.tools.zlog import zlog\nfrom plaster.tools.zlog.zlog import tell, h_line, spy\nfrom plaster.tools.utils import tmp\nfrom plaster.tools.utils import utils\n\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\nclass CommandError(Exception):\n def __init__(self, retcode=None):\n self.retcode = retcode\n\n\ndef assert_env():\n must_exist = (\"ERISYON_ROOT\", \"JOBS_FOLDER\")\n found = 0\n for e in must_exist:\n if e in local.env:\n found += 1\n else:\n print(f'Environment variable \"{e}\" not found.')\n\n if found != len(must_exist):\n raise CommandError(f\"Environment variable(s) not found.\")\n\n\nclass DoFuncs:\n def is_dev(self):\n return local.env.get(\"ERISYON_DEV\") == \"1\"\n\n def folder_user(self):\n return local.env[\"FOLDER_USER\"]\n\n def run_user(self):\n return local.env[\"RUN_USER\"]\n\n def clear(self):\n local[\"clear\"] & FG\n\n def _print_job_folders(self, file_list, show_plaster_json=True):\n \"\"\"\n file_list is a list of munches [Munch(folder=\"folder\", name=\"foo.txt\", size=123, mtime=123456789)]\n \"\"\"\n\n if len(file_list) == 0:\n print(\"No files found\")\n return\n\n folders = {\n file.folder: Munch(folder=file.folder, size_gb=0, file_count=0,)\n for file in file_list\n }\n\n gb = 1024 ** 
3\n total_gb = 0\n for file in file_list:\n folder = file.folder\n total_gb += file.size / gb\n folders[folder].size_gb += file.size / gb\n folders[folder].file_count += 1\n\n df = pd.DataFrame.from_dict(folders, orient=\"index\")\n formatters = dict(\n size_gb=\"{:10.2f}\".format,\n folder=\"{:<40.40s}\".format,\n file_count=\"{:.0f}\".format,\n )\n columns = [\"folder\", \"size_gb\", \"file_count\"]\n\n df = df.append(dict(folder=\"TOTAL\", size_gb=total_gb), ignore_index=True)\n\n print(df.to_string(columns=columns, formatters=formatters))\n\n def print_local_job_folders(self):\n important(\"Local job folders:\")\n\n root = local.path(\"./jobs_folder\")\n self._print_job_folders(\n [\n Munch(\n folder=(p - root)[0],\n name=p.name,\n size=int(p.stat().st_size),\n mtime=int(p.stat().st_mtime),\n )\n for p in root.walk()\n ]\n )\n\n def validate_job_folder(self, job_folder, allow_run_folders=False):\n return assets.validate_job_folder(\n job_folder, allow_run_folders=allow_run_folders\n )\n\n def run_zests_v2(self, cli_args, debug_mode):\n tell(f\"Running zests v2...\")\n\n # as os.environ is evaluated when it is first imported\n # we can't use any of the more graceful ways to set the environment\n with local.env(RUN_ENV=\"test\", ZAP_DEBUG_MODE=debug_mode):\n zest_version = None\n try:\n from zest.version import __version__ as zest_version\n except ImportError:\n pass\n\n assert zlog.config_dict is not None\n assert zest_version.startswith(\"1.1.\")\n with tmp.tmp_file() as tmp_path:\n with open(tmp_path, \"w\") as f:\n f.write(json.dumps(zlog.config_dict))\n\n # cli_args += [\"--logger_config_json\", tmp_path]\n local[\"python\"][\"-u\", \"-m\", \"zest.zest_cli\"].bound_command(\n *cli_args\n ) & FG(retcode=None)\n\n def run_nbstripout(self):\n \"\"\"Strip all notebooks of output to save space in commits\"\"\"\n important(\"Stripping Notebooks...\")\n result = (\n local[\"find\"][\n \".\",\n \"-type\",\n \"f\",\n \"-not\",\n \"-path\",\n \"*/\\.*\",\n \"-name\",\n \"*.ipynb\",\n \"-print\",\n ]\n | local[\"xargs\"][\"nbstripout\"]\n ) & TF(FG=True)\n\n if not result:\n raise CommandError\n\n def run_docker_build(self, docker_tag, quiet=False):\n important(f\"Building docker tag {docker_tag}\")\n with local.env(LANG=\"en_US.UTF-8\"):\n args = [\n \"build\",\n \"-t\",\n f\"erisyon:{docker_tag}\",\n \"-f\",\n \"./scripts/main_env.docker\",\n ]\n if quiet:\n args += [\"--quiet\"]\n args += \".\"\n local[\"docker\"][args] & FG\n\n\nclass DoCommand(cli.Application, DoFuncs):\n def main(self):\n return\n\n\[email protected](\"run_notebook\")\nclass RunNotebookCommand(cli.Application, DoFuncs):\n \"\"\"\n Run a notebook rendered to HTML\n \"\"\"\n\n def main(self, notebook_path, output_path: Path = None):\n args = [\n \"nbconvert\",\n \"--to\",\n \"html\",\n \"--execute\",\n notebook_path,\n \"--ExecutePreprocessor.timeout=1800\",\n ]\n if output_path is not None:\n args += [\"--output\", output_path]\n local[\"jupyter\"].bound_command(*args) & FG\n\n\[email protected](\"profile\")\nclass ProfileCommand(cli.Application, DoFuncs):\n gb = 1024 ** 3\n\n skip_hardware = cli.Flag(\"--skip_hardware\", help=\"Do not include hardware profile\")\n skip_sigproc = cli.Flag(\"--skip_sigproc\", help=\"Do not include sigproc profile\")\n\n def fileio_test(self, jobs_folder):\n job_name = f\"_profile/_{int(time.time()):08x}\"\n large_random = np.random.uniform(\n size=1024 ** 3 // 8\n ) # 8 because floats are 8 bytes\n\n def write_to(write_path):\n # import shutil\n # total, used, free = 
shutil.disk_usage(write_path.dirname)\n # print(f\"Free disk at {write_path}: {free / gb:2.2f}GB ({free / total:2.1f}%)\")\n\n write_path.dirname.mkdir()\n with open(write_path, \"wb\") as f:\n f.write(large_random)\n\n # PROFILE write to jobs_folder\n job_folder_write_path = jobs_folder / job_name\n try:\n with prof(\n \"fileio_to_jobs_folder\", gbs=large_random.nbytes / self.gb, _tell=True,\n ):\n write_to(job_folder_write_path)\n finally:\n job_folder_write_path.delete()\n\n # PROFILE write to plaster_tmp\n with tmp_file() as plaster_tmp_folder_write_path:\n with prof(\n \"fileio_to_plaster_tmp\", gbs=large_random.nbytes / self.gb, _tell=True,\n ):\n write_to(plaster_tmp_folder_write_path)\n\n # PROFILE write to /tmp\n tmp_folder_write_path = local.path(tempfile.mkstemp())\n try:\n with prof(\"fileio_to_tmp\", gbs=large_random.nbytes / self.gb, _tell=True):\n write_to(tmp_folder_write_path)\n finally:\n tmp_folder_write_path.delete()\n\n def cpu_test(self):\n mat = np.random.uniform(size=(5000, 5000))\n with prof(\n \"cpu_tests_matrix_invert\",\n mega_elems=(mat.shape[0] * mat.shape[1]) / 1e6,\n _tell=True,\n ):\n np.linalg.inv(mat)\n\n def mem_test(self):\n gb = 1024 ** 3\n rnd = np.random.uniform(size=(1_000, 500_000))\n\n with prof(\"mem_tests_copy\", gbs=rnd.nbytes / gb, _tell=True):\n rnd.copy()\n\n def sigproc_test(self, jobs_folder):\n \"\"\"\n This is adapted from zest_sigproc_v2_integration\n \"\"\"\n profile_folder = jobs_folder / \"_profile\"\n profile_folder.delete()\n job_folder = profile_folder / \"sigproc_test\"\n source_folder = profile_folder / \"_synth_field\"\n job_folder.mkdir()\n source_folder.mkdir()\n\n # GENERATE some fake data\n\n dim = (1024, 1024)\n n_channels = 1\n n_cycles = 10\n n_peaks = 500\n psf_width = 1.5\n bg_mean = 100.0\n bg_std = 30.0\n gain = 5000.0\n\n def _synth_field(fl_i):\n with synth.Synth(n_channels=n_channels, n_cycles=n_cycles, dim=dim) as s:\n peaks = (\n synth.PeaksModelGaussianCircular(n_peaks=n_peaks)\n .locs_randomize()\n .widths_uniform(psf_width)\n .amps_constant(gain)\n )\n synth.CameraModel(bg_mean=bg_mean, bg_std=bg_std)\n synth.HaloModel()\n synth.IlluminationQuadraticFalloffModel()\n\n chcy_ims = s.render_chcy(0)\n\n for ch_i in range(chcy_ims.shape[0]):\n for cy_i in range(chcy_ims.shape[1]):\n np.save(\n str(\n source_folder\n / f\"area_{fl_i:03d}_cell_000_{ch_i:03d}nm_{cy_i:03d}.npy\"\n ),\n chcy_ims[ch_i, cy_i],\n )\n\n n_fields = 2\n for fl_i in range(n_fields):\n _synth_field(fl_i)\n\n run_p(\n [\n f\"gen\",\n f\"sigproc_v2\",\n f\"--job={job_folder}\",\n f\"--sigproc_source={source_folder}\",\n f\"--force\",\n f\"--self_calib\",\n ]\n )\n\n log_file = local.path(local.env[\"PLASTER_ROOT\"]) / \"plaster.log\"\n log_file.delete()\n\n run_p([\"run\", job_folder, \"--no_progress\", \"--skip_reports\"])\n\n profile_lines = profile_from_file(log_file)\n\n with colors.fg.DeepSkyBlue3:\n print()\n print(h_line(\"--\"))\n print(\"PROFILE RESULTS\")\n print(h_line(\"--\"))\n profile_dump(profile_lines)\n\n def main(self, jobs_folder):\n assert_env()\n\n jobs_folder = local.path(jobs_folder)\n\n if not self.skip_hardware:\n tell(colors.cyan | \"Profiling file_io\")\n self.fileio_test(jobs_folder)\n\n tell(colors.cyan | \"Profiling cpu\")\n self.cpu_test()\n\n tell(colors.cyan | \"Profiling mem\")\n self.mem_test()\n\n if not self.skip_sigproc:\n tell(colors.cyan | \"Profiling sigproc\")\n self.sigproc_test(jobs_folder)\n\n\[email protected](\"profile_dump\")\nclass ProfileDumpCommand(cli.Application, DoFuncs):\n def main(self, 
log_path):\n assert_env()\n\n log_file = local.path(log_path)\n profile_lines = profile_from_file(log_file)\n profile_dump(profile_lines)\n\n\[email protected](\"test\")\nclass TestCommand(cli.Application, DoFuncs):\n \"\"\"\n Run tests\n \"\"\"\n\n no_clear = cli.Flag(\"--no_clear\", help=\"Do not clear screen\")\n integration = cli.Flag(\"--integration\", help=\"Run integration tests\")\n debug_mode = cli.Flag(\"--debug_mode\", help=\"Put zap into debug_mode\")\n cli_mode = cli.Flag(\"--cli_mode\", help=\"Run without ui\")\n\n def main(self, *args):\n if not self.no_clear:\n self.clear()\n\n cli_args = list(args)\n\n root = local.env[\"PLASTER_ROOT\"]\n cli_args += [f\"--root={root}\"]\n\n folders = (\n \"./plaster\",\n \"./plaster/scripts\",\n )\n include_dirs = \":\".join(folders)\n cli_args += [f\"--include_dirs={include_dirs}\"]\n with local.cwd(root):\n cli_args += [f\"--hook_start=./scripts/testing_start.py:test_setup_logs\"]\n\n if not self.debug_mode:\n if not self.cli_mode:\n cli_args += [f\"--ui\"]\n cli_args += [f\"--n_workers\", \"8\"]\n\n if self.integration:\n cli_args += [f\"--groups=integration\"]\n else:\n cli_args += [f\"--exclude_groups=integration\"]\n\n return self.run_zests_v2(cli_args, self.debug_mode)\n\n\[email protected](\"jupyter\")\nclass JupyterCommand(cli.Application, DoFuncs):\n ip = cli.SwitchAttr(\"--ip\", str, default=\"0.0.0.0\", help=\"ip to bind to\")\n port = cli.SwitchAttr(\"--port\", int, default=\"8080\", help=\"port to bind to\")\n\n def main(self, *args):\n assert_env()\n os.execlp(\n \"jupyter\",\n \"jupyter\",\n \"notebook\",\n f\"--ip={self.ip}\",\n f\"--port={self.port}\",\n \"--allow-root\",\n *args,\n )\n\n\[email protected](\"pluck\")\nclass PluckCommand(cli.Application, DoFuncs):\n \"\"\"\n Pluck a field from a result pickle\n \"\"\"\n\n save_npy = cli.SwitchAttr(\"--save_npy\", str, default=None, help=\"save as npy file\")\n save_csv = cli.SwitchAttr(\n \"--save_csv\", str, default=None, help=\"save as csv file (dataframe only)\"\n )\n save_pkl = cli.SwitchAttr(\n \"--save_pkl\", str, default=None, help=\"save as pkl file (dataframe only)\"\n )\n\n def main(self, run_path, symbol):\n \"\"\"\n run_path: path to the run folder\n symbol: Eg: \"sigproc_v2.sig\"\n \"\"\"\n run = RunResult(run_path)\n parts = symbol.split(\".\")\n result = run[parts[0]]\n sym = getattr(result, parts[1])\n if callable(sym):\n val = sym()\n else:\n val = sym\n\n if self.save_npy is not None:\n assert isinstance(val, np.ndarray)\n np.save(self.save_npy, val)\n if self.save_csv is not None:\n assert isinstance(val, pd.DataFrame)\n val.to_csv(self.save_csv)\n if self.save_pkl is not None:\n assert isinstance(val, pd.DataFrame)\n val.to_pickle(self.save_pkl)\n\n\[email protected](\"export_sigproc_v2\")\nclass ExportSigprocV2Command(cli.Application, DoFuncs):\n \"\"\"\n Export sigproc_v2 and raw data in easy to use formats.\n \"\"\"\n\n def main(self, run_path):\n \"\"\"\n run_path: path to the run folder (don't forget this is a subfolder of job)\n \"\"\"\n run = RunResult(run_path)\n name = run.run_folder.parent.name\n\n prefix = f\"{name}__\"\n tell(f\"Prefixing saved files with {prefix}\")\n\n tell(\"Saving sig.npy\")\n np.save(f\"{prefix}sig.npy\", run.sigproc_v2.sig())\n\n tell(\"Saving noi.npy\")\n np.save(f\"{prefix}noi.npy\", run.sigproc_v2.noi())\n\n tell(\"Saving df.csv\")\n run.sigproc_v2.fields__n_peaks__peaks__radmat().to_csv(f\"{prefix}df.csv\")\n\n ims = []\n for fl_i in range(run.sigproc_v2.n_fields):\n tell(f\"Loading align field {fl_i} of 
{run.sigproc_v2.n_fields}\")\n ims += [run.sigproc_v2.aln_unfilt_chcy_ims(fl_i)]\n\n tell(\"Saving aln_ims.npy\")\n np.save(f\"{prefix}aln_ims.npy\", np.stack(ims))\n\n tell(\"Saving example.py\")\n utils.save(\n f\"{prefix}example.py\",\n f\"import numpy as np\\n\"\n + f\"import pandas as pd\\n\\n\"\n + f'prefix = \"{prefix}\"'\n + utils.smart_wrap(\n \"\"\"\n sig = np.load(f\"{prefix}sig.npy\")\n noi = np.load(f\"{prefix}noi.npy\")\n df = pd.read_csv(f\"{prefix}df.csv\")\n ims = np.load(f\"{prefix}aln_ims.npy\", mmap_mode=\"r\")\n n_peaks = sig.shape[0]\n n_fields, n_channels, n_cycles, im_mea, _ = ims.shape\n\n # Examine some peak\n peak_i = 123 # 0 <= peak_i < n_peaks\n ch_i = 0 # 0 <= ch_i < n_channels\n cy_i = 0 # 0 <= cy_i < n_cycles\n y, x, fl_i = df[df.peak_i == peak_i][[\"aln_y\", \"aln_x\", \"field_i\"]].drop_duplicates().values.flatten().astype(int)\n peak_radius = 10\n peak_im = ims[fl_i, ch_i, cy_i, y-peak_radius:y+peak_radius, x-peak_radius:x+peak_radius]\n # Now peak_im is a centered sub-image of that peak with shape=(peak_radius, peak_radius)\n \"\"\",\n width=200,\n assert_if_exceeds_width=True,\n ),\n )\n\n tell(\"\\n\\nThe following commands may be useful:\")\n # tell(f\" tar czf {prefix}data.tar.gz {prefix}sig.npy {prefix}noi.npy {prefix}df.csv\")\n # tell(f\" tar czf {prefix}ims.tar.gz {prefix}aln_ims.npy\")\n # tell(\"\")\n # tell(f\" aws s3 cp {prefix}data.tar.gz s3://erisyon-public\")\n # tell(f\" aws s3 cp {prefix}ims.tar.gz s3://erisyon-public\")\n tell(f\" aws s3 cp {prefix}sig.npy s3://erisyon-public\")\n tell(f\" aws s3 cp {prefix}noi.npy s3://erisyon-public\")\n tell(f\" aws s3 cp {prefix}df.csv s3://erisyon-public\")\n tell(f\" aws s3 cp {prefix}aln_ims.npy s3://erisyon-public\")\n tell(f\" aws s3 cp {prefix}example.py s3://erisyon-public\")\n\n\nif __name__ == \"__main__\":\n try:\n DoCommand.subcommand(\"gen\", \"plaster.gen.gen_main.GenApp\")\n DoCommand.subcommand(\"run\", \"plaster.run.run_main.RunApp\")\n DoCommand.run()\n except (KeyboardInterrupt):\n print() # Add an extra line because various thing terminate with \\r\n sys.exit(1)\n except Exception as e:\n log.exception(e)\n sys.exit(1)\n" ]
[ [ "numpy.all", "numpy.zeros", "numpy.zeros_like", "numpy.ascontiguousarray" ], [ "numpy.linalg.inv", "numpy.stack", "numpy.save", "pandas.DataFrame.from_dict", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
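fft2/ifft2 in the sigproc_v3 record marshal separate real and imaginary double planes through ctypes pointers and return the complex spectrum of a square sub-window. Assuming the C kernel computes a plain 2-D DFT of im[sub_y:sub_y+sub_mea, sub_x:sub_x+sub_mea] (which is how the Python wrapper's arguments read), this is the NumPy reference it should agree with:

import numpy as np

def fft2_reference(im, sub_y, sub_x, sub_mea):
    # Cut the square sub-window out of the image and take its 2-D DFT;
    # the ctypes wrapper returns the same values reshaped to (sub_mea, sub_mea).
    window = np.ascontiguousarray(
        im[sub_y:sub_y + sub_mea, sub_x:sub_x + sub_mea], dtype=np.double)
    return np.fft.fft2(window)

im = np.random.uniform(size=(64, 64))
spec = fft2_reference(im, sub_y=8, sub_x=8, sub_mea=16)
# Round-tripping through the inverse DFT recovers the original window.
assert np.allclose(np.fft.ifft2(spec).real, im[8:24, 8:24])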
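main.py in the same record wires its CLI together with plumbum's cli.Application, registering subcommands so that heavy modules load only when their subcommand actually runs (DoCommand.subcommand("gen", "plaster.gen.gen_main.GenApp")). A minimal standalone sketch of the same pattern with toy command names (nothing plaster-specific):

from plumbum import cli

class Do(cli.Application):
    """Top-level dispatcher; does nothing on its own."""

    def main(self):
        return

@Do.subcommand("hello")
class Hello(cli.Application):
    """Example subcommand: python do.py hello --loud plaster"""

    loud = cli.Flag("--loud", help="Shout instead of speaking")

    def main(self, name="world"):
        msg = f"hello {name}"
        print(msg.upper() if self.loud else msg)

if __name__ == "__main__":
    # Subcommands can also be registered lazily by dotted import path,
    # as the plaster entry point does: Do.subcommand("gen", "pkg.module.App")
    Do.run()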
michel8195/CardDetection
[ "3f538e0ecb32f1bdafdb324f57d5c6fa17f19163" ]
[ "samples/gen_dataset/config.py" ]
[ "import numpy as np\nimport itertools\nimport os\n\n# imgW,imgH: dimensions of the generated dataset images\nimgW = 720\nimgH = 720\n\n\n# cardW,cardH: card size in mm; the corner-zone bounds below are also in mm\n# but already carry the zoom factor (the literal 4) inline\ncardW = 60\ncardH = 114\ncornerXmin = 3 * 4\ncornerXmax = 9 * 4\ncornerYmin = 3 * 4\ncornerYmax = 19 * 4\n\n# We convert the card measures from mm to pixels: multiply by an arbitrary factor 'zoom'\nzoom = 4\ncardW *= zoom\ncardH *= zoom\ndecalX = int((imgW - cardW) * 0.5)\ndecalY = int((imgH - cardH) * 0.5)\n\nx1 = cornerXmin\ny1 = cornerYmin\nx2 = cornerXmax\ny2 = cornerYmax\n\nrefCard = np.array([[0, 0], [cardW, 0], [cardW, cardH], [0, cardH]], dtype=np.float32)\nrefCardRot = np.array([[cardW, 0], [cardW, cardH], [0, cardH], [0, 0]], dtype=np.float32)\n\n# Define the four bounding points of each of the 4 corner zones\n\ncorner1 = [[cornerXmin, cornerYmin], [cornerXmax, cornerYmin], [cornerXmin, cornerYmax], [cornerXmax, cornerYmax]]\ncorner2 = [[cardW - cornerXmax, cornerYmin], [cardW - cornerXmin, cornerYmin], [cardW - cornerXmax, cornerYmax],\n [cardW - cornerXmin, cornerYmax]]\ncorner3 = [[cornerXmin, cardH - cornerYmax], [cornerXmax, cardH - cornerYmax], [cornerXmin, cardH - cornerYmin],\n [cornerXmax, cardH - cornerYmin]]\ncorner4 = [[cardW - cornerXmax, cardH - cornerYmax], [cardW - cornerXmin, cardH - cornerYmax],\n [cardW - cornerXmax, cardH - cornerYmin], [cardW - cornerXmin, cardH - cornerYmin]]\n\n\ncard_suits = ['c']\ncard_values = ['1', '2', '3', '4', '5', '6', '7', '8', '9']\n\nlist_cards = []\nfor suit, value in itertools.product(card_suits, card_values):\n list_cards.append('{}{}'.format(value, suit))\n\nprint(list_cards)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
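The config above centers a cardW x cardH card inside the imgW x imgH canvas via decalX/decalY and stores the card outline in refCard. A small NumPy check of that geometry, with the constants copied from the config (note the corner bounds already carry the zoom factor 4 inline):

import numpy as np

imgW = imgH = 720
zoom = 4
cardW, cardH = 60 * zoom, 114 * zoom  # 240 x 456 px
decalX = int((imgW - cardW) * 0.5)    # left margin when the card is centered
decalY = int((imgH - cardH) * 0.5)    # top margin

refCard = np.array([[0, 0], [cardW, 0], [cardW, cardH], [0, cardH]],
                   dtype=np.float32)
centered = refCard + np.array([decalX, decalY], dtype=np.float32)

# The centered card outline must lie fully inside the canvas.
assert centered.min() >= 0
assert centered[:, 0].max() <= imgW and centered[:, 1].max() <= imgH
print(centered)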
rajkaramchedu/DALI
[ "3ee7b3476cfaebb4d5299ca147def01086b39373" ]
[ "docs/examples/video/video_example.py" ]
[ "#!/bin/env python\n\nimport numpy as np\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.ops as ops\nimport nvidia.dali.types as types\n\ntry:\n from matplotlib import pyplot as plt\n has_matplotlib = True\nexcept ImportError:\n has_matplotlib = False\n\n\nBATCH_SIZE=4\nCOUNT=5\n\nVIDEO_FILES=[\"prepared.mp4\"]\nITER=10\n\nclass VideoPipe(Pipeline):\n def __init__(self, batch_size, num_threads, device_id, data):\n super(VideoPipe, self).__init__(batch_size, num_threads, device_id, seed=12)\n self.input = ops.VideoReader(device=\"gpu\", filenames=data, count=COUNT,\n shard_id=0, num_shards=1, random_shuffle=False)\n\n\n def define_graph(self):\n output = self.input(name=\"Reader\")\n return output\n\nif __name__ == \"__main__\":\n pipe = VideoPipe(batch_size=BATCH_SIZE, num_threads=2, device_id=0, data=VIDEO_FILES)\n pipe.build()\n for i in range(ITER):\n print(\"Iteration \" + str(i))\n pipe_out = pipe.run()\n sequences_out = pipe_out[0].asCPU().as_array()\n print(sequences_out.shape)\n print(\"Got sequence \" + str(i*COUNT) + \" \" + str((i + 1)*COUNT - 1))\n for b in range(BATCH_SIZE):\n batch_sequences = sequences_out[b]\n print(batch_sequences.shape)\n for c in range(COUNT):\n sample_frame = batch_sequences[c]\n\n frame_to_show = sequences_out[0][0]\n if has_matplotlib:\n plt.imshow(frame_to_show.astype('uint8'), interpolation='bicubic')\n # save before show(): once show() returns, the figure may be\n # cleared and savefig would write an empty image\n plt.savefig('saved_frame.png')\n plt.show()" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.savefig" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
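Each pipe.run() in the DALI example returns a batch of BATCH_SIZE sequences of COUNT consecutive frames, so iteration i of the loop covers frames i*COUNT through (i+1)*COUNT - 1 of each sequence slot. A NumPy stand-in that mimics the array shapes the loop iterates over (no GPU or DALI install required; the 480x854x3 frame size is an assumed example):

import numpy as np

BATCH_SIZE, COUNT, H, W, C = 4, 5, 480, 854, 3
# Stand-in for pipe_out[0].asCPU().as_array(): one batch of frame sequences.
sequences_out = np.zeros((BATCH_SIZE, COUNT, H, W, C), dtype=np.uint8)

for i in range(2):
    first, last = i * COUNT, (i + 1) * COUNT - 1
    print(f"Iteration {i}: frames {first}..{last}")
    for b in range(BATCH_SIZE):
        batch_sequences = sequences_out[b]     # shape (COUNT, H, W, C)
        for c in range(COUNT):
            sample_frame = batch_sequences[c]  # one (H, W, C) frame

assert sequences_out[0][0].shape == (H, W, C)  # frame_to_show in the example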
zutn/incremental_breakdown
[ "eefa5442502ebcf74f13ff3d802938e2a43188d3", "eefa5442502ebcf74f13ff3d802938e2a43188d3" ]
[ "model14_supercomputer.py", "model1_supercomputer.py" ]
[ "#!/usr/bin/env python\r\n# coding: utf-8 \r\n\r\n\"\"\"\r\n\r\n\"\"\"\r\nfrom __future__ import division\r\nimport datetime\r\n\r\nimport cmf\r\nimport spotpy\r\nfrom spotpy.parameter import Uniform as param\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nfrom datawriter_multi_objective import DataWriter\r\n\r\n\r\n# Calibration time span\r\n# 1979: Spin-Up\r\nbegin = 1980\r\nend = 1985\r\n# 1986 .. 1988: Validation\r\n\r\nprefix='model14' ###### <------------- enter model name her \r\n\r\n# Number of runs\r\nruns = 10\r\n\r\n\r\nfnQ = 'GrebenauQTagMittel__1979_1990.txt'\r\nfnT = 'Temp_max_min_avg_1979_1988.txt'\r\nfnP = 'Prec_Grebenau_1979_1988.txt'\r\n\r\n\r\nclass Fulda_lumped(object):\r\n \"\"\"\r\n Contains the whole model\r\n \"\"\"\r\n def __init__(self,begin,end, with_valid_data = False, shift_one_day = False):\r\n \"\"\"\r\n Initializes the model and builds the core setup \r\n begin: begin of the calibration period\r\n eng: end of the calibration period\r\n with_calib_data: save also the data from the validation period\r\n the calibration is still only done form 'begin' to 'end'\r\n \"\"\" \r\n # tr_S = Residence time of the water in the soil to the GW\r\n self.params = [param('tr_soil_GW',0.5,150.),\r\n # tr_soil_river = residence time from soil to river\r\n param(\"tr_soil_fulda\", 0.5,55.),\r\n # tr_surf = Residence time from surface \r\n param('tr_surf',0.001,30),\r\n # tr_GW_l = residence time in the lower groundwate\r\n # param('tr_GW_l',1.,1000.),\r\n # tr_GW_u = Residence time in the upper groundwater to the river\r\n param('tr_GW_u_fulda',1.,750.),\r\n # tr_GW_u_GW_l = residencete time to GW_l from GW_u\r\n # param(\"tr_GW_u_GW_l\", 10., 750.),\r\n # tr_fulda = Residence time in the river (in days)\r\n param('tr_fulda', 0., 3.5), \r\n\r\n # V0_soil = field capacity for the soil\r\n param('V0_soil',15.,350.),\r\n\r\n # beta_P_soil = Exponent that changes the form of the flux from the soil\r\n param('beta_soil_GW',0.5,3.2),\r\n\r\n # beta_fulda = exponent that changes the form of the flux from the soil \r\n param(\"beta_fulda\", 0.3,4.),\r\n\r\n # ETV1 = the Volume that defines that the evaporation is lowered because of not enough water\r\n param('ETV1',0.,100.),\r\n # fETV0 = factor the ET is multiplied with when water is low\r\n param('fETV0',0.,0.25),\r\n\r\n # Rate of snow melt\r\n param('meltrate',0.15,10.),\r\n # Snow_melt_temp = Temperature at which the snow melts (needed because of averaged temp\r\n param('snow_melt_temp',-1.0,4.2) ,\r\n \r\n #Qd_max = maximal flux from lower groundwater to drinking water production\r\n # param('Qd_max', 0.,3.),\r\n # tw_thresholt = amount of water that can't be slurped out by the water pumps\r\n # param(\"TW_threshold\", 0.,100.),\r\n\r\n # LAI = leaf area index\r\n param('LAI', 1.,12.),\r\n # Canopy Closure\r\n param(\"CanopyClosure\",0.,0.5),\r\n\r\n # Ksat = saturated conductivity of the soil \r\n param(\"Ksat\", 0., 1)\r\n ] \r\n \r\n # loads the data \r\n P,T,Tmin,Tmax,Q = self.loadPETQ()\r\n self.Q=Q\r\n # only use one core (quicker for small models)\r\n cmf.set_parallel_threads(1)\r\n # Generate a project with on ecell for a lumped model\r\n self.project = cmf.project()\r\n p = self.project\r\n \r\n # Add cell for soil and so on (x,y,z,area)\r\n c = p.NewCell(0,0,0,1000)\r\n \r\n # Add snow storage\r\n c.add_storage('Snow','S')\r\n cmf.Snowfall(c.snow,c)\r\n \r\n # Surfacewater is treated as a storage\r\n c.surfacewater_as_storage()\r\n \r\n # Add the soil and groundwater layers to the soil cell\r\n soil = 
c.add_layer(2.0)\r\n gw_upper = c.add_layer(5.0) \r\n # gw_lower = c.add_layer(20.0)\r\n \r\n # Fill storages\r\n c.layers[0].volume = 15\r\n c.layers[1].volume = 80\r\n # c.layers[2].volume = 120\r\n # \r\n # Evapotranspiration\r\n cmf.HargreaveET(soil,c.transpiration)\r\n #cmf.PenmanMonteith()\r\n \r\n # Add the Fulda River\r\n self.fulda = p.NewOpenStorage(name=\"Fulda\",x=0,y=0,z=0, area = 3.3*10**6)\r\n # Giving the Fulda a mean depth\r\n self.fulda.potential = 1.5 \r\n \r\n # add the drinking water outlet\r\n self.trinkwasser = p.NewOutlet('trinkwasser',20,0,0)\r\n \r\n # Outlet\r\n self.outlet = p.NewOutlet('outlet',10,0,0)\r\n \r\n # Storage for the interception\r\n I=c.add_storage('Canopy','C')\r\n \r\n # Rain\r\n self.makestations(P,T,Tmin,Tmax)\r\n self.project = p\r\n self.begin = begin\r\n self.end = end \r\n self.with_valid_data = with_valid_data\r\n self.shift_one_day = shift_one_day\r\n \r\n def setparameters(self,\r\n tr_soil_GW = 12.36870481, \r\n tr_soil_fulda = 12.,\r\n tr_surf = 3.560855356,\r\n # tr_GW_l = 829.7188064, \r\n tr_GW_u_fulda = 270.05035, \r\n # tr_GW_u_GW_l = 270., \r\n tr_fulda = 2.264612944, \r\n\r\n V0_soil = 280.0850875, \r\n \r\n beta_soil_GW=1.158865311, \r\n beta_fulda = 1.1,\r\n \r\n ETV1=2.575261852,\r\n fETV0=0.014808919,\r\n \r\n meltrate = 4.464735097,\r\n snow_melt_temp = 4.51938545,\r\n \r\n # Qd_max = 0.250552812,\r\n # TW_threshold = 10.,\r\n \r\n LAI = 2.992013336,\r\n CanopyClosure = 5.,\r\n \r\n Ksat = 0.02\r\n ): # this list has to be identical with the one above\r\n \"\"\"\r\n sets the parameters, all parameterized connections will be created anew \r\n \"\"\"\r\n # Get all definitions from init method\r\n p = self.project\r\n c = p[0]\r\n outlet = self.outlet\r\n fulda = self.fulda\r\n trinkwasser = self.trinkwasser\r\n\r\n # Adjustment of the evapotranspiration\r\n c.set_uptakestress(cmf.VolumeStress(ETV1,ETV1 * fETV0))\r\n \r\n # Flux from the surfaces to the river\r\n cmf.kinematic_wave(c.surfacewater,fulda,tr_surf)\r\n # flux from surfaces to the soil (infiltration)\r\n cmf.SimpleInfiltration(c.layers[0], c.surfacewater) \r\n\r\n # change the saturated conductivity of the soil\r\n c.layers[0].soil.Ksat = Ksat\r\n \r\n # Flux from soil to river (interflow)\r\n cmf.kinematic_wave(c.layers[0],fulda,tr_soil_fulda/V0_soil, V0 = V0_soil) \r\n # flux from the soil to the upper groundwater (percolation)\r\n cmf.kinematic_wave(c.layers[0], c.layers[1],tr_soil_GW, exponent=beta_soil_GW) \r\n\r\n # flux from the upper groundwater to the river (baseflow)\r\n cmf.kinematic_wave(c.layers[1], fulda, tr_GW_u_fulda) \r\n # flux from upper to lower groundwater (percolation)\r\n # cmf.kinematic_wave(c.layers[1], c.layers[2],tr_GW_u_GW_l)#, exponent=beta_GW_u_GW_l) \r\n \r\n # flux from the lower groundwater to river (baseflow)\r\n # cmf.kinematic_wave(c.layers[2], fulda, tr_GW_l) \r\n # Flux from the lower groundwater to the drinking water outlet\r\n # the fourths argument is the amount that is now allowed to be slurped \r\n # out of the lower groundwater\r\n # cmf.TechnicalFlux(c.layers[1],trinkwasser,Qd_max,TW_threshold,cmf.day)\r\n \r\n # Flux from drinking water to the river\r\n # cmf.waterbalance_connection(trinkwasser, fulda) \r\n \r\n # flux from the river to the outlet\r\n cmf.kinematic_wave(fulda, outlet, tr_fulda, exponent = beta_fulda) \r\n \r\n # set snowmelt temperature\r\n cmf.Weather.set_snow_threshold(snow_melt_temp) \r\n # Snowmelt at the surfaces\r\n snowmelt_surf = 
cmf.SimpleTindexSnowMelt(c.snow,c.surfacewater,c,rate=meltrate)\r\n\r\n # Splits the rainfall in interzeption and throughfall\r\n cmf.Rainfall(c.canopy,c, False, True)\r\n cmf.Rainfall(c.surfacewater,c, True, False)\r\n # Makes a overflow for the interception storage\r\n cmf.RutterInterception(c.canopy,c.surfacewater,c)\r\n # Transpiration on the plants is added\r\n cmf.CanopyStorageEvaporation(c.canopy,c.evaporation,c)\r\n # Sets the parameters for the interception \r\n c.vegetation.LAI= LAI \r\n # Defines how much throughfall there is (in %)\r\n c.vegetation.CanopyClosure = CanopyClosure\r\n \r\n \r\n def loadPETQ(self):\r\n \"\"\"\r\n Loads climata and discharge data from the corresponding files fnQ, fnT and fnP \r\n \"\"\" \r\n # Fixed model starting point\r\n begin = datetime.datetime(1979,1,1)\r\n step = datetime.timedelta(days=1)\r\n # empty time series\r\n P = cmf.timeseries(begin, step)\r\n P.extend(float(Pstr) for Pstr in open(fnP))\r\n \r\n Q = cmf.timeseries(begin,step)\r\n Q.extend(float(Qstr) for Qstr in open(fnQ))\r\n # Convert m3/s to mm/day\r\n Q *= 86400 * 1e3 / (2976.41 * 1e6)\r\n T = cmf.timeseries(begin,step)\r\n Tmin = cmf.timeseries(begin,step)\r\n Tmax = cmf.timeseries(begin,step)\r\n \r\n # Go through all lines in the file\r\n for line in open(fnT):\r\n columns = line.split('\\t')\r\n if len(columns) == 3:\r\n Tmax.add(float(columns[0]))\r\n Tmin.add(float(columns[1]))\r\n T.add(float(columns[2]))\r\n \r\n return P,T,Tmin,Tmax,Q\r\n \r\n def makestations(self,P,T,Tmin,Tmax):\r\n \"\"\"\r\n Creates the rainfall and the climate stations\r\n P = time series precipitation\r\n T, Tmin, Tmax = time series of mean temperatur, min and max \r\n \"\"\"\r\n rainstation = self.project.rainfall_stations.add('Grebenau avg',P,(0,0,0))\r\n self.project.use_nearest_rainfall()\r\n\r\n # Temperature data\r\n meteo = self.project.meteo_stations.add_station('Grebenau avg',(0,0,0))\r\n meteo.T = T\r\n meteo.Tmin = Tmin\r\n meteo.Tmax = Tmax\r\n self.project.use_nearest_meteo()\r\n \r\n return rainstation\r\n\r\n def runmodel(self,verbose=False):\r\n \"\"\"\r\n starts the model\r\n if verboose = True --> give something out for every day \r\n \"\"\"\r\n try:\r\n # Creates a solver for the differential equations\r\n #solver = cmf.ImplicitEuler(self.project,1e-8)\r\n solver = cmf.CVodeIntegrator(self.project,1e-8)\r\n # usually the CVodeIntegrator computes the jakobi matrix only\r\n # partially to save computation time. 
But in models with low spatial\r\n # complexity this leads to a longer computational time\r\n # therefore the jakob matrix is computed completely to speed things up\r\n # this is done by LinearSolver = 0\r\n solver.LinearSolver = 0\r\n c = self.project[0]\r\n solver.max_step = cmf.h\r\n \r\n # New time series for model results (res - result)\r\n resQ = cmf.timeseries(self.begin,cmf.day)\r\n # starts the solver and calculates the daily time steps\r\n end = self.end\r\n if self.with_valid_data:\r\n end = datetime.datetime(1988,12,31)\r\n \r\n for t in solver.run(self.project.meteo_stations[0].T.begin, end,cmf.day):\r\n # Fill the results\r\n if t>=self.begin:\r\n resQ.add(self.outlet.waterbalance(t))\r\n # Print a status report\r\n if verbose:\r\n print (t,'Q=%5.3f, P=%5.3f' % (resQ[t],c.get_rainfall(t)))\r\n \r\n # Print that one year was calculated, so one knows the model is still working\r\n #### comment this out if run on supercomputer to avoid spam ######\r\n #if t % cmf.year == cmf.year - cmf.year:\r\n # print(\"Finished one year\") \r\n \r\n # Return the filled result time series\r\n return resQ\r\n except RuntimeError:\r\n return np.array(self.Q[self.begin:self.end + datetime.timedelta(days=1)])*np.nan\r\n \r\n def simulation(self,vector):\r\n \"\"\"\r\n SpotPy expects a method simulation. This methods calls setparameters\r\n and runmodels, so SpotPy is satisfied \r\n \"\"\" \r\n \r\n paramdict = dict((pp.name,v) for pp,v in zip(self.params,vector))\r\n self.setparameters(**paramdict)\r\n resQ = self.runmodel()\r\n return np.array(resQ)\r\n\r\n\r\n def evaluation(self):\r\n \"\"\"\r\n For Spotpy \r\n \"\"\"\r\n return np.array(self.Q[self.begin:self.end + datetime.timedelta(days=1)])\r\n \r\n\r\n def parameters(self):\r\n \"\"\"\r\n For Spotpy \r\n \"\"\" \r\n return spotpy.parameter.generate(self.params)\r\n\r\n def objectivefunction(self,simulation,evaluation):\r\n \"\"\"\r\n For Spotpy \r\n \"\"\"\r\n # to hit peaks better shift the timeseries by one day \r\n if self.shift_one_day:\r\n simulation = simulation[:-1]\r\n evaluation = evaluation[1:]\r\n\r\n # if the validation data is added to the simulated data as well it should not\r\n # be used for calibration. 
To avoid this we have to shorten the list of \r\n # the simulated data to the length of the calibration period \r\n \r\n if self.with_valid_data:\r\n simulation = simulation[:len(evaluation)]\r\n logNS = spotpy.objectivefunctions.lognashsutcliff(evaluation, simulation)\r\n \r\n # calulate pbias here instead of problems to avoid problems with \r\n # older spotpy versions\r\n sim = np.array(simulation)\r\n obs = np.array(evaluation)\r\n pbias = 100 * (float(np.sum( sim - obs )) / float(np.sum( obs )) ) \r\n \r\n rmse = spotpy.objectivefunctions.rmse(evaluation,simulation)\r\n standart_dev = obs.std()\r\n # rsr = Ratio between the root mean square error and the standart\r\n # deviation of the measured data (see Moriasi et al 2007)\r\n rsr = rmse / standart_dev\r\n \r\n# print(\"logNS: \"+str(logNS))\r\n# print(\"pbias: \"+str(pbias))\r\n# print(\"rsr: \"+str(rsr))\r\n# print()\r\n\r\n return [logNS, pbias, rsr]\r\n\r\n\r\nif __name__ == '__main__': \r\n # Import algorithm\r\n from spotpy.algorithms import lhs as Sampler\r\n\r\n # Find out if the model should run parallel (for supercomputer)\r\n parallel = 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'\r\n \r\n # Create the spotted model \r\n model = Fulda_lumped(datetime.datetime(begin,1,1),\r\n datetime.datetime(end,12,31), with_valid_data = True,\r\n shift_one_day = True)\r\n if 'i' in sys.argv:\r\n runs = 0\r\n elif 'v' in sys.argv:\r\n sys.argv.remove('v')\r\n best = eval(open(prefix + '-best.dict').read())\r\n best.pop('Eff')\r\n model.setparameters(**best)\r\n model.begin = datetime.datetime(1986,1,1)\r\n model.end = datetime.datetime(1988,12,31)\r\n resQ = np.array(model.runmodel())\r\n model.plotvalidation(np.array(resQ))\r\n runs = 0\r\n elif len(sys.argv)>1:\r\n runs = int(sys.argv[1])\r\n if runs:\r\n sampler = Sampler(model, parallel=parallel)\r\n # sampler.datawriter = DataWriter(prefix,model.params, model.begin, model.end, 0.0)\r\n # multi objective datawriter\r\n sampler.datawriter = DataWriter(prefix, model.params, model.begin, model.end, simthreshold_NS = 0.50, \r\n simthreshold_pbias = 25.0, simthreshold_rsr = 0.70,\r\n with_valid_data = model.with_valid_data,\r\n shift_one_day = model.shift_one_day)\r\n # Now we can sample with the implemented Latin hypercube algorithm:\r\n sampler.sample(runs)\r\n", "#!/usr/bin/env python\r\n# coding: utf-8 \r\n\r\n\"\"\"\r\n\r\n\"\"\"\r\nfrom __future__ import division\r\nimport datetime\r\n\r\nimport cmf\r\nimport spotpy\r\nfrom spotpy.parameter import Uniform as param\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nfrom datawriter_multi_objective import DataWriter\r\n\r\n\r\n# Calibration time span\r\n# 1979: Spin-Up\r\nbegin = 1980\r\nend = 1985\r\n# 1986 .. 
1988: Validation\r\n\r\nprefix='model1' ###### <------------- enter model name her \r\n\r\n# Number of runs\r\nruns = 10\r\n\r\n\r\nfnQ = 'GrebenauQTagMittel__1979_1990.txt'\r\nfnT = 'Temp_max_min_avg_1979_1988.txt'\r\nfnP = 'Prec_Grebenau_1979_1988.txt'\r\n\r\n\r\nclass Fulda_lumped(object):\r\n \"\"\"\r\n Contains the whole model\r\n \"\"\"\r\n def __init__(self,begin,end, with_valid_data = False, shift_one_day = False):\r\n \"\"\"\r\n Initializes the model and builds the core setup \r\n begin: begin of the calibration period\r\n eng: end of the calibration period\r\n with_calib_data: save also the data from the validation period\r\n the calibration is still only done form 'begin' to 'end'\r\n \"\"\" \r\n # tr_S = Residence time of the water in the soil to the GW\r\n self.params = [param('tr_soil_GW',0.5,150.),\r\n # tr_soil_river = residence time from soil to river\r\n param(\"tr_soil_fulda\", 0.5,55.),\r\n # tr_surf = Residence time from surface \r\n param('tr_surf',0.001,30),\r\n # tr_GW_l = residence time in the lower groundwate\r\n param('tr_GW_l',1.,1000.),\r\n # tr_GW_u = Residence time in the upper groundwater to the river\r\n param('tr_GW_u_fulda',1.,750.),\r\n # tr_GW_u_GW_l = residencete time to GW_l from GW_u\r\n param(\"tr_GW_u_GW_l\", 10., 750.),\r\n # tr_fulda = Residence time in the river (in days)\r\n param('tr_fulda', 0., 3.5), \r\n\r\n # V0_soil = field capacity for the soil\r\n param('V0_soil',15.,350.),\r\n\r\n # beta_P_soil = Exponent that changes the form of the flux from the soil\r\n param('beta_soil_GW',0.5,3.2),\r\n\r\n # beta_fulda = exponent that changes the form of the flux from the soil \r\n param(\"beta_fulda\", 0.3,4.),\r\n\r\n # ETV1 = the Volume that defines that the evaporation is lowered because of not enough water\r\n param('ETV1',0.,100.),\r\n # fETV0 = factor the ET is multiplied with when water is low\r\n param('fETV0',0.,0.25),\r\n\r\n # Rate of snow melt\r\n param('meltrate',0.15,10.),\r\n # Snow_melt_temp = Temperature at which the snow melts (needed because of averaged temp\r\n param('snow_melt_temp',-1.0,4.2) ,\r\n \r\n #Qd_max = maximal flux from lower groundwater to drinking water production\r\n param('Qd_max', 0.,3.),\r\n # tw_thresholt = amount of water that can't be slurped out by the water pumps\r\n param(\"TW_threshold\", 0.,100.),\r\n\r\n # LAI = leaf area index\r\n param('LAI', 1.,12.),\r\n # Canopy Closure\r\n param(\"CanopyClosure\",0.,0.5),\r\n\r\n # Ksat = saturated conductivity of the soil \r\n param(\"Ksat\", 0., 1)\r\n ] \r\n \r\n # loads the data \r\n P,T,Tmin,Tmax,Q = self.loadPETQ()\r\n self.Q=Q\r\n # only use one core (quicker for small models)\r\n cmf.set_parallel_threads(1)\r\n # Generate a project with on ecell for a lumped model\r\n self.project = cmf.project()\r\n p = self.project\r\n \r\n # Add cell for soil and so on (x,y,z,area)\r\n c = p.NewCell(0,0,0,1000)\r\n \r\n # Add snow storage\r\n c.add_storage('Snow','S')\r\n cmf.Snowfall(c.snow,c)\r\n \r\n # Surfacewater is treated as a storage\r\n c.surfacewater_as_storage()\r\n \r\n # Add the soil and groundwater layers to the soil cell\r\n soil = c.add_layer(2.0)\r\n gw_upper = c.add_layer(5.0) \r\n gw_lower = c.add_layer(20.0)\r\n \r\n # Fill storages\r\n c.layers[0].volume = 15\r\n c.layers[1].volume = 80\r\n c.layers[2].volume = 120\r\n \r\n # Evapotranspiration\r\n cmf.HargreaveET(soil,c.transpiration)\r\n #cmf.PenmanMonteith()\r\n \r\n # Add the Fulda River\r\n self.fulda = p.NewOpenStorage(name=\"Fulda\",x=0,y=0,z=0, area = 3.3*10**6)\r\n # Giving the Fulda a 
mean depth\r\n self.fulda.potential = 1.5 \r\n \r\n # add the drinking water outlet\r\n self.trinkwasser = p.NewOutlet('trinkwasser',20,0,0)\r\n \r\n # Outlet\r\n self.outlet = p.NewOutlet('outlet',10,0,0)\r\n \r\n # Storage for the interception\r\n I=c.add_storage('Canopy','C')\r\n \r\n # Rain\r\n self.makestations(P,T,Tmin,Tmax)\r\n self.project = p\r\n self.begin = begin\r\n self.end = end \r\n self.with_valid_data = with_valid_data\r\n self.shift_one_day = shift_one_day\r\n \r\n def setparameters(self,\r\n tr_soil_GW = 12.36870481, \r\n tr_soil_fulda = 12.,\r\n tr_surf = 3.560855356,\r\n tr_GW_l = 829.7188064, \r\n tr_GW_u_fulda = 270.05035, \r\n tr_GW_u_GW_l = 270., \r\n tr_fulda = 2.264612944, \r\n\r\n V0_soil = 280.0850875, \r\n \r\n beta_soil_GW=1.158865311, \r\n beta_fulda = 1.1,\r\n \r\n ETV1=2.575261852,\r\n fETV0=0.014808919,\r\n \r\n meltrate = 4.464735097,\r\n snow_melt_temp = 4.51938545,\r\n \r\n Qd_max = 0.250552812,\r\n TW_threshold = 10.,\r\n \r\n LAI = 2.992013336,\r\n CanopyClosure = 5.,\r\n \r\n Ksat = 0.02\r\n ): # this list has to be identical with the one above\r\n \"\"\"\r\n sets the parameters, all parameterized connections will be created anew \r\n \"\"\"\r\n # Get all definitions from init method\r\n p = self.project\r\n c = p[0]\r\n outlet = self.outlet\r\n fulda = self.fulda\r\n trinkwasser = self.trinkwasser\r\n\r\n # Adjustment of the evapotranspiration\r\n c.set_uptakestress(cmf.VolumeStress(ETV1,ETV1 * fETV0))\r\n \r\n # Flux from the surfaces to the river\r\n cmf.kinematic_wave(c.surfacewater,fulda,tr_surf)\r\n # flux from surfaces to the soil (infiltration)\r\n cmf.SimpleInfiltration(c.layers[0], c.surfacewater) \r\n\r\n # change the saturated conductivity of the soil\r\n c.layers[0].soil.Ksat = Ksat\r\n \r\n # Flux from soil to river (interflow)\r\n cmf.kinematic_wave(c.layers[0],fulda,tr_soil_fulda/V0_soil, V0 = V0_soil) \r\n # flux from the soil to the upper groundwater (percolation)\r\n cmf.kinematic_wave(c.layers[0], c.layers[1],tr_soil_GW, exponent=beta_soil_GW) \r\n\r\n # flux from the upper groundwater to the river (baseflow)\r\n cmf.kinematic_wave(c.layers[1], fulda, tr_GW_u_fulda) \r\n # flux from upper to lower groundwater (percolation)\r\n cmf.kinematic_wave(c.layers[1], c.layers[2],tr_GW_u_GW_l)#, exponent=beta_GW_u_GW_l) \r\n \r\n # flux from the lower groundwater to river (baseflow)\r\n cmf.kinematic_wave(c.layers[2], fulda, tr_GW_l) \r\n # Flux from the lower groundwater to the drinking water outlet\r\n # the fourths argument is the amount that is now allowed to be slurped \r\n # out of the lower groundwater\r\n cmf.TechnicalFlux(c.layers[2],trinkwasser,Qd_max,TW_threshold,cmf.day)\r\n \r\n # Flux from drinking water to the river\r\n cmf.waterbalance_connection(trinkwasser, fulda) \r\n \r\n # flux from the river to the outlet\r\n cmf.kinematic_wave(fulda, outlet, tr_fulda, exponent = beta_fulda) \r\n \r\n # set snowmelt temperature\r\n cmf.Weather.set_snow_threshold(snow_melt_temp) \r\n # Snowmelt at the surfaces\r\n snowmelt_surf = cmf.SimpleTindexSnowMelt(c.snow,c.surfacewater,c,rate=meltrate)\r\n\r\n # Splits the rainfall in interzeption and throughfall\r\n cmf.Rainfall(c.canopy,c, False, True)\r\n cmf.Rainfall(c.surfacewater,c, True, False)\r\n # Makes a overflow for the interception storage\r\n cmf.RutterInterception(c.canopy,c.surfacewater,c)\r\n # Transpiration on the plants is added\r\n cmf.CanopyStorageEvaporation(c.canopy,c.evaporation,c)\r\n # Sets the parameters for the interception \r\n c.vegetation.LAI= LAI \r\n # Defines 
how much throughfall there is (in %)\r\n c.vegetation.CanopyClosure = CanopyClosure\r\n \r\n \r\n def loadPETQ(self):\r\n \"\"\"\r\n Loads climata and discharge data from the corresponding files fnQ, fnT and fnP \r\n \"\"\" \r\n # Fixed model starting point\r\n begin = datetime.datetime(1979,1,1)\r\n step = datetime.timedelta(days=1)\r\n # empty time series\r\n P = cmf.timeseries(begin, step)\r\n P.extend(float(Pstr) for Pstr in open(fnP))\r\n \r\n Q = cmf.timeseries(begin,step)\r\n Q.extend(float(Qstr) for Qstr in open(fnQ))\r\n # Convert m3/s to mm/day\r\n Q *= 86400 * 1e3 / (2976.41 * 1e6)\r\n T = cmf.timeseries(begin,step)\r\n Tmin = cmf.timeseries(begin,step)\r\n Tmax = cmf.timeseries(begin,step)\r\n \r\n # Go through all lines in the file\r\n for line in open(fnT):\r\n columns = line.split('\\t')\r\n if len(columns) == 3:\r\n Tmax.add(float(columns[0]))\r\n Tmin.add(float(columns[1]))\r\n T.add(float(columns[2]))\r\n \r\n return P,T,Tmin,Tmax,Q\r\n \r\n def makestations(self,P,T,Tmin,Tmax):\r\n \"\"\"\r\n Creates the rainfall and the climate stations\r\n P = time series precipitation\r\n T, Tmin, Tmax = time series of mean temperatur, min and max \r\n \"\"\"\r\n rainstation = self.project.rainfall_stations.add('Grebenau avg',P,(0,0,0))\r\n self.project.use_nearest_rainfall()\r\n\r\n # Temperature data\r\n meteo = self.project.meteo_stations.add_station('Grebenau avg',(0,0,0))\r\n meteo.T = T\r\n meteo.Tmin = Tmin\r\n meteo.Tmax = Tmax\r\n self.project.use_nearest_meteo()\r\n \r\n return rainstation\r\n\r\n def runmodel(self,verbose=False):\r\n \"\"\"\r\n starts the model\r\n if verboose = True --> give something out for every day \r\n \"\"\"\r\n try:\r\n # Creates a solver for the differential equations\r\n #solver = cmf.ImplicitEuler(self.project,1e-8)\r\n solver = cmf.CVodeIntegrator(self.project,1e-8)\r\n # usually the CVodeIntegrator computes the jakobi matrix only\r\n # partially to save computation time. But in models with low spatial\r\n # complexity this leads to a longer computational time\r\n # therefore the jakob matrix is computed completely to speed things up\r\n # this is done by LinearSolver = 0\r\n solver.LinearSolver = 0\r\n c = self.project[0]\r\n solver.max_step = cmf.h\r\n \r\n # New time series for model results (res - result)\r\n resQ = cmf.timeseries(self.begin,cmf.day)\r\n # starts the solver and calculates the daily time steps\r\n end = self.end\r\n if self.with_valid_data:\r\n end = datetime.datetime(1988,12,31)\r\n \r\n for t in solver.run(self.project.meteo_stations[0].T.begin, end,cmf.day):\r\n # Fill the results\r\n if t>=self.begin:\r\n resQ.add(self.outlet.waterbalance(t))\r\n # Print a status report\r\n if verbose:\r\n print (t,'Q=%5.3f, P=%5.3f' % (resQ[t],c.get_rainfall(t)))\r\n \r\n # Print that one year was calculated, so one knows the model is still working\r\n #### comment this out if run on supercomputer to avoid spam ######\r\n #if t % cmf.year == cmf.year - cmf.year:\r\n # print(\"Finished one year\") \r\n \r\n # Return the filled result time series\r\n return resQ\r\n except RuntimeError:\r\n return np.array(self.Q[self.begin:self.end + datetime.timedelta(days=1)])*np.nan\r\n \r\n def simulation(self,vector):\r\n \"\"\"\r\n SpotPy expects a method simulation. 
This methods calls setparameters\r\n and runmodels, so SpotPy is satisfied \r\n \"\"\" \r\n \r\n paramdict = dict((pp.name,v) for pp,v in zip(self.params,vector))\r\n self.setparameters(**paramdict)\r\n resQ = self.runmodel()\r\n return np.array(resQ)\r\n\r\n\r\n def evaluation(self):\r\n \"\"\"\r\n For Spotpy \r\n \"\"\"\r\n return np.array(self.Q[self.begin:self.end + datetime.timedelta(days=1)])\r\n \r\n\r\n def parameters(self):\r\n \"\"\"\r\n For Spotpy \r\n \"\"\" \r\n return spotpy.parameter.generate(self.params)\r\n\r\n def objectivefunction(self,simulation,evaluation):\r\n \"\"\"\r\n For Spotpy \r\n \"\"\"\r\n # to hit peaks better shift the timeseries by one day \r\n if self.shift_one_day:\r\n simulation = simulation[:-1]\r\n evaluation = evaluation[1:]\r\n\r\n # if the validation data is added to the simulated data as well it should not\r\n # be used for calibration. To avoid this we have to shorten the list of \r\n # the simulated data to the length of the calibration period \r\n \r\n if self.with_valid_data:\r\n simulation = simulation[:len(evaluation)]\r\n logNS = spotpy.objectivefunctions.lognashsutcliff(evaluation, simulation)\r\n \r\n # calulate pbias here instead of problems to avoid problems with \r\n # older spotpy versions\r\n sim = np.array(simulation)\r\n obs = np.array(evaluation)\r\n pbias = 100 * (float(np.sum( sim - obs )) / float(np.sum( obs )) ) \r\n \r\n rmse = spotpy.objectivefunctions.rmse(evaluation,simulation)\r\n standart_dev = obs.std()\r\n # rsr = Ratio between the root mean square error and the standart\r\n # deviation of the measured data (see Moriasi et al 2007)\r\n rsr = rmse / standart_dev\r\n \r\n# print(\"logNS: \"+str(logNS))\r\n# print(\"pbias: \"+str(pbias))\r\n# print(\"rsr: \"+str(rsr))\r\n# print()\r\n\r\n return [logNS, pbias, rsr]\r\n\r\n\r\nif __name__ == '__main__': \r\n # Import algorithm\r\n from spotpy.algorithms import lhs as Sampler\r\n\r\n # Find out if the model should run parallel (for supercomputer)\r\n parallel = 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'\r\n \r\n # Create the spotted model \r\n model = Fulda_lumped(datetime.datetime(begin,1,1),\r\n datetime.datetime(end,12,31), with_valid_data = True,\r\n shift_one_day = True)\r\n if 'i' in sys.argv:\r\n runs = 0\r\n elif 'v' in sys.argv:\r\n sys.argv.remove('v')\r\n best = eval(open(prefix + '-best.dict').read())\r\n best.pop('Eff')\r\n model.setparameters(**best)\r\n model.begin = datetime.datetime(1986,1,1)\r\n model.end = datetime.datetime(1988,12,31)\r\n resQ = np.array(model.runmodel())\r\n model.plotvalidation(np.array(resQ))\r\n runs = 0\r\n elif len(sys.argv)>1:\r\n runs = int(sys.argv[1])\r\n if runs:\r\n sampler = Sampler(model, parallel=parallel,dbformat = \"csv\", dbname = \"test\")\r\n # sampler.datawriter = DataWriter(prefix,model.params, model.begin, model.end, 0.0)\r\n # multi objective datawriter\r\n# sampler.datawriter = DataWriter(prefix, model.params, model.begin, model.end, simthreshold_NS = 0.50, \r\n# simthreshold_pbias = 25.0, simthreshold_rsr = 0.70,\r\n# with_valid_data = model.with_valid_data,\r\n# shift_one_day = model.shift_one_day)\r\n # Now we can sample with the implemented Latin hypercube algorithm:\r\n sampler.sample(runs)\r\n" ]
[ [ "numpy.array", "numpy.sum" ], [ "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cclauss/mantra
[ "19e2f72960da8314f11768d9acfe7836629b817c" ]
[ "mantraml/models/pytorch/callbacks.py" ]
[ "import matplotlib.pyplot as plt\nimport os\nimport torch\n\n\nclass EvaluateTask:\n\n def __init__(self, mantra_model):\n\n if mantra_model.task:\n mantra_model.task.latest_loss = mantra_model.task.evaluate(mantra_model)\n\n print('%s: %s' % (mantra_model.task.evaluation_name, mantra_model.task.latest_loss))\n\n if hasattr(mantra_model.task, 'secondary_metrics'):\n\n mantra_model.task.secondary_metrics_values = {}\n\n for metric in mantra_model.task.secondary_metrics:\n metric_result = getattr(mantra_model.task, metric)(mantra_model)\n mantra_model.task.secondary_metrics_values[metric] = float(metric_result)\n print('%s: %s' % (metric.capitalize(), metric_result))\n\n\nclass ModelCheckpoint:\n\n def __init__(self, mantra_model, torch_model):\n\n checkpoint_dir = '%s/trials/%s/checkpoint/' % (os.getcwd(), mantra_model.trial.trial_folder_name)\n\n if not os.path.isdir(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n if mantra_model.task:\n mantra_model.task.latest_loss = mantra_model.task.evaluate(self)\n\n if not hasattr(mantra_model.task, 'best_loss'):\n mantra_model.task.best_loss = None\n mantra_model.task.best_loss = mantra_model.task.latest_loss\n\n if mantra_model.save_best_only:\n if mantra_model.task.latest_loss < mantra_model.task.best_loss:\n torch.save(torch_model.state_dict(), '%s/trials/%s/checkpoint/model_weights.pt' % (os.getcwd(), mantra_model.trial.trial_folder_name))\n mantra_model.task.best_loss = mantra_model.task.latest_loss\n else:\n torch.save(torch_model.state_dict(), '%s/trials/%s/checkpoint/model_weights.pt' % (os.getcwd(), mantra_model.trial.trial_folder_name))\n\n else:\n torch.save(torch_model.state_dict(), '%s/trials/%s/checkpoint/model_weights.pt' % (os.getcwd(), mantra_model.trial.trial_folder_name))\n\n\n\nclass SavePlot:\n\n def __init__(self, mantra_model, plt, plt_name='default.png'):\n\n path = '%s/trials/%s/media' % (os.getcwd(), mantra_model.trial.trial_folder_name)\n \n if not os.path.exists(path):\n os.makedirs(path)\n \n plt.savefig(path + \"/%s\" % plt_name)\n\n\nclass StoreTrial:\n\n def __init__(self, mantra_model, epoch):\n mantra_model.store_trial_data(epoch)\n\n" ]
[ [ "matplotlib.pyplot.savefig" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dyeDeny/MachineLearninginAction
[ "4a6c0e1b719d797483a7360ad11fb218672a1025" ]
[ "ch03/treePlotter.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 15 22:07:53 2018\n\n@author: dye\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\ndecisionNode = dict(boxstyle=\"sawtooth\", fc=\"0.8\")\nleafNode = dict(boxstyle=\"round4\", fc=\"0.8\")\narrow_args = dict(arrowstyle=\"<-\")\n\ndef plotNode(nodeTxt, centerPt, parentPt, nodeType):\n createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords=\"axes fraction\",\\\n xytext=centerPt, textcoords=\"axes fraction\",\\\n va=\"center\", ha=\"center\", bbox=nodeType, arrowprops=arrow_args)\n'''\ndef createPlot():\n fig = plt.figure(1, facecolor=\"white\")\n fig.clf()\n createPlot.ax1 = fig.add_subplot(111, frameon=False)\n plotNode(\"Decision Node\", (0.5, 0.1), (0.2, 0.2), decisionNode)\n plotNode(\"Leaf Node\", (0.8, 0.1), (0.5, 0.8), leafNode)\n plt.show()\n'''\n\ndef getNumLeafs(myTree):\n numLeafs = 0\n rootNode = list(myTree.keys())[0]\n secondDict = myTree[rootNode]\n for key in secondDict.keys():\n if isinstance(secondDict[key], dict):\n numLeafs += getNumLeafs(secondDict[key])\n else:\n numLeafs += 1\n return numLeafs\n\ndef getTreeDepth(myTree):\n depth = 0\n rootNode = list(myTree.keys())[0]\n secondDict = myTree[rootNode]\n for key in secondDict.keys():\n if isinstance(secondDict[key], dict):\n tmpDepth = 1 + getTreeDepth(secondDict[key])\n else:\n tmpDepth = 1\n #print(\"max(%d, %d) = %d\" % (tmpDepth, depth, max(tmpDepth, depth)))\n depth = max(tmpDepth, depth)\n return depth\n\ndef retriveTree(i):\n listOfTrees = [{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},\n {'no surfacing': {0: 'no', 1: {'flippers': {0: {'HEAD': {0: 'no', 1: 'yes'}}, 1: 'no'}}}},\n {'no surfacing': {0: 'no', 1: 'yes'}}\n ]\n return listOfTrees[i]\n\ndef plotMidText(cntrPt, parentPt, string):\n xMid = (parentPt[0] + cntrPt[0]) / 2.0\n yMid = (parentPt[1] + cntrPt[1]) / 2.0\n createPlot.ax1.text(xMid, yMid, string)\n\n\ndef plotTree(myTree, parentPt, nodeTxt):\n leafs = getNumLeafs(myTree)\n rootNode = list(myTree.keys())[0]\n cntrPt = (plotTree.xOff + (1.0 + float(leafs)) / 2.0 / plotTree.totalW, \\\n plotTree.yOff)\n \n print(\"myTree (%f, %f), parentPt (%f, %f), nodeTxt (%s)\" % \\\n (cntrPt[0], cntrPt[1], parentPt[0], parentPt[1], str(nodeTxt)))\n plotMidText(cntrPt, parentPt, nodeTxt)\n plotNode(rootNode, cntrPt, parentPt, decisionNode)\n \n secondDict = myTree[rootNode]\n plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD\n \n for key in secondDict.keys():\n if isinstance(secondDict[key], dict):\n plotTree(secondDict[key], cntrPt, str(key))\n else:\n plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW\n plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)\n plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))\n plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD\n\ndef createPlot(inTree):\n fig = plt.figure(1, facecolor=\"white\")\n fig.clf()\n axprops = dict(xticks=[], yticks=[])\n createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)\n plotTree.totalW = float(getNumLeafs(inTree))\n plotTree.totalD = float(getTreeDepth(inTree))\n plotTree.xOff = -0.5/plotTree.totalW\n plotTree.yOff = 1.0\n plotTree(inTree, (0.5, 1.0), \"\")\n plt.show()\n\ndef test():\n myTree = retriveTree(2)\n leafs = getNumLeafs(myTree)\n depth = getTreeDepth(myTree)\n print(\"leafs:%d\" % (leafs))\n print(\"depth:%d\" % (depth))\n createPlot(myTree)\n \nif __name__ == \"__main__\":\n test()" ]
[ [ "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Parskatt/caps
[ "030aea48a0b7c0480607fdf3a55fbdc3ffb47f9c" ]
[ "CAPS/utils.py" ]
[ "import numpy as np\nimport scipy\nimport cv2\n\n\ndef cycle(iterable):\n while True:\n for x in iterable:\n yield x\n\n\ndef evaluate_pose(E, P):\n R_gt = P[:3, :3]\n t_gt = P[:3, 3]\n R1, R2, t = cv2.decomposeEssentialMat(E)\n t = t.squeeze()\n theta_1 = np.linalg.norm(scipy.linalg.logm(R1.T.dot(R_gt)), 'fro') / np.sqrt(2)\n theta_2 = np.linalg.norm(scipy.linalg.logm(R2.T.dot(R_gt)), 'fro') / np.sqrt(2)\n theta = min(theta_1, theta_2) * 180 / np.pi\n tran_cos = np.inner(t, t_gt) / (np.linalg.norm(t_gt) * np.linalg.norm(t))\n tran = np.arccos(tran_cos) * 180 / np.pi\n return theta, tran\n\n\ndef average_precision(labels, logits):\n '''\n inputs: label: num_examples x num_pts\n logits: num_examples x num_pts\n :return: average precision\n '''\n from sklearn.metrics import average_precision_score\n sum_ap = 0\n count = 0\n for label, logit in zip(labels, logits):\n if np.sum(label) == 0:\n continue\n ap = average_precision_score(label, logit)\n sum_ap += ap\n count += 1\n\n map = sum_ap/count if count != 0 else 0\n return map\n\ndef homogenize(kp):\n '''\n turn into homogeneous coordinates\n :param kp: n*2 coordinates\n :return: n*3 coordinates where the last channel is 1\n '''\n ones = np.ones_like(kp[:, 0:1])\n return np.concatenate((kp, ones), 1)\n\n\ndef random_choice(array, size):\n rand = np.random.RandomState(1234)\n num_data = len(array)\n if num_data > size:\n idx = rand.choice(num_data, size, replace=False)\n else:\n idx = rand.choice(num_data, size, replace=True)\n return array[idx]\n\n\ndef drawlines(img1, img2, lines, pts1, pts2, color=None, thickness=-1):\n ''' img1 - image on which we draw the epilines for the points in img2\n lines - corresponding epilines '''\n r, c = img1.shape[:2]\n # img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)\n # img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)\n color_ = color\n for r, pt1, pt2 in zip(lines, pts1, pts2):\n if r[1] == 0:\n continue\n if color_ is None:\n color = tuple(np.random.randint(0, 255, 3).tolist())\n else:\n color = color_\n x0, y0 = map(int, [0, -r[2]/r[1]])\n x1, y1 = map(int, [c, -(r[2]+r[0]*c)/r[1]])\n img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)\n img1 = cv2.circle(img1, tuple(pt1), 5, color, thickness)\n img2 = cv2.circle(img2, tuple(pt2), 5, color, thickness)\n return img1, img2\n\n\ndef to_jet(input, type='tensor', mode='HW1'):\n import matplotlib.pyplot as plt\n cm = plt.get_cmap('jet')\n\n if type == 'tensor':\n input = input.detach().cpu().numpy()\n\n if mode == '1HW':\n input = input.transpose(1, 2, 0)\n elif mode == 'B1HW':\n input = input.transpose(0, 2, 3, 1)\n elif mode == 'HW':\n input = input[..., np.newaxis] # hxwx1\n\n if input.ndim == 3:\n out = cm(input[:, :, 0])[:, :, :3]\n else:\n out = np.zeros_like(input).repeat(3, axis=-1)\n for i, data in enumerate(input):\n out[i] = cm(input[:, :, 0])[:, :, :3]\n return out\n\n\ndef drawlinesMatch(img1, img2, pts1, pts2, concat_row=True):\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n interval = 5\n if concat_row:\n out = 255 * np.ones((max([rows1, rows2]), cols1 + cols2+interval, 3), dtype='uint8')\n out[:rows2, cols1+interval:cols1+cols2+interval, :] = img2\n pts2[:, 0] += cols1 + interval\n else:\n out = 255 * np.ones((rows1 + rows2 + interval, max(cols1, cols2), 3), dtype='uint8')\n out[rows1+interval:rows1+rows2+interval, :cols2] = img2\n pts2[:, 1] += rows1 + interval\n\n # Place the first image to the left\n out[:rows1, :cols1, :] = img1\n thickness = 3\n radius = 5\n\n for pt1, pt2 in zip(pts1, 
pts2):\n cv2.circle(out, (int(pt1[0]), int(pt1[1])), radius, tuple(np.array([255, 0, 0]).tolist()), -1, cv2.LINE_AA)\n cv2.circle(out, (int(pt2[0]), int(pt2[1])), radius, tuple(np.array([255, 0, 0]).tolist()), -1, cv2.LINE_AA)\n cv2.line(out, tuple(pt1.astype(int)), tuple(pt2.astype(int)), color=(0, 255, 0),\n lineType=cv2.LINE_AA, thickness=thickness)\n return out" ]
[ [ "numpy.ones_like", "numpy.sqrt", "numpy.inner", "matplotlib.pyplot.get_cmap", "numpy.linalg.norm", "numpy.arccos", "numpy.concatenate", "numpy.zeros_like", "sklearn.metrics.average_precision_score", "numpy.array", "numpy.random.RandomState", "numpy.sum", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dhirajpatnaik16297/text-gan-tensorflow
[ "fb9897ee55e8d674a16c6041a2c1fb67abad131b" ]
[ "layers.py" ]
[ "\"\"\" TensorFlow Layers\n\nConvenience functions but Input and Output should be tensors.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.contrib import seq2seq\n\n\n_phase = tf.Variable(False, name='phase', trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])\n_phase_train = _phase.assign(True)\n_phase_infer = _phase.assign(False)\n\n\n# TODO: move to ops\ndef _rank(x):\n return len(x.get_shape())\n\n\ndef _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):\n random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)\n binary_mask = tf.floor(random_tensor)\n if normalize:\n binary_mask = tf.reciprocal(keep_prob) * binary_mask\n return binary_mask\n\n\ndef _global_keep_prob(keep_prob):\n keep_prob = tf.convert_to_tensor(keep_prob, dtype=tf.float32)\n keep_prob = tf.cond(_phase, lambda: keep_prob, lambda: keep_prob * 0.0 + 1.0)\n return keep_prob\n\n\ndef layer(func):\n\n class Layer(object):\n def __init__(self, *args, **kwargs):\n self.func = func\n self.args = args\n self.kwargs = kwargs\n self.name = self.kwargs.get(\"name\", self.func.__name__)\n\n self._template = tf.make_template(self.name, self.func, create_scope_now_=True)\n self._unique_name = self._template.variable_scope.name.split(\"/\")[-1]\n self._summary_added = False\n\n def __call__(self, x):\n out = self.template(x, *self.args, **self.kwargs)\n self._layer_logging(x, out)\n self._add_summary()\n return out\n\n def __rrshift__(self, other):\n \"\"\" >> \"\"\"\n return self.__call__(other)\n\n def _layer_logging(self, other, out):\n tf.logging.info(\" {} {} {} -> {}\".format(\n self.unique_name, \"shape\", str(other.get_shape()), str(out.get_shape())))\n\n def _add_summary(self):\n if not self.kwargs.get(\"summary\"):\n return None\n if self.summary_added:\n return None\n for var in self.get_variables_in_scope():\n # TODO: different summary types\n tf.summary.scalar(var.name, tf.reduce_mean(var))\n self._summary_added = True\n\n def get_variables_in_scope(self):\n assert self.template._variables_created, \"Variables not yet created or undefined.\"\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.variable_scope_name)\n return variables\n\n @property\n def template(self):\n return self._template\n\n @property\n def unique_name(self):\n return self._unique_name\n\n @property\n def variable_scope_name(self):\n return self.template._variable_scope._name\n\n @property\n def summary_added(self):\n return self._summary_added\n\n return Layer\n\n\n@layer\ndef identity_layer(tensor, **opts):\n out = tf.identity(tensor)\n return out\n\n\n@layer\ndef embedding_layer(tensor, vocab_size=None, embedding_dim=None, embedding_matrix=None, **opts):\n if embedding_matrix is None:\n initializer = tf.contrib.layers.xavier_initializer(uniform=True)\n embedding_matrix = tf.get_variable(\"embedding_matrix\", initializer=initializer(shape=(vocab_size, embedding_dim)))\n\n out = tf.nn.embedding_lookup(embedding_matrix, tensor)\n return out \n\n\n@layer\ndef recurrent_layer(tensor, cell=None, hidden_dims=128, sequence_length=None, decoder_fn=None, \n activation=tf.nn.tanh, initializer=tf.orthogonal_initializer(), initial_state=None, \n keep_prob=1.0,\n return_final_state=False, return_next_cell_input=True, **opts):\n if cell is None:\n cell = tf.contrib.rnn.BasicRNNCell(hidden_dims, activation=activation)\n # cell = tf.contrib.rnn.LSTMCell(hidden_dims, 
activation=activation)\n\n if keep_prob < 1.0:\n keep_prob = _global_keep_prob(keep_prob)\n cell = tf.contrib.rnn.DropoutWrapper(cell, keep_prob, keep_prob)\n\n if opts.get(\"name\"):\n tf.add_to_collection(opts.get(\"name\"), cell)\n\n if decoder_fn is None:\n outputs, final_state = tf.nn.dynamic_rnn(cell, tensor, \n sequence_length=sequence_length, initial_state=initial_state, dtype=tf.float32)\n final_context_state = None\n else:\n # TODO: turn off sequence_length?\n outputs, final_state, final_context_state = seq2seq.dynamic_rnn_decoder(\n cell, decoder_fn, inputs=None, sequence_length=sequence_length)\n\n if return_final_state:\n return final_state\n else:\n return outputs\n\n\n@layer\ndef reshape_layer(tensor, shape, **opts):\n out = tf.reshape(tensor, shape=shape)\n return out\n\n\n@layer\ndef dense_layer(tensor, hidden_dims, weight=None, bias=None, **opts):\n original_tensor_shape = tf.shape(tensor)\n in_dim = int(tensor.get_shape()[-1])\n\n rank = _rank(tensor)\n if rank > 2:\n # -- time distributed dense\n tensor = tf.reshape(tensor, shape=(-1, in_dim))\n\n name = opts.get(\"name\", \"\")\n\n if weight is None:\n initializer = tf.contrib.layers.xavier_initializer(uniform=True)\n weight = tf.get_variable(\"{}_dense_W\".format(name), initializer=initializer(shape=(in_dim, hidden_dims)))\n if bias is None:\n bias = tf.get_variable(\"{}_dense_b\".format(name), initializer=tf.zeros(shape=hidden_dims))\n\n out = tf.add(tf.matmul(tensor, weight), bias)\n\n if rank > 2:\n # reshape back to time dimension\n out = tf.reshape(out, shape=original_tensor_shape)\n\n return out\n\n\n@layer\ndef dropout_layer(tensor, keep_prob=1.0, **opts):\n keep_prob = _global_keep_prob(keep_prob)\n out = tf.nn.dropout(tensor, keep_prob=keep_prob)\n return out\n\n\n# TODO: should i normalize?\n@layer\ndef word_dropout_layer(tensor, keep_prob=1.0, **opts):\n keep_prob = _global_keep_prob(keep_prob)\n\n rank = _rank(tensor)\n assert rank == 3, \"Use embedding lookup layer\"\n\n binary_mask = _apply_dropout_mask(tf.shape(tensor)[:2], keep_prob, normalize=False)\n binary_mask = tf.expand_dims(binary_mask, axis=-1) # proper broadcasting to zero out entire word vectors\n\n out = tensor * binary_mask\n return out\n\n\n@layer\ndef relu_layer(tensor):\n out = tf.nn.relu(tensor)\n return out\n\n\n@layer\ndef tanh_layer(tensor):\n out = tf.nn.tanh(tensor)\n return out\n\n\n@layer\ndef softmax_layer(tensor, softmax_func=None, **opts):\n if softmax_func is None:\n softmax_func = tf.nn.softmax\n\n out = softmax_func(tensor)\n return out\n\n\n@layer\ndef cross_entropy_layer(tensor, target, **opts):\n if _rank(tensor) > 1:\n target = tf.reshape(target, shape=(-1, ))\n\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=tensor, labels=target)\n mask = tf.cast(tf.not_equal(target, tf.zeros_like(target)), dtype=tf.float32)\n out = cross_entropy * mask\n return out\n\n\n@layer\ndef sigmoid_cross_entropy_layer(tensor, target, **opts):\n out = tf.nn.sigmoid_cross_entropy_with_logits(logits=tensor, labels=target)\n return out\n\n\n@layer\ndef mean_loss_by_example_layer(tensor, sequence_length, **opts):\n loss = tf.div(\n tf.reduce_sum(tensor, axis=1),\n tf.cast(sequence_length, dtype=tf.float32)\n )\n out = tf.reduce_mean(loss)\n tf.summary.scalar('cost', out)\n return out\n\n\n@layer\ndef conv1d_layer(tensor, dilation_rate=1, **opts):\n raise NotImplementedError\n\n\n@layer\ndef residual_layer(tensor, **opts):\n raise NotImplementedError\n\n\n@layer\ndef highway_layer(tensor, **opts):\n raise 
NotImplementedError\n\n\nif __name__ == \"__main__\":\n import numpy as np\n\n batch_size = 10\n sequence_length = 5\n vocab_size = 100\n embedding_dim = 32\n\n word_ids = np.random.randint(0, vocab_size, batch_size * sequence_length).reshape(batch_size, sequence_length)\n tensor = tf.constant(word_ids)\n\n # print(word_ids >> identity_layer() >> embedding_layer(vocab_size, embedding_dim))\n print(tensor >> identity_layer() >> embedding_layer(vocab_size, embedding_dim))\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.cond", "tensorflow.nn.dynamic_rnn", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.orthogonal_initializer", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.contrib.rnn.BasicRNNCell", "tensorflow.make_template", "tensorflow.summary.scalar", "numpy.random.randint", "tensorflow.Variable", "tensorflow.get_collection", "tensorflow.floor", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.nn.dropout", "tensorflow.reciprocal", "tensorflow.matmul", "tensorflow.shape", "tensorflow.identity", "tensorflow.nn.tanh", "tensorflow.zeros_like", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.nn.embedding_lookup", "tensorflow.nn.relu", "tensorflow.constant", "tensorflow.reduce_mean", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.reshape", "tensorflow.contrib.seq2seq.dynamic_rnn_decoder", "tensorflow.expand_dims", "tensorflow.random_uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
QuChemPedIA/scanlog
[ "55c9b31259725bcee60d3aff43ffaf2835286aac" ]
[ "src/scanlog/scanlog.py" ]
[ "import os\nimport sys\nimport json\nimport pickle\nimport hashlib\nimport traceback\n\nimport cclib\nimport openbabel.pybel as pybel\nimport numpy as np\nimport openbabel as ob\nimport scipy.sparse as sp\nimport sklearn.preprocessing\n\n# constants\nCstBohr2Ang = 0.52917721092\nCstHartree2eV = 27.21138505\nCstHartree2cm1 = 219474.6313708\nscanlog_version = \"1.0.2\"\n\n\"\"\" Scanlog Exception class.\n\"\"\"\nclass ScanlogException(Exception):\n pass\n\n\"\"\"Redefining nuclear_repulsion_energy with 5 decimals of precision on coords.\n\"\"\"\ndef nuclear_repulsion_energy(data, slice_id=-1):\n nre = 0.0\n for i in range(data.natom):\n ri = np.array([float(\"%.5f\" % k) for k in data.atomcoords[slice_id][i]])\n zi = data.atomnos[i]\n for j in range(i + 1, data.natom):\n rj = np.array([float(\"%.5f\" % k) for k in data.atomcoords[slice_id][j]])\n zj = data.atomnos[j]\n d = np.linalg.norm(ri - rj)\n nre += zi * zj / d\n return float(\"%.5f\" % (nre * CstBohr2Ang))\n\n\"\"\"Utility function to simplify data recording from dict or other object.\n\"\"\"\ndef _try_key_insertion(res_json, key, obj, obj_path=[], nullable=True):\n # case : dictionary\n if obj.__class__ == dict :\n try:\n if obj_path:\n d = obj.copy()\n for k in obj_path:\n d = d[k]\n res_json[key] = d\n except Exception as e:\n if not nullable:\n raise ScanlogException(\"Fatal : error occured for required key %s\" % key)\n # else error occured but key is not required\n # case : simple object\n elif obj != 'N/A':\n res_json[key] = obj\n elif not nullable: \n raise ScanlogException(\"Fatal : key %s is N/A but is required\" % key)\n # else obj is 'N/A' ans is ignored\n \ndef general_param_subsection(res_json, data_json, data, obdata):\n res_json[\"comp_details\"][\"general\"] = {}\n section = res_json[\"comp_details\"][\"general\"]\n\n try:\n all_unique_theory = np.unique(data.metadata['methods'])\n if len(all_unique_theory) > 1:\n theo_array = np.array(data.metadata['methods'])\n _, theo_indices = np.unique(theo_array, return_index=True)\n theo_indices.sort()\n theo_array = theo_array[theo_indices]\n else:\n theo_array = all_unique_theory\n except:\n theo_array = 'N/A'\n if theo_array.__class__ != str :\n if len(theo_array) > 0:\n theo_array = theo_array.tolist() if (theo_array != 'N/A').any() else 'N/A'\n else:\n theo_array = 'N/A'\n if len(all_unique_theory) > 0:\n all_unique_theory = all_unique_theory.tolist() if (all_unique_theory != 'N/A').any() else 'N/A'\n else:\n all_unique_theory = 'N/A'\n methods = data.metadata.get('methods', ['N/A'])\n\n _try_key_insertion(section, \"package\", data.metadata, ['package'])\n _try_key_insertion(section, \"package_version\", data.metadata, ['package_version'])\n _try_key_insertion(section, \"all_unique_theory\", all_unique_theory)\n if len(methods) > 0:\n _try_key_insertion(section, \"last_theory\", methods[-1])\n _try_key_insertion(section, \"list_theory\", theo_array)\n _try_key_insertion(section, \"functional\", data.metadata, ['functional'])\n _try_key_insertion(section, \"basis_set_name\", data.metadata, ['basis_set'])\n # basis set Pickle version\n try:\n basis_str = pickle.dumps(data.gbasis, protocol=0)\n basis_hash = hashlib.md5(basis_str).hexdigest()\n _try_key_insertion(section, \"basis_set\", basis_str.decode())#\"%s\" % basis_str[2:-1])\n _try_key_insertion(section, \"basis_set_md5\", basis_hash)\n except:\n pass\n _try_key_insertion(section, \"basis_set_size\", data_json, ['properties', 'orbitals', 'basis number'])\n _try_key_insertion(section, \"ao_names\", data_json, 
['atoms', 'orbitals', 'names'])\n try:\n section[\"is_closed_shell\"] = repr(len(data.moenergies) == 1 \n or np.allclose(*data.moenergies, atol=1e-6))\n except:\n pass\n _try_key_insertion(section, \"integration_grid\", data_json, ['properties', 'integration grid'])\n _try_key_insertion(section, \"solvent\", data.metadata, ['solvent'])\n _try_key_insertion(section, \"solvent_reaction_field\", data.metadata, ['scrf'])\n _try_key_insertion(section, \"scf_targets\", data_json, ['optimization', 'scf', 'targets'])\n _try_key_insertion(section, \"core_electrons_per_atoms\", data_json, ['atoms', 'core electrons'])\n\ndef geometry_param_subsection(res_json, data_json, data, obdata):\n res_json[\"comp_details\"][\"geometry\"] = {}\n section = res_json[\"comp_details\"][\"geometry\"]\n _try_key_insertion(section, \"geometric_targets\", data_json, ['optimization', 'geometric targets'])\n\ndef freq_param_subsection(res_json, data_json, data, obdata):\n res_json[\"comp_details\"][\"freq\"] = {}\n section = res_json[\"comp_details\"][\"freq\"]\n _try_key_insertion(section, \"temperature\", data_json, ['properties', 'temperature'])\n _try_key_insertion(section, \"anharmonicity\", data_json, ['vibrations', 'anharmonicity constants'])\n # default is None because tested in import (Quchempedia)\n if \"anharmonicity\" not in res_json[\"comp_details\"][\"freq\"]:\n res_json[\"comp_details\"][\"freq\"][\"anharmonicity\"] = \"None\"\n\ndef td_param_subsection(res_json, data_json, data, obdata):\n res_json[\"comp_details\"][\"excited_states\"] = {}\n section = res_json[\"comp_details\"][\"excited_states\"]\n\n et_states = data_json.get('transitions', {}).get('electronic transitions', None)\n if et_states:\n section[\"nb_et_states\"] = len(et_states)\n ## TODO\n # res_json[\"comp_details\"][\"excited_states\"][\"TDA\"] = 'N/A' # TODO : test Tamm Damcoff approx.\n # res_json[\"comp_details\"][\"excited_states\"][\"et_sym_constraints\"] = 'N/A'\n # res_json[\"comp_details\"][\"excited_states\"][\"et_optimization\"] = 'N/A' # boolean (if optimization of ES)\n # res_json[\"comp_details\"][\"excited_states\"][\"opt_root_number\"] = 'N/A' # optimized ES number\n\ndef wavefunction_results_subsection(res_json, data_json, data, obdata, sparse=True):\n res_json[\"results\"][\"wavefunction\"] = {}\n section = res_json[\"results\"][\"wavefunction\"]\n\n _try_key_insertion(section, \"homo_indexes\", data_json, ['properties', 'orbitals', 'homos'])\n _try_key_insertion(section, \"MO_energies\", data_json, ['properties', 'orbitals', 'energies'])\n _try_key_insertion(section, \"MO_sym\", data_json, ['properties', 'orbitals', 'molecular orbital symmetry'])\n # MO_number, MO_energies, MO_sym, MO_coefs\n\n try:\n _try_key_insertion(section, \"MO_number\", data_json, ['properties', 'orbitals', 'MO number'],\n nullable=False) # not nullable in this context, exception catched.\n # TODO : Pb with energies, if NaN -> -inf\n data.moenergies[-1][np.isnan(data.moenergies[-1])] = -np.inf\n w_cut = np.where(data.moenergies[-1] > 10.)\n b_cut = min(max(w_cut[0][0] if len(w_cut[0]) > 0 else 0,\n data.homos.max() + 31),\n len(data.moenergies[-1]))\n _try_key_insertion(section, \"MO_number_kept\", int(b_cut))\n\n # prune energies and sym\n _try_key_insertion(section, \"MO_energies\", [moen[:b_cut] for moen in section[\"MO_energies\"]])\n _try_key_insertion(section, \"MO_sym\", [mosym[:b_cut] for mosym in section[\"MO_sym\"]])\n\n # compress and prune mocoeffs\n mo_coefs = []\n # take last mocoeffs (-2 with alpha/beta or -1)\n nb_coef = -2 if 
len(data.moenergies) == 2 else -1\n if sparse is True:\n threshold = 0.05 # compression with loss threshold\n for a in data.mocoeffs[nb_coef:]:\n # normalization\n a_ = sklearn.preprocessing.normalize(np.abs(a), norm='l1', copy=False)\n # indices of sorting and sorting\n a_argsort = a_.argsort(1)\n a_.sort(axis=1)\n az = np.where(a_.cumsum(axis=1) < threshold )\n # zeroing\n a[az[0], a_argsort[az]] = 0.\n a = a[:b_cut, :]\n # to sparse csr matrix\n acsr = sp.csr_matrix(a)\n # append tuple for the csr to mo_coefs\n mo_coefs.append( (acsr.data.tolist(), acsr.indices.tolist(), acsr.indptr.tolist()) )\n else:\n for a in data.mocoeffs[nb_coef:]:\n mo_coefs.append(a.tolist())\n\n # data insertion into JSON\n section[\"MO_coefs\"] = mo_coefs\n except Exception as e:\n # partial MO data (qc lvl2 takes the decision)\n pass\n _try_key_insertion(section, \"total_molecular_energy\", data_json, ['properties', 'energy', 'total'])\n # eV to Hartree conversion\n try: \n _try_key_insertion(section, \"total_molecular_energy\", section[\"total_molecular_energy\"] / CstHartree2eV)\n except:\n ## TODO : pb with SP\n pass # SP ? failure ?\n _try_key_insertion(section, \"Mulliken_partial_charges\", data_json, ['properties', 'partial charges', 'mulliken'])\n try:\n section[\"SCF_values\"] = data_json['optimization']['scf']['values'][-1][-1] \n except:\n pass\n _try_key_insertion(section, \"virial_ratio\", data_json, ['optimization', 'scf', 'virialratio'])\n ## TODO # _try_key_insertion(section, \"Hirshfeld_partial_charges\"] = 'N/A' # see scanlog\n # try:\n # section[\"Hirshfeld_partial_charges\"] = data.atomcharges[\"hirshfeld\"].tolist()\n # except:\n # pass\n\ndef geom_results_subsection(res_json, data_json, data, obdata):\n res_json[\"results\"][\"geometry\"] = {}\n section = res_json[\"results\"][\"geometry\"]\n\n _try_key_insertion(section, \"nuclear_repulsion_energy_from_xyz\", nuclear_repulsion_energy(data))\n _try_key_insertion(section, \"OPT_DONE\", data_json, ['optimization', 'done'])\n _try_key_insertion(section, \"elements_3D_coords_converged\", data_json, ['atoms', 'coords', '3d'])\n _try_key_insertion(section, \"geometric_values\", data_json, ['optimization', 'geometric values'])\n \ndef freq_results_subsection(res_json, data_json, data, obdata):\n res_json[\"results\"][\"freq\"] = {}\n section = res_json[\"results\"][\"freq\"]\n\n _try_key_insertion(section, \"entropy\", data_json, ['properties', 'entropy'])\n try:\n _try_key_insertion(section, \"entropy\", float(\"%.9f\" % section[\"entropy\"]))\n except:\n pass\n _try_key_insertion(section, \"enthalpy\", data_json, ['properties', 'enthalpy'])\n _try_key_insertion(section, \"free_energy\", data_json, ['properties', 'energy', 'free energy'])\n _try_key_insertion(section, \"zero_point_energy\", data_json, ['properties', 'zero point energy'])\n _try_key_insertion(section, \"electronic_thermal_energy\", data_json, ['properties', 'electronic thermal energy'])\n\n _try_key_insertion(section, \"vibrational_freq\", data_json, ['vibrations', 'frequencies'])\n _try_key_insertion(section, \"vibrational_int\", data_json, ['vibrations', 'intensities', 'IR'])\n # here NWChem\n try:\n section[\"polarizabilities\"] = data.polarizabilities[0].tolist()\n except:\n pass\n _try_key_insertion(section, \"vibrational_sym\", data_json, ['vibrations', 'vibration symmetry'])\n _try_key_insertion(section, \"vibration_disp\", data_json, ['vibrations', 'displacement'])\n # here Gaussian\n _try_key_insertion(section, \"vibrational_anharms\", data_json, ['vibrations', 
'anharmonicity constants'])\n _try_key_insertion(section, \"vibrational_raman\", data_json, ['vibrations', 'intensities', 'raman'])\n\ndef td_results_subsection(res_json, data_json, data, obdata):\n res_json[\"results\"][\"excited_states\"] = {}\n section = res_json[\"results\"][\"excited_states\"]\n\n _try_key_insertion(section, \"et_energies\", data_json, ['transitions', 'electronic transitions'])\n _try_key_insertion(section, \"et_oscs\", data_json, ['transitions', 'oscillator strength'])\n _try_key_insertion(section, \"et_sym\", data_json, ['transitions', 'symmetry'])\n _try_key_insertion(section, \"et_transitions\", data_json, ['transitions', 'one excited config'])\n # here NWChem\n _try_key_insertion(section, \"et_rot\", data_json, ['transitions', 'rotatory strength'])\n\ndef molecule_section(res_json, data_json, data, obdata, verbose=False):\n res_json[\"molecule\"] = {}\n section = res_json[\"molecule\"]\n\n if \"isChiral\" in dir(obdata.OBMol) and \"GetValence\" in obdata.atoms[0].OBAtom:\n # openbabel 2\n res_json[\"molecule\"][\"chirality\"] = obdata.OBMol.IsChiral()\n res_json[\"molecule\"][\"atoms_valence\"] = [at.OBAtom.GetValence() for at in obdata.atoms]\n else:\n # openbabel 3\n res_json[\"molecule\"][\"chirality\"] = obdata.OBMol.HasChiralityPerceived()\n # GetTotalValence() with implicit H, else GetExplicitValence()\n res_json[\"molecule\"][\"atoms_valence\"] = [at.OBAtom.GetTotalValence() for at in obdata.atoms]\n # Start OpenBabel (all are mandatory)\n try:\n res_json[\"molecule\"][\"inchi\"] = obdata.write(\"inchi\").strip() # remove trailing \\n\n res_json[\"molecule\"][\"smi\"] = obdata.write(\"smi\").split()[0]\n res_json[\"molecule\"][\"can\"] = obdata.write(\"can\").split()[0]\n res_json[\"molecule\"][\"monoisotopic_mass\"] = obdata.OBMol.GetExactMass() # in Dalton\n connectivity = {}\n connectivity[\"atom_pairs\"] = []\n connectivity[\"bond_orders\"] = []\n for i, a1 in enumerate(obdata.atoms):\n for j, a2 in enumerate(obdata.atoms):\n b = a1.OBAtom.GetBond(a2.OBAtom)\n if b is not None:\n connectivity[\"atom_pairs\"].append((i, j))\n connectivity[\"bond_orders\"].append(b.GetBondOrder())\n res_json[\"molecule\"][\"connectivity\"] = connectivity\n except:\n if verbose:\n traceback.print_exc() \n raise ScanlogException(\"Reading mandatory data failed (Openbabel)\")\n # End OpenBabel\n\n _try_key_insertion(section, \"formula\", data_json, ['formula'])\n # CRITICAL TODO formula versus inchi formula\n _try_key_insertion(section, \"nb_atoms\", data_json, ['properties', 'number of atoms'])\n _try_key_insertion(section, \"nb_heavy_atoms\", data_json, ['atoms', 'elements', 'heavy atom count'])\n _try_key_insertion(section, \"charge\", data_json, ['properties', 'charge'])\n _try_key_insertion(section, \"multiplicity\", data_json, ['properties', 'multiplicity'])\n _try_key_insertion(section, \"atoms_Z\", data_json, ['atoms', 'elements', 'number'])\n #_try_key_insertion(section, \"atoms_masses\", data.atommasses.tolist())\n #_try_key_insertion(section, \"nuclear_spins\", data.nuclearspins.tolist())\n #_try_key_insertion(section, \"atoms_Zeff\", data.atomzeff.tolist())\n #_try_key_insertion(section, \"nuclear_QMom\", data.nuclearqmom.tolist())\n #_try_key_insertion(section, \"nuclear_gfactors\", data.nucleargfactors.tolist())\n _try_key_insertion(section, \"starting_geometry\", data.atomcoords[0,:,:].tolist())\n ## TODO: pb with SP\n _try_key_insertion(section, \"starting_energy\", data_json, \n [\"optimization\", \"scf\", \"scf energies\"]) # in eV\n try:\n # eV to Hartree 
conversion\n _try_key_insertion(section, \"starting_energy\", section[\"starting_energy\"][0] / CstHartree2eV)\n except:\n pass # SP ?\n _try_key_insertion(section, \"starting_nuclear_repulsion\", nuclear_repulsion_energy(data, 0))\n\ndef parameters_section(res_json, data_json, data, obdata):\n res_json[\"comp_details\"] = {}\n # subsection : General parameters\n general_param_subsection(res_json, data_json, data, obdata)\n # subsection : Geometry \n geometry_param_subsection(res_json, data_json, data, obdata)\n # subsection : Thermochemistry and normal modes\n freq_param_subsection(res_json, data_json, data, obdata)\n # subsection : Excited states\n td_param_subsection(res_json, data_json, data, obdata)\n\ndef results_section(res_json, data_json, data, obdata, sparse=True):\n res_json[\"results\"] = {}\n # subsection : Wavefunction\n wavefunction_results_subsection(res_json, data_json, data, obdata, sparse=sparse)\n # subsection : Geometry\n geom_results_subsection(res_json, data_json, data, obdata)\n # subsection : Thermochemistry and normal modes\n freq_results_subsection(res_json, data_json, data, obdata)\n # subsection : Excited states\n td_results_subsection(res_json, data_json, data, obdata)\n\ndef metadata_section(logfile, res_json, data_json, data, obdata):\n res_json[\"metadata\"] = {}\n section = res_json[\"metadata\"]\n res_json[\"metadata\"][\"parser_version\"] = scanlog_version\n res_json[\"metadata\"][\"log_file\"] = os.path.basename(logfile)\n\ndef full_report(logfile, data_json, data, obdata, verbose=False, sparse=True):\n res_json = {}\n # section : Molecule\n molecule_section(res_json, data_json, data, obdata, verbose=verbose)\n # section : Computational details\n parameters_section(res_json, data_json, data, obdata)\n # section : Results\n results_section(res_json, data_json, data, obdata, sparse=sparse)\n # section : Metadata\n metadata_section(logfile, res_json, data_json, data, obdata)\n return res_json\n \ndef logfile_to_dict(logfile, verbose=False, sparse=True):\n # reading with cclib\n data = cclib.parser.ccopen(logfile).parse()\n data_json = json.loads(data.writejson())\n # openbabel sur XYZ\n obdata = pybel.readstring(\"xyz\", data.writexyz())\n # construct new dict \n return full_report(logfile, data_json, data, obdata, verbose=verbose, sparse=sparse)\n\n\ndef job_type_guess(res_json):\n job_type = []\n # TODO : verify for other solvers\n if ((res_json[\"comp_details\"][\"general\"][\"package\"] == \"Gaussian\") or \n (res_json[\"comp_details\"][\"general\"][\"package\"] == \"GAMESS\")): \n ### TODO: GAMESS not tested, accepted here only for Riken DB insertion purpose (only OPT)\n ### Note: tested and works for TD\n ### TODO: NWChem not tested\n if \"vibrational_freq\" in res_json[\"results\"][\"freq\"].keys():\n if \"nb_et_states\" in res_json[\"comp_details\"][\"excited_states\"].keys():\n job_type.append('FREQ_ES')\n else:\n job_type.append('FREQ')\n if \"geometric_targets\" in res_json[\"comp_details\"][\"geometry\"].keys(): # problem with STO\n if \"nb_et_states\" in res_json[\"comp_details\"][\"excited_states\"].keys():\n job_type.append('OPT_ES')\n else:\n job_type.append('OPT')\n elif \"nb_et_states\" in res_json[\"comp_details\"][\"excited_states\"].keys():\n job_type.append('TD')\n if len(job_type) == 0:\n job_type.append('SP')\n elif (res_json[\"comp_details\"][\"general\"][\"package\"] == \"NWChem\"):\n if \"vibrational_freq\" in res_json[\"results\"][\"freq\"].keys():\n job_type.append('FREQ')\n if \"geometric_targets\" in 
res_json[\"comp_details\"][\"geometry\"].keys(): # problem with STO\n job_type.append('OPT')\n if \"nb_et_states\" in res_json[\"comp_details\"][\"excited_states\"].keys():\n job_type.append('TD')\n if len(job_type) == 0:\n job_type.append('SP')\n # a SP can have MO\n res_json[\"metadata\"][\"discretizable\"] = \"False\"\n if \"MO_coefs\" in res_json[\"results\"][\"wavefunction\"].keys():\n res_json[\"metadata\"][\"discretizable\"] = \"True\"\n res_json[\"comp_details\"][\"general\"][\"job_type\"] = job_type\n\n\"\"\"Verify that Logfile is readable by cclib and extract solver.\n\"\"\"\ndef quality_check_lvl1(logfile, verbose=False):\n if verbose:\n print(\">>> START QC lvl1 <<<\")\n try:\n # reading with cclib\n data = cclib.parser.ccopen(logfile).parse()\n if verbose:\n print(\"OK\\n>>> END QC lvl1 <<<\\n\")\n except:\n raise ScanlogException(\"Quality check lvl1 failed : LOG file not readable (cclib failed on file %s).\" % logfile)\n solver = data.metadata['package']\n return solver\n\n\"\"\"Split Logfile.\n\"\"\"\ndef split_logfile(logfile, solver, log_storage_path=\"\", verbose=False):\n try:\n log_files = []\n # GAUSSIAN\n if verbose:\n print(\">>> SOLVER:\", solver)\n if solver == \"Gaussian\":\n TERM = \"Normal termination\"\n\n with open(logfile, 'r') as log_fd:\n lines = log_fd.readlines()\n nbl = len(lines)\n \n file_cnt = 0\n base_fname = os.path.basename(logfile).rsplit('.', 1)[0]\n log_pat = os.path.join(log_storage_path, \"%s_step_%s.log\" % (base_fname, \"%d\"))\n cur_log = log_pat % file_cnt\n if verbose:\n print(\">>> Processing\", cur_log, \"...\")\n cur_log_fd = open(cur_log, \"w\")\n # FLAG to add copyright at the beginning of each step.\n file_start = True\n\n for cur_l, line in enumerate(lines):\n if file_start:\n cur_log_fd.write(\" Copyright (c) 1988,1990,1992,1993,1995,1998,2003,2009,2013,\\n\")\n cur_log_fd.write(\" Gaussian, Inc. 
All Rights Reserved.\\n\")\n file_start = False\n \n cur_log_fd.write(line)\n if line.find(TERM) > -1 :\n file_start = True\n if verbose:\n print(\"=> \",line)\n log_files.append(cur_log)\n cur_log_fd.close()\n file_cnt += 1\n if nbl > (cur_l + 1) :\n cur_log = log_pat % file_cnt\n if verbose:\n print(\">>> Processing\", cur_log, \"...\")\n cur_log_fd = open(cur_log, \"w\")\n if not cur_log_fd.closed :\n cur_log_fd.close()\n elif solver == \"GAMESS\":\n ### TODO : GAMESS not tested, accepted here only for Riken\n ### DB insertion purpose (only OPT mono step)\n TERM = \"TERMINATED NORMALLY\"\n with open(logfile, 'r') as log_fd:\n lines = log_fd.readlines()\n for line in lines:\n if line.find(TERM) > -1 :\n if verbose:\n print(\"=> \",line)\n log_files.append(logfile)\n pass\n elif solver == \"NWChem\":\n ### TODO : NWChem not tested\n ### we assume normal termination already tested\n log_files.append(logfile)\n pass\n else: # unsupported solvers\n raise ScanlogException(\"Unsupported solver (%s).\" % solver)\n #log_files.append(logfile)\n \n if verbose:\n print(\">>> Steps :\", log_files, \"\\n\")\n return log_files\n except ScanlogException as err:\n raise err\n except:\n if verbose:\n traceback.print_exc()\n raise ScanlogException(\"File spliting failed.\")\n \n\n\"\"\"Check if logfile is archivable and candidate for a new entry.\n\"\"\" \ndef quality_check_lvl2(res_json, solver, verbose=False):\n qual = \"True\"\n qual2 = \"True\"\n # if not \"basis_set_md5\" in res_json[\"comp_details\"][\"general\"].keys():\n # qual = \"False\"\n if not \"total_molecular_energy\" in res_json[\"results\"][\"wavefunction\"].keys():\n qual = \"False\"\n # if OPT then res_json[\"results\"][\"wavefunction\"][\"MO_coefs\"] needed\n # if 'OPT' in res_json[\"comp_details\"][\"general\"][\"job_type\"]:\n # if \"MO_coefs\" in res_json[\"results\"][\"wavefunction\"].keys():\n # qual2 = \"True\"\n # # If only OPT then qual = False (not archivable) ???\n # if len(res_json[\"comp_details\"][\"general\"][\"job_type\"]) == 1:\n # qual = \"False\"\n # TODO : mandatory job_type, package & package_version for import policy\n res_json['metadata']['archivable'] = qual\n res_json['metadata']['archivable_for_new_entry'] = qual2\n if verbose:\n print(\">>> START QC lvl2 <<<\")\n print(\"File:\", res_json[\"metadata\"][\"log_file\"])\n print(\"Job type:\", res_json[\"comp_details\"][\"general\"][\"job_type\"])\n print(\"Archivable:\", res_json['metadata']['archivable'])\n print(\"Archivable for new entry:\", res_json['metadata']['archivable_for_new_entry'])\n print(\">>> END QC lvl2 <<<\\n\")\n\ndef process_logfile(logfile, log_storage_path=\"\", verbose=False, sparse=True):\n solver = quality_check_lvl1(logfile, verbose=verbose)\n log_files = split_logfile(logfile, solver, log_storage_path=log_storage_path, verbose=verbose)\n json_list = []\n for log in log_files:\n res_json = logfile_to_dict(log, verbose=verbose, sparse=sparse)\n job_type_guess(res_json) \n quality_check_lvl2(res_json, solver, verbose=verbose)\n json_list.append(res_json)\n return (log_files, json_list)\n\ndef process_logfile_list(logfilelist, log_storage_path=\"\", verbose=False, sparse=True):\n json_list = []\n log_files = []\n for logfile in logfilelist:\n l, j = process_logfile(logfile, \n log_storage_path=log_storage_path,\n verbose=verbose, sparse=sparse)\n json_list += j\n log_files += l\n \n return (log_files, json_list)\n\nif __name__ == \"__main__\" :\n log_files, json_list = process_logfile(sys.argv[1], log_storage_path=\"tmp\", verbose=True, 
sparse=True)\n if len(sys.argv) == 3:\n with open(sys.argv[2], \"w\") as fp:\n json.dump(json_list, fp)\n else:\n print(\">>> Successfully processed %d steps (over %d detected).\" % (len(json_list),\n len(log_files)))\n print(json.dumps(json_list))\n\n" ]
[ [ "numpy.allclose", "numpy.abs", "numpy.unique", "numpy.isnan", "numpy.linalg.norm", "scipy.sparse.csr_matrix", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
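The scanlog entry above calls a _try_key_insertion helper throughout, but its definition falls outside this excerpt. Judging from the call sites (sometimes (section, key, data_json, nested_key_path), sometimes (section, key, direct_value)), it plausibly behaves like the sketch below; the name comes from the code above, while the body is a hypothetical reconstruction.

def _try_key_insertion(section, key, source, key_path=None):
    """Hypothetical reconstruction: insert a value into section[key],
    silently skipping data the parsed log does not provide."""
    try:
        value = source
        if key_path is not None:
            # Walk the nested keys of the cclib JSON dict, e.g.
            # ['transitions', 'oscillator strength'].
            for k in key_path:
                value = value[k]
        section[key] = value
    except (KeyError, IndexError, TypeError):
        # Missing fields are normal for some job types (SP vs OPT vs TD).
        pass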
MaximeJumelle/pymc3
[ "4e695f635b2ead24e2e647651eadd2505ab1fa63" ]
[ "pymc3/tests/test_shared.py" ]
[ "import pymc3 as pm\nfrom .helpers import SeededTest\nimport numpy as np\nimport theano\n\n\nclass TestShared(SeededTest):\n def test_deterministic(self):\n with pm.Model() as model:\n data_values = np.array([.5, .4, 5, 2])\n X = theano.shared(np.asarray(data_values, dtype=theano.config.floatX), borrow=True)\n pm.Normal('y', 0, 1, observed=X)\n model.logp(model.test_point)\n\n def test_sample(self):\n x = np.random.normal(size=100)\n y = x + np.random.normal(scale=1e-2, size=100)\n\n x_pred = np.linspace(-3, 3, 200)\n\n x_shared = theano.shared(x)\n\n with pm.Model() as model:\n b = pm.Normal('b', 0., 10.)\n pm.Normal('obs', b * x_shared, np.sqrt(1e-2), observed=y)\n prior_trace0 = pm.sample_prior_predictive(1000)\n\n trace = pm.sample(1000, init=None, progressbar=False)\n pp_trace0 = pm.sample_ppc(trace, 1000)\n\n x_shared.set_value(x_pred)\n prior_trace1 = pm.sample_prior_predictive(1000)\n pp_trace1 = pm.sample_ppc(trace, 1000)\n\n assert prior_trace0['b'].shape == (1000,)\n assert prior_trace0['obs'].shape == (1000, 100)\n np.testing.assert_allclose(x, pp_trace0['obs'].mean(axis=0), atol=1e-1)\n\n assert prior_trace1['b'].shape == (1000,)\n assert prior_trace1['obs'].shape == (1000, 200)\n np.testing.assert_allclose(x_pred, pp_trace1['obs'].mean(axis=0), atol=1e-1)\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.random.normal", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
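The pymc3 test above exercises the shared-data pattern: swap new inputs into a theano.shared container and posterior-predictive samples follow the new shape. A minimal sketch of the same pattern, assuming the PyMC3 3.x API pinned in this snapshot (pm.sample_ppc was renamed pm.sample_posterior_predictive in later releases):

import numpy as np
import theano
import pymc3 as pm

x = np.random.normal(size=50)
y = 2.0 * x + np.random.normal(scale=0.1, size=50)
x_shared = theano.shared(x)

with pm.Model():
    b = pm.Normal('b', 0., 10.)
    pm.Normal('obs', b * x_shared, 0.1, observed=y)
    trace = pm.sample(500, progressbar=False)
    # Replace the inputs: predictions are now made at the 20 new points.
    x_shared.set_value(np.linspace(-3, 3, 20))
    ppc = pm.sample_ppc(trace, 200)

assert ppc['obs'].shape == (200, 20)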
espectre/gcn_clustering
[ "46b6a82d92c95ef1ed9482c5a997b3138ade4143" ]
[ "feeder/feeder_visualization.py" ]
[ "###################################################################\n# File Name: feeder.py\n# Author: Zhongdao Wang\n# mail: [email protected]\n# Created Time: Thu 06 Sep 2018 01:06:16 PM CST\n###################################################################\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport random\nimport torch\nimport torch.utils.data as data\nclass Feeder(data.Dataset):\n '''\n Generate a sub-graph from the feature graph centered at some node, \n and now the sub-graph has a fixed depth, i.e. 2\n '''\n def __init__(self, feat_path, knn_graph_path, label_path, seed=1, \n k_at_hop=[200,5], active_connection=5, train=True):\n np.random.seed(seed)\n random.seed(seed)\n self.features = np.load(feat_path)\n self.knn_graph = np.load(knn_graph_path)[:,:k_at_hop[0]+1]\n self.labels = np.load(label_path)\n self.num_samples = len(self.features)\n self.depth = len(k_at_hop)\n self.k_at_hop = k_at_hop\n self.active_connection = active_connection\n self.train = train\n assert np.mean(k_at_hop)>=active_connection\n\n def __len__(self):\n return self.num_samples\n\n def __getitem__(self, index):\n '''\n return the vertex feature and the adjacent matrix A, together \n with the indices of the center node and its 1-hop nodes\n '''\n # hops[0] for 1-hop neighbors, hops[1] for 2-hop neighbors\n hops = list()\n center_node = index \n hops.append(set(self.knn_graph[center_node][1:]))\n\n # Actually we dont need the loop since the depth is fixed here,\n # But we still remain the code for further revision\n for d in range(1,self.depth): \n hops.append(set())\n for h in hops[-2]:\n hops[-1].update(set(self.knn_graph[h][1:self.k_at_hop[d]+1]))\n\n \n hops_set = set([h for hop in hops for h in hop])\n hops_set.update([center_node,])\n unique_nodes_list = list(hops_set) \n unique_nodes_map = {j:i for i,j in enumerate(unique_nodes_list)}\n\n center_idx = torch.Tensor([unique_nodes_map[center_node],]).type(torch.long)\n one_hop_idcs = torch.Tensor([unique_nodes_map[i] for i in hops[0]]).type(torch.long)\n center_feat = torch.Tensor(self.features[center_node]).type(torch.float)\n feat = torch.Tensor(self.features[unique_nodes_list]).type(torch.float)\n feat = feat - center_feat\n \n max_num_nodes = self.k_at_hop[0] * (self.k_at_hop[1] + 1) + 1\n num_nodes = len(unique_nodes_list)\n A = torch.zeros(num_nodes, num_nodes)\n\n _, fdim = feat.shape\n feat = torch.cat([feat, torch.zeros(max_num_nodes - num_nodes, fdim)], dim=0)\n \n for node in unique_nodes_list:\n neighbors = self.knn_graph[node, 1:self.active_connection+1]\n for n in neighbors:\n if n in unique_nodes_list: \n A[unique_nodes_map[node], unique_nodes_map[n]] = 1\n A[unique_nodes_map[n], unique_nodes_map[node]] = 1\n\n D = A.sum(1, keepdim=True)\n A = A.div(D)\n A_ = torch.zeros(max_num_nodes,max_num_nodes)\n A_[:num_nodes,:num_nodes] = A\n\n \n labels = self.labels[np.asarray(unique_nodes_list)]\n labels = torch.from_numpy(labels).type(torch.long)\n #edge_labels = labels.expand(num_nodes,num_nodes).eq(\n # labels.expand(num_nodes,num_nodes).t())\n one_hop_labels = labels[one_hop_idcs]\n center_label = labels[center_idx]\n edge_labels = (center_label == one_hop_labels).long()\n \n if self.train:\n return (feat, A_, center_idx, one_hop_idcs), edge_labels, labels\n\n # Testing\n unique_nodes_list = torch.Tensor(unique_nodes_list)\n unique_nodes_list = torch.cat(\n [unique_nodes_list, torch.zeros(max_num_nodes-num_nodes)], dim=0)\n return(feat, A_, 
center_idx, one_hop_idcs, unique_nodes_list), edge_labels, labels\n\n\n\n" ]
[ [ "torch.Tensor", "numpy.random.seed", "torch.zeros", "numpy.asarray", "torch.from_numpy", "numpy.mean", "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
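A hedged usage sketch for the Feeder above, wired into a standard DataLoader; the .npy paths are placeholders, and the import path simply mirrors this file's location in the repo snapshot.

import torch.utils.data as data
from feeder.feeder_visualization import Feeder  # mirrors this file's path

dataset = Feeder('feats.npy', 'knn_graph.npy', 'labels.npy',
                 k_at_hop=[200, 5], active_connection=5, train=True)
loader = data.DataLoader(dataset, batch_size=32, shuffle=True)
(feat, A_, center_idx, one_hop_idcs), edge_labels, labels = next(iter(loader))
# feat: (32, 200*(5+1)+1, fdim) pivot-relative features, zero-padded
# A_:   (32, 1201, 1201) row-normalised adjacency of each 2-hop subgraph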
dashiellfryer/Axelrod
[ "82918011c54f51624d78ae53a7d13de6460def86", "0d684b3273d15e3e0ecf70be8e893fffc5277c84" ]
[ "axelrod/eigen.py", "axelrod/moran.py" ]
[ "\"\"\"\nCompute the principal eigenvector of a matrix using power iteration.\n\nSee also numpy.linalg.eig which calculates all the eigenvalues and\neigenvectors.\n\"\"\"\n\nfrom typing import Tuple\n\nimport numpy\n\n\ndef _normalise(nvec: numpy.ndarray) -> numpy.ndarray:\n \"\"\"Normalises the given numpy array.\"\"\"\n with numpy.errstate(invalid=\"ignore\"):\n result = nvec / numpy.sqrt((nvec @ nvec))\n return result\n\n\ndef _squared_error(vector_1: numpy.ndarray, vector_2: numpy.ndarray) -> float:\n \"\"\"Computes the squared error between two numpy arrays.\"\"\"\n diff = vector_1 - vector_2\n s = diff @ diff\n return numpy.sqrt(s)\n\n\ndef _power_iteration(mat: numpy.array, initial: numpy.ndarray) -> numpy.ndarray:\n \"\"\"\n Generator of successive approximations.\n\n Params\n ------\n mat: numpy.array\n The matrix to use for multiplication iteration\n initial: numpy.array, None\n The initial state. Will be set to numpy.array([1, 1, ...]) if None\n\n Yields\n ------\n Successive powers (mat ^ k) * initial\n \"\"\"\n\n vec = initial\n while True:\n vec = _normalise(numpy.dot(mat, vec))\n yield vec\n\n\ndef principal_eigenvector(\n mat: numpy.array, maximum_iterations=1000, max_error=1e-3\n) -> Tuple[numpy.ndarray, float]:\n \"\"\"\n Computes the (normalised) principal eigenvector of the given matrix.\n\n Params\n ------\n mat: numpy.array\n The matrix to use for multiplication iteration\n maximum_iterations: int, None\n The maximum number of iterations of the approximation\n max_error: float, 1e-8\n Exit criterion -- error threshold of the difference of successive steps\n\n Returns\n -------\n ndarray\n Eigenvector estimate for the input matrix\n float\n Eigenvalue corresonding to the returned eigenvector\n \"\"\"\n\n mat_ = numpy.array(mat)\n size = mat_.shape[0]\n initial = numpy.ones(size)\n\n # Power iteration\n if not maximum_iterations:\n maximum_iterations = float(\"inf\")\n last = initial\n for i, vector in enumerate(_power_iteration(mat, initial=initial)):\n if i > maximum_iterations:\n break\n if _squared_error(vector, last) < max_error:\n break\n last = vector\n # Compute the eigenvalue (Rayleigh quotient)\n eigenvalue = ((mat_ @ vector) @ vector) / (vector @ vector)\n # Liberate the eigenvalue from numpy\n eigenvalue = float(eigenvalue)\n return vector, eigenvalue\n", "\"\"\"Implementation of the Moran process on Graphs.\"\"\"\n\nimport random\nfrom collections import Counter\nfrom typing import Callable, List, Optional, Set, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom axelrod import EvolvablePlayer, DEFAULT_TURNS, Game, Player\n\nfrom .deterministic_cache import DeterministicCache\nfrom .graph import Graph, complete_graph\nfrom .match import Match\nfrom .random_ import randrange\n\n\ndef fitness_proportionate_selection(\n scores: List, fitness_transformation: Callable = None\n) -> int:\n \"\"\"Randomly selects an individual proportionally to score.\n\n Parameters\n ----------\n scores: Any sequence of real numbers\n fitness_transformation: A function mapping a score to a (non-negative) float\n\n Returns\n -------\n An index of the above list selected at random proportionally to the list\n element divided by the total.\n \"\"\"\n if fitness_transformation is None:\n csums = np.cumsum(scores)\n else:\n csums = np.cumsum([fitness_transformation(s) for s in scores])\n total = csums[-1]\n r = random.random() * total\n\n for i, x in enumerate(csums):\n if x >= r:\n break\n return i\n\n\nclass MoranProcess(object):\n def __init__(\n self,\n players: 
List[Player],\n turns: int = DEFAULT_TURNS,\n prob_end: float = None,\n noise: float = 0,\n game: Game = None,\n deterministic_cache: DeterministicCache = None,\n mutation_rate: float = 0.0,\n mode: str = \"bd\",\n interaction_graph: Graph = None,\n reproduction_graph: Graph = None,\n fitness_transformation: Callable = None,\n mutation_method=\"transition\",\n stop_on_fixation=True\n ) -> None:\n \"\"\"\n An agent based Moran process class. In each round, each player plays a\n Match with each other player. Players are assigned a fitness score by\n their total score from all matches in the round. A player is chosen to\n reproduce proportionally to fitness, possibly mutated, and is cloned.\n The clone replaces a randomly chosen player.\n\n If the mutation_rate is 0, the population will eventually fixate on\n exactly one player type. In this case a StopIteration exception is\n raised and the play stops. If the mutation_rate is not zero, then the\n process will iterate indefinitely, so mp.play() will never exit, and\n you should use the class as an iterator instead.\n\n When a player mutates it chooses a random player type from the initial\n population. This is not the only method yet emulates the common method\n in the literature.\n\n It is possible to pass interaction graphs and reproduction graphs to the\n Moran process. In this case, in each round, each player plays a\n Match with each neighboring player according to the interaction graph.\n Players are assigned a fitness score by their total score from all\n matches in the round. A player is chosen to reproduce proportionally to\n fitness, possibly mutated, and is cloned. The clone replaces a randomly\n chosen neighboring player according to the reproduction graph.\n\n Parameters\n ----------\n players\n turns:\n The number of turns in each pairwise interaction\n prob_end :\n The probability of a given turn ending a match\n noise:\n The background noise, if any. Randomly flips plays with probability\n `noise`.\n game: axelrod.Game\n The game object used to score matches.\n deterministic_cache:\n A optional prebuilt deterministic cache\n mutation_rate:\n The rate of mutation. 
Replicating players are mutated with\n probability `mutation_rate`\n mode:\n Birth-Death (bd) or Death-Birth (db)\n interaction_graph: Axelrod.graph.Graph\n The graph in which the replicators are arranged\n reproduction_graph: Axelrod.graph.Graph\n The reproduction graph, set equal to the interaction graph if not\n given\n fitness_transformation:\n A function mapping a score to a (non-negative) float\n mutation_method:\n A string indicating if the mutation method should be between original types (\"transition\")\n or based on the player's mutation method, if present (\"atomic\").\n stop_on_fixation:\n A bool indicating if the process should stop on fixation\n \"\"\"\n self.turns = turns\n self.prob_end = prob_end\n self.game = game\n self.noise = noise\n self.initial_players = players # save initial population\n self.players = [] # type: List\n self.populations = [] # type: List\n self.set_players()\n self.score_history = [] # type: List\n self.winning_strategy_name = None # type: Optional[str]\n self.mutation_rate = mutation_rate\n self.stop_on_fixation = stop_on_fixation\n m = mutation_method.lower()\n if m in [\"atomic\", \"transition\"]:\n self.mutation_method = m\n else:\n raise ValueError(\"Invalid mutation method {}\".format(mutation_method))\n assert (mutation_rate >= 0) and (mutation_rate <= 1)\n assert (noise >= 0) and (noise <= 1)\n mode = mode.lower()\n assert mode in [\"bd\", \"db\"]\n self.mode = mode\n if deterministic_cache is not None:\n self.deterministic_cache = deterministic_cache\n else:\n self.deterministic_cache = DeterministicCache()\n # Build the set of mutation targets\n # Determine the number of unique types (players)\n keys = set([str(p) for p in players])\n # Create a dictionary mapping each type to a set of representatives\n # of the other types\n d = dict()\n for p in players:\n d[str(p)] = p\n mutation_targets = dict()\n for key in sorted(keys):\n mutation_targets[key] = [v for (k, v) in sorted(d.items()) if k != key]\n self.mutation_targets = mutation_targets\n\n if interaction_graph is None:\n interaction_graph = complete_graph(len(players), loops=False)\n if reproduction_graph is None:\n reproduction_graph = Graph(\n interaction_graph.edges, directed=interaction_graph.directed\n )\n reproduction_graph.add_loops()\n # Check equal vertices\n v1 = interaction_graph.vertices\n v2 = reproduction_graph.vertices\n assert list(v1) == list(v2)\n self.interaction_graph = interaction_graph\n self.reproduction_graph = reproduction_graph\n self.fitness_transformation = fitness_transformation\n # Map players to graph vertices\n self.locations = sorted(interaction_graph.vertices)\n self.index = dict(zip(sorted(interaction_graph.vertices), range(len(players))))\n self.fixated = self.fixation_check()\n\n def set_players(self) -> None:\n \"\"\"Copy the initial players into the first population.\"\"\"\n self.players = []\n for player in self.initial_players:\n player.reset()\n self.players.append(player)\n self.populations = [self.population_distribution()]\n\n def mutate(self, index: int) -> Player:\n \"\"\"Mutate the player at index.\n\n Parameters\n ----------\n index:\n The index of the player to be mutated\n \"\"\"\n\n if self.mutation_method == \"atomic\":\n if not issubclass(self.players[index].__class__, EvolvablePlayer):\n raise TypeError(\"Player is not evolvable. 
Use a subclass of EvolvablePlayer.\")\n return self.players[index].mutate()\n\n # Assuming mutation_method == \"transition\"\n if self.mutation_rate > 0:\n # Choose another strategy at random from the initial population\n r = random.random()\n if r < self.mutation_rate:\n s = str(self.players[index])\n j = randrange(0, len(self.mutation_targets[s]))\n p = self.mutation_targets[s][j]\n return p.clone()\n # Just clone the player\n return self.players[index].clone()\n\n def death(self, index: int = None) -> int:\n \"\"\"\n Selects the player to be removed.\n\n Note that the in the birth-death case, the player that is reproducing\n may also be replaced. However in the death-birth case, this player will\n be excluded from the choices.\n\n Parameters\n ----------\n index:\n The index of the player to be removed\n \"\"\"\n if index is None:\n # Select a player to be replaced globally\n i = randrange(0, len(self.players))\n # Record internally for use in _matchup_indices\n self.dead = i\n else:\n # Select locally\n # index is not None in this case\n vertex = random.choice(\n sorted(self.reproduction_graph.out_vertices(self.locations[index]))\n )\n i = self.index[vertex]\n return i\n\n def birth(self, index: int = None) -> int:\n \"\"\"The birth event.\n\n Parameters\n ----------\n index:\n The index of the player to be copied\n \"\"\"\n # Compute necessary fitnesses.\n scores = self.score_all()\n if index is not None:\n # Death has already occurred, so remove the dead player from the\n # possible choices\n scores.pop(index)\n # Make sure to get the correct index post-pop\n j = fitness_proportionate_selection(\n scores, fitness_transformation=self.fitness_transformation\n )\n if j >= index:\n j += 1\n else:\n j = fitness_proportionate_selection(\n scores, fitness_transformation=self.fitness_transformation\n )\n return j\n\n def fixation_check(self) -> bool:\n \"\"\"\n Checks if the population is all of a single type\n\n Returns\n -------\n Boolean:\n True if fixation has occurred (population all of a single type)\n \"\"\"\n classes = set(str(p) for p in self.players)\n self.fixated = False\n if len(classes) == 1:\n # Set the winning strategy name variable\n self.winning_strategy_name = str(self.players[0])\n self.fixated = True\n return self.fixated\n\n def __next__(self) -> object:\n \"\"\"\n Iterate the population:\n\n - play the round's matches\n - chooses a player proportionally to fitness (total score) to reproduce\n - mutate, if appropriate\n - choose a player to be replaced\n - update the population\n\n Returns\n -------\n MoranProcess:\n Returns itself with a new population\n \"\"\"\n # Check the exit condition, that all players are of the same type.\n if self.stop_on_fixation and self.fixation_check():\n raise StopIteration\n if self.mode == \"bd\":\n # Birth then death\n j = self.birth()\n i = self.death(j)\n elif self.mode == \"db\":\n # Death then birth\n i = self.death()\n self.players[i] = None\n j = self.birth(i)\n # Mutate and/or replace player i with clone of player j\n self.players[i] = self.mutate(j)\n # Record population.\n self.populations.append(self.population_distribution())\n return self\n\n def _matchup_indices(self) -> Set[Tuple[int, int]]:\n \"\"\"\n Generate the matchup pairs.\n\n Returns\n -------\n indices:\n A set of 2 tuples of matchup pairs: the collection of all players\n who play each other.\n \"\"\"\n indices = set() # type: Set\n # For death-birth we only want the neighbors of the dead node\n # The other calculations are unnecessary\n if self.mode == \"db\":\n 
source = self.index[self.dead]\n self.dead = None\n sources = sorted(self.interaction_graph.out_vertices(source))\n else:\n # birth-death is global\n sources = sorted(self.locations)\n for i, source in enumerate(sources):\n for target in sorted(self.interaction_graph.out_vertices(source)):\n j = self.index[target]\n if (self.players[i] is None) or (self.players[j] is None):\n continue\n # Don't duplicate matches\n if ((i, j) in indices) or ((j, i) in indices):\n continue\n indices.add((i, j))\n return indices\n\n def score_all(self) -> List:\n \"\"\"Plays the next round of the process. Every player is paired up\n against every other player and the total scores are recorded.\n\n Returns\n -------\n scores:\n List of scores for each player\n \"\"\"\n N = len(self.players)\n scores = [0] * N\n for i, j in self._matchup_indices():\n player1 = self.players[i]\n player2 = self.players[j]\n match = Match(\n (player1, player2),\n turns=self.turns,\n prob_end=self.prob_end,\n noise=self.noise,\n game=self.game,\n deterministic_cache=self.deterministic_cache,\n )\n match.play()\n match_scores = match.final_score_per_turn()\n scores[i] += match_scores[0]\n scores[j] += match_scores[1]\n self.score_history.append(scores)\n return scores\n\n def population_distribution(self) -> Counter:\n \"\"\"Returns the population distribution of the last iteration.\n\n Returns\n -------\n counter:\n The counts of each strategy in the population of the last iteration\n \"\"\"\n player_names = [str(player) for player in self.players]\n counter = Counter(player_names)\n return counter\n\n def __iter__(self) -> object:\n \"\"\"\n Returns\n -------\n self\n \"\"\"\n return self\n\n def reset(self) -> None:\n \"\"\"Reset the process to replay.\"\"\"\n self.winning_strategy_name = None\n self.score_history = []\n # Reset all the players\n self.set_players()\n\n def play(self) -> List[Counter]:\n \"\"\"\n Play the process out to completion. If played with mutation this will\n not terminate.\n\n Returns\n -------\n populations:\n Returns a list of all the populations\n \"\"\"\n if not self.stop_on_fixation or self.mutation_rate != 0:\n raise ValueError(\n \"MoranProcess.play() will never exit if mutation_rate is\"\n \"nonzero or stop_on_fixation is False. 
Use iteration instead.\"\n )\n while True:\n try:\n self.__next__()\n except StopIteration:\n break\n return self.populations\n\n def __len__(self) -> int:\n \"\"\"\n Returns\n -------\n The length of the Moran process: the number of populations\n \"\"\"\n return len(self.populations)\n\n def populations_plot(self, ax=None):\n \"\"\"\n Create a stackplot of the population distributions at each iteration of\n the Moran process.\n\n Parameters\n ----------------\n ax: matplotlib axis\n Allows the plot to be written to a given matplotlib axis.\n Default is None.\n\n Returns\n -----------\n A matplotlib axis object\n\n \"\"\"\n player_names = self.populations[0].keys()\n if ax is None:\n _, ax = plt.subplots()\n else:\n ax = ax\n\n plot_data = []\n labels = []\n for name in player_names:\n labels.append(name)\n values = [counter[name] for counter in self.populations]\n plot_data.append(values)\n domain = range(len(values))\n\n ax.stackplot(domain, plot_data, labels=labels)\n ax.set_title(\"Moran Process Population by Iteration\")\n ax.set_xlabel(\"Iteration\")\n ax.set_ylabel(\"Number of Individuals\")\n ax.legend()\n return ax\n\n\nclass ApproximateMoranProcess(MoranProcess):\n \"\"\"\n A class to approximate a Moran process based\n on a distribution of potential Match outcomes.\n\n Instead of playing the matches, the result is sampled\n from a dictionary of player tuples to distribution of match outcomes\n \"\"\"\n\n def __init__(\n self, players: List[Player], cached_outcomes: dict, mutation_rate: float = 0\n ) -> None:\n \"\"\"\n Parameters\n ----------\n players:\n cached_outcomes:\n Mapping tuples of players to instances of the moran.Pdf class.\n mutation_rate:\n The rate of mutation. Replicating players are mutated with\n probability `mutation_rate`\n \"\"\"\n super(ApproximateMoranProcess, self).__init__(\n players,\n turns=0,\n noise=0,\n deterministic_cache=None,\n mutation_rate=mutation_rate,\n )\n self.cached_outcomes = cached_outcomes\n\n def score_all(self) -> List:\n \"\"\"Plays the next round of the process. Every player is paired up\n against every other player and the total scores are obtained from the\n cached outcomes.\n\n Returns\n -------\n scores:\n List of scores for each player\n \"\"\"\n N = len(self.players)\n scores = [0] * N\n for i in range(N):\n for j in range(i + 1, N):\n player_names = tuple([str(self.players[i]), str(self.players[j])])\n\n cached_score = self._get_scores_from_cache(player_names)\n scores[i] += cached_score[0]\n scores[j] += cached_score[1]\n self.score_history.append(scores)\n return scores\n\n def _get_scores_from_cache(self, player_names: Tuple) -> Tuple:\n \"\"\"\n Retrieve the scores from the players in the cache\n\n Parameters\n ----------\n player_names:\n The names of the players\n\n Returns\n -------\n scores:\n The scores of the players in that particular match\n \"\"\"\n try:\n match_scores = self.cached_outcomes[player_names].sample()\n return match_scores\n except KeyError: # If players are stored in opposite order\n match_scores = self.cached_outcomes[player_names[::-1]].sample()\n return match_scores[::-1]\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.ones", "numpy.errstate", "numpy.array" ], [ "matplotlib.pyplot.subplots", "numpy.cumsum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
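eigen.py above estimates the principal eigenvector by normalised repeated multiplication and recovers the eigenvalue with a Rayleigh quotient. A self-contained numpy check of that idea (plain numpy, not the Axelrod API), verified against numpy.linalg.eig:

import numpy as np

mat = np.array([[2.0, 1.0],
                [1.0, 3.0]])
vec = np.ones(2)
for _ in range(100):
    vec = mat @ vec
    vec = vec / np.sqrt(vec @ vec)              # normalise, as _normalise() does
eigenvalue = ((mat @ vec) @ vec) / (vec @ vec)  # Rayleigh quotient

w, _ = np.linalg.eig(mat)
assert np.isclose(eigenvalue, w.max())          # dominant eigenvalue matches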
BlueBrain/morphology-workflows
[ "629be1d2e68de33820f687a5e9018e4fb420e13f" ]
[ "tests/examples_test/get_dataset.py" ]
[ "\"\"\"Create the dataset.csv from some test morphologies.\"\"\"\nfrom pathlib import Path\n\nimport pandas as pd\n\nif __name__ == \"__main__\":\n dataset = pd.DataFrame(columns=[\"morph_path\", \"mtype\"])\n dataset.index.name = \"morph_name\"\n morph_path = Path(\"morphologies\")\n for morph in morph_path.iterdir():\n if morph.suffix in [\".asc\", \".h5\", \".swc\"]:\n dataset.loc[morph.stem, \"morph_path\"] = morph\n dataset.loc[morph.stem, \"mtype\"] = \"L1_AAA:C\"\n dataset.loc[\"AA0319\", \"mtype\"] = \"L6_TPC:A\"\n dataset.loc[\"rp100427-123_idC\", \"mtype\"] = \"L4_UPC\"\n dataset.loc[\"C270106A\", \"mtype\"] = \"L1_DAC\"\n dataset.sort_index(inplace=True)\n dataset.reset_index().to_csv(\"dataset.csv\", index=False)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
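A small sketch of reading back the dataset.csv that the script above writes, assuming the script has already run over a morphologies/ folder containing the named cells:

import pandas as pd

dataset = pd.read_csv("dataset.csv", index_col="morph_name")
print(dataset.loc["C270106A", "mtype"])  # "L1_DAC", per the override above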
AliBaheri/Cornell-MOE
[ "5c36a1c60eecfeea6e45c485179b671e12f07ad9" ]
[ "moe/optimal_learning/python/cpp_wrappers/knowledge_gradient.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Tools to compute KnowledgeGradient and optimize the next best point(s) to sample using KG through C++ calls.\n\nThis file contains a class to compute Knowledge Gradient + derivatives and a functions to solve the q,p-KG optimization problem.\nThe :class:`moe.optimal_learning.python.cpp_wrappers.knowledge_gradient.KnowledgeGradient`\nThe optimization functions are convenient wrappers around the matching C++ calls.\n\nSee gpp_knowledge_gradient_optimization.hpp/cpp for further details on knowledge gradient.\n\n\"\"\"\nimport numpy\n\nimport moe.build.GPP as C_GP\nfrom moe.optimal_learning.python.constant import DEFAULT_EXPECTED_IMPROVEMENT_MC_ITERATIONS, DEFAULT_MAX_NUM_THREADS\nimport moe.optimal_learning.python.cpp_wrappers.cpp_utils as cpp_utils\nfrom moe.optimal_learning.python.interfaces.optimization_interface import OptimizableInterface\n\n\ndef posterior_mean_optimization(\n ps_optimizer,\n initial_guess,\n randomness=None,\n max_num_threads=DEFAULT_MAX_NUM_THREADS,\n status=None,\n):\n \"\"\"Solve the q,p-KG problem, returning the optimal set of q points to sample CONCURRENTLY in future experiments.\n\n .. NOTE:: The following comments are copied from gpp_math.hpp, ComputeOptimalPointsToSample().\n These comments are copied into\n :func:`moe.optimal_learning.python.python_version.expected_improvement.multistart_expected_improvement_optimization`\n\n This is the primary entry-point for EI optimization in the optimal_learning library. It offers our best shot at\n improving robustness by combining higher accuracy methods like gradient descent with fail-safes like random/grid search.\n\n Returns the optimal set of q points to sample CONCURRENTLY by solving the q,p-EI problem. That is, we may want to run 4\n experiments at the same time and maximize the EI across all 4 experiments at once while knowing of 2 ongoing experiments\n (4,2-EI). This function handles this use case. Evaluation of q,p-EI (and its gradient) for q > 1 or p > 1 is expensive\n (requires monte-carlo iteration), so this method is usually very expensive.\n\n Compared to ComputeHeuristicPointsToSample() (``gpp_heuristic_expected_improvement_optimization.hpp``), this function\n makes no external assumptions about the underlying objective function. Instead, it utilizes a feature of the\n GaussianProcess that allows the GP to account for ongoing/incomplete experiments.\n\n If ``num_to_sample = 1``, this is the same as ComputeOptimalPointsToSampleWithRandomStarts().\n\n The option of using GPU to compute general q,p-EI via MC simulation is also available. 
To enable it, make sure you have\n installed GPU components of MOE, otherwise, it will throw Runtime excpetion.\n\n :param kg_optimizer: object that optimizes (e.g., gradient descent, newton) EI over a domain\n :type kg_optimizer: cpp_wrappers.optimization.*Optimizer object\n :param num_multistarts: number of times to multistart ``ei_optimizer`` (UNUSED, data is in ei_optimizer.optimizer_parameters)\n :type num_multistarts: int > 0\n :param num_to_sample: how many simultaneous experiments you would like to run (i.e., the q in q,p-EI)\n :type num_to_sample: int >= 1\n :param use_gpu: set to True if user wants to use GPU for MC simulation\n :type use_gpu: bool\n :param which_gpu: GPU device ID\n :type which_gpu: int >= 0\n :param randomness: RNGs used by C++ to generate initial guesses and as the source of normal random numbers when monte-carlo is used\n :type randomness: RandomnessSourceContainer (C++ object; e.g., from C_GP.RandomnessSourceContainer())\n :param max_num_threads: maximum number of threads to use, >= 1\n :type max_num_threads: int > 0\n :param status: (output) status messages from C++ (e.g., reporting on optimizer success, etc.)\n :type status: dict\n :return: point(s) that maximize the knowledge gradient (solving the q,p-KG problem)\n :rtype: array of float64 with shape (num_to_sample, ei_optimizer.objective_function.dim)\n\n \"\"\"\n # Create enough randomness sources if none are specified.\n if randomness is None:\n randomness = C_GP.RandomnessSourceContainer(max_num_threads)\n # Set seeds based on less repeatable factors (e.g,. time)\n randomness.SetRandomizedUniformGeneratorSeed(0)\n randomness.SetRandomizedNormalRNGSeed(0)\n\n # status must be an initialized dict for the call to C++.\n if status is None:\n status = {}\n\n best_points_to_sample = C_GP.posterior_mean_optimization(\n ps_optimizer.objective_function._gaussian_process._gaussian_process,\n ps_optimizer.objective_function._num_fidelity,\n ps_optimizer.optimizer_parameters,\n cpp_utils.cppify(ps_optimizer.domain.domain_bounds),\n cpp_utils.cppify(initial_guess),\n status,\n )\n\n # reform output to be a list of dim-dimensional points, dim = len(self.domain)\n return numpy.array(best_points_to_sample)\n\nclass PosteriorMean(OptimizableInterface):\n def __init__(\n self,\n gaussian_process,\n num_fidelity,\n points_to_sample=None,\n randomness=None,\n ):\n self._gaussian_process = gaussian_process\n self._num_fidelity = num_fidelity\n\n if points_to_sample is None:\n self._points_to_sample = numpy.zeros((1, self._gaussian_process.dim))\n\n if randomness is None:\n self._randomness = C_GP.RandomnessSourceContainer(1) # create randomness for only 1 thread\n # Set seed based on less repeatable factors (e.g,. 
time)\n self._randomness.SetRandomizedUniformGeneratorSeed(0)\n self._randomness.SetRandomizedNormalRNGSeed(0)\n else:\n self._randomness = randomness\n\n self.objective_type = None # Not used for KG, but the field is expected in C++\n\n @property\n def dim(self):\n \"\"\"Return the number of spatial dimensions.\"\"\"\n return self._gaussian_process.dim\n\n @property\n def problem_size(self):\n \"\"\"Return the number of independent parameters to optimize.\"\"\"\n return self.dim-self._num_fidelity\n\n def get_current_point(self):\n \"\"\"Get the current_point (array of float64 with shape (problem_size)) at which this object is evaluating the objective function, ``f(x)``.\"\"\"\n return numpy.copy(self._points_to_sample)\n\n def set_current_point(self, points_to_sample):\n \"\"\"Set current_point to the specified point; ordering must match.\n :param points_to_sample: current_point at which to evaluate the objective function, ``f(x)``\n :type points_to_sample: array of float64 with shape (problem_size)\n \"\"\"\n self._points_to_sample = numpy.copy(numpy.atleast_2d(points_to_sample))\n\n current_point = property(get_current_point, set_current_point)\n\n def compute_posterior_mean(self, force_monte_carlo=False):\n r\"\"\"Compute the knowledge gradient at ``points_to_sample``, with ``points_being_sampled`` concurrent points being sampled.\n\n .. Note:: These comments were copied from\n :meth:`moe.optimal_learning.python.interfaces.expected_improvement_interface.ExpectedImprovementInterface.compute_expected_improvement`\n\n ``points_to_sample`` is the \"q\" and ``points_being_sampled`` is the \"p\" in q,p-EI.\n\n Computes the knowledge gradient ``EI(Xs) = E_n[[f^*_n(X) - min(f(Xs_1),...,f(Xs_m))]^+]``, where ``Xs``\n are potential points to sample (union of ``points_to_sample`` and ``points_being_sampled``) and ``X`` are\n already sampled points. The ``^+`` indicates that the expression in the expectation evaluates to 0 if it\n is negative. ``f^*(X)`` is the MINIMUM over all known function evaluations (``points_sampled_value``),\n whereas ``f(Xs)`` are *GP-predicted* function evaluations.\n\n In words, we are computing the knowledge gradient (over the current ``best_so_far``, best known\n objective function value) that would result from sampling (aka running new experiments) at\n ``points_to_sample`` with ``points_being_sampled`` concurrent/ongoing experiments.\n\n In general, the EI expression is complex and difficult to evaluate; hence we use Monte-Carlo simulation to approximate it.\n When faster (e.g., analytic) techniques are available, we will prefer them.\n\n The idea of the MC approach is to repeatedly sample at the union of ``points_to_sample`` and\n ``points_being_sampled``. This is analogous to gaussian_process_interface.sample_point_from_gp,\n but we sample ``num_union`` points at once:\n ``y = \\mu + Lw``\n where ``\\mu`` is the GP-mean, ``L`` is the ``chol_factor(GP-variance)`` and ``w`` is a vector\n of ``num_union`` draws from N(0, 1). 
Then:\n ``improvement_per_step = max(max(best_so_far - y), 0.0)``\n Observe that the inner ``max`` means only the smallest component of ``y`` contributes in each iteration.\n We compute the improvement over many random draws and average.\n\n :param force_monte_carlo: whether to force monte carlo evaluation (vs using fast/accurate analytic eval when possible)\n :type force_monte_carlo: boolean\n :return: the knowledge gradient from sampling ``points_to_sample`` with ``points_being_sampled`` concurrent experiments\n :rtype: float64\n\n \"\"\"\n return C_GP.compute_posterior_mean(\n self._gaussian_process._gaussian_process,\n self._num_fidelity,\n cpp_utils.cppify(self._points_to_sample),\n )\n\n compute_objective_function = compute_posterior_mean\n\n def compute_grad_posterior_mean(self, force_monte_carlo=False):\n r\"\"\"Compute the gradient of knowledge gradient at ``points_to_sample`` wrt ``points_to_sample``, with ``points_being_sampled`` concurrent samples.\n\n .. Note:: These comments were copied from\n :meth:`moe.optimal_learning.python.interfaces.expected_improvement_interface.ExpectedImprovementInterface.compute_grad_expected_improvement`\n\n ``points_to_sample`` is the \"q\" and ``points_being_sampled`` is the \"p\" in q,p-EI.\n\n In general, the expressions for gradients of EI are complex and difficult to evaluate; hence we use\n Monte-Carlo simulation to approximate it. When faster (e.g., analytic) techniques are available, we will prefer them.\n\n The MC computation of grad EI is similar to the computation of EI (decsribed in\n compute_expected_improvement). We differentiate ``y = \\mu + Lw`` wrt ``points_to_sample``;\n only terms from the gradient of ``\\mu`` and ``L`` contribute. In EI, we computed:\n ``improvement_per_step = max(max(best_so_far - y), 0.0)``\n and noted that only the smallest component of ``y`` may contribute (if it is > 0.0).\n Call this index ``winner``. Thus in computing grad EI, we only add gradient terms\n that are attributable to the ``winner``-th component of ``y``.\n\n :param force_monte_carlo: whether to force monte carlo evaluation (vs using fast/accurate analytic eval when possible)\n :type force_monte_carlo: boolean\n :return: gradient of EI, ``\\pderiv{EI(Xq \\cup Xp)}{Xq_{i,d}}`` where ``Xq`` is ``points_to_sample``\n and ``Xp`` is ``points_being_sampled`` (grad EI from sampling ``points_to_sample`` with\n ``points_being_sampled`` concurrent experiments wrt each dimension of the points in ``points_to_sample``)\n :rtype: array of float64 with shape (num_to_sample, dim)\n\n \"\"\"\n grad_kg = C_GP.compute_grad_posterior_mean(\n self._gaussian_process._gaussian_process,\n self._num_fidelity,\n cpp_utils.cppify(self._points_to_sample),\n )\n return cpp_utils.uncppify(grad_kg, (1, self.dim-self._num_fidelity))\n\n compute_grad_objective_function = compute_grad_posterior_mean\n\n def compute_hessian_objective_function(self, **kwargs):\n \"\"\"We do not currently support computation of the (spatial) hessian of knowledge gradient.\"\"\"\n raise NotImplementedError('Currently we cannot compute the hessian of knowledge gradient.')\n\ndef multistart_knowledge_gradient_optimization(\n kg_optimizer,\n inner_optimizer,\n num_multistarts,\n discrete_pts,\n num_to_sample,\n num_pts,\n randomness=None,\n max_num_threads=DEFAULT_MAX_NUM_THREADS,\n status=None,\n):\n \"\"\"Solve the q,p-KG problem, returning the optimal set of q points to sample CONCURRENTLY in future experiments.\n\n .. 
NOTE:: The following comments are copied from gpp_math.hpp, ComputeOptimalPointsToSample().\n These comments are copied into\n :func:`moe.optimal_learning.python.python_version.expected_improvement.multistart_expected_improvement_optimization`\n\n This is the primary entry-point for EI optimization in the optimal_learning library. It offers our best shot at\n improving robustness by combining higher accuracy methods like gradient descent with fail-safes like random/grid search.\n\n Returns the optimal set of q points to sample CONCURRENTLY by solving the q,p-EI problem. That is, we may want to run 4\n experiments at the same time and maximize the EI across all 4 experiments at once while knowing of 2 ongoing experiments\n (4,2-EI). This function handles this use case. Evaluation of q,p-EI (and its gradient) for q > 1 or p > 1 is expensive\n (requires monte-carlo iteration), so this method is usually very expensive.\n\n Compared to ComputeHeuristicPointsToSample() (``gpp_heuristic_expected_improvement_optimization.hpp``), this function\n makes no external assumptions about the underlying objective function. Instead, it utilizes a feature of the\n GaussianProcess that allows the GP to account for ongoing/incomplete experiments.\n\n If ``num_to_sample = 1``, this is the same as ComputeOptimalPointsToSampleWithRandomStarts().\n\n The option of using GPU to compute general q,p-EI via MC simulation is also available. To enable it, make sure you have\n installed GPU components of MOE, otherwise, it will throw Runtime excpetion.\n\n :param kg_optimizer: object that optimizes (e.g., gradient descent, newton) EI over a domain\n :type kg_optimizer: cpp_wrappers.optimization.*Optimizer object\n :param num_multistarts: number of times to multistart ``ei_optimizer`` (UNUSED, data is in ei_optimizer.optimizer_parameters)\n :type num_multistarts: int > 0\n :param num_to_sample: how many simultaneous experiments you would like to run (i.e., the q in q,p-EI)\n :type num_to_sample: int >= 1\n :param use_gpu: set to True if user wants to use GPU for MC simulation\n :type use_gpu: bool\n :param which_gpu: GPU device ID\n :type which_gpu: int >= 0\n :param randomness: RNGs used by C++ to generate initial guesses and as the source of normal random numbers when monte-carlo is used\n :type randomness: RandomnessSourceContainer (C++ object; e.g., from C_GP.RandomnessSourceContainer())\n :param max_num_threads: maximum number of threads to use, >= 1\n :type max_num_threads: int > 0\n :param status: (output) status messages from C++ (e.g., reporting on optimizer success, etc.)\n :type status: dict\n :return: point(s) that maximize the knowledge gradient (solving the q,p-KG problem)\n :rtype: array of float64 with shape (num_to_sample, ei_optimizer.objective_function.dim)\n\n \"\"\"\n # Create enough randomness sources if none are specified.\n if randomness is None:\n randomness = C_GP.RandomnessSourceContainer(max_num_threads)\n # Set seeds based on less repeatable factors (e.g,. 
time)\n randomness.SetRandomizedUniformGeneratorSeed(0)\n randomness.SetRandomizedNormalRNGSeed(0)\n\n # status must be an initialized dict for the call to C++.\n if status is None:\n status = {}\n\n best_points_to_sample = C_GP.multistart_knowledge_gradient_optimization(\n kg_optimizer.optimizer_parameters,\n inner_optimizer.optimizer_parameters,\n kg_optimizer.objective_function._gaussian_process._gaussian_process,\n kg_optimizer.objective_function._num_fidelity,\n cpp_utils.cppify(kg_optimizer.domain.domain_bounds),\n cpp_utils.cppify(discrete_pts),\n cpp_utils.cppify(kg_optimizer.objective_function._points_being_sampled),\n num_pts, num_to_sample,\n kg_optimizer.objective_function.num_being_sampled,\n kg_optimizer.objective_function._best_so_far,\n kg_optimizer.objective_function._num_mc_iterations,\n max_num_threads,\n randomness,\n status,\n )\n\n # reform output to be a list of dim-dimensional points, dim = len(self.domain)\n return cpp_utils.uncppify(best_points_to_sample, (num_to_sample, kg_optimizer.objective_function.dim))\n\nclass KnowledgeGradient(OptimizableInterface):\n\n r\"\"\"Implementation of knowledge gradient computation via C++ wrappers: EI and its gradient at specified point(s) sampled from a GaussianProcess.\n\n A class to encapsulate the computation of knowledge gradient and its spatial gradient using points sampled from an\n associated GaussianProcess. The general EI computation requires monte-carlo integration; it can support q,p-EI optimization.\n It is designed to work with any GaussianProcess.\n\n .. Note:: Equivalent methods of ExpectedImprovementInterface and OptimizableInterface are aliased below (e.g.,\n compute_expected_improvement and compute_objective_function, etc).\n\n See :mod:`moe.optimal_learning.python.interfaces.expected_improvement_interface` docs for further details.\n\n \"\"\"\n\n def __init__(\n self,\n gaussian_process,\n num_fidelity,\n inner_optimizer,\n discrete_pts,\n points_to_sample=None,\n points_being_sampled=None,\n num_mc_iterations=DEFAULT_EXPECTED_IMPROVEMENT_MC_ITERATIONS,\n randomness=None,\n ):\n \"\"\"Construct a KnowledgeGradient object that supports q,p-KG.\n TODO(GH-56): Allow callers to pass in a source of randomness.\n :param gaussian_process: GaussianProcess describing\n :type gaussian_process: interfaces.gaussian_process_interface.GaussianProcessInterface subclass\n :param discrete_pts: a discrete set of points to approximate the KG\n :type discrete_pts: array of float64 with shape (num_pts, dim)\n :param noise: measurement noise\n :type noise: float64\n :param points_to_sample: points at which to evaluate KG and/or its gradient to check their value in future experiments (i.e., \"q\" in q,p-KG)\n :type points_to_sample: array of float64 with shape (num_to_sample, dim)\n :param points_being_sampled: points being sampled in concurrent experiments (i.e., \"p\" in q,p-KG)\n :type points_being_sampled: array of float64 with shape (num_being_sampled, dim)\n :param num_mc_iterations: number of monte-carlo iterations to use (when monte-carlo integration is used to compute KG)\n :type num_mc_iterations: int > 0\n :param randomness: random source(s) used for monte-carlo integration (when applicable) (UNUSED)\n :type randomness: (UNUSED)\n \"\"\"\n self._num_mc_iterations = num_mc_iterations\n self._gaussian_process = gaussian_process\n self._num_fidelity = num_fidelity\n self._inner_optimizer = inner_optimizer\n\n # self._num_derivatives = gaussian_process._historical_data.num_derivatives\n\n self._discrete_pts = 
numpy.copy(discrete_pts)\n\n full_points = numpy.zeros((discrete_pts.shape[0], discrete_pts.shape[1]+num_fidelity))\n for i, point in enumerate(discrete_pts):\n full_points[i, :discrete_pts.shape[1]] = numpy.array(point)\n full_points[i, discrete_pts.shape[1]:] = 1.0\n\n self._mu_star = self._gaussian_process.compute_mean_of_additional_points(full_points)\n\n self._best_so_far = numpy.amin(self._mu_star)\n\n if points_being_sampled is None:\n self._points_being_sampled = numpy.array([])\n else:\n self._points_being_sampled = numpy.copy(points_being_sampled)\n\n if points_to_sample is None:\n self._points_to_sample = numpy.zeros((1, self._gaussian_process.dim))\n else:\n self._points_to_sample = points_to_sample\n\n self._num_to_sample = self._points_to_sample.shape[0]\n\n if randomness is None:\n self._randomness = C_GP.RandomnessSourceContainer(1) # create randomness for only 1 thread\n # Set seed based on less repeatable factors (e.g,. time)\n self._randomness.SetRandomizedUniformGeneratorSeed(0)\n self._randomness.SetRandomizedNormalRNGSeed(0)\n else:\n self._randomness = randomness\n\n self.objective_type = None # Not used for KG, but the field is expected in C++\n\n @property\n def dim(self):\n \"\"\"Return the number of spatial dimensions.\"\"\"\n return self._gaussian_process.dim\n\n @property\n def num_to_sample(self):\n \"\"\"Number of points at which to compute/optimize KG, aka potential points to sample in future experiments; i.e., the ``q`` in ``q,p-kg``.\"\"\"\n return self._points_to_sample.shape[0]\n\n @property\n def num_being_sampled(self):\n \"\"\"Number of points being sampled in concurrent experiments; i.e., the ``p`` in ``q,p-KG``.\"\"\"\n return self._points_being_sampled.shape[0]\n\n @property\n def discrete(self):\n return self._discrete_pts.shape[0]\n\n @property\n def problem_size(self):\n \"\"\"Return the number of independent parameters to optimize.\"\"\"\n return self.num_to_sample * self.dim\n\n def get_current_point(self):\n \"\"\"Get the current_point (array of float64 with shape (problem_size)) at which this object is evaluating the objective function, ``f(x)``.\"\"\"\n return numpy.copy(self._points_to_sample)\n\n def set_current_point(self, points_to_sample):\n \"\"\"Set current_point to the specified point; ordering must match.\n :param points_to_sample: current_point at which to evaluate the objective function, ``f(x)``\n :type points_to_sample: array of float64 with shape (problem_size)\n \"\"\"\n self._points_to_sample = numpy.copy(numpy.atleast_2d(points_to_sample))\n\n current_point = property(get_current_point, set_current_point)\n\n def evaluate_at_point_list(\n self,\n points_to_evaluate,\n randomness=None,\n max_num_threads=DEFAULT_MAX_NUM_THREADS,\n status=None,\n ):\n \"\"\"Evaluate knowledge gradient (1,p-EI) over a specified list of ``points_to_evaluate``.\n\n .. 
Note:: We use ``points_to_evaluate`` instead of ``self._points_to_sample`` and compute the EI at those points only.\n ``self._points_to_sample`` is unchanged.\n\n Generally gradient descent is preferred but when they fail to converge this may be the only \"robust\" option.\n This function is also useful for plotting or debugging purposes (just to get a bunch of EI values).\n\n :param points_to_evaluate: points at which to compute EI\n :type points_to_evaluate: array of float64 with shape (num_to_evaluate, self.dim)\n :param randomness: RNGs used by C++ to generate initial guesses and as the source of normal random numbers when monte-carlo is used\n :type randomness: RandomnessSourceContainer (C++ object; e.g., from C_GP.RandomnessSourceContainer())\n :param max_num_threads: maximum number of threads to use, >= 1\n :type max_num_threads: int > 0\n :param status: (output) status messages from C++ (e.g., reporting on optimizer success, etc.)\n :type status: dict\n :return: EI evaluated at each of points_to_evaluate\n :rtype: array of float64 with shape (points_to_evaluate.shape[0])\n\n \"\"\"\n # Create enough randomness sources if none are specified.\n if randomness is None:\n if max_num_threads == 1:\n randomness = self._randomness\n else:\n randomness = C_GP.RandomnessSourceContainer(max_num_threads)\n # Set seeds based on less repeatable factors (e.g,. time)\n randomness.SetRandomizedUniformGeneratorSeed(0)\n randomness.SetRandomizedNormalRNGSeed(0)\n\n # status must be an initialized dict for the call to C++.\n if status is None:\n status = {}\n\n # num_to_sample need not match ei_evaluator.num_to_sample since points_to_evaluate\n # overrides any data inside ei_evaluator\n num_to_evaluate, num_to_sample, _ = points_to_evaluate.shape\n discrete_being_sampled = numpy.concatenate((self._discrete_pts, self._points_being_sampled))\n kg_values = C_GP.evaluate_KG_at_point_list(\n self._gaussian_process._gaussian_process,\n self._num_fidelity,\n self._inner_optimizer.optimizer_parameters,\n cpp_utils.cppify(self._inner_optimizer.domain.domain_bounds),\n cpp_utils.cppify(discrete_being_sampled),\n cpp_utils.cppify(points_to_evaluate),\n num_to_evaluate,\n self.discrete,\n num_to_sample,\n self.num_being_sampled,\n self._best_so_far,\n self._num_mc_iterations,\n max_num_threads,\n randomness,\n status,\n )\n return numpy.array(kg_values)\n\n def compute_knowledge_gradient(self, force_monte_carlo=False):\n r\"\"\"Compute the knowledge gradient at ``points_to_sample``, with ``points_being_sampled`` concurrent points being sampled.\n\n .. Note:: These comments were copied from\n :meth:`moe.optimal_learning.python.interfaces.expected_improvement_interface.ExpectedImprovementInterface.compute_expected_improvement`\n\n ``points_to_sample`` is the \"q\" and ``points_being_sampled`` is the \"p\" in q,p-EI.\n\n Computes the knowledge gradient ``EI(Xs) = E_n[[f^*_n(X) - min(f(Xs_1),...,f(Xs_m))]^+]``, where ``Xs``\n are potential points to sample (union of ``points_to_sample`` and ``points_being_sampled``) and ``X`` are\n already sampled points. The ``^+`` indicates that the expression in the expectation evaluates to 0 if it\n is negative. 
``f^*(X)`` is the MINIMUM over all known function evaluations (``points_sampled_value``),\n whereas ``f(Xs)`` are *GP-predicted* function evaluations.\n\n In words, we are computing the knowledge gradient (over the current ``best_so_far``, best known\n objective function value) that would result from sampling (aka running new experiments) at\n ``points_to_sample`` with ``points_being_sampled`` concurrent/ongoing experiments.\n\n In general, the EI expression is complex and difficult to evaluate; hence we use Monte-Carlo simulation to approximate it.\n When faster (e.g., analytic) techniques are available, we will prefer them.\n\n The idea of the MC approach is to repeatedly sample at the union of ``points_to_sample`` and\n ``points_being_sampled``. This is analogous to gaussian_process_interface.sample_point_from_gp,\n but we sample ``num_union`` points at once:\n ``y = \\mu + Lw``\n where ``\\mu`` is the GP-mean, ``L`` is the ``chol_factor(GP-variance)`` and ``w`` is a vector\n of ``num_union`` draws from N(0, 1). Then:\n ``improvement_per_step = max(max(best_so_far - y), 0.0)``\n Observe that the inner ``max`` means only the smallest component of ``y`` contributes in each iteration.\n We compute the improvement over many random draws and average.\n\n :param force_monte_carlo: whether to force monte carlo evaluation (vs using fast/accurate analytic eval when possible)\n :type force_monte_carlo: boolean\n :return: the knowledge gradient from sampling ``points_to_sample`` with ``points_being_sampled`` concurrent experiments\n :rtype: float64\n\n \"\"\"\n return C_GP.compute_knowledge_gradient(\n self._gaussian_process._gaussian_process,\n self._num_fidelity,\n self._inner_optimizer.optimizer_parameters,\n cpp_utils.cppify(self._inner_optimizer.domain.domain_bounds),\n cpp_utils.cppify(self._discrete_pts),\n cpp_utils.cppify(self._points_to_sample),\n cpp_utils.cppify(self._points_being_sampled),\n self.discrete,\n self.num_to_sample,\n self.num_being_sampled,\n self._num_mc_iterations,\n self._best_so_far,\n self._randomness,\n )\n\n compute_objective_function = compute_knowledge_gradient\n\n def compute_grad_knowledge_gradient(self, force_monte_carlo=False):\n r\"\"\"Compute the gradient of knowledge gradient at ``points_to_sample`` wrt ``points_to_sample``, with ``points_being_sampled`` concurrent samples.\n\n .. Note:: These comments were copied from\n :meth:`moe.optimal_learning.python.interfaces.expected_improvement_interface.ExpectedImprovementInterface.compute_grad_expected_improvement`\n\n ``points_to_sample`` is the \"q\" and ``points_being_sampled`` is the \"p\" in q,p-EI.\n\n In general, the expressions for gradients of EI are complex and difficult to evaluate; hence we use\n Monte-Carlo simulation to approximate it. When faster (e.g., analytic) techniques are available, we will prefer them.\n\n The MC computation of grad EI is similar to the computation of EI (decsribed in\n compute_expected_improvement). We differentiate ``y = \\mu + Lw`` wrt ``points_to_sample``;\n only terms from the gradient of ``\\mu`` and ``L`` contribute. In EI, we computed:\n ``improvement_per_step = max(max(best_so_far - y), 0.0)``\n and noted that only the smallest component of ``y`` may contribute (if it is > 0.0).\n Call this index ``winner``. 
Thus in computing grad EI, we only add gradient terms\n that are attributable to the ``winner``-th component of ``y``.\n\n :param force_monte_carlo: whether to force monte carlo evaluation (vs using fast/accurate analytic eval when possible)\n :type force_monte_carlo: boolean\n :return: gradient of EI, ``\\pderiv{EI(Xq \\cup Xp)}{Xq_{i,d}}`` where ``Xq`` is ``points_to_sample``\n and ``Xp`` is ``points_being_sampled`` (grad EI from sampling ``points_to_sample`` with\n ``points_being_sampled`` concurrent experiments wrt each dimension of the points in ``points_to_sample``)\n :rtype: array of float64 with shape (num_to_sample, dim)\n\n \"\"\"\n grad_kg = C_GP.compute_grad_knowledge_gradient(\n self._gaussian_process._gaussian_process,\n self._num_fidelity,\n self._inner_optimizer.optimizer_parameters,\n cpp_utils.cppify(self._inner_optimizer.domain.domain_bounds),\n cpp_utils.cppify(self._discrete_pts),\n cpp_utils.cppify(self._points_to_sample),\n cpp_utils.cppify(self._points_being_sampled),\n self.discrete,\n self.num_to_sample,\n self.num_being_sampled,\n self._num_mc_iterations,\n self._best_so_far,\n self._randomness,\n )\n return cpp_utils.uncppify(grad_kg, (self.num_to_sample, self.dim))\n\n compute_grad_objective_function = compute_grad_knowledge_gradient\n\n def compute_hessian_objective_function(self, **kwargs):\n \"\"\"We do not currently support computation of the (spatial) hessian of knowledge gradient.\"\"\"\n raise NotImplementedError('Currently we cannot compute the hessian of knowledge gradient.')\n" ]
[ [ "numpy.amin", "numpy.concatenate", "numpy.atleast_2d", "numpy.copy", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
samiriff/scikit-dataaccess-ode
[ "935bfd54149abd9542fe38e77b7eabab48b1c3a1", "dc08fd67c772d3cd83d0d34183196661b6b53778" ]
[ "skdaccess/engineering/la/traffic_counts/stream.py", "skdaccess/planetary/ode/cache/data_fetcher_mini.py" ]
[ "# The MIT License (MIT)\n# Copyright (c) 2018 Massachusetts Institute of Technology\n#\n# Author: Cody Rude\n# This software has been created in projects supported by the US National\n# Science Foundation and NASA (PI: Pankratius)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n# Standard library imports\nfrom collections import OrderedDict\n\n# Scikit Data Access\nfrom skdaccess.engineering.la.generic.stream import DataFetcher as GenericDataFetcher\nfrom skdaccess.utilities.support import join_string\n\n# Third party packages\nimport pandas as pd\n\nclass DataFetcher(GenericDataFetcher):\n \"\"\"\n DataFetcher for retrieving traffic counts from LA\n \"\"\"\n def __init__(self, limit=None, start_time=None, end_time=None, app_token=None, verbose=False):\n \"\"\"\n Initialize Data Fetcher to retrieve traffic couns from LA\n\n @param limit: Maximum number of rows\n @param start_time: Starting time\n @param end_time: Ending time\n @param app_token: Application token to avoid throttling\n @param verbose: Print extra information\n \"\"\"\n \n endpoint = 'w4g9-ux6z'\n\n where_string = ''\n\n time_list = []\n\n if start_time != None:\n time_list.append((start_time, \"count_date >= '{}'\"))\n\n if end_time != None:\n time_list.append((end_time, \"count_date <= '{}'\"))\n \n for time, compare_string in time_list:\n time = pd.to_datetime(time)\n \n where_string = join_string(where_string,\n compare_string.format(time.strftime('%Y-%m-%dT%H:%M:%S')))\n\n parameters = OrderedDict()\n\n if len(time_list) > 0:\n parameters['$where'] = where_string\n\n if limit != None:\n parameters['$limit'] = str(limit)\n \n super(DataFetcher, self).__init__(endpoint = endpoint,\n parameters = parameters,\n label = 'Traffic Counts',\n app_token = app_token,\n verbose = verbose,\n header = 0,\n parse_dates = [0])\n", "# The MIT License (MIT)\r\n# Copyright (c) 2018 Massachusetts Institute of Technology\r\n#\r\n# Author: Guillaume Rongier\r\n# This software has been created in projects supported by the US National\r\n# Science Foundation and NASA (PI: Pankratius)\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the 
following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in\r\n# all copies or substantial portions of the Software.\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\r\n# THE SOFTWARE.\r\n\r\n# Scikit Data Access imports\r\nfrom skdaccess.framework.data_class import DataFetcherCache, ImageWrapper\r\nfrom skdaccess.utilities.ode_util import *\r\n\r\n# 3rd party imports\r\nimport matplotlib.image as mpimg\r\nfrom tqdm import tqdm\r\n\r\n# Standard library imports\r\nfrom collections import OrderedDict\r\n\r\n\r\nclass DataFetcherMini(DataFetcherCache):\r\n ''' Data Fetcher from the Orbital Data Explorer (ODE) '''\r\n\r\n def __init__(self, target, mission, instrument, product_type,\r\n western_lon=None, eastern_lon=None, min_lat=None, max_lat=None,\r\n min_ob_time='', max_ob_time='', product_id='', file_name='*',\r\n number_product_limit=10, result_offset_number=0, remove_ndv=True):\r\n\r\n '''\r\n Construct Data Fetcher object\r\n For more information about the different fields and the possible values,\r\n see the manual of ODE REST interface at http://oderest.rsl.wustl.edu\r\n @param target: Aimed planetary body, i.e., Mars, Mercury, Moon, Phobos, or Venus\r\n @param mission: Aimed mission, e.g., MGS or MRO\r\n @param instrument: Aimed instrument from the mission, e.g., HIRISE or CRISM\r\n @param product_type: Type of product to look for, e.g., DTM or RDRV11\r\n @param western_lon: Western longitude to look for the data, from 0 to 360\r\n @param eastern_lon: Eastern longitude to look for the data, from 0 to 360\r\n @param min_lat: Minimal latitude to look for the data, from -90 to 90\r\n @param max_lat: Maximal latitude to look for the data, from -90 to 90\r\n @param min_ob_time: Minimal observation time in (even partial) UTC format, e.g., '2017-03-01'\r\n @param max_ob_time: Maximal observation time in (even partial) UTC format, e.g., '2017-03-01'\r\n @param product_id: PDS Product ID to look for, with wildcards (*) allowed\r\n @param file_name: File name to look for, with wildcards (*) allowed\r\n @param number_product_limit: Maximal number of products to return (ODE allows 100 at most)\r\n @param result_offset_number: Offset the return products, to go beyond the limit of 100 returned products\r\n @param remove_ndv: Replace the no-data value as mentioned in the label by np.nan\r\n '''\r\n\r\n assert western_lon is None or 0. <= western_lon <= 360., 'Western longitude is not between 0 and 360 degrees'\r\n assert eastern_lon is None or 0. <= eastern_lon <= 360., 'Eastern longitude is not between 0 and 360 degrees'\r\n assert min_lat is None or -90. <= min_lat <= 90., 'Minimal latitude is not between -90 and 90 degrees'\r\n assert max_lat is None or -90.
<= max_lat <= 90., 'Maximal latitude is not between -90 and 90 degrees'\r\n assert 1 <= number_product_limit <= 100, 'Number of product limit must be between 1 and 100'\r\n\r\n self.target = target\r\n self.mission = mission\r\n self.instrument = instrument\r\n self.product_type = product_type\r\n self.western_lon = western_lon\r\n self.eastern_lon = eastern_lon\r\n self.min_lat = min_lat\r\n self.max_lat = max_lat\r\n self.min_ob_time = min_ob_time\r\n self.max_ob_time = max_ob_time\r\n self.product_id = product_id\r\n self.file_name = file_name\r\n self.number_product_limit = number_product_limit\r\n self.result_offset_number = result_offset_number\r\n self.remove_ndv = remove_ndv\r\n self.limit_file_types = 'Browse';\r\n\r\n def output(self):\r\n '''\r\n Generate data wrapper from ODE data\r\n '''\r\n file_urls = query_files_urls(self.target, self.mission, self.instrument, self.product_type,\r\n self.western_lon, self.eastern_lon, self.min_lat, self.max_lat,\r\n self.min_ob_time, self.max_ob_time, self.product_id, self.file_name,\r\n self.number_product_limit, self.result_offset_number, self.limit_file_types)\r\n\r\n downloaded_files = self.cacheData('ode', file_urls.keys())\r\n\r\n # Gather the data and meta-data\r\n data_dict = OrderedDict()\r\n # print(\"File Urls = \", file_urls)\r\n for file, key in tqdm(zip(downloaded_files, file_urls.keys())):\r\n if file.endswith('.jpg') or file.endswith('.png'):\r\n file_description = file_urls.get(key)[1]\r\n # print(\"File description = \", file_description)\r\n # print(\"Product = \", file_urls.get(key)[0])\r\n product = file_urls.get(key)[0]\r\n # print(\"File = \", file)\r\n if data_dict.get(product, None) is None:\r\n data_dict[product] = OrderedDict()\r\n data_dict[product][file_description] = mpimg.imread(file)\r\n\r\n # print(\"data dict = \", data_dict)\r\n print(\"Processing complete\")\r\n\r\n return ImageWrapper(obj_wrap=data_dict)\r\n" ]
[ [ "pandas.to_datetime" ], [ "matplotlib.image.imread" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cassianobecker/dnn
[ "bb2ea04f77733de9df10f795bb049ac3b9d30478", "bb2ea04f77733de9df10f795bb049ac3b9d30478", "bb2ea04f77733de9df10f795bb049ac3b9d30478", "bb2ea04f77733de9df10f795bb049ac3b9d30478" ]
[ "dataset/synth/test/regression_test.py", "dataset/hcp/dti/dti.py", "dataset/mnist/dwi/sticks.py", "experiments/synth/experiment.py" ]
[ "import os\nfrom os.path import join\nimport shutil\nimport subprocess\nimport numpy.random as npr\nimport numpy as np\nimport scipy.stats\n\nfrom dipy.io.image import load_nifti, save_nifti\nfrom dipy.io import read_bvals_bvecs\nfrom dipy.segment.mask import median_otsu\nfrom dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, recursive_response\nfrom dipy.reconst.csdeconv import auto_response\nfrom dipy.core.gradients import gradient_table\n\nfrom util.path import absolute_path\nfrom dataset.synth.fibercup import create_fibercup\nfrom dataset.synth.plot import plot_track_vis\nfrom dataset.synth.tract import Bundle, ControlPoint, Tractogram\n\n# general configurations\nbase_path = '~/mitk/dnn/.dnn/datasets'\ndataset_name = 'synth5'\nDWI_PARAMS_FILE = 'param.ffp'\n\n# docker paths for Fiberfox\ndocker_container_name = 'confident_nobel'\nbase_path_on_docker = '/dnn/.dnn/datasets'\nfiberfox_executable = '/dnn/MitkDiffusion/MitkFiberfox.sh'\n# singularity exec docker://harangju/ubuntu-mitk:latest ~/mitk2/dnn/MitkDiffusion/MitkFiberfox.sh -o ~/mitk2 -i ~/mitk2/args/tracts.fib -p ~/mitk2/args/dwi_params.ffp\n\n\ndef make_dir(path):\n if not os.path.isdir(path):\n os.makedirs(path)\n return path\n\n\ndef copy_param_dir(src_param_dir, dest_param_dir):\n if os.path.isdir(dest_param_dir):\n shutil.rmtree(dest_param_dir)\n shutil.copytree(src_param_dir, dest_param_dir)\n\n\nclass FibercupRegressionDataset:\n\n def __init__(self, dry_run=False):\n\n self.base_path = os.path.expanduser(base_path)\n self.dry_run = dry_run\n\n self.radius = 64\n self.depth = 6\n self.multiplier = 10\n\n tractogram, self.parcels = create_fibercup(radius=self.radius, depth=self.depth, mult=self.multiplier)\n self.tractogram: Tractogram = tractogram\n\n self.tracts_path = make_dir(join(self.base_path, dataset_name, 'tracts'))\n self.dwi_path = make_dir(join(self.base_path, dataset_name, 'dwi'))\n self.dti_path = make_dir(join(self.base_path, dataset_name, 'dti'))\n self.odf_path = make_dir(join(self.base_path, dataset_name, 'odf'))\n self.param_path = join(self.base_path, dataset_name, 'params')\n\n copy_param_dir(join(absolute_path('dataset'), 'synth', 'dwi_params'), join(self.base_path, dataset_name, 'params'))\n\n self.flip_evecs()\n\n def process_subject(self, sample_id):\n\n edge = (6, 7)\n\n self.tractogram.bundles.pop(edge, None)\n\n shift = npr.rand()\n offset = np.array([shift, 0, 0])\n bundle = self.create_bundle(edge, offset)\n\n self.tractogram.add(edge, bundle)\n\n self.save_tract_and_label(sample_id, self.tractogram, label=shift, show_plot=False)\n\n self.simulate_dwi(sample_id)\n\n self.fit_dti(sample_id)\n\n self.fit_odf(sample_id)\n\n def generate_samples(self, num_samples):\n\n for sample_id in range(num_samples):\n self.process_subject(sample_id)\n\n def save_tract_and_label(self, sample_id, tractogram, label, show_plot=False):\n\n path = join(self.tracts_path, f'{sample_id}')\n if not os.path.isdir(path):\n os.makedirs(path)\n\n np.savetxt(join(path, 'label.txt'), np.array([label]), fmt='%f', delimiter='')\n\n fname = 'tracts'\n offset = [self.radius, self.radius, self.depth]\n tractogram.save(join(path, fname), offset)\n\n if show_plot is True:\n url_trk = join(path, fname + '.trk')\n plot_track_vis(url_trk)\n\n return join(path, fname + '.fib')\n\n def create_bundle(self, edge, offset):\n\n multiplier = 1\n ctl_pt_variance = 5\n weight = 200\n\n radius = self.radius\n depth = int(0.5 * self.depth + offset[2])\n\n control_points = [\n ControlPoint((-int(0.65 * radius + offset[0]), -int(0.3 
* radius + offset[1]), depth), ctl_pt_variance),\n ControlPoint((-int(0.5 * radius + offset[0]), -int(0.4 * radius + offset[1]), depth), ctl_pt_variance),\n ControlPoint((-int(0.5 * radius + offset[0]), -int(0.5 * radius + offset[1]), depth), ctl_pt_variance),\n ControlPoint((-int(0.6 * radius + offset[0]), -int(0.7 * radius + offset[1]), depth), ctl_pt_variance)\n ]\n\n node0 = self.parcels.nodes[edge[0]]\n node1 = self.parcels.nodes[edge[1]]\n\n num_streams = weight * multiplier\n\n bundle = Bundle(node0, node1, control_points, num_streams)\n\n return bundle\n\n def simulate_dwi(self, sample_id):\n\n # make target directory on docker locally\n path = join(self.dwi_path, f'{sample_id}')\n if not os.path.isdir(path):\n os.makedirs(path)\n\n # define all paths relative to docker\n dwi_base_path = base_path_on_docker\n params_url = join(dwi_base_path, dataset_name, 'params', DWI_PARAMS_FILE)\n tracts_url = join(dwi_base_path, dataset_name, 'tracts', f'{sample_id}', 'tracts.fib')\n target_url = join(dwi_base_path, dataset_name, 'dwi', f'{sample_id}', 'data')\n\n docker_prefix = f'/usr/local/bin/docker exec -i {docker_container_name}'\n\n str_cmd = f'{docker_prefix} {fiberfox_executable} -o {target_url} -i {tracts_url} -p {params_url} --verbose'\n\n subprocess.run(str_cmd, shell=True, check=True)\n\n @staticmethod\n def _perform_dti_fit(dti_params, save_tensor=False):\n\n dti_fit_command_str = f\"dtifit \" \\\n f\"-k {dti_params['data']} \" \\\n f\"-o {dti_params['output']} \" \\\n f\"-m {dti_params['mask']} \" \\\n f\"-r {dti_params['bvecs']} \" \\\n f\"-b {dti_params['bvals']} \"\n\n if save_tensor is True:\n dti_fit_command_str += '--save_tensor'\n\n subprocess.run(dti_fit_command_str, shell=True, check=True)\n\n @staticmethod\n def get_otsu_mask(image):\n b0_mask, mask = median_otsu(image, median_radius=2, numpass=1, vol_idx=np.array([0, 1, 2]))\n return mask\n\n @staticmethod\n def get_mode_mask(image):\n\n masks = np.full(image.shape, 0)\n\n for k in range(3):\n mode_k = scipy.stats.mode(image[..., k].ravel())[0][0]\n masks[image[..., k] < 0.99 * mode_k] = 1\n\n mask = np.any(masks, axis=3) * 1.\n\n return mask\n\n def make_mask_from_dwi(self, sample_id, strategy='mode'):\n\n dwi_file_url = join(self.dwi_path, f'{sample_id}', 'data.nii.gz')\n image, affine = load_nifti(dwi_file_url)\n\n if strategy == 'mode':\n mask = self.get_mode_mask(image)\n\n elif strategy == 'otsu':\n mask = self.get_otsu_mask(image)\n\n else:\n raise ValueError('Not implemented: unknown dwi mask type')\n\n mask_file_url = join(self.dwi_path, f'{sample_id}', 'data_mask.nii.gz')\n affine = np.eye(4)\n save_nifti(mask_file_url, mask, affine)\n\n def make_mask(self, sample_id):\n mask_file_url = join(self.dwi_path, f'{sample_id}', 'data_mask.nii.gz')\n self.parcels.save_mask(mask_file_url=mask_file_url)\n\n def fit_dti(self, sample_id):\n\n output_dti_path = join(self.dti_path, f'{sample_id}')\n\n dti_params = {\n 'data': join(self.dwi_path, f'{sample_id}', 'data.nii.gz'),\n 'mask': join(self.dwi_path, f'{sample_id}', 'data_mask.nii.gz'),\n 'bvals': self._flipped_bvals_url(),\n 'bvecs': self._flipped_bvecs_url(),\n 'output': join(output_dti_path, 'dti')\n }\n\n if not os.path.isdir(output_dti_path):\n os.makedirs(output_dti_path)\n\n self.make_mask(sample_id)\n self._perform_dti_fit(dti_params, save_tensor=True)\n\n registered_tensor_url = join(self.dti_path, f'{sample_id}', 'dti_tensor.*')\n fslconvert_command_str = f'fslchfiletype NIFTI_GZ {registered_tensor_url}'\n subprocess.run(fslconvert_command_str, shell=True, 
check=True)\n\n def _flipped_bvals_url(self):\n return join(self.param_path, 'flipped_' + DWI_PARAMS_FILE + '.bvals')\n\n def _flipped_bvecs_url(self):\n return join(self.param_path, 'flipped_' + DWI_PARAMS_FILE + '.bvecs')\n\n def flip_evecs(self, flips=(1, -1, 1)):\n\n # flip eigenvectors for compatibility between Mitk Fiberfox and FSL dtifit\n bvals_url = join(self.param_path, DWI_PARAMS_FILE + '.bvals')\n bvecs_url = join(self.param_path, DWI_PARAMS_FILE + '.bvecs')\n bvals, bvecs = read_bvals_bvecs(bvals_url, bvecs_url)\n new_bvecs = bvecs @ np.diag(flips)\n return self.save_bvals_bvecs(bvals, new_bvecs)\n\n def save_bvals_bvecs(self, bvals, bvecs):\n np.savetxt(self._flipped_bvals_url(), np.expand_dims(bvals, axis=0), fmt='%d', delimiter=' ')\n np.savetxt(self._flipped_bvecs_url(), bvecs.T, fmt='%2.6f', delimiter=' ')\n\n def fit_odf(self, sample_id):\n\n # bvals_url = self._flipped_bvals_url()\n # bvecs_url = self._flipped_bvecs_url()\n bvals_url = join(self.param_path, DWI_PARAMS_FILE + '.bvals')\n bvecs_url = join(self.param_path, DWI_PARAMS_FILE + '.bvecs')\n\n bvals, bvecs = read_bvals_bvecs(bvals_url, bvecs_url)\n gtab = gradient_table(bvals, bvecs)\n\n volumes_url = join(self.dwi_path, f'{sample_id}', 'data.nii.gz')\n volumes, volumes_affine = load_nifti(volumes_url)\n\n response, ratio = auto_response(gtab, volumes, roi_center=(29, 48, 2), roi_radius=1, fa_thr=0.24)\n\n # response = recursive_response(gtab, volumes, sh_order=8,\n # peak_thr=0.01, init_fa=0.08,\n # init_trace=0.0021, iter=8, convergence=0.001,\n # parallel=True)\n\n csd_model = ConstrainedSphericalDeconvModel(gtab, response)\n csd_fit = csd_model.fit(volumes)\n odf = csd_fit.shm_coeff\n\n # mask_url = join(self.dwi_path, f'{sample_id}', 'data_mask.nii.gz')\n # self.make_mask(sample_id)\n # mask, affine = load_nifti(mask_url)\n # odf_masked = (odf.transpose((3, 0, 1, 2)) * mask).transpose((1, 2, 3, 0))\n odf_masked = odf\n\n output_odf_path = join(self.odf_path, f'{sample_id}')\n if not os.path.isdir(output_odf_path):\n os.makedirs(output_odf_path)\n\n odf_url = join(output_odf_path, 'odf.nii.gz')\n\n save_nifti(odf_url, odf_masked, volumes_affine)\n\n\nif __name__ == '__main__':\n\n number_of_samples = 2\n dataset = FibercupRegressionDataset()\n dataset.generate_samples(number_of_samples)\n", "import glob\nimport os\nimport subprocess\nimport shutil\n\nimport nibabel as nib\nimport numpy as np\n\nfrom util.logging import get_logger, set_logger\nfrom fwk.config import Config\nfrom util.path import absolute_path, is_project_in_cbica\n\nfrom dataset.hcp.hcp import HcpDiffusionDatabase\n\n\nclass HcpDtiProcessor:\n\n def __init__(self):\n\n self.database = HcpDiffusionDatabase()\n\n self.processing_folder = os.path.expanduser(Config.config['DATABASE']['local_processing_directory'])\n if not os.path.isdir(self.processing_folder):\n os.makedirs(self.processing_folder, exist_ok=True)\n\n log_furl = os.path.join(self.processing_folder, 'log', 'preprocessing.log')\n if not os.path.isdir(log_furl):\n os.makedirs(os.path.join(self.processing_folder, 'log'), exist_ok=True)\n\n set_logger('HcpProcessor', Config.config['LOGGING']['processing_level'], log_furl)\n self.logger = get_logger('HcpProcessor')\n\n nib.imageglobals.logger = set_logger('Nibabel', Config.config['LOGGING']['nibabel_level'], log_furl)\n\n self.template_folder = absolute_path(Config.config['TEMPLATE']['folder'])\n self.template_file = Config.config['TEMPLATE']['template']\n\n self.dti_files = self._dti_files()\n self.converted_dti_files = 
self._converted_dti_files()\n self.registered_dti_files = self._registered_dti_files()\n self.ants_dti_files = self._ants_dti_files()\n\n def process_subject(self, subject, delete_folders=False):\n\n self.logger.info('processing subject {}'.format(subject))\n\n if not os.path.exists(self._processed_tensor_url(subject)):\n self.database.get_diffusion(subject)\n\n self.fit_dti(subject)\n self.convert_dti(subject)\n self.register_dti(subject)\n self.get_eigen(subject)\n self.save_dti_tensor_image(subject)\n\n if delete_folders is True:\n self.database.delete_diffusion_folder(subject)\n\n self._delete_fsl_folder(subject)\n self._delete_conversion_folder(subject)\n self._delete_reg_folder(subject)\n self._delete_ants_folder(subject)\n\n @staticmethod\n def _dti_files():\n return {'fsl_FA.nii.gz', 'fsl_L1.nii.gz', 'fsl_L2.nii.gz', 'fsl_L3.nii.gz', 'fsl_MD.nii.gz',\n 'fsl_MO.nii.gz', 'fsl_S0.nii.gz', 'fsl_V1.nii.gz', 'fsl_V2.nii.gz', 'fsl_V3.nii.gz',\n 'fsl_tensor.nii.gz'}\n\n @staticmethod\n def _converted_dti_files():\n return {'comp_dxz.nii.gz', 'FA.nii.gz', 'comp_dxx.nii.gz',\n 'comp_dyz.nii.gz', 'comp_dzz.nii.gz', 'comp_dyy.nii.gz',\n 'comp_dxy.nii.gz', 'dtUpper.nii.gz', 'DT.nii.gz'}\n\n @staticmethod\n def _registered_dti_files():\n return {'DTDeformed.nii.gz', 'FA_reg_0GenericAffine.mat', 'FA_reg_InverseWarped.nii.gz',\n 'FA_reg_combinedWarp.nii.gz', 'FA_reg_Warped.nii.gz', 'DTReorientedWarp.nii.gz',\n 'FA_reg_1InverseWarp.nii.gz', 'FA_reg_1Warp.nii.gz'}\n\n @staticmethod\n def _ants_dti_files():\n return {'V1Deformed.nii.gz', 'L1Deformed.nii.gz', 'L3Deformed.nii.gz',\n 'V3Deformed.nii.gz', 'V2Deformed.nii.gz', 'L2Deformed.nii.gz'}\n\n def _fsl_folder(self, subject):\n return os.path.join(self.processing_folder, 'HCP_1200_processed', subject, 'fsl')\n\n def _conversion_folder(self, subject):\n return os.path.join(self.processing_folder, 'HCP_1200_processed', subject, 'converted')\n\n def _reg_folder(self, subject):\n return os.path.join(self.processing_folder, 'HCP_1200_processed', subject, 'reg')\n\n def _ants_folder(self, subject):\n return os.path.join(self.processing_folder, 'HCP_1200_processed', subject, 'ants')\n\n def _processed_tensor_folder(self, subject):\n return os.path.join(self.processing_folder, 'HCP_1200_tensor', subject)\n\n def _processed_tensor_url(self, subject):\n return os.path.join(self.processing_folder, 'HCP_1200_tensor', subject, 'dti_tensor_' + subject + '.npz')\n\n def _is_dti_processed(self, subject):\n ants_dir = self._ants_folder(subject)\n dir_contents = set(os.listdir(ants_dir))\n return dir_contents == self.ants_dti_files\n\n def _is_tensor_saved(self, subject):\n exists = False\n if os.path.isdir(self._processed_tensor_folder(subject)):\n exists = os.path.isfile(self._processed_tensor_url(subject))\n return exists\n\n def _delete_fsl_folder(self, subject):\n processed_fsl_dir = self._fsl_folder(subject)\n if os.path.exists(processed_fsl_dir):\n shutil.rmtree(processed_fsl_dir)\n\n def _delete_conversion_folder(self, subject):\n converted_dir = self._conversion_folder(subject)\n if os.path.exists(converted_dir):\n shutil.rmtree(converted_dir)\n\n def _delete_reg_folder(self, subject):\n registered_dti_dir = self._reg_folder(subject)\n if os.path.exists(registered_dti_dir):\n shutil.rmtree(registered_dti_dir)\n\n def _delete_ants_folder(self, subject):\n ants_dir = self._ants_folder(subject)\n if os.path.exists(ants_dir):\n shutil.rmtree(ants_dir)\n\n def save_dti_tensor_image(self, subject):\n if not
os.path.isdir(self._processed_tensor_folder(subject)):\n os.makedirs(self._processed_tensor_folder(subject))\n dti_tensor = self.build_dti_tensor_image(subject)\n np.savez_compressed(self._processed_tensor_url(subject), dwi_tensor=dti_tensor)\n\n def fit_dti(self, subject):\n\n diffusion_dir = os.path.join(self.database.mirror_folder, self.database.diffusion_dir(subject))\n processed_fsl_dir = self._fsl_folder(subject)\n\n if os.path.isdir(processed_fsl_dir):\n dti_file = set(os.listdir(processed_fsl_dir))\n else:\n dti_file = {}\n\n if dti_file == self.dti_files:\n self.logger.info('processed dti files found for subject {}'.format(subject))\n\n else:\n self.logger.info('processing dti files for subject {}'.format(subject))\n if not os.path.isdir(processed_fsl_dir):\n os.makedirs(processed_fsl_dir)\n\n dti_fit_command_str = \\\n 'dtifit -k {0}/data.nii.gz -o {1}/fsl -m {0}/nodif_brain_mask.nii.gz -r {0}/bvecs -b {0}/bvals ' \\\n '--save_tensor'.format(diffusion_dir, processed_fsl_dir)\n\n subprocess.run(dti_fit_command_str, shell=True, check=True)\n\n def convert_dti(self, subject):\n \"\"\"\n Reads in FSL DTI outputs and converts them to be ANTs-friendly\n \"\"\"\n processed_fsl_dir = self._fsl_folder(subject)\n converted_dir = self._conversion_folder(subject)\n\n if os.path.isdir(converted_dir):\n converted_file = set(os.listdir(converted_dir))\n else:\n converted_file = {}\n\n if converted_file == self.converted_dti_files:\n self.logger.info('converted dti files found for subject {}'.format(subject))\n else:\n self.logger.info('converting dti files for subject {}'.format(subject))\n if not os.path.isdir(converted_dir):\n os.makedirs(converted_dir)\n\n if os.path.exists(os.path.join(processed_fsl_dir, 'fsl_tensor.hdr')):\n\n fslconvert_command_str = 'fslchfiletype NIFTI_GZ {0}/fsl_tensor.*'.format(processed_fsl_dir)\n subprocess.run(fslconvert_command_str, shell=True, check=True)\n\n ants_command_str = \\\n 'ImageMath 3 {1}/dtUpper.nii.gz 4DTensorTo3DTensor {0}/fsl_tensor.nii.gz' \\\n .format(processed_fsl_dir, converted_dir)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n comps = ['xx', 'xy', 'xz', 'yy', 'yz', 'zz']\n for i in range(len(comps)):\n ants_command_str = \\\n 'ImageMath 3 {0}/comp_d{1}.nii.gz TensorToVectorComponent {0}/dtUpper.nii.gz {2}' \\\n .format(converted_dir, comps[i], i+3)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n ants_command_str = \\\n 'ImageMath 3 {0}/DT.nii.gz ComponentTo3DTensor {0}/comp_d .nii.gz' \\\n .format(converted_dir)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n ants_command_str = \\\n
'ImageMath 3 {0}/FA.nii.gz TensorFA {0}/DT.nii.gz' \\\n .format(converted_dir)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n def register_dti(self, subject):\n \"\"\"\n Uses ANTs to warp and reorient DTI to template space\n \"\"\" \n\n converted_dir = self._conversion_folder(subject)\n registered_dti_dir = self._reg_folder(subject)\n template_folder = self.template_folder\n template_file = self.template_file\n\n if os.path.isdir(registered_dti_dir):\n registered_file = set(os.listdir(registered_dti_dir))\n else:\n registered_file = {}\n\n if registered_file == self.registered_dti_files:\n self.logger.info('registered dti files found for subject {}'.format(subject))\n else:\n self.logger.info('registering dti files for subject {}'.format(subject))\n if not os.path.isdir(registered_dti_dir):\n os.makedirs(registered_dti_dir)\n\n # 1) run ANTS registration\n ants_command_str = \\\n 'antsRegistrationSyN.sh -p f -f {0}/{3} -m {1}/FA.nii.gz -t s -o {2}/FA_reg_' \\\n .format(template_folder, converted_dir, registered_dti_dir, template_file)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n # 2) compose a single warp\n ants_command_str = \\\n 'antsApplyTransforms -d 3 -i {1}/FA.nii.gz -r {0}/{3} -t {2}/FA_reg_1Warp.nii.gz ' \\\n '-t {2}/FA_reg_0GenericAffine.mat -o [ {2}/FA_reg_combinedWarp.nii.gz , 1 ]' \\\n .format(template_folder, converted_dir, registered_dti_dir, template_file)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n # 3) move DT to fixed\n ants_command_str = \\\n 'antsApplyTransforms -d 3 -e 2 -i {1}/DT.nii.gz -r {0}/{3} -t {2}/FA_reg_combinedWarp.nii.gz -o {2}/DTDeformed.nii.gz' \\\n .format(template_folder, converted_dir, registered_dti_dir, template_file)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n # 4) reorient warped DT\n ants_command_str = \\\n 'ReorientTensorImage 3 {0}/DTDeformed.nii.gz {0}/DTReorientedWarp.nii.gz {0}/FA_reg_combinedWarp.nii.gz' \\\n .format(registered_dti_dir)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n def get_eigen(self, subject):\n \"\"\"\n Uses ANTs to get eigenvalues and eigenvectors from DTI\n \"\"\"\n registered_dti_dir = self._reg_folder(subject)\n ants_dir = self._ants_folder(subject)\n\n if os.path.isdir(ants_dir):\n ants_file = set(os.listdir(ants_dir))\n else:\n ants_file = {}\n\n if ants_file == self.ants_dti_files:\n self.logger.info('re-processing dti files found for subject {}'.format(subject))\n else:\n self.logger.info('re-processing dti files for subject {}'.format(subject))\n if not os.path.isdir(ants_dir):\n os.makedirs(ants_dir)\n\n # get eigen values\n def return_eigenvector(indir, dt_image, vec_label, vec_idx, outdir, output):\n\n ants_command_str = \\\n 'ImageMath 3 {3}/{4}.nii.gz TensorToVector {0}/{1} {2}' \\\n .format(indir, dt_image, vec_idx, outdir, vec_label)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n for i in range(3):\n ants_command_str = \\\n 'ImageMath 3 {0}/{1}_{2}.nii.gz ExtractVectorComponent {0}/{1}.nii.gz {2}' \\\n .format(outdir, vec_label, i)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n ants_command_str = \\\n 'ImageMath 4 {0}/{2} TimeSeriesAssemble 1 0 {0}/{1}_0.nii.gz {0}/{1}_1.nii.gz {0}/{1}_2.nii.gz' \\\n .format(outdir, vec_label, output)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n return_eigenvector(registered_dti_dir, 'DTReorientedWarp.nii.gz', 'V1', 2, ants_dir, 'V1Deformed.nii.gz')\n return_eigenvector(registered_dti_dir, 'DTReorientedWarp.nii.gz', 'V2', 1, ants_dir, 
'V2Deformed.nii.gz')\n return_eigenvector(registered_dti_dir, 'DTReorientedWarp.nii.gz', 'V3', 0, ants_dir, 'V3Deformed.nii.gz')\n # ------------\n\n # get eigen values\n ants_command_str = \\\n 'ImageMath 3 {1}/L1Deformed.nii.gz TensorEigenvalue {0}/DTReorientedWarp.nii.gz 2' \\\n .format(registered_dti_dir, ants_dir)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n ants_command_str = \\\n 'ImageMath 3 {1}/L2Deformed.nii.gz TensorEigenvalue {0}/DTReorientedWarp.nii.gz 1' \\\n .format(registered_dti_dir, ants_dir)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n ants_command_str = \\\n 'ImageMath 3 {1}/L3Deformed.nii.gz TensorEigenvalue {0}/DTReorientedWarp.nii.gz 0' \\\n .format(registered_dti_dir, ants_dir)\n subprocess.run(ants_command_str, shell=True, check=True)\n\n # clean up\n file_list = glob.glob(os.path.join(ants_dir, '*_*.nii.gz'), recursive=False)\n for file in file_list: os.remove(file)\n os.remove(os.path.join(ants_dir, 'V1.nii.gz'))\n os.remove(os.path.join(ants_dir, 'V2.nii.gz'))\n os.remove(os.path.join(ants_dir, 'V3.nii.gz'))\n\n def build_dti_tensor_image(self, subject):\n \"\"\"\n Reads in eigenvectors and eigenvalues from DTI fit and returns 3*3*i*j*k DTI array for input to nn\n \"\"\"\n ants_dir = self._ants_folder(subject)\n\n dti_tensor = 0\n for i in range(1, 4):\n bvecs_file = glob.glob(os.path.join(ants_dir, 'V' + str(i) + '*'))[0]\n bvals_file = glob.glob(os.path.join(ants_dir, 'L' + str(i) + '*'))[0]\n bvecs = nib.load(bvecs_file).get_fdata()\n bvals = nib.load(bvals_file).get_fdata()\n dti_tensor = dti_tensor + np.einsum('abc, abci, abcj->ijabc', bvals, bvecs, bvecs)\n return dti_tensor\n", "import os\nimport numpy as np\nfrom scipy import signal\n\nfrom dipy.io.image import save_nifti\nfrom skimage.draw import line_nd\n\n\ndef create_stick_figure(width, height, depth):\n\n img = np.zeros((width, height, depth), dtype=np.double)\n\n line_coords = line_nd((0, height/2, 0), (width-1, height/2, depth-1))\n img[line_coords[0], line_coords[1], line_coords[2]] = 1\n\n # line_coords2 = line_nd((0, height/2, depth/2), (width-1, height/2, depth/2))\n # img[line_coords2[0], line_coords2[1], line_coords2[2]] = 1\n\n img = img / np.max(img)\n\n return img\n\n\ndef convolve_tube(img):\n\n sigma = 1.5 # width of kernel\n\n x = np.arange(-3, 4, 1) # coordinate arrays -- make sure they contain 0!\n y = np.arange(-3, 4, 1)\n z = np.arange(-3, 4, 1)\n xx, yy, zz = np.meshgrid(x, y, z)\n kernel = np.exp(-(xx**2 + yy**2 + zz**2)/(2*sigma**2))\n\n img = signal.convolve(img, kernel, mode=\"same\")\n\n return img\n\n\ndef save_image(img, affine, name):\n\n rel_path = '~/.dnn/datasets/sticks'\n\n path = os.path.expanduser(rel_path)\n\n if not os.path.isdir(path):\n os.makedirs(path)\n\n url = os.path.join(path, name)\n\n np.savez(url, img=img)\n\n save_nifti(url + '.nii.gz', img, affine)\n\n\ndef generate_figures():\n\n name = 'sticks1'\n\n width = 30\n height = 30\n depth = 30\n\n img = create_stick_figure(width, height, depth)\n img = convolve_tube(img)\n\n affine = np.eye(4)\n save_image(img, affine, name)\n\n\nif __name__ == '__main__':\n\n generate_figures()\n", "import torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import StepLR\n\nfrom fwk.config import Config\nfrom fwk.metrics import MetricsHandler\nfrom fwk.model import ModelHandler\nfrom util.lang import to_bool\n\nfrom dataset.synth.loader import SynthDataset, SynthDataLoader\nfrom dataset.synth.subjects import Subjects\nfrom util.lang import 
class_for_name\nfrom util.encode import one_hot_to_int\n\n\nclass BatchTrain:\n\n def __init__(self):\n self.epochs = None\n self.model = None\n self.loss = None\n self.optimizer = None\n self.scheduler = None\n self.device = None\n self.data_loaders = dict()\n self.accumulation_steps = None\n self.regression = None\n\n def execute(self):\n\n MetricsHandler.dispatch_event(locals(), 'before_setup')\n\n self.setup()\n\n MetricsHandler.dispatch_event(locals(), 'after_setup')\n\n if to_bool(Config.config['OUTPUTS']['load_model']) is True:\n self.model = ModelHandler.load_model(epoch=1)\n\n for epoch in range(self.epochs):\n\n MetricsHandler.dispatch_event(locals(), 'before_epoch')\n\n self.train_batch(epoch)\n self.test_batch(epoch)\n\n self.scheduler.step()\n\n MetricsHandler.dispatch_event(locals(), 'after_epoch')\n\n if to_bool(Config.config['OUTPUTS']['save_model']) is True:\n ModelHandler.save_model(self.model, epoch)\n\n self._teardown()\n\n def _teardown(self):\n pass\n\n def setup(self):\n\n torch.manual_seed(1234)\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n half_precision = to_bool(Config.get_option('ALGORITHM', 'half_precision', 'False'))\n max_img_channels = int(Config.get_option('ALGORITHM', 'max_img_channels', 1000))\n cholesky_weights = to_bool(Config.get_option('ARCHITECTURE', 'cholesky_weights', 'False'))\n\n perturb = to_bool(Config.get_option('DATABASE', 'perturb', 'False'))\n self.regression = to_bool(Config.get_option('COVARIATES', 'regression', 'False'))\n\n train_subjects, test_subjects = Subjects.create_list_from_config()\n\n train_set = SynthDataset(\n self.device,\n subjects=train_subjects,\n half_precision=half_precision,\n max_img_channels=max_img_channels,\n perturb=perturb,\n regression=self.regression\n )\n\n self.data_loaders['train'] = SynthDataLoader(\n train_set,\n shuffle=False,\n batch_size=int(Config.config['ALGORITHM']['train_batch_size'])\n )\n\n test_set = SynthDataset(\n self.device,\n subjects=test_subjects,\n half_precision=half_precision,\n max_img_channels=max_img_channels,\n perturb=False,\n regression=self.regression\n )\n\n self.data_loaders['test'] = SynthDataLoader(\n test_set,\n shuffle=False,\n batch_size=int(Config.config['ALGORITHM']['test_batch_size'])\n )\n\n img_dims = train_set.tensor_size()\n\n num_classes = train_set.number_of_classes() if not self.regression else 1\n num_outputs = train_set.number_of_outputs()\n\n arch_class_name = Config.config['ARCHITECTURE']['arch_class_name']\n model_class = class_for_name(arch_class_name)\n\n self.model = model_class(img_dims,\n number_of_classes=num_classes,\n number_of_outputs=num_outputs,\n cholesky_weights=cholesky_weights)\n\n self.model.to(self.device)\n\n self.epochs = int(Config.config['ALGORITHM']['epochs'])\n\n self.optimizer = optim.Adadelta(\n self.model.parameters(),\n lr=float(Config.config['ALGORITHM']['lr'])\n )\n\n self.scheduler = StepLR(\n self.optimizer,\n step_size=1,\n gamma=float(Config.config['ALGORITHM']['gamma'])\n )\n\n self.accumulation_steps = int(Config.get_option('ALGORITHM', 'accumulation_steps', 1))\n\n def train_batch(self, epoch):\n\n self.model.train()\n\n self.optimizer.zero_grad()\n\n for batch_idx, (dwi_tensors, targets, subjects) in enumerate(self.data_loaders['train']):\n\n MetricsHandler.dispatch_event(locals(), 'before_train_batch')\n\n # dwi_tensors, targets = dwi_tensors.to(self.device).type(\n # torch.float32), targets.to(self.device).type(torch.long)\n\n # dwi_tensors, targets = 
dwi_tensors.to(self.device).type(\n # torch.float32), targets.to(self.device).type(torch.float32)\n\n dwi_tensors = dwi_tensors.to(self.device).type(torch.float32)\n\n for key, value in targets.items():\n targets[key] = value.to(self.device).type(torch.float32)\n\n targets = torch.stack(list(targets.values()))\n\n # self.optimizer.zero_grad()\n outputs = self.model(dwi_tensors)\n\n if self.regression is True:\n loss = F.mse_loss(outputs, targets)\n else:\n loss = F.nll_loss(outputs, one_hot_to_int(targets))\n\n loss.backward()\n # self.optimizer.step()\n\n print(f'train loss {loss}')\n\n if (batch_idx + 1) % self.accumulation_steps == 0:\n self.optimizer.step()\n self.model.zero_grad()\n\n MetricsHandler.dispatch_event(locals(), 'after_train_batch')\n\n def test_batch(self, epoch):\n\n self.model.eval()\n\n with torch.no_grad():\n for batch_idx, (dwi_tensors, targets, subjects) in enumerate(self.data_loaders['test']):\n MetricsHandler.dispatch_event(locals(), 'before_test_batch')\n\n # dwi_tensors, targets = dwi_tensors.to(self.device).type(\n # torch.float32), targets.to(self.device).type(torch.float32)\n\n dwi_tensors = dwi_tensors.to(self.device).type(torch.float32)\n\n for key, value in targets.items():\n targets[key] = value.to(self.device).type(torch.float32)\n\n targets = torch.stack(list(targets.values()))\n\n\n outputs = self.model(dwi_tensors)\n # outputs = targets\n\n MetricsHandler.dispatch_event(locals(), 'after_test_batch')\n" ]
[ [ "numpy.diag", "numpy.expand_dims", "numpy.eye", "numpy.full", "numpy.random.rand", "numpy.any", "numpy.array" ], [ "numpy.einsum" ], [ "numpy.savez", "numpy.meshgrid", "numpy.arange", "numpy.eye", "numpy.max", "numpy.exp", "numpy.zeros", "scipy.signal.convolve" ], [ "torch.nn.functional.mse_loss", "torch.manual_seed", "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
5laps2go/xbrr
[ "4c0824b53bfe971111d60e6c1ff4e36f4f4845a3" ]
[ "xbrr/edinet/reader/aspects/finance.py" ]
[ "import warnings\nimport re\nimport collections\nimport importlib\nif importlib.util.find_spec(\"pandas\") is not None:\n import pandas as pd\nfrom xbrr.base.reader.base_parser import BaseParser\nfrom xbrr.edinet.reader.element_value import ElementValue\n\n\nclass Finance(BaseParser):\n\n def __init__(self, reader):\n tags = {\n \"voluntary_accounting_policy_change\": \"jpcrp_cor:NotesVoluntaryChangesInAccountingPoliciesConsolidatedFinancialStatementsTextBlock\",\n \"segment_information\": \"jpcrp_cor:NotesSegmentInformationEtcConsolidatedFinancialStatementsTextBlock\",\n \"real_estate_for_lease\": \"jpcrp_cor:NotesRealEstateForLeaseEtcFinancialStatementsTextBlock\",\n \"accounting_standards\": \"jpdei_cor:AccountingStandardsDEI\", # 会計基準 from metadata\n }\n\n super().__init__(reader, ElementValue, tags)\n\n @property\n def use_IFRS(self):\n return self.accounting_standards.value == 'IFRS'\n\n def bs(self, ifrs=False, use_cal_link=True):\n role = self.__find_role_name('bs')\n role_uri = self.reader.get_role(role[0]).uri\n # role_uri = \"http://disclosure.edinet-fsa.go.jp/role/jppfs/rol_BalanceSheet\"\n # if ifrs and self.use_IFRS:\n # role_uri = \"http://disclosure.edinet-fsa.go.jp/role/jpigp/rol_ConsolidatedStatementOfFinancialPositionIFRS\"\n\n bs = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)\n return self.__filter_duplicate(bs)\n\n def pl(self, ifrs=False, use_cal_link=True):\n role = self.__find_role_name('pl')\n role_uri = self.reader.get_role(role[0]).uri\n # role_uri = \"http://disclosure.edinet-fsa.go.jp/role/jppfs/rol_StatementOfIncome\"\n # if ifrs and self.use_IFRS:\n # role_base = \"http://disclosure.edinet-fsa.go.jp/role/jpigp/\"\n # role_uri = f\"{role_base}rol_ConsolidatedStatementOfComprehensiveIncomeIFRS\"\n\n pl = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)\n return self.__filter_duplicate(pl)\n\n def cf(self, ifrs=False, use_cal_link=True):\n role = self.__find_role_name('cf')\n if len(role) == 0:\n textblock = self.__read_value_by_textblock([\"StatementOfCashFlows\"])\n return self.__read_finance_statement(textblock.html) if textblock is not None else None\n role = role[0]\n role_uri = self.reader.get_role(role).uri\n\n cf = self.reader.read_value_by_role(role_uri, use_cal_link=use_cal_link)\n return self.__filter_duplicate(cf)\n\n def __filter_duplicate(self, data):\n # Exclude dimension member\n if data is not None:\n data.drop_duplicates(subset=(\"name\", \"period\"), keep=\"first\",\n inplace=True)\n return data\n\n def __find_role_name(self, finance_statement):\n role_candiates = {\n 'bs': [\"StatementOfFinancialPositionIFRS\", \"ConsolidatedBalanceSheet\", \"BalanceSheet\"],\n 'pl': [\"StatementOfProfitOrLossIFRS\", \"StatementOfIncome\"],\n 'cf': [\"StatementOfCashFlowsIFRS\", \"StatementOfCashFlows\"],\n }\n roles = []\n for name in role_candiates[finance_statement]:\n roles += [x for x in self.reader.custom_roles.keys() if name in x and 'Notes' not in x and x not in roles]\n return roles\n\n def __read_value_by_textblock(self, candidates):\n values = self.reader.find_value_names(candidates)\n textblocks = [x for x in values if x.endswith('TextBlock')]\n if len(textblocks) == 0:\n return None\n element_value = self.reader.findv(textblocks[0])\n return element_value\n\n def __read_finance_statement(self, statement_xml):\n def myen(value):\n if value=='-':\n return '000'\n myen = value.replace(',','').replace('△', '-')\n return myen\n def isnum(myen):\n try:\n float(myen)\n except ValueError:\n return False\n 
else:\n return True\n indent_state = []\n def indent_label(margin_left):\n delidx = [i for i,x in enumerate(indent_state) if int(x) > int(margin_left)]\n if len(delidx) > 0: del indent_state[delidx[0]:]\n indent_state.append(margin_left)\n c = collections.Counter(indent_state)\n ks = sorted(c.keys(), key=int)\n return \"-\".join([str(c[x]) for x in ks])\n\n unit = ''\n values = []\n for table in statement_xml.select('table'):\n for record in table.select('tr'):\n columns = list(record.select('td'))\n label = ''.join([x.text.strip() for x in columns[0].select('p')])\n value = myen(columns[-1].text.strip())\n style_str = columns[0].find('p')['style'] if label != \"\" else \"\"\n m = re.match(r'.*margin-left: *([0-9]*).?[0-9]*px.*', style_str)\n margin = m.groups()[0] if m is not None else \"0\"\n\n if isnum(value):\n values.append({\n 'label': label,\n 'value': value + unit,\n 'indent': indent_label(margin)\n })\n elif label != \"\" and value == \"\":\n values.append({\n 'label': label,\n 'indent': indent_label(margin)\n })\n else:\n assert value == '' or '単位:' in value or '百万円' in value or '当連結会計年度' in value\n if '百万円' in value: # matches '単位:百万円' or '金額(百万円)' (amounts in millions of yen)\n unit = '000000'\n elif '単位:円' in value:\n unit = ''\n return pd.DataFrame(values)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
marcobb8/tr_bn
[ "f6600d046a34266ec815384790659b4e33db325c" ]
[ "example_mbcs.py" ]
[ "# Import packages\nimport pandas\nfrom mbc import learn_mbc_cll, learn_mbc_generative, l_bfgs\nfrom export import get_adjacency_matrix_from_et\nfrom var_elim import PyFactorTree\nfrom data_type import data as datat\n\n# Load ASIA dataframe\nfile_name = \"data/asia.csv\"\ndata = pandas.read_csv(file_name)\nvar_classes = [['yes','no'] for _ in range(8)] # Categories of the variables\ncll_query = [0,1,2] #Class variables in the dataset\nfeatures = [3,4,5,6,7]\n\n# ----LEARNING FROM INCOMPLETE DATASETS---- #\n# Learn model with GS-pruned (Generative learning)\nmbc_gen = learn_mbc_generative(data, cll_query, metric='bic', tw_bound=5, custom_classes = var_classes)\n# Learn model with DGS (Discriminative learning)\nmbc_disc = learn_mbc_cll(data, cll_query, metric='bic', tw_bound=5, custom_classes = var_classes)\n\n\n\n# ----LEARN PARAMETERS---- #\nnum_vars = len(var_classes) #Number of variables\n\n# Get cppet from from MBC\net = mbc_gen\net_descriptor = [[et.nodes[i].parent_et for i in range(num_vars)], [et.nodes[i].nFactor for i in range(num_vars)], [[i] + et.nodes[i].parents.display() for i in range(num_vars)], [len(c) for c in var_classes]]\nmbc_gen_cpp = PyFactorTree(et_descriptor[0], et_descriptor[1], et_descriptor[2],et_descriptor[3])\n\n# Transfrom dataframe to data_type\ndata_p = datat(data,var_classes) \n# Learn parameters: alpha is the Dirichlet hyperparameter for the Bayesian estimation. \n# If alpha=0, the Maximum likelihood parameters are obtained \nmbc_gen_cpp.learn_parameters(data_p, alpha = 1)\n\n# (Optional) Optimize conditional likelihood of the parameters using l_bfgs\nmbc_gen_cpp = l_bfgs(data, mbc_gen, cll_query, var_classes)\n\n\n# ----INTERPRETING THE BAYESIAN NETWORK---- #\n# Obtaining the MBC adjacency matrix from cppet\nnum_nodes = data.shape[1]\nadj_mat = get_adjacency_matrix_from_et(mbc_disc)\n# Obtaining the parameters of node Tub\nxi = 1 #Tub is node 1\nfactor = mbc_gen_cpp.get_factor(num_nodes + xi)\nparameters = factor.get_prob()\n\n\n# ----Multidimensional classification---- #\n# Obtaining most probable explanations (MPEs)\n# Set evidence. For example, Asia = 'yes' and Lung Cancer = 'no'\nmbc_gen_cpp.set_evidence(features,[0,1,0,0,1])\n# Compute MPE\nmbc_gen_cpp.max_compute_tree()\n# Get factor with results\nfactor = mbc_gen_cpp.get_factor(-1)\nmpe_idx = factor.get_mpe()[0] # Get the MPE \nmpe = [var_classes[i][ci] for i,ci in enumerate(mpe_idx)]\nprob = factor.get_prob() # Get probability of the MPE \n# Retract evidence\nmbc_gen_cpp.retract_evidence()\n\n# Compute marginals of the class variables for each instance in the dataset\nprobs = mbc_gen_cpp.pred_data_distribute(data_p, cll_query, features)\n#The result is a 3D list\n# If we access probs[i][j][k], i is the row in the dataset, j is the index of the class variables in cll_query, and k is the category (index in var_classes)\n\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
anas-awadalla/mmf
[ "306f8f758831b2abf2c7ef5a8f010670a2cb33ed", "306f8f758831b2abf2c7ef5a8f010670a2cb33ed", "306f8f758831b2abf2c7ef5a8f010670a2cb33ed", "306f8f758831b2abf2c7ef5a8f010670a2cb33ed", "306f8f758831b2abf2c7ef5a8f010670a2cb33ed", "306f8f758831b2abf2c7ef5a8f010670a2cb33ed", "306f8f758831b2abf2c7ef5a8f010670a2cb33ed" ]
[ "mmf/models/unimodal.py", "mmf/utils/phoc/build_phoc.py", "tests/datasets/test_processors.py", "mmf/utils/checkpoint.py", "mmf/modules/decoders.py", "tests/modules/test_metrics.py", "projects/m4c/scripts/extract_ocr_frcn_feature.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\nfrom copy import deepcopy\n\nimport torch\nfrom mmf.common.registry import registry\nfrom mmf.models.base_model import BaseModel\nfrom mmf.modules.encoders import MultiModalEncoderBase\nfrom mmf.utils.build import build_classifier_layer\n\n\nclass UnimodalBase(MultiModalEncoderBase):\n def __init__(self, config, *args, **kwargs):\n super().__init__(config, *args, **kwargs)\n\n def build(self):\n encoders = self._build_encoders(self.config)\n # Text Encoder mode\n if \"modal_encoder\" not in self.config:\n self.encoder = encoders[0]\n # Modal encoder mode\n elif \"text_encoder\" not in self.config:\n self.encoder = encoders[1]\n else:\n raise RuntimeError(\n \"Unimodal Encoder can't have both text and modal encoder\"\n )\n\n def forward(self, x, *args, **kwargs):\n x = self.encoder(x, *args, **kwargs)\n # Case of bert encoder, we only need pooled output\n if not torch.is_tensor(x) and len(x) == 2:\n x = x[1]\n\n x = torch.flatten(x, start_dim=1)\n\n return x\n\n\[email protected]_model(\"unimodal_text\")\nclass UnimodalText(BaseModel):\n def __init__(self, config, *args, **kwargs):\n super().__init__(config)\n\n @classmethod\n def config_path(cls):\n return \"configs/models/unimodal/text.yaml\"\n\n def build(self):\n self.base = UnimodalBase(self.config)\n # As the in_dim is dynamically calculated we need to copy classifier_config\n classifier_config = deepcopy(self.config.classifier)\n classifier_config.params.in_dim = self.config.text_hidden_size\n self.classifier = build_classifier_layer(classifier_config)\n\n def forward(self, sample_list):\n # BERT Based Encoders\n args = []\n if \"input_ids\" in sample_list:\n text = sample_list.input_ids\n args.append(sample_list.input_mask)\n args.append(sample_list.segment_ids)\n else:\n text = sample_list.text\n\n embedding = self.base(text, *args)\n output = {}\n output[\"scores\"] = self.classifier(embedding)\n\n return output\n\n\[email protected]_model(\"unimodal_image\")\nclass UnimodalModal(BaseModel):\n def __init__(self, config, *args, **kwargs):\n super().__init__(config)\n\n @classmethod\n def config_path(cls):\n return \"configs/models/unimodal/image.yaml\"\n\n def build(self):\n self.base = UnimodalBase(self.config)\n self._is_direct_features_input = self.config.direct_features_input\n num_features = self.config.modal_encoder.params.num_output_features\n\n # As the in_dim is dynamically calculated we need to copy classifier_config\n classifier_config = deepcopy(self.config.classifier)\n classifier_config.params.in_dim = num_features * self.config.modal_hidden_size\n self.classifier = build_classifier_layer(classifier_config)\n\n def forward(self, sample_list):\n # BERT Based Encoders\n args = []\n if self._is_direct_features_input:\n modal = sample_list.image_feature_0\n modal = torch.mean(modal, dim=1)\n else:\n modal = sample_list.image\n\n embedding = self.base(modal, *args)\n output = {}\n output[\"scores\"] = self.classifier(embedding)\n\n return output\n", "import numpy as np\n\nfrom .cphoc import build_phoc as _build_phoc_raw\n\n\n_alphabet = {\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n \"q\",\n \"r\",\n \"s\",\n \"t\",\n \"u\",\n \"v\",\n \"w\",\n \"x\",\n \"y\",\n \"z\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n} # NoQA\n\n\ndef build_phoc(token):\n token = token.lower().strip()\n token = \"\".join([c for c in token if c 
in _alphabet])\n phoc = _build_phoc_raw(token)\n phoc = np.array(phoc, dtype=np.float32)\n return phoc\n", "# Copyright (c) Facebook, Inc. and its affiliates.\nimport os\nimport tempfile\nimport unittest\n\nimport torch\nfrom mmf.datasets.processors.processors import (\n CaptionProcessor,\n EvalAIAnswerProcessor,\n MultiClassFromFile,\n MultiHotAnswerFromVocabProcessor,\n TransformerBboxProcessor,\n)\nfrom mmf.utils.configuration import load_yaml\nfrom omegaconf import OmegaConf\n\nfrom ..test_utils import compare_tensors\n\n\nclass TestDatasetProcessors(unittest.TestCase):\n def _get_config(self, path):\n path = os.path.join(os.path.abspath(__file__), path)\n config = load_yaml(os.path.abspath(path))\n return config\n\n def test_caption_processor(self):\n config = self._get_config(\"../../../mmf/configs/datasets/coco/defaults.yaml\")\n captioning_config = config.dataset_config.coco\n caption_processor_config = captioning_config.processors.caption_processor\n\n vocab_path = os.path.join(\n os.path.abspath(__file__), \"..\", \"..\", \"data\", \"vocab.txt\"\n )\n caption_processor_config.params.vocab.type = \"random\"\n caption_processor_config.params.vocab.vocab_file = os.path.abspath(vocab_path)\n caption_processor = CaptionProcessor(caption_processor_config.params)\n\n tokens = [1, 4, 5, 6, 4, 7, 8, 2, 0, 0, 0]\n caption = caption_processor(tokens)\n\n # Test start, stop, pad are removed\n self.assertNotIn(\"<s>\", caption[\"tokens\"])\n self.assertNotIn(\"</s>\", caption[\"tokens\"])\n self.assertNotIn(\"<pad>\", caption[\"tokens\"])\n\n # Test caption is correct\n self.assertEqual(caption[\"caption\"], \"a man with a red helmet\")\n\n def test_multi_hot_answer_from_vocab_processor(self):\n config = self._get_config(\"../../../mmf/configs/datasets/clevr/defaults.yaml\")\n clevr_config = config.dataset_config.clevr\n answer_processor_config = clevr_config.processors.answer_processor\n\n # Test num_answers==1 case\n vocab_path = os.path.join(\n os.path.abspath(__file__), \"..\", \"..\", \"data\", \"vocab.txt\"\n )\n answer_processor_config.params.vocab_file = os.path.abspath(vocab_path)\n answer_processor = MultiHotAnswerFromVocabProcessor(\n answer_processor_config.params\n )\n processed = answer_processor({\"answers\": [\"helmet\"]})\n answers_indices = processed[\"answers_indices\"]\n answers_scores = processed[\"answers_scores\"]\n\n self.assertTrue(\n compare_tensors(answers_indices, torch.tensor([5] * 10, dtype=torch.long))\n )\n expected_answers_scores = torch.zeros(19, dtype=torch.float)\n expected_answers_scores[5] = 1.0\n self.assertTrue(compare_tensors(answers_scores, expected_answers_scores))\n\n # Test multihot when num answers greater than 1\n answer_processor_config.params.vocab_file = os.path.abspath(vocab_path)\n answer_processor_config.params.num_answers = 3\n answer_processor = MultiHotAnswerFromVocabProcessor(\n answer_processor_config.params\n )\n processed = answer_processor({\"answers\": [\"man\", \"with\", \"countryside\"]})\n answers_indices = processed[\"answers_indices\"]\n answers_scores = processed[\"answers_scores\"]\n self.assertTrue(\n compare_tensors(\n answers_indices,\n torch.tensor([2, 3, 15, 2, 3, 15, 2, 3, 15, 2], dtype=torch.long),\n )\n )\n expected_answers_scores = torch.zeros(19, dtype=torch.float)\n expected_answers_scores[2] = 1.0\n expected_answers_scores[3] = 1.0\n expected_answers_scores[15] = 1.0\n self.assertTrue(compare_tensors(answers_scores, expected_answers_scores))\n\n # Test unk\n processed = answer_processor({\"answers\": [\"test\", 
\"answer\", \"man\"]})\n answers_indices = processed[\"answers_indices\"]\n answers_scores = processed[\"answers_scores\"]\n self.assertTrue(\n compare_tensors(\n answers_indices,\n torch.tensor([0, 0, 2, 0, 0, 2, 0, 0, 2, 0], dtype=torch.long),\n )\n )\n expected_answers_scores = torch.zeros(19, dtype=torch.float)\n expected_answers_scores[2] = 1.0\n self.assertTrue(compare_tensors(answers_scores, expected_answers_scores))\n\n def test_evalai_answer_processor(self):\n evalai_answer_processor = EvalAIAnswerProcessor()\n\n # Test number\n processed = evalai_answer_processor(\"two\")\n expected = \"2\"\n self.assertEqual(processed, expected)\n\n # Test article\n processed = evalai_answer_processor(\"a building\")\n expected = \"building\"\n self.assertEqual(processed, expected)\n\n # Test tokenize\n processed = evalai_answer_processor(\"snow, mountain\")\n expected = \"snow mountain\"\n self.assertEqual(processed, expected)\n\n # Test contractions\n processed = evalai_answer_processor(\"isnt\")\n expected = \"isn't\"\n self.assertEqual(processed, expected)\n\n # Test processor\n processed = evalai_answer_processor(\"the two mountain's \\t \\n \")\n expected = \"2 mountain 's\"\n self.assertEqual(processed, expected)\n\n def test_transformer_bbox_processor(self):\n import numpy as np\n\n config = {\n \"params\": {\n \"bbox_key\": \"bbox\",\n \"image_width_key\": \"image_width\",\n \"image_height_key\": \"image_height\",\n }\n }\n\n bbox_processor = TransformerBboxProcessor(config)\n item = {\n \"bbox\": np.array([[100, 100, 100, 100]]),\n \"image_width\": 100,\n \"image_height\": 100,\n }\n processed_box = bbox_processor(item)[\"bbox\"]\n self.assertTrue(\n torch.equal(\n processed_box, torch.tensor([[1, 1, 1, 1, 0]], dtype=torch.float)\n )\n )\n\n def test_multi_class_from_file(self):\n f = tempfile.NamedTemporaryFile(mode=\"w\", delete=False)\n f.writelines(\"\\n\".join([\"abc\", \"bcd\", \"def\", \"efg\"]))\n f.close()\n config = OmegaConf.create({\"vocab_file\": f.name})\n processor = MultiClassFromFile(config)\n\n output = processor({\"label\": \"abc\"})\n self.assertEqual(output[\"class_index\"], 0)\n output = processor({\"label\": \"efg\"})\n self.assertEqual(output[\"class_index\"], 3)\n output = processor(\"def\")\n self.assertEqual(output[\"class_index\"], 2)\n\n self.assertRaises(AssertionError, processor, {\"label\": \"UNK\"})\n os.unlink(f.name)\n", "# Copyright (c) Facebook, Inc. 
and its affiliates.\n\nimport glob\nimport importlib\nimport logging\nimport os\nimport sys\nimport warnings\n\nimport torch\nfrom mmf.common.registry import registry\nfrom mmf.utils.configuration import get_mmf_env, load_yaml\nfrom mmf.utils.distributed import is_master, synchronize\nfrom mmf.utils.download import download_pretrained_model\nfrom mmf.utils.file_io import PathManager\nfrom mmf.utils.general import updir\nfrom omegaconf import OmegaConf\n\n\ntry:\n import git\nexcept ImportError:\n git = None\n\nlogger = logging.getLogger(__name__)\n\n\ndef _hack_imports():\n # NOTE: This can probably be made universal to support backwards\n # compatibility with name \"pythia\" if needed.\n sys.modules[\"pythia\"] = importlib.import_module(\"mmf\")\n sys.modules[\"pythia.utils.configuration\"] = importlib.import_module(\n \"mmf.utils.configuration\"\n )\n\n\ndef load_pretrained_model(model_name_or_path, *args, **kwargs):\n # If this is a file, then load this directly else download and load\n if PathManager.exists(model_name_or_path):\n download_path = model_name_or_path\n model_name = model_name_or_path\n else:\n download_path = download_pretrained_model(model_name_or_path, *args, **kwargs)\n model_name = model_name_or_path\n\n configs = glob.glob(os.path.join(download_path, \"*.yaml\"))\n assert len(configs) <= 1, (\n \"Multiple yaml files with the pretrained model. \"\n + \"MMF doesn't know what to do.\"\n )\n\n ckpts = []\n allowed_ckpt_types = (\"*.ckpt\", \"*.pth\", \"*.pt\")\n for ckpt_type in allowed_ckpt_types:\n ckpts.extend(glob.glob(os.path.join(download_path, ckpt_type)))\n\n assert (\n len(ckpts) == 1\n ), \"None or multiple checkpoints files. MMF doesn't know what to do.\"\n\n _hack_imports()\n\n with PathManager.open(ckpts[0], \"rb\") as f:\n ckpt = torch.load(f, map_location=lambda storage, loc: storage)\n # If configs are not present, will ckpt provide the config?\n if len(configs) == 0:\n assert \"config\" in ckpt, (\n \"No configs provided with pretrained model \"\n \" while checkpoint also doesn't have configuration.\"\n )\n config = ckpt[\"config\"]\n else:\n config = load_yaml(configs[0])\n\n model_config = config.get(\"model_config\", config)\n ckpt = ckpt.get(\"model\", ckpt)\n # Also handle the case of model_name is path\n model_config = model_config.get(model_name.split(os.path.sep)[-1].split(\".\")[0])\n\n return {\"config\": model_config, \"checkpoint\": ckpt, \"full_config\": config}\n\n\nclass Checkpoint:\n def __init__(self, trainer):\n \"\"\"\n Generates a path for saving model which can also be used for resuming\n from a checkpoint.\n \"\"\"\n self.trainer = trainer\n\n self.config = self.trainer.config\n self.save_dir = get_mmf_env(key=\"save_dir\")\n self.model_name = self.config.model\n\n self.ckpt_foldername = self.save_dir\n\n self.device = registry.get(\"current_device\")\n\n self.ckpt_prefix = \"\"\n\n if hasattr(self.trainer.model, \"get_ckpt_name\"):\n self.ckpt_prefix = self.trainer.model.get_ckpt_name() + \"_\"\n\n self.pth_filepath = os.path.join(\n self.ckpt_foldername, self.ckpt_prefix + self.model_name + \"_final.pth\"\n )\n\n self.models_foldername = os.path.join(self.ckpt_foldername, \"models\")\n if not PathManager.exists(self.models_foldername):\n PathManager.mkdirs(self.models_foldername)\n\n self.save_config()\n\n self.repo_path = updir(os.path.abspath(__file__), n=3)\n self.git_repo = None\n if git and self.config.checkpoint.save_git_details:\n try:\n self.git_repo = git.Repo(self.repo_path)\n except git.exc.InvalidGitRepositoryError:\n 
# Not a git repo, don't do anything\n pass\n\n self.max_to_keep = self.config.checkpoint.max_to_keep\n self.saved_iterations = []\n\n def save_config(self):\n cfg_file = os.path.join(self.ckpt_foldername, \"config.yaml\")\n with PathManager.open(cfg_file, \"w\") as f:\n f.write(self.config.pretty(resolve=True))\n\n def load_state_dict(self):\n ckpt_config = self.config.checkpoint\n\n suffix = \"best.ckpt\" if ckpt_config.resume_best else \"current.ckpt\"\n reverse_suffix = \"best.ckpt\" if not ckpt_config.resume_best else \"current.ckpt\"\n ckpt_filepath = os.path.join(self.ckpt_foldername, self.ckpt_prefix + suffix)\n\n # In case of interrupts and resume, ckpt_config.resume_file would be there\n # But, if the checkpoints are already created in the save dir\n # and resume is true signifying the interrupt resume, we should skip\n # loading the resume file.\n if (\n ckpt_config.resume_file is not None or ckpt_config.resume_zoo is not None\n ) and (not ckpt_config.resume or not PathManager.exists(ckpt_filepath)):\n if ckpt_config.resume_file and PathManager.exists(ckpt_config.resume_file):\n self._load(\n ckpt_config.resume_file,\n load_pretrained=ckpt_config.resume_pretrained,\n )\n return\n # resume_file doesn't exist, try from zoo now\n elif ckpt_config.resume_zoo is not None:\n self._load(\n ckpt_config.resume_zoo,\n load_zoo=True,\n load_pretrained=ckpt_config.resume_pretrained,\n )\n return\n else:\n raise RuntimeError(f\"{ckpt_config.resume_file} doesn't exist\")\n\n if ckpt_config.resume:\n if PathManager.exists(ckpt_filepath):\n self._load(ckpt_filepath)\n else:\n warnings.warn(\n \"Tried to resume but checkpoint filepath {} \"\n \"is not present. Trying {}, otherwise skipping.\".format(\n ckpt_filepath, reverse_suffix\n )\n )\n ckpt_filepath = ckpt_filepath.replace(suffix, reverse_suffix)\n if PathManager.exists(ckpt_filepath):\n self._load(ckpt_filepath)\n\n def _load(self, file, force=False, load_zoo=False, load_pretrained=False):\n ckpt_config = self.config.checkpoint\n logger.info(\"Loading checkpoint\")\n if load_zoo:\n ckpt, should_continue = self._load_from_zoo(file)\n if not should_continue:\n return\n else:\n ckpt = self._torch_load(file)\n\n if \"model\" in ckpt:\n ckpt_model = ckpt[\"model\"]\n else:\n ckpt_model = ckpt\n ckpt = {\"model\": ckpt}\n\n pretrained_state_mapping = ckpt_config.pretrained_state_mapping\n\n if not load_pretrained or force is True:\n pretrained_state_mapping = {}\n\n new_dict = {}\n\n new_dict = self.upgrade_state_dict(ckpt_model)\n\n if len(pretrained_state_mapping.items()) == 0:\n final_dict = new_dict\n\n self.trainer.model.load_state_dict(final_dict, strict=False)\n\n reset_optimizer = ckpt_config.reset.optimizer or ckpt_config.reset.all\n if not reset_optimizer:\n self._load_optimizer(ckpt)\n\n self.trainer.early_stop_callback.early_stopping.init_from_checkpoint(ckpt)\n reset_counts = ckpt_config.reset.all or ckpt_config.reset.counts\n\n if not reset_counts:\n self._load_counts_and_lr_scheduler(ckpt)\n else:\n self._load_pretrained(new_dict)\n\n logger.info(\"Checkpoint loaded.\")\n logger.info(f\"Current num updates: {self.trainer.num_updates}\")\n logger.info(f\"Current iteration: {self.trainer.current_iteration}\")\n logger.info(f\"Current epoch: {self.trainer.current_epoch}\")\n\n def _load_optimizer(self, ckpt):\n if \"optimizer\" in ckpt:\n try:\n self.trainer.optimizer.load_state_dict(ckpt[\"optimizer\"])\n except ValueError:\n logger.info(\n \"Optimizer failed to load. 
Try with \"\n + \"checkpoint.reset.optimizer=True\"\n )\n raise\n else:\n warnings.warn(\n \"'optimizer' key is not present in the \"\n \"checkpoint asked to be loaded. Skipping.\"\n )\n\n def _load_counts_and_lr_scheduler(self, ckpt):\n ckpt_config = self.trainer.config.checkpoint\n if \"best_update\" in ckpt:\n if ckpt_config.resume_best:\n self.trainer.num_updates = ckpt.get(\n \"best_update\", self.trainer.num_updates\n )\n self.trainer.current_iteration = ckpt.get(\n \"best_iteration\", self.trainer.current_iteration\n )\n else:\n self.trainer.num_updates = ckpt.get(\n \"num_updates\", self.trainer.num_updates\n )\n self.trainer.current_iteration = ckpt.get(\n \"current_iteration\", self.trainer.current_iteration\n )\n\n self.trainer.current_epoch = ckpt.get(\n \"current_epoch\", self.trainer.current_epoch\n )\n elif \"best_iteration\" in ckpt:\n # Preserve old behavior for old checkpoints where we always\n # load best iteration\n if ckpt_config.resume_best and \"current_iteration\" in ckpt:\n self.trainer.current_iteration = ckpt[\"current_iteration\"]\n else:\n self.trainer.current_iteration = ckpt.get(\n \"best_iteration\", self.trainer.current_iteration\n )\n\n self.trainer.num_updates = self.trainer.current_iteration\n\n lr_scheduler = self.trainer.lr_scheduler_callback._scheduler\n if lr_scheduler is not None:\n if \"lr_scheduler\" in ckpt:\n lr_scheduler.load_state_dict(ckpt[\"lr_scheduler\"])\n else:\n warnings.warn(\n \"'lr_scheduler' key is not present in the \"\n \"checkpoint asked to be loaded. Setting lr_scheduler's \"\n \"last_epoch to current_iteration.\"\n )\n lr_scheduler.last_epoch = self.trainer.current_iteration\n\n registry.register(\"current_iteration\", self.trainer.current_iteration)\n registry.register(\"num_updates\", self.trainer.num_updates)\n\n self.trainer.current_epoch = ckpt.get(\"best_epoch\", self.trainer.current_epoch)\n registry.register(\"current_epoch\", self.trainer.current_epoch)\n\n def _load_pretrained(self, ckpt):\n model = self.trainer.model\n own_state = model.state_dict()\n mapping = self.trainer.config.checkpoint.pretrained_state_mapping\n for key, value in mapping.items():\n key += \".\"\n value += \".\"\n for attr in ckpt:\n for own_attr in own_state:\n if hasattr(model, \"format_state_key\"):\n formatted_attr = model.format_state_key(attr)\n else:\n formatted_attr = attr\n if (\n key in own_attr\n and value in formatted_attr\n and own_attr.replace(key, \"\")\n == formatted_attr.replace(value, \"\")\n ):\n logger.info(\"Copying \" + own_attr + \" from \" + attr)\n own_state[own_attr].copy_(ckpt[attr])\n logger.info(\"Pretrained model loaded\")\n\n def upgrade_state_dict(self, state_dict):\n data_parallel = registry.get(\"data_parallel\") or registry.get(\"distributed\")\n data_parallel = data_parallel or isinstance(\n self.trainer.model,\n (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel),\n )\n new_dict = {}\n for attr in state_dict:\n new_attr = attr\n\n if not data_parallel and attr.startswith(\"module.\"):\n # In case the ckpt was actually a data parallel model\n # replace first module. 
from dataparallel with empty string\n new_dict[new_attr.replace(\"module.\", \"\", 1)] = state_dict[attr]\n elif data_parallel and not attr.startswith(\"module.\"):\n new_dict[\"module.\" + new_attr] = state_dict[attr]\n else:\n new_dict[new_attr] = state_dict[attr]\n return new_dict\n\n def _load_from_zoo(self, file):\n ckpt_config = self.trainer.config.checkpoint\n zoo_ckpt = load_pretrained_model(file)\n\n # If zoo_config_override, load the model directly using `from_pretrained`\n if ckpt_config.zoo_config_override:\n model_cls = registry.get_model_class(self.trainer.config.model)\n self.trainer.model = model_cls.from_pretrained(ckpt_config.resume_zoo)\n self.trainer.config.model_config = zoo_ckpt[\"full_config\"].model_config\n return None, False\n else:\n return self.upgrade_state_dict(zoo_ckpt[\"checkpoint\"]), True\n\n def _torch_load(self, file):\n # Backwards compatibility to Pythia\n _hack_imports()\n\n with PathManager.open(file, \"rb\") as f:\n if \"cuda\" in str(self.device):\n return torch.load(f, map_location=self.device)\n else:\n return torch.load(f, map_location=lambda storage, loc: storage)\n\n def _get_vcs_fields(self):\n \"\"\"Returns a dict with git fields of the current repository\n\n To reproduce an experiment directly from a checkpoint\n\n 1) Export `config` key as a yaml\n 2) Clone repository and checkout at given commit on given branch\n 3) Any local change (diff) while running the experiment is stored\n in the value with key `git/diff`, output the diff to a `path.diff`\n file and apply the patch to the current state by simply\n\n `patch -p0 < path.diff`\n \"\"\"\n\n return {\n \"git/branch\": self.git_repo.active_branch.name,\n \"git/commit_hash\": self.git_repo.head.commit.name_rev,\n \"git/commit_author\": self.git_repo.head.commit.author.name,\n \"git/commit_message\": self.git_repo.head.commit.message,\n \"git/diff\": self.git_repo.git.diff(\"--no-prefix\"),\n }\n\n def save(self, update, iteration=None, update_best=False):\n # Only save in main process\n if not is_master():\n return\n\n if not iteration:\n iteration = update\n\n ckpt_filepath = os.path.join(self.models_foldername, \"model_%d.ckpt\" % update)\n best_ckpt_filepath = os.path.join(\n self.ckpt_foldername, self.ckpt_prefix + \"best.ckpt\"\n )\n current_ckpt_filepath = os.path.join(\n self.ckpt_foldername, self.ckpt_prefix + \"current.ckpt\"\n )\n\n best_iteration = (\n self.trainer.early_stop_callback.early_stopping.best_monitored_iteration\n )\n best_update = (\n self.trainer.early_stop_callback.early_stopping.best_monitored_update\n )\n best_metric = (\n self.trainer.early_stop_callback.early_stopping.best_monitored_value\n )\n model = self.trainer.model\n data_parallel = registry.get(\"data_parallel\") or registry.get(\"distributed\")\n\n if data_parallel is True:\n model = model.module\n\n ckpt = {\n \"model\": model.state_dict(),\n \"optimizer\": self.trainer.optimizer.state_dict(),\n \"best_iteration\": best_iteration,\n \"current_iteration\": iteration,\n \"current_epoch\": self.trainer.current_epoch,\n \"num_updates\": update,\n \"best_update\": best_update,\n \"best_metric_value\": best_metric,\n # Convert to container to avoid any dependencies\n \"config\": OmegaConf.to_container(self.config, resolve=True),\n }\n\n lr_scheduler = self.trainer.lr_scheduler_callback._scheduler\n if lr_scheduler is not None:\n ckpt[\"lr_scheduler\"] = lr_scheduler.state_dict()\n\n if self.git_repo:\n git_metadata_dict = self._get_vcs_fields()\n ckpt.update(git_metadata_dict)\n\n with 
PathManager.open(ckpt_filepath, \"wb\") as f:\n torch.save(ckpt, f)\n\n if update_best:\n with PathManager.open(best_ckpt_filepath, \"wb\") as f:\n torch.save(ckpt, f)\n\n # Save current always\n with PathManager.open(current_ckpt_filepath, \"wb\") as f:\n torch.save(ckpt, f)\n\n # Remove old checkpoints if max_to_keep is set\n if self.max_to_keep > 0:\n if len(self.saved_iterations) == self.max_to_keep:\n self.remove(self.saved_iterations.pop(0))\n self.saved_iterations.append(update)\n\n def remove(self, update):\n ckpt_filepath = os.path.join(self.models_foldername, \"model_%d.ckpt\" % update)\n if PathManager.isfile(ckpt_filepath):\n PathManager.rm(ckpt_filepath)\n\n def restore(self):\n synchronize()\n logger.info(\"Restoring checkpoint\")\n best_path = os.path.join(self.ckpt_foldername, self.ckpt_prefix + \"best.ckpt\")\n\n if PathManager.exists(best_path):\n self._load(best_path, force=True)\n\n def finalize(self):\n if is_master():\n with PathManager.open(self.pth_filepath, \"wb\") as f:\n torch.save(self.trainer.model.state_dict(), f)\n", "# Copyright (c) Facebook, Inc. and its affiliates.\nimport torch\nfrom mmf.common.registry import registry\nfrom torch import nn\nfrom torch.nn.utils.weight_norm import weight_norm\n\n\nclass VisDialDiscriminator(nn.Module):\n def __init__(self, config, embedding):\n super().__init__()\n self.config = config\n self.embedding = embedding\n\n self.emb_out_dim = embedding.text_out_dim\n self.hidden_dim = self.config.hidden_dim\n\n self.projection_layer = nn.Linear(self.emb_out_dim, self.hidden_dim)\n\n def forward(self, encoder_output, batch):\n answer_options_len = batch[\"answer_options_len\"]\n\n # BATCH_SIZE X DIALOGUES X 100 X SEQ_LEN\n answer_options = batch[\"answer_options\"]\n\n max_seq_len = answer_options.size(-1)\n\n batch_size, ndialogues, noptions, seq_len = answer_options.size()\n\n # (B X D X 100) X SEQ_LEN\n answer_options = answer_options.view(-1, max_seq_len)\n answer_options_len = answer_options_len.view(-1)\n\n # (B x D x 100) x EMB_OUT_DIM\n answer_options = self.embedding(answer_options)\n\n # (B x D x 100) x HIDDEN_DIM\n answer_options = self.projection_layer(answer_options)\n\n # (B x D) x 100 x HIDDEN_DIM\n answer_options = answer_options.view(\n batch_size * ndialogues, noptions, self.hidden_dim\n )\n\n # (B x D) x HIDDEN_DIM => (B x D) x 100 x HIDDEN_DIM\n encoder_output = encoder_output.unsqueeze(1).expand(-1, noptions, -1)\n\n # (B x D) x 100 x HIDDEN_DIM * (B x D) x 100 x HIDDEN_DIM = SAME THING\n # SUM => (B x D) x 100\n scores = torch.sum(answer_options * encoder_output, dim=2)\n\n return scores\n\n\nclass LanguageDecoder(nn.Module):\n def __init__(self, in_dim, out_dim, **kwargs):\n super().__init__()\n\n self.language_lstm = nn.LSTMCell(\n in_dim + kwargs[\"hidden_dim\"], kwargs[\"hidden_dim\"], bias=True\n )\n self.fc = weight_norm(nn.Linear(kwargs[\"hidden_dim\"], out_dim))\n self.dropout = nn.Dropout(p=kwargs[\"dropout\"])\n self.init_weights(kwargs[\"fc_bias_init\"])\n\n def init_weights(self, fc_bias_init):\n self.fc.bias.data.fill_(fc_bias_init)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n\n def forward(self, weighted_attn):\n # Get LSTM state\n state = registry.get(f\"{weighted_attn.device}_lstm_state\")\n h1, c1 = state[\"td_hidden\"]\n h2, c2 = state[\"lm_hidden\"]\n\n # Language LSTM\n h2, c2 = self.language_lstm(torch.cat([weighted_attn, h1], dim=1), (h2, c2))\n predictions = self.fc(self.dropout(h2))\n\n # Update hidden state for t+1\n state[\"lm_hidden\"] = (h2, c2)\n\n return predictions\n", "# 
Copyright (c) Facebook, Inc. and its affiliates.\nimport os\nimport unittest\n\nimport mmf.modules.metrics as metrics\nimport torch\nfrom mmf.common.registry import registry\nfrom mmf.common.sample import Sample\nfrom mmf.datasets.processors import CaptionProcessor\nfrom mmf.utils.configuration import load_yaml\n\n\nclass TestModuleMetrics(unittest.TestCase):\n def test_caption_bleu4(self):\n path = os.path.join(\n os.path.abspath(__file__),\n \"../../../mmf/configs/datasets/coco/defaults.yaml\",\n )\n config = load_yaml(os.path.abspath(path))\n captioning_config = config.dataset_config.coco\n caption_processor_config = captioning_config.processors.caption_processor\n vocab_path = os.path.join(\n os.path.abspath(__file__), \"..\", \"..\", \"data\", \"vocab.txt\"\n )\n caption_processor_config.params.vocab.type = \"random\"\n caption_processor_config.params.vocab.vocab_file = os.path.abspath(vocab_path)\n caption_processor = CaptionProcessor(caption_processor_config.params)\n registry.register(\"coco_caption_processor\", caption_processor)\n\n caption_bleu4 = metrics.CaptionBleu4Metric()\n expected = Sample()\n predicted = dict()\n\n # Test complete match\n expected.answers = torch.empty((5, 5, 10))\n expected.answers.fill_(4)\n predicted[\"scores\"] = torch.zeros((5, 10, 19))\n predicted[\"scores\"][:, :, 4] = 1.0\n\n self.assertEqual(caption_bleu4.calculate(expected, predicted).item(), 1.0)\n\n # Test partial match\n expected.answers = torch.empty((5, 5, 10))\n expected.answers.fill_(4)\n predicted[\"scores\"] = torch.zeros((5, 10, 19))\n predicted[\"scores\"][:, 0:5, 4] = 1.0\n predicted[\"scores\"][:, 5:, 18] = 1.0\n\n self.assertAlmostEqual(\n caption_bleu4.calculate(expected, predicted).item(), 0.3928, 4\n )\n\n def _test_binary_metric(self, metric, value):\n sample = Sample()\n predicted = dict()\n\n sample.targets = torch.tensor(\n [[0, 1], [1, 0], [1, 0], [0, 1]], dtype=torch.float\n )\n predicted[\"scores\"] = torch.tensor(\n [\n [-0.9332, 0.8149],\n [-0.8391, 0.6797],\n [-0.7235, 0.7220],\n [-0.9043, 0.3078],\n ],\n dtype=torch.float,\n )\n self.assertAlmostEqual(metric.calculate(sample, predicted).item(), value, 4)\n\n sample.targets = torch.tensor([1, 0, 0, 1], dtype=torch.long)\n self.assertAlmostEqual(metric.calculate(sample, predicted).item(), value, 4)\n\n def _test_multiclass_metric(self, metric, value):\n sample = Sample()\n predicted = dict()\n\n sample.targets = torch.tensor(\n [[0, 1, 0], [0, 0, 1], [1, 0, 0], [0, 0, 1]], dtype=torch.float\n )\n predicted[\"scores\"] = torch.tensor(\n [\n [-0.9332, 0.8149, 0.3491],\n [-0.8391, 0.6797, -0.3410],\n [-0.7235, 0.7220, 0.9104],\n [0.9043, 0.3078, -0.4210],\n ],\n dtype=torch.float,\n )\n self.assertAlmostEqual(metric.calculate(sample, predicted).item(), value, 4)\n\n sample.targets = torch.tensor([1, 2, 0, 2], dtype=torch.long)\n self.assertAlmostEqual(metric.calculate(sample, predicted).item(), value, 4)\n\n def _test_multilabel_metric(self, metric, value):\n sample = Sample()\n predicted = dict()\n\n sample.targets = torch.tensor(\n [[0, 1, 1], [1, 0, 1], [1, 0, 1], [0, 0, 1]], dtype=torch.float\n )\n predicted[\"scores\"] = torch.tensor(\n [\n [-0.9332, 0.8149, 0.3491],\n [-0.8391, 0.6797, -0.3410],\n [-0.7235, 0.7220, 0.9104],\n [0.9043, 0.3078, -0.4210],\n ],\n dtype=torch.float,\n )\n self.assertAlmostEqual(metric.calculate(sample, predicted).item(), value, 4)\n\n def test_micro_f1(self):\n metric = metrics.MicroF1()\n self._test_binary_metric(metric, 0.5)\n self._test_multiclass_metric(metric, 0.25)\n\n def 
test_macro_f1(self):\n metric = metrics.MacroF1()\n self._test_binary_metric(metric, 0.3333)\n self._test_multiclass_metric(metric, 0.2222)\n\n def test_binary_f1(self):\n metric = metrics.BinaryF1()\n self._test_binary_metric(metric, 0.66666666)\n\n def test_multilabel_micro_f1(self):\n metric = metrics.MultiLabelMicroF1()\n self._test_binary_metric(metric, 0.5)\n\n def test_multilabel_macro_f1(self):\n metric = metrics.MultiLabelMacroF1()\n self._test_multilabel_metric(metric, 0.355555)\n\n def test_macro_roc_auc(self):\n metric = metrics.MacroROC_AUC()\n self._test_binary_metric(metric, 0.5)\n self._test_multiclass_metric(metric, 0.2222)\n\n def test_micro_roc_auc(self):\n metric = metrics.MicroROC_AUC()\n self._test_binary_metric(metric, 0.5)\n self._test_multiclass_metric(metric, 0.34375)\n\n def test_binary_ap(self):\n metric = metrics.BinaryAP()\n self._test_binary_metric(metric, 0.75)\n\n def test_recall_at_precision_k(self):\n metric = metrics.RecallAtPrecisionK(50)\n self._test_binary_metric(metric, 1.0)\n\n metric = metrics.RecallAtPrecisionK(90)\n self._test_binary_metric(metric, 0.5)\n\n metric = metrics.RecallAtPrecisionK(110)\n self._test_binary_metric(metric, 0)\n\n def test_micro_ap(self):\n metric = metrics.MicroAP()\n self._test_binary_metric(metric, 0.642857)\n self._test_multiclass_metric(metric, 0.354166)\n\n def test_macro_ap(self):\n metric = metrics.MacroAP()\n self._test_binary_metric(metric, 0.6666666)\n self._test_multiclass_metric(metric, 0.3888888)\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n\n# install `vqa-maskrcnn-benchmark` from\n# https://github.com/ronghanghu/vqa-maskrcnn-benchmark-m4c\nimport argparse\nimport os\nimport sys\n\nimport cv2\nimport numpy as np\nimport torch\nimport tqdm\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.layers import nms\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.utils.model_serialization import load_state_dict\nfrom PIL import Image\n\n\nsys.path.append(\"/private/home/ronghanghu/workspace/vqa-maskrcnn-benchmark\") # NoQA\n\n\ndef load_detection_model(yaml_file, yaml_ckpt):\n cfg.merge_from_file(yaml_file)\n cfg.freeze()\n\n model = build_detection_model(cfg)\n checkpoint = torch.load(yaml_ckpt, map_location=torch.device(\"cpu\"))\n\n load_state_dict(model, checkpoint.pop(\"model\"))\n\n model.to(\"cuda\")\n model.eval()\n return model\n\n\ndef _image_transform(image_path):\n img = Image.open(image_path)\n im = np.array(img).astype(np.float32)\n # handle a few corner cases\n if im.ndim == 2: # gray => RGB\n im = np.tile(im[:, :, None], (1, 1, 3))\n if im.shape[2] > 3: # RGBA => RGB\n im = im[:, :, :3]\n\n im = im[:, :, ::-1] # RGB => BGR\n im -= np.array([102.9801, 115.9465, 122.7717])\n im_shape = im.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n im_scale = float(800) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > 1333:\n im_scale = float(1333) / float(im_size_max)\n im = cv2.resize(\n im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR\n )\n img = torch.from_numpy(im).permute(2, 0, 1)\n return img, im_scale\n\n\ndef _process_feature_extraction(output, im_scales, feat_name=\"fc6\"):\n batch_size = len(output[0][\"proposals\"])\n n_boxes_per_image = [len(_) for _ in output[0][\"proposals\"]]\n score_list = 
output[0][\"scores\"].split(n_boxes_per_image)\n score_list = [torch.nn.functional.softmax(x, -1) for x in score_list]\n feats = output[0][feat_name].split(n_boxes_per_image)\n cur_device = score_list[0].device\n\n feat_list = []\n bbox_list = []\n\n for i in range(batch_size):\n dets = output[0][\"proposals\"][i].bbox / im_scales[i]\n scores = score_list[i]\n\n max_conf = torch.zeros(scores.shape[0]).to(cur_device)\n\n for cls_ind in range(1, scores.shape[1]):\n cls_scores = scores[:, cls_ind]\n keep = nms(dets, cls_scores, 0.5)\n max_conf[keep] = torch.where(\n cls_scores[keep] > max_conf[keep], cls_scores[keep], max_conf[keep]\n )\n\n keep_boxes = torch.argsort(max_conf, descending=True)[:100]\n feat_list.append(feats[i][keep_boxes])\n bbox_list.append(output[0][\"proposals\"][i].bbox[keep_boxes])\n return feat_list, bbox_list\n\n\ndef extract_features(detection_model, image_path, input_boxes=None, feat_name=\"fc6\"):\n im, im_scale = _image_transform(image_path)\n if input_boxes is not None:\n if isinstance(input_boxes, np.ndarray):\n input_boxes = torch.from_numpy(input_boxes.copy())\n input_boxes *= im_scale\n img_tensor, im_scales = [im], [im_scale]\n current_img_list = to_image_list(img_tensor, size_divisible=32)\n current_img_list = current_img_list.to(\"cuda\")\n with torch.no_grad():\n output = detection_model(current_img_list, input_boxes=input_boxes)\n\n if input_boxes is None:\n feat_list, bbox_list = _process_feature_extraction(output, im_scales, feat_name)\n feat = feat_list[0].cpu().numpy()\n bbox = bbox_list[0].cpu().numpy() / im_scale\n else:\n feat = output[0][feat_name].cpu().numpy()\n bbox = output[0][\"proposals\"][0].bbox.cpu().numpy() / im_scale\n\n return feat, bbox\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--detection_cfg\",\n type=str,\n default=\"/private/home/ronghanghu/workspace/pythia/data/\"\n + \"frcn_feature_extraction/detectron_model.yaml\",\n help=\"Detectron config file; download it from \"\n + \"https://dl.fbaipublicfiles.com/pythia/detectron_model/\"\n + \"detectron_model.yaml\",\n )\n parser.add_argument(\n \"--detection_model\",\n type=str,\n default=\"/private/home/ronghanghu/workspace/pythia/data/\"\n + \"frcn_feature_extraction/detectron_model.pth\",\n help=\"Detectron model file; download it\"\n + \" from https://dl.fbaipublicfiles.com/pythia/detectron_model/\"\n + \"detectron_model.pth\",\n )\n parser.add_argument(\n \"--imdb_file\",\n type=str,\n default=\"/private/home/ronghanghu/workspace/pythia/data/\"\n + \"imdb/m4c_textvqa/imdb_train_ocr_en.npy\",\n help=\"The imdb to extract features\",\n )\n parser.add_argument(\n \"--image_dir\",\n type=str,\n default=\"/private/home/ronghanghu/workspace/DATASETS/TextVQA\",\n help=\"The directory containing images\",\n )\n parser.add_argument(\n \"--save_dir\",\n type=str,\n default=\"/private/home/ronghanghu/workspace/pythia/data/\"\n + \"m4c_textvqa_ocr_en_frcn_features_2/train_images\",\n help=\"The directory to save extracted features\",\n )\n args = parser.parse_args()\n\n DETECTION_YAML = args.detection_cfg\n DETECTION_CKPT = args.detection_model\n IMDB_FILE = args.imdb_file\n IMAGE_DIR = args.image_dir\n SAVE_DIR = args.save_dir\n\n imdb = np.load(IMDB_FILE, allow_pickle=True)[1:]\n # keep only one entry per image_id\n image_id2info = {info[\"image_id\"]: info for info in imdb}\n imdb = list(image_id2info[k] for k in sorted(image_id2info))\n\n detection_model = load_detection_model(DETECTION_YAML, DETECTION_CKPT)\n print(\"Faster R-CNN OCR features\")\n 
print(\"\\textracting from\", IMDB_FILE)\n print(\"\\tsaving to\", SAVE_DIR)\n for _, info in enumerate(tqdm.tqdm(imdb)):\n image_path = os.path.join(IMAGE_DIR, info[\"image_path\"])\n save_feat_path = os.path.join(SAVE_DIR, info[\"feature_path\"])\n save_info_path = save_feat_path.replace(\".npy\", \"_info.npy\")\n os.makedirs(os.path.dirname(save_feat_path), exist_ok=True)\n\n w = info[\"image_width\"]\n h = info[\"image_height\"]\n ocr_normalized_boxes = np.array(info[\"ocr_normalized_boxes\"])\n ocr_boxes = ocr_normalized_boxes.reshape(-1, 4) * [w, h, w, h]\n ocr_tokens = info[\"ocr_tokens\"]\n if len(ocr_boxes) > 0:\n extracted_feat, _ = extract_features(\n detection_model, image_path, input_boxes=ocr_boxes\n )\n else:\n extracted_feat = np.zeros((0, 2048), np.float32)\n\n np.save(save_info_path, {\"ocr_boxes\": ocr_boxes, \"ocr_tokens\": ocr_tokens})\n np.save(save_feat_path, extracted_feat)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.mean", "torch.is_tensor", "torch.flatten" ], [ "numpy.array" ], [ "torch.tensor", "numpy.array", "torch.zeros" ], [ "torch.save", "torch.load" ], [ "torch.nn.Dropout", "torch.cat", "torch.sum", "torch.nn.LSTMCell", "torch.nn.Linear" ], [ "torch.tensor", "torch.empty", "torch.zeros" ], [ "torch.nn.functional.softmax", "numpy.min", "torch.zeros", "numpy.tile", "numpy.save", "torch.from_numpy", "numpy.round", "numpy.max", "torch.no_grad", "torch.where", "torch.device", "numpy.load", "torch.argsort", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rlleshi/mmaction2
[ "6993693f178b1a59e5eb07f1a3db484d5e5de61a", "6993693f178b1a59e5eb07f1a3db484d5e5de61a" ]
[ "mmaction/models/common/transformer.py", "tests/test_data/test_pipelines/test_loadings/test_sampling.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom einops import rearrange\nfrom mmcv.cnn import build_norm_layer, constant_init\nfrom mmcv.cnn.bricks.registry import ATTENTION, FEEDFORWARD_NETWORK\nfrom mmcv.cnn.bricks.transformer import FFN, build_dropout\nfrom mmcv.runner.base_module import BaseModule\nfrom mmcv.utils import digit_version\n\n\[email protected]_module()\nclass DividedTemporalAttentionWithNorm(BaseModule):\n \"\"\"Temporal Attention in Divided Space Time Attention.\n\n Args:\n embed_dims (int): Dimensions of embedding.\n num_heads (int): Number of parallel attention heads in\n TransformerCoder.\n num_frames (int): Number of frames in the video.\n attn_drop (float): A Dropout layer on attn_output_weights. Defaults to\n 0..\n proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.\n Defaults to 0..\n dropout_layer (dict): The dropout_layer used when adding the shortcut.\n Defaults to `dict(type='DropPath', drop_prob=0.1)`.\n norm_cfg (dict): Config dict for normalization layer. Defaults to\n `dict(type='LN')`.\n init_cfg (dict | None): The Config for initialization. Defaults to\n None.\n \"\"\"\n\n def __init__(self,\n embed_dims,\n num_heads,\n num_frames,\n attn_drop=0.,\n proj_drop=0.,\n dropout_layer=dict(type='DropPath', drop_prob=0.1),\n norm_cfg=dict(type='LN'),\n init_cfg=None,\n **kwargs):\n super().__init__(init_cfg)\n self.embed_dims = embed_dims\n self.num_heads = num_heads\n self.num_frames = num_frames\n self.norm = build_norm_layer(norm_cfg, self.embed_dims)[1]\n\n if digit_version(torch.__version__) < digit_version('1.9.0'):\n kwargs.pop('batch_first', None)\n self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop,\n **kwargs)\n self.proj_drop = nn.Dropout(proj_drop)\n self.dropout_layer = build_dropout(\n dropout_layer) if dropout_layer else nn.Identity()\n self.temporal_fc = nn.Linear(self.embed_dims, self.embed_dims)\n\n self.init_weights()\n\n def init_weights(self):\n constant_init(self.temporal_fc, val=0, bias=0)\n\n def forward(self, query, key=None, value=None, residual=None, **kwargs):\n assert residual is None, (\n 'Always adding the shortcut in the forward function')\n\n init_cls_token = query[:, 0, :].unsqueeze(1)\n identity = query_t = query[:, 1:, :]\n\n # query_t [batch_size, num_patches * num_frames, embed_dims]\n b, pt, m = query_t.size()\n p, t = pt // self.num_frames, self.num_frames\n\n # res_temporal [batch_size * num_patches, num_frames, embed_dims]\n query_t = self.norm(query_t.reshape(b * p, t, m)).permute(1, 0, 2)\n res_temporal = self.attn(query_t, query_t, query_t)[0].permute(1, 0, 2)\n res_temporal = self.dropout_layer(\n self.proj_drop(res_temporal.contiguous()))\n res_temporal = self.temporal_fc(res_temporal)\n\n # res_temporal [batch_size, num_patches * num_frames, embed_dims]\n res_temporal = res_temporal.reshape(b, p * t, m)\n\n # ret_value [batch_size, num_patches * num_frames + 1, embed_dims]\n new_query_t = identity + res_temporal\n new_query = torch.cat((init_cls_token, new_query_t), 1)\n return new_query\n\n\[email protected]_module()\nclass DividedSpatialAttentionWithNorm(BaseModule):\n \"\"\"Spatial Attention in Divided Space Time Attention.\n\n Args:\n embed_dims (int): Dimensions of embedding.\n num_heads (int): Number of parallel attention heads in\n TransformerCoder.\n num_frames (int): Number of frames in the video.\n attn_drop (float): A Dropout layer on attn_output_weights. 
Defaults to\n 0..\n proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.\n Defaults to 0..\n dropout_layer (dict): The dropout_layer used when adding the shortcut.\n Defaults to `dict(type='DropPath', drop_prob=0.1)`.\n norm_cfg (dict): Config dict for normalization layer. Defaults to\n `dict(type='LN')`.\n init_cfg (dict | None): The Config for initialization. Defaults to\n None.\n \"\"\"\n\n def __init__(self,\n embed_dims,\n num_heads,\n num_frames,\n attn_drop=0.,\n proj_drop=0.,\n dropout_layer=dict(type='DropPath', drop_prob=0.1),\n norm_cfg=dict(type='LN'),\n init_cfg=None,\n **kwargs):\n super().__init__(init_cfg)\n self.embed_dims = embed_dims\n self.num_heads = num_heads\n self.num_frames = num_frames\n self.norm = build_norm_layer(norm_cfg, self.embed_dims)[1]\n if digit_version(torch.__version__) < digit_version('1.9.0'):\n kwargs.pop('batch_first', None)\n self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop,\n **kwargs)\n self.proj_drop = nn.Dropout(proj_drop)\n self.dropout_layer = build_dropout(\n dropout_layer) if dropout_layer else nn.Identity()\n\n self.init_weights()\n\n def init_weights(self):\n # init DividedSpatialAttentionWithNorm by default\n pass\n\n def forward(self, query, key=None, value=None, residual=None, **kwargs):\n assert residual is None, (\n 'Always adding the shortcut in the forward function')\n\n identity = query\n init_cls_token = query[:, 0, :].unsqueeze(1)\n query_s = query[:, 1:, :]\n\n # query_s [batch_size, num_patches * num_frames, embed_dims]\n b, pt, m = query_s.size()\n p, t = pt // self.num_frames, self.num_frames\n\n # cls_token [batch_size * num_frames, 1, embed_dims]\n cls_token = init_cls_token.repeat(1, t, 1).reshape(b * t,\n m).unsqueeze(1)\n\n # query_s [batch_size * num_frames, num_patches + 1, embed_dims]\n query_s = rearrange(query_s, 'b (p t) m -> (b t) p m', p=p, t=t)\n query_s = torch.cat((cls_token, query_s), 1)\n\n # res_spatial [batch_size * num_frames, num_patches + 1, embed_dims]\n query_s = self.norm(query_s).permute(1, 0, 2)\n res_spatial = self.attn(query_s, query_s, query_s)[0].permute(1, 0, 2)\n res_spatial = self.dropout_layer(\n self.proj_drop(res_spatial.contiguous()))\n\n # cls_token [batch_size, 1, embed_dims]\n cls_token = res_spatial[:, 0, :].reshape(b, t, m)\n cls_token = torch.mean(cls_token, 1, True)\n\n # res_spatial [batch_size * num_frames, num_patches + 1, embed_dims]\n res_spatial = rearrange(\n res_spatial[:, 1:, :], '(b t) p m -> b (p t) m', p=p, t=t)\n res_spatial = torch.cat((cls_token, res_spatial), 1)\n\n new_query = identity + res_spatial\n return new_query\n\n\n@FEEDFORWARD_NETWORK.register_module()\nclass FFNWithNorm(FFN):\n \"\"\"FFN with pre normalization layer.\n\n FFNWithNorm is implemented to be compatible with `BaseTransformerLayer`\n when using `DividedTemporalAttentionWithNorm` and\n `DividedSpatialAttentionWithNorm`.\n\n FFNWithNorm has one main difference with FFN:\n\n - It apply one normalization layer before forwarding the input data to\n feed-forward networks.\n\n Args:\n embed_dims (int): Dimensions of embedding. Defaults to 256.\n feedforward_channels (int): Hidden dimension of FFNs. Defaults to 1024.\n num_fcs (int, optional): Number of fully-connected layers in FFNs.\n Defaults to 2.\n act_cfg (dict): Config for activate layers.\n Defaults to `dict(type='ReLU')`\n ffn_drop (float, optional): Probability of an element to be\n zeroed in FFN. Defaults to 0..\n add_residual (bool, optional): Whether to add the\n residual connection. 
Defaults to `True`.\n dropout_layer (dict | None): The dropout_layer used when adding the\n shortcut. Defaults to None.\n init_cfg (dict): The Config for initialization. Defaults to None.\n norm_cfg (dict): Config dict for normalization layer. Defaults to\n `dict(type='LN')`.\n \"\"\"\n\n def __init__(self, *args, norm_cfg=dict(type='LN'), **kwargs):\n super().__init__(*args, **kwargs)\n self.norm = build_norm_layer(norm_cfg, self.embed_dims)[1]\n\n def forward(self, x, residual=None):\n assert residual is None, ('Cannot apply pre-norm with FFNWithNorm')\n return super().forward(self.norm(x), x)\n", "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport numpy as np\nimport pytest\nfrom mmcv.utils import assert_dict_has_keys\nfrom numpy.testing import assert_array_equal\n\nfrom mmaction.datasets.pipelines import (AudioFeatureSelector,\n DenseSampleFrames, SampleAVAFrames,\n SampleFrames, SampleProposalFrames,\n UntrimmedSampleFrames)\nfrom .base import BaseTestLoading\n\n\nclass TestSampling(BaseTestLoading):\n\n def test_sample_frames(self):\n target_keys = [\n 'frame_inds', 'clip_len', 'frame_interval', 'num_clips',\n 'total_frames'\n ]\n\n with pytest.warns(UserWarning):\n # start_index has been deprecated\n config = dict(\n clip_len=3, frame_interval=1, num_clips=5, start_index=1)\n SampleFrames(**config)\n\n # Sample Frame with tail Frames\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=3, frame_interval=1, num_clips=5, keep_tail_frames=True)\n sample_frames = SampleFrames(**config)\n sample_frames(video_result)\n sample_frames(frame_result)\n\n # Sample Frame with no temporal_jitter\n # clip_len=3, frame_interval=1, num_clips=5\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=3, frame_interval=1, num_clips=5, temporal_jitter=False)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 15\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 15\n assert np.max(sample_frames_results['frame_inds']) <= 5\n assert np.min(sample_frames_results['frame_inds']) >= 1\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={3}, '\n f'frame_interval={1}, '\n f'num_clips={5}, '\n f'temporal_jitter={False}, '\n f'twice_sample={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={False})')\n\n # Sample Frame with no temporal_jitter\n # clip_len=5, frame_interval=1, num_clips=5,\n # out_of_bound_opt='repeat_last'\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=5,\n frame_interval=1,\n num_clips=5,\n temporal_jitter=False,\n out_of_bound_opt='repeat_last')\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={5}, '\n f'frame_interval={1}, '\n f'num_clips={5}, '\n f'temporal_jitter={False}, '\n f'twice_sample={False}, '\n f'out_of_bound_opt=repeat_last, '\n f'test_mode={False})')\n\n def check_monotonous(arr):\n length = arr.shape[0]\n for i in range(length - 1):\n if arr[i] > arr[i + 1]:\n return False\n return True\n\n assert assert_dict_has_keys(sample_frames_results, 
target_keys)\n assert len(sample_frames_results['frame_inds']) == 25\n frame_inds = sample_frames_results['frame_inds'].reshape([5, 5])\n for i in range(5):\n assert check_monotonous(frame_inds[i])\n\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 25\n frame_inds = sample_frames_results['frame_inds'].reshape([5, 5])\n for i in range(5):\n assert check_monotonous(frame_inds[i])\n assert np.max(sample_frames_results['frame_inds']) <= 5\n assert np.min(sample_frames_results['frame_inds']) >= 1\n\n # Sample Frame with temporal_jitter\n # clip_len=4, frame_interval=2, num_clips=5\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4, frame_interval=2, num_clips=5, temporal_jitter=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 20\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 20\n assert np.max(sample_frames_results['frame_inds']) <= 5\n assert np.min(sample_frames_results['frame_inds']) >= 1\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={4}, '\n f'frame_interval={2}, '\n f'num_clips={5}, '\n f'temporal_jitter={True}, '\n f'twice_sample={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={False})')\n\n # Sample Frame with no temporal_jitter in test mode\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n temporal_jitter=False,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 24\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 24\n assert np.max(sample_frames_results['frame_inds']) <= 5\n assert np.min(sample_frames_results['frame_inds']) >= 1\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={4}, '\n f'frame_interval={1}, '\n f'num_clips={6}, '\n f'temporal_jitter={False}, '\n f'twice_sample={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={True})')\n\n # Sample Frame with no temporal_jitter in test mode\n # clip_len=3, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=3,\n frame_interval=1,\n num_clips=6,\n temporal_jitter=False,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 18\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 18\n assert np.max(sample_frames_results['frame_inds']) <= 5\n assert np.min(sample_frames_results['frame_inds']) >= 1\n\n # Sample Frame with no temporal_jitter to get clip_offsets\n # clip_len=1, frame_interval=1, num_clips=8\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 6\n config = dict(\n 
clip_len=1,\n frame_interval=1,\n num_clips=8,\n temporal_jitter=False,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 8\n assert_array_equal(sample_frames_results['frame_inds'],\n np.array([1, 2, 2, 3, 4, 5, 5, 6]))\n\n # Sample Frame with no temporal_jitter to get clip_offsets\n # clip_len=1, frame_interval=1, num_clips=8\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 6\n config = dict(\n clip_len=1,\n frame_interval=1,\n num_clips=8,\n temporal_jitter=False,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert sample_frames_results['start_index'] == 0\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 8\n assert_array_equal(sample_frames_results['frame_inds'],\n np.array([1, 2, 2, 3, 4, 5, 5, 6]))\n\n # Sample Frame with no temporal_jitter to get clip_offsets zero\n # clip_len=6, frame_interval=1, num_clips=1\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 5\n config = dict(\n clip_len=6,\n frame_interval=1,\n num_clips=1,\n temporal_jitter=False,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert sample_frames_results['start_index'] == 0\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 6\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 6\n assert_array_equal(sample_frames_results['frame_inds'],\n [1, 2, 3, 4, 5, 1])\n\n # Sample Frame with no temporal_jitter to get avg_interval <= 0\n # clip_len=12, frame_interval=1, num_clips=20\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 30\n config = dict(\n clip_len=12,\n frame_interval=1,\n num_clips=20,\n temporal_jitter=False,\n test_mode=False)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert sample_frames_results['start_index'] == 0\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 240\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 240\n assert np.max(sample_frames_results['frame_inds']) <= 30\n assert np.min(sample_frames_results['frame_inds']) >= 1\n\n # Sample Frame with no temporal_jitter to get clip_offsets\n # clip_len=1, frame_interval=1, num_clips=8\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 6\n config = dict(\n clip_len=1,\n frame_interval=1,\n num_clips=8,\n temporal_jitter=False,\n test_mode=False)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert 
sample_frames_results['start_index'] == 0\n assert len(sample_frames_results['frame_inds']) == 8\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 8\n assert_array_equal(sample_frames_results['frame_inds'],\n np.array([1, 2, 3, 3, 4, 5, 5, 6]))\n\n # Sample Frame with no temporal_jitter to get clip_offsets zero\n # clip_len=12, frame_interval=1, num_clips=2\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 10\n config = dict(\n clip_len=12,\n frame_interval=1,\n num_clips=2,\n temporal_jitter=False,\n test_mode=False)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert sample_frames_results['start_index'] == 0\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 24\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 24\n assert np.max(sample_frames_results['frame_inds']) <= 10\n assert np.min(sample_frames_results['frame_inds']) >= 1\n\n # Sample Frame using twice sample\n # clip_len=12, frame_interval=1, num_clips=2\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n frame_result['total_frames'] = 40\n config = dict(\n clip_len=12,\n frame_interval=1,\n num_clips=2,\n temporal_jitter=False,\n twice_sample=True,\n test_mode=True)\n sample_frames = SampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert sample_frames_results['start_index'] == 0\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 48\n sample_frames_results = sample_frames(frame_result)\n assert len(sample_frames_results['frame_inds']) == 48\n assert np.max(sample_frames_results['frame_inds']) <= 40\n assert np.min(sample_frames_results['frame_inds']) >= 1\n\n def test_dense_sample_frames(self):\n target_keys = [\n 'frame_inds', 'clip_len', 'frame_interval', 'num_clips',\n 'total_frames'\n ]\n\n # Dense sample with no temporal_jitter in test mode\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n temporal_jitter=False,\n test_mode=True)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert assert_dict_has_keys(dense_sample_frames_results, target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 240\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 240\n assert repr(dense_sample_frames) == (\n f'{dense_sample_frames.__class__.__name__}('\n f'clip_len={4}, '\n f'frame_interval={1}, '\n f'num_clips={6}, '\n f'sample_range={64}, '\n f'num_sample_positions={10}, '\n f'temporal_jitter={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={True})')\n\n # Dense sample with no temporal_jitter\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4, frame_interval=1, num_clips=6, temporal_jitter=False)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results 
= dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert assert_dict_has_keys(dense_sample_frames_results, target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n\n # Dense sample with no temporal_jitter, sample_range=32 in test mode\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n sample_range=32,\n temporal_jitter=False,\n test_mode=True)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert assert_dict_has_keys(dense_sample_frames_results, target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 240\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 240\n\n # Dense sample with no temporal_jitter, sample_range=32\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n sample_range=32,\n temporal_jitter=False)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert assert_dict_has_keys(dense_sample_frames_results, target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n assert repr(dense_sample_frames) == (\n f'{dense_sample_frames.__class__.__name__}('\n f'clip_len={4}, '\n f'frame_interval={1}, '\n f'num_clips={6}, '\n f'sample_range={32}, '\n f'num_sample_positions={10}, '\n f'temporal_jitter={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={False})')\n\n # Dense sample with no temporal_jitter, sample_range=1000 to check mod\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n sample_range=1000,\n temporal_jitter=False)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert assert_dict_has_keys(dense_sample_frames_results, target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 24\n\n # Dense sample with no temporal_jitter in test mode\n # sample_range=32, num_sample_positions=5\n # clip_len=4, frame_interval=1, num_clips=6\n video_result = copy.deepcopy(self.video_results)\n frame_result = copy.deepcopy(self.frame_results)\n config = dict(\n clip_len=4,\n frame_interval=1,\n num_clips=6,\n num_sample_positions=5,\n sample_range=32,\n temporal_jitter=False,\n test_mode=True)\n dense_sample_frames = DenseSampleFrames(**config)\n dense_sample_frames_results = dense_sample_frames(video_result)\n assert dense_sample_frames_results['start_index'] == 0\n assert 
assert_dict_has_keys(dense_sample_frames_results, target_keys)\n assert len(dense_sample_frames_results['frame_inds']) == 120\n dense_sample_frames_results = dense_sample_frames(frame_result)\n assert len(dense_sample_frames_results['frame_inds']) == 120\n assert repr(dense_sample_frames) == (\n f'{dense_sample_frames.__class__.__name__}('\n f'clip_len={4}, '\n f'frame_interval={1}, '\n f'num_clips={6}, '\n f'sample_range={32}, '\n f'num_sample_positions={5}, '\n f'temporal_jitter={False}, '\n f'out_of_bound_opt=loop, '\n f'test_mode={True})')\n\n def test_untrim_sample_frames(self):\n\n target_keys = [\n 'frame_inds', 'clip_len', 'frame_interval', 'num_clips',\n 'total_frames'\n ]\n\n frame_result = dict(\n frame_dir=None,\n total_frames=100,\n filename_tmpl=None,\n modality='RGB',\n start_index=0,\n label=1)\n video_result = copy.deepcopy(self.video_results)\n\n config = dict(clip_len=1, frame_interval=16, start_index=0)\n sample_frames = UntrimmedSampleFrames(**config)\n sample_frames_results = sample_frames(frame_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 6\n assert_array_equal(sample_frames_results['frame_inds'],\n np.array([8, 24, 40, 56, 72, 88]))\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'frame_interval={16})')\n\n config = dict(clip_len=1, frame_interval=16, start_index=0)\n sample_frames = UntrimmedSampleFrames(**config)\n sample_frames_results = sample_frames(video_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n frame_inds = np.array(list(range(8, 300, 16)))\n assert len(sample_frames_results['frame_inds']) == frame_inds.shape[0]\n assert_array_equal(sample_frames_results['frame_inds'], frame_inds)\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'frame_interval={16})')\n\n config = dict(clip_len=1, frame_interval=16)\n sample_frames = UntrimmedSampleFrames(**config)\n frame_result_ = copy.deepcopy(frame_result)\n frame_result_['start_index'] = 1\n sample_frames_results = sample_frames(frame_result_)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 6\n assert_array_equal(sample_frames_results['frame_inds'],\n np.array([8, 24, 40, 56, 72, 88]) + 1)\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'frame_interval={16})')\n\n config = dict(clip_len=3, frame_interval=16, start_index=0)\n sample_frames = UntrimmedSampleFrames(**config)\n sample_frames_results = sample_frames(frame_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 18\n assert_array_equal(\n sample_frames_results['frame_inds'],\n np.array([\n 7, 8, 9, 23, 24, 25, 39, 40, 41, 55, 56, 57, 71, 72, 73, 87,\n 88, 89\n ]))\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={3}, '\n f'frame_interval={16})')\n\n def test_sample_ava_frames(self):\n target_keys = [\n 'fps', 'timestamp', 'timestamp_start', 'shot_info', 'frame_inds',\n 'clip_len', 'frame_interval'\n ]\n config = dict(clip_len=32, frame_interval=2)\n sample_ava_dataset = SampleAVAFrames(**config)\n ava_result = sample_ava_dataset(results=self.ava_results)\n assert assert_dict_has_keys(ava_result, target_keys)\n assert ava_result['clip_len'] == 32\n assert ava_result['frame_interval'] == 2\n assert len(ava_result['frame_inds']) == 32\n 
assert repr(sample_ava_dataset) == (\n f'{sample_ava_dataset.__class__.__name__}('\n f'clip_len={32}, '\n f'frame_interval={2}, '\n f'test_mode={False})')\n\n # add test case in Issue #306\n config = dict(clip_len=8, frame_interval=8)\n sample_ava_dataset = SampleAVAFrames(**config)\n ava_result = sample_ava_dataset(results=self.ava_results)\n assert assert_dict_has_keys(ava_result, target_keys)\n assert ava_result['clip_len'] == 8\n assert ava_result['frame_interval'] == 8\n assert len(ava_result['frame_inds']) == 8\n assert repr(sample_ava_dataset) == (\n f'{sample_ava_dataset.__class__.__name__}('\n f'clip_len={8}, '\n f'frame_interval={8}, '\n f'test_mode={False})')\n\n def test_sample_proposal_frames(self):\n target_keys = [\n 'frame_inds', 'clip_len', 'frame_interval', 'num_clips',\n 'total_frames', 'start_index'\n ]\n\n # test error cases\n with pytest.raises(TypeError):\n proposal_result = copy.deepcopy(self.proposal_results)\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=('error', 'error'),\n aug_ratio=0.5,\n temporal_jitter=False)\n sample_frames = SampleProposalFrames(**config)\n sample_frames(proposal_result)\n\n # test normal cases\n # Sample Frame with no temporal_jitter\n # clip_len=1, frame_interval=1\n # body_segments=2, aug_segments=(1, 1)\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['total_frames'] = 9\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=(1, 1),\n aug_ratio=0.5,\n temporal_jitter=False)\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={2}, '\n f'aug_segments={(1, 1)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={6}, '\n f'temporal_jitter={False}, '\n f'mode=train)')\n\n # Sample Frame with temporal_jitter\n # clip_len=1, frame_interval=1\n # body_segments=2, aug_segments=(1, 1)\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['total_frames'] = 9\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=(1, 1),\n aug_ratio=0.5,\n temporal_jitter=True)\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={2}, '\n f'aug_segments={(1, 1)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={6}, '\n f'temporal_jitter={True}, '\n f'mode=train)')\n\n # Sample Frame with no temporal_jitter in val mode\n # clip_len=1, frame_interval=1\n # body_segments=2, aug_segments=(1, 1)\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['total_frames'] = 9\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=(1, 1),\n aug_ratio=0.5,\n temporal_jitter=False,\n mode='val')\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n assert repr(sample_frames) == 
(f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={2}, '\n f'aug_segments={(1, 1)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={6}, '\n f'temporal_jitter={False}, '\n f'mode=val)')\n\n # Sample Frame with no temporal_jitter in test mode\n # test_interval=2\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['out_proposals'] = None\n proposal_result['total_frames'] = 10\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=(1, 1),\n aug_ratio=0.5,\n test_interval=2,\n temporal_jitter=False,\n mode='test')\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 5\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={2}, '\n f'aug_segments={(1, 1)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={2}, '\n f'temporal_jitter={False}, '\n f'mode=test)')\n\n # Sample Frame with no temporal_jitter to get clip_offsets zero\n # clip_len=1, frame_interval=1\n # body_segments=2, aug_segments=(1, 1)\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['total_frames'] = 3\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=2,\n aug_segments=(1, 1),\n aug_ratio=0.5,\n temporal_jitter=False)\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 8\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={2}, '\n f'aug_segments={(1, 1)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={6}, '\n f'temporal_jitter={False}, '\n f'mode=train)')\n\n # Sample Frame with no temporal_jitter to\n # get clip_offsets zero in val mode\n # clip_len=1, frame_interval=1\n # body_segments=4, aug_segments=(2, 2)\n proposal_result = copy.deepcopy(self.proposal_results)\n proposal_result['total_frames'] = 3\n config = dict(\n clip_len=1,\n frame_interval=1,\n body_segments=4,\n aug_segments=(2, 2),\n aug_ratio=0.5,\n temporal_jitter=False,\n mode='val')\n sample_frames = SampleProposalFrames(**config)\n sample_frames_results = sample_frames(proposal_result)\n assert assert_dict_has_keys(sample_frames_results, target_keys)\n assert len(sample_frames_results['frame_inds']) == 16\n assert repr(sample_frames) == (f'{sample_frames.__class__.__name__}('\n f'clip_len={1}, '\n f'body_segments={4}, '\n f'aug_segments={(2, 2)}, '\n f'aug_ratio={(0.5, 0.5)}, '\n f'frame_interval={1}, '\n f'test_interval={6}, '\n f'temporal_jitter={False}, '\n f'mode=val)')\n\n def test_audio_feature_selector(self):\n target_keys = ['audios']\n # test frame selector with 2 dim input\n inputs = copy.deepcopy(self.audio_feature_results)\n inputs['frame_inds'] = np.arange(0, self.audio_total_frames,\n 2)[:, np.newaxis]\n inputs['num_clips'] = 1\n inputs['length'] = 1280\n audio_feature_selector = AudioFeatureSelector()\n results = audio_feature_selector(inputs)\n assert assert_dict_has_keys(results, target_keys)\n assert repr(audio_feature_selector) == (\n f'{audio_feature_selector.__class__.__name__}('\n f'fix_length={128})')\n" ]
[ [ "torch.mean", "torch.nn.Dropout", "torch.nn.MultiheadAttention", "torch.cat", "torch.nn.Linear", "torch.nn.Identity" ], [ "numpy.min", "numpy.arange", "numpy.testing.assert_array_equal", "numpy.max", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mingy2018/SwitchVAE
[ "cf9c06ce3af50a559d79b9cba14851472e43a70b", "cf9c06ce3af50a559d79b9cba14851472e43a70b" ]
[ "utils/metrics.py", "analyse/generate_latent.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\n\ndef evaluate_voxel_prediction(predictions, gt, threshold=1):\n \"\"\"\n Calculate metrics based on the output of model\n Args:\n predictions: the ouput of voxel decoder\n gt: the ground truth of object\n Returns:\n \"\"\"\n predtions_occupy = predictions >= threshold\n gt = gt >= 1\n inverse_gt = gt < 1\n inverse_predtions_occupy = predictions <1\n\n intersection = np.sum(np.logical_and(predtions_occupy, gt)) # true positive\n inverse_intersection = np.sum(np.logical_and(inverse_predtions_occupy, inverse_gt)) # true negative\n union = np.sum(np.logical_or(predtions_occupy, gt)) #\n num_fp = np.sum(np.logical_and(predtions_occupy, inverse_gt)) # false positive\n num_fn = np.sum(np.logical_and(np.logical_not(predtions_occupy), gt)) # false negative\n\n precision = intersection / (intersection + num_fp)\n IoU = intersection / union\n recall = intersection / (intersection + num_fn)\n accuracy = (intersection + inverse_intersection) / (intersection+ inverse_intersection + num_fp + num_fn)\n\n return precision, IoU, recall, accuracy\n\n\ndef get_precision(y_true, y_pred):\n \"\"\"\n Calculate metrics in the training process\n \"\"\"\n ones = tf.ones_like(y_true)\n zero = tf.zeros_like(y_true)\n\n y_pred = tf.where(y_pred > 0, ones, zero)\n inverse_y_ture = tf.where(y_true > 0, zero, ones)\n\n y_pred = tf.cast(y_pred, dtype=tf.bool)\n y_true = tf.cast(y_true, dtype=tf.bool)\n inverse_y_ture = tf.cast(inverse_y_ture, dtype=tf.bool)\n\n intersection = tf.reduce_sum(tf.cast(tf.math.logical_and(y_pred, y_true), dtype=tf.float32))\n num_fp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_pred, inverse_y_ture), dtype=tf.float32))\n precision = intersection / (intersection + num_fp)\n\n return precision\n\ndef get_accuracy(y_true, y_pred):\n \"\"\"\n Calculate metrics in the training process\n \"\"\"\n ones = tf.ones_like(y_true)\n zero = tf.zeros_like(y_true)\n\n y_pred = tf.where(y_pred > 0, ones, zero)\n inverse_y_pred = tf.where(y_true > 0, zero, ones)\n inverse_y_ture = tf.where(y_true > 0, zero, ones)\n\n y_pred = tf.cast(y_pred, dtype=tf.bool)\n y_true = tf.cast(y_true, dtype=tf.bool)\n inverse_y_ture = tf.cast(inverse_y_ture, dtype=tf.bool)\n inverse_y_pred = tf.cast(inverse_y_pred, dtype=tf.bool)\n\n intersection = tf.reduce_sum(tf.cast(tf.math.logical_and(y_pred, y_true), dtype=tf.float32)) # true positive\n inverse_intersection = tf.reduce_sum(tf.cast(tf.math.logical_and(inverse_y_pred, inverse_y_ture), dtype=tf.float32)) # true negative\n num_fp = tf.reduce_sum(tf.cast(tf.math.logical_and(y_pred, inverse_y_ture), dtype=tf.float32))\n num_fn = tf.reduce_sum(tf.cast(tf.math.logical_and(y_pred, y_true), dtype=tf.float32))\n accuracy = (intersection + inverse_intersection) / (intersection+ inverse_intersection + num_fp + num_fn)\n\n return accuracy\n\ndef get_IoU(y_true, y_pred):\n \"\"\"\n Calculate metrics in the training process\n \"\"\"\n ones = tf.ones_like(y_true)\n zero = tf.zeros_like(y_true)\n\n y_pred = tf.where(y_pred > 0, ones, zero)\n\n y_pred = tf.cast(y_pred, dtype=tf.bool)\n y_true = tf.cast(y_true, dtype=tf.bool)\n\n union = tf.reduce_sum(tf.cast(tf.math.logical_or(y_pred, y_true), dtype=tf.float32))\n intersection = tf.reduce_sum(tf.cast(tf.math.logical_and(y_pred, y_true), dtype=tf.float32))\n\n IoU = intersection / union\n\n return IoU\n", "import numpy as np\nimport shutil, sys, os, pickle\n\nsys.path.append(\"..\")\n\nfrom SwitchVAE import *\nfrom utils import save_volume, data_IO, arg_parser, model\n\nfrom utils import 
model\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input\nfrom sklearn.utils import shuffle\nimport time\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nConFig = tf.ConfigProto()\nConFig.gpu_options.allow_growth = True\nsession = tf.Session(config=ConFig)\n\n\ndef latent2dict(hash, z_mean, z_logvar, z):\n output = {}\n for i in range(len(hash)):\n output[hash[i] + '_z_mean'] = z_mean[i]\n output[hash[i] + '_z_logvar'] = z_logvar[i]\n output[hash[i] + '_z'] = z[i]\n return output\n\ndef main(args):\n weights_dir = args.weights_dir\n save_the_img = args.generate_img\n voxel_data_path = args.voxel_data_dir\n image_data_path = args.image_data_dir\n input_form = args.input_form\n dataset = args.dataset\n\n z_dim = args.latent_vector_size\n\n if input_form == 'voxel':\n reconstructions_save_path = args.save_dir + '/analyse_voxel_input'\n latent_save_path = args.save_dir + '/voxel_latent_dict'\n if not os.path.exists(latent_save_path):\n os.makedirs(latent_save_path)\n\n voxel_input = Input(shape=g.VOXEL_INPUT_SHAPE)\n voxel_encoder = model.get_voxel_encoder(z_dim)\n decoder = model.get_voxel_decoder(z_dim)\n output = decoder(voxel_encoder(voxel_input))\n test_model = Model(voxel_input, output)\n\n voxel_encoder.load_weights(os.path.join(weights_dir, 'weightsEnd_voxEncoder.h5'), by_name=True)\n decoder.load_weights(os.path.join(weights_dir, 'weightsEnd_voxDecoder.h5'), by_name=True)\n\n if dataset == 'shapenet':\n hash = os.listdir(voxel_data_path)\n voxel_file_list = [os.path.join(voxel_data_path, id) for id in hash]\n voxels = data_IO.voxelPathList2matrix(voxel_file_list)\n\n z_mean, z_logvar, z = voxel_encoder.predict(voxels)\n latent_dict = latent2dict(hash, z_mean, z_logvar, z)\n latent_dict_save_path = os.path.join(latent_save_path, 'voxel_latent_dict_table_all.pkl')\n save_latent_dict = open(latent_dict_save_path, 'wb')\n pickle.dump(latent_dict, save_latent_dict)\n save_latent_dict.close()\n reconstructions = test_model.predict(voxels)\n\n elif dataset == 'modelnet':\n X = {'train_z_mean': [], 'train_z': [], 'test_z_mean':[], 'test_z':[],'train_z_cat':[] ,'test_z_cat':[]}\n y = {'train_label': [], 'test_label': []}\n voxel_data = np.load(args.voxel_npz)\n train_voxels, train_labels = shuffle(voxel_data['X_train'], voxel_data['y_train'])\n test_voxels, test_labels = shuffle(voxel_data['X_test'], voxel_data['y_test'])\n train_batch = int(train_voxels.shape[0] / args.batch_size)\n test_batch = int(test_voxels.shape[0] / args.batch_size)\n for i in range(train_batch):\n train_batch_voxels = train_voxels[args.batch_size * i:args.batch_size * (i + 1),:]\n train_batch_labels = train_labels[args.batch_size * i:args.batch_size * (i + 1)]\n train_z_mean, train_z_logvar, train_z = voxel_encoder.predict(train_batch_voxels)\n train_z_concatenate = np.concatenate((train_z_mean, train_z_logvar), 1)\n for j in range(train_z_mean.shape[0]):\n X['train_z_mean'].append(train_z_mean[j])\n X['train_z'].append(train_z[j])\n X['train_z_cat'].append(train_z_concatenate[j])\n y['train_label'].append(train_batch_labels[j])\n for i in range(test_batch):\n test_batch_voxels = test_voxels[args.batch_size * i:args.batch_size * (i + 1), :]\n test_batch_labels = test_labels[args.batch_size * i:args.batch_size * (i + 1)]\n test_z_mean, test_z_logvar, test_z = voxel_encoder.predict(test_batch_voxels)\n test_z_concatenate = np.concatenate((test_z_mean, test_z_logvar), 1)\n for j in range(test_z_mean.shape[0]):\n X['test_z_mean'].append(test_z_mean[j])\n X['test_z'].append(test_z[j])\n 
X['test_z_cat'].append(test_z_concatenate[j])\n y['test_label'].append(test_batch_labels[j])\n\n np.savez_compressed(os.path.join(args.save_dir,'modelnet10_voxel_latent_cat.npz'),\n train_z=X['train_z'],\n train_z_mean=X['train_z_mean'],\n train_z_cat=X['train_z_cat'],\n train_labels=y['train_label'],\n test_z=X['test_z'],\n test_z_mean=X['test_z_mean'],\n test_z_cat=X['test_z_cat'],\n test_labels=y['test_label'])\n\n elif input_form == 'image':\n reconstructions_save_path = args.save_dir + '/analyse_image_input'\n latent_save_path = args.save_dir + '/image_latent_dict'\n if not os.path.exists(latent_save_path):\n os.makedirs(latent_save_path)\n\n image_input = Input(shape=g.VIEWS_IMAGE_SHAPE_SHAPENET)\n image_encoder = model.get_img_encoder(z_dim, g.VIEWS_IMAGE_SHAPE_MODELNET)['image_encoder']\n image_encoder.load_weights(os.path.join(weights_dir, 'weightsEnd_imgEncoder.h5'), by_name=True)\n\n decoder = model.get_voxel_decoder(z_dim)\n decoder.load_weights(os.path.join(weights_dir, 'weightsEnd_voxDecoder.h5'), by_name=True)\n output = decoder(image_encoder(image_input)[0])\n test_model = Model(image_input, output)\n\n if dataset == 'shapenet':\n\n hash = os.listdir(image_data_path)\n image_file_list = [os.path.join(image_data_path, id) for id in hash]\n images = data_IO.imagePathList2matrix(image_file_list, train=False)\n\n # Get latent vector information\n z_mean, z_logvar, z = image_encoder.predict(images)\n\n # record latent vectors in dictionary and save it in .pkl form\n latent_dict = latent2dict(hash, z_mean, z_logvar, z)\n latent_dict_save_path = os.path.join(latent_save_path, 'latent_dict.pkl')\n save_latent_dict = open(latent_dict_save_path, 'wb')\n pickle.dump(latent_dict, save_latent_dict)\n save_latent_dict.close()\n\n reconstructions = test_model.predict(images)\n elif dataset == 'modelnet':\n X = {'train_z_mean': [], 'train_z': [], 'test_z_mean': [], 'test_z': []}\n y = {'train_label': [], 'test_label': []}\n object_id_data = np.load(args.image_npz)\n modelnet_image_path = '/home/zmy/mmi_dataset/ModelNet40_images/modelnet40_images_new_12x'\n train_images_id, train_labels = shuffle(object_id_data['X_train'], object_id_data['y_train'])\n test_images_id, test_labels = shuffle(object_id_data['X_test'], object_id_data['y_test'])\n train_batch = int(train_images_id.shape[0] / args.batch_size)\n test_batch = int(test_images_id.shape[0] / args.batch_size)\n count=1\n\n for i in range(train_batch):\n train_batch_object_id = train_images_id[args.batch_size * i:args.batch_size * (i + 1)]\n train_batch_images = data_IO.objectIdList2matrix(train_batch_object_id,modelnet_image_path,'train')\n train_batch_labels = train_labels[args.batch_size * i:args.batch_size * (i + 1)]\n print(\"Predicting train batch:\", str(count)+'/'+str(train_batch))\n count += 1\n train_z_mean, train_z_logvar, train_z = image_encoder.predict(train_batch_images)\n for j in range(train_z_mean.shape[0]):\n X['train_z_mean'].append(train_z_mean[j])\n X['train_z'].append(train_z[j])\n y['train_label'].append(train_batch_labels[j])\n count =1\n for i in range(test_batch):\n test_batch_object_id = test_images_id[args.batch_size * i:args.batch_size * (i + 1)]\n test_batch_images = data_IO.objectIdList2matrix(test_batch_object_id, modelnet_image_path, 'test')\n test_batch_labels = test_labels[args.batch_size * i:args.batch_size * (i + 1)]\n print(\"Predicting test batch:\", str(count) + '/' + str(test_batch))\n count += 1\n test_z_mean, test_z_logvar, test_z = image_encoder.predict(test_batch_images)\n for j in 
range(test_z_mean.shape[0]):\n X['test_z_mean'].append(test_z_mean[j])\n X['test_z'].append(test_z[j])\n y['test_label'].append(test_batch_labels[j])\n\n np.savez_compressed(os.path.join(args.save_dir, 'modelnet10_image_BG0_latent.npz'),\n train_z=X['train_z'],\n train_z_mean=X['train_z_mean'],\n train_labels=y['train_label'],\n test_z=X['test_z'],\n test_z_mean=X['test_z_mean'],\n test_labels=y['test_label'])\n\n if bool(args.generation):\n\n reconstructions[reconstructions > 0] = 1\n reconstructions[reconstructions < 0] = 0\n\n if not os.path.exists(reconstructions_save_path):\n os.makedirs(reconstructions_save_path)\n\n save_volume.save_metrics(reconstructions, voxels, voxel_data_path, image_data_path, input_form, reconstructions_save_path)\n\n for i in range(reconstructions.shape[0]):\n save_volume.save_binvox_output_2(reconstructions[i, 0, :], hash[i], reconstructions_save_path, '_gen',\n save_bin=True, save_img=save_the_img)\n\n\nif __name__ == '__main__':\n start = time.time()\n main(arg_parser.parse_test_arguments(sys.argv[1:]))\n end = time.time()\n interval = int(end) - int(start)\n print(\"The time spent on seconds: \", interval)" ]
[ [ "numpy.logical_not", "tensorflow.cast", "tensorflow.ones_like", "numpy.logical_or", "tensorflow.zeros_like", "tensorflow.math.logical_and", "tensorflow.where", "tensorflow.math.logical_or", "numpy.logical_and" ], [ "tensorflow.keras.models.Model", "sklearn.utils.shuffle", "numpy.concatenate", "numpy.load", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
Hari-07/manim
[ "bbe113e7d33636c8901d6c7cee81cb2f4b69cc8b" ]
[ "manim/mobject/matrix.py" ]
[ "r\"\"\"Mobjects representing matrices.\n\nExamples\n--------\n\n.. manim:: MatrixExamples\n :save_last_frame:\n\n class MatrixExamples(Scene):\n def construct(self):\n m0 = Matrix([[2, 0], [-1, 1]])\n m1 = Matrix([[1, 0], [0, 1]],\n left_bracket=\"\\\\big(\",\n right_bracket=\"\\\\big)\")\n m2 = DecimalMatrix(\n [[3.456, 2.122], [33.2244, 12.33]],\n element_to_mobject_config={\"num_decimal_places\": 2},\n left_bracket=\"\\\\{\",\n right_bracket=\"\\\\}\")\n\n self.add(m0.shift(LEFT - (3, 0, 0)))\n self.add(m1)\n self.add(m2.shift(RIGHT + (3, 0, 0)))\n\n\"\"\"\n\n__all__ = [\n \"Matrix\",\n \"DecimalMatrix\",\n \"IntegerMatrix\",\n \"MobjectMatrix\",\n \"matrix_to_tex_string\",\n \"matrix_to_mobject\",\n \"get_det_text\",\n]\n\n\nimport numpy as np\n\nfrom ..constants import *\nfrom ..mobject.numbers import DecimalNumber, Integer\nfrom ..mobject.shape_matchers import BackgroundRectangle\nfrom ..mobject.svg.tex_mobject import MathTex, Tex\nfrom ..mobject.types.vectorized_mobject import VGroup, VMobject\nfrom ..utils.color import WHITE\n\n\ndef matrix_to_tex_string(matrix):\n matrix = np.array(matrix).astype(\"str\")\n if matrix.ndim == 1:\n matrix = matrix.reshape((matrix.size, 1))\n n_rows, n_cols = matrix.shape\n prefix = \"\\\\left[ \\\\begin{array}{%s}\" % (\"c\" * n_cols)\n suffix = \"\\\\end{array} \\\\right]\"\n rows = [\" & \".join(row) for row in matrix]\n return prefix + \" \\\\\\\\ \".join(rows) + suffix\n\n\ndef matrix_to_mobject(matrix):\n return MathTex(matrix_to_tex_string(matrix))\n\n\nclass Matrix(VMobject):\n \"\"\"A mobject that displays a matrix on the screen.\"\"\"\n\n def __init__(\n self,\n matrix,\n v_buff=0.8,\n h_buff=1.3,\n bracket_h_buff=MED_SMALL_BUFF,\n bracket_v_buff=MED_SMALL_BUFF,\n add_background_rectangles_to_entries=False,\n include_background_rectangle=False,\n element_to_mobject=MathTex,\n element_to_mobject_config={},\n element_alignment_corner=DR,\n left_bracket=\"\\\\big[\",\n right_bracket=\"\\\\big]\",\n **kwargs,\n ):\n \"\"\"\n\n Parameters\n ----------\n matrix : :class:`typing.Iterable`\n A numpy 2d array or list of lists\n v_buff : :class:`float`, optional\n vertical buffer, by default 0.8\n h_buff : :class:`float`, optional\n horizontal buffer, by default 1.3\n bracket_h_buff : :class:`float`, optional\n bracket horizonal buffer, by default MED_SMALL_BUFF\n bracket_v_buff : :class:`float`, optional\n bracket veritical buffer, by default MED_SMALL_BUFF\n add_background_rectangles_to_entries : :class:`bool`, optional\n `True` if should add backgraound rectangles to entries, by default False\n include_background_rectangle : :class:`bool`, optional\n `True` if should include background rectangle, by default False\n element_to_mobject : :class:`~.Mobject`, optional\n element to mobject, by default MathTex\n element_to_mobject_config : Dict[:class:`str`, :class:`~.Mobject`], optional\n element to mobject config, by default {}\n element_alignment_corner : :class:`np.ndarray`, optional\n the element alignment corner, by default DR\n left_bracket : :class:`str`, optional\n the left bracket type, by default \"\\\\\\\\big[\"\n right_bracket : :class:`str`, optional\n the right bracket type, by default \"\\\\\\\\big]\"\n\n \"\"\"\n\n self.v_buff = v_buff\n self.h_buff = h_buff\n self.bracket_h_buff = bracket_h_buff\n self.bracket_v_buff = bracket_v_buff\n self.add_background_rectangles_to_entries = add_background_rectangles_to_entries\n self.include_background_rectangle = include_background_rectangle\n self.element_to_mobject = element_to_mobject\n 
self.element_to_mobject_config = element_to_mobject_config\n self.element_alignment_corner = element_alignment_corner\n self.left_bracket = left_bracket\n self.right_bracket = right_bracket\n VMobject.__init__(self, **kwargs)\n matrix = np.array(matrix)\n if len(matrix.shape) < 2:\n raise ValueError(\n f\"{self.__str__()} class requires a two-dimensional array!\"\n )\n mob_matrix = self.matrix_to_mob_matrix(matrix)\n self.organize_mob_matrix(mob_matrix)\n self.elements = VGroup(*mob_matrix.flatten())\n self.add(self.elements)\n self.add_brackets(self.left_bracket, self.right_bracket)\n self.center()\n self.mob_matrix = mob_matrix\n if self.add_background_rectangles_to_entries:\n for mob in self.elements:\n mob.add_background_rectangle()\n if self.include_background_rectangle:\n self.add_background_rectangle()\n\n def matrix_to_mob_matrix(self, matrix):\n return np.vectorize(self.element_to_mobject)(\n matrix, **self.element_to_mobject_config\n )\n\n def organize_mob_matrix(self, matrix):\n for i, row in enumerate(matrix):\n for j, _ in enumerate(row):\n mob = matrix[i][j]\n mob.move_to(\n i * self.v_buff * DOWN + j * self.h_buff * RIGHT,\n self.element_alignment_corner,\n )\n return self\n\n def add_brackets(self, left=\"\\\\big[\", right=\"\\\\big]\"):\n \"\"\"Add the brackets to the Matrix mobject\n\n See Latex document for various bracket types.\n\n Parameters\n ----------\n left : :class:`str`, optional\n the left bracket, by default \"\\\\\\\\big[\"\n right : :class:`str`, optional\n the right bracket, by default \"\\\\\\\\big]\"\n\n Returns\n -------\n :class:`Matrix`\n The current matrix object (self).\n \"\"\"\n\n bracket_pair = MathTex(left, right)\n bracket_pair.scale(2)\n bracket_pair.stretch_to_fit_height(self.height + 2 * self.bracket_v_buff)\n l_bracket, r_bracket = bracket_pair.split()\n l_bracket.next_to(self, LEFT, self.bracket_h_buff)\n r_bracket.next_to(self, RIGHT, self.bracket_h_buff)\n self.add(l_bracket, r_bracket)\n self.brackets = VGroup(l_bracket, r_bracket)\n return self\n\n def get_columns(self):\n \"\"\"Return columns of the matrix as VGroups\n\n Returns\n --------\n List[:class:`~.VGroup`]\n Each VGroup contains a column of the matrix.\n \"\"\"\n return VGroup(\n *[VGroup(*self.mob_matrix[:, i]) for i in range(self.mob_matrix.shape[1])]\n )\n\n def set_column_colors(self, *colors):\n \"\"\"Set individual colors for each columns of the matrix\n\n Parameters\n ----------\n colors : :class:`str`\n The list of colors; each color specified corresponds to a column.\n\n Returns\n -------\n :class:`Matrix`\n The current matrix object (self).\n \"\"\"\n columns = self.get_columns()\n for color, column in zip(colors, columns):\n column.set_color(color)\n return self\n\n def get_rows(self):\n \"\"\"Return rows of the matrix as VGroups\n\n Returns\n --------\n List[:class:`~.VGroup`]\n Each VGroup contains a row of the matrix.\n \"\"\"\n return VGroup(\n *[VGroup(*self.mob_matrix[i, :]) for i in range(self.mob_matrix.shape[0])]\n )\n\n def set_row_colors(self, *colors):\n \"\"\"Set individual colors for each row of the matrix\n\n Parameters\n ----------\n colors : :class:`str`\n The list of colors; each color specified corresponds to a row.\n\n Returns\n -------\n :class:`Matrix`\n The current matrix object (self).\n \"\"\"\n rows = self.get_rows()\n for color, row in zip(colors, rows):\n row.set_color(color)\n return self\n\n def add_background_to_entries(self):\n for mob in self.get_entries():\n mob.add_background_rectangle()\n return self\n\n def 
get_mob_matrix(self):\n \"\"\"Return the underlying mob matrix mobjects\n\n Returns\n --------\n List[:class:`~.VGroup`]\n Each VGroup contains a row of the matrix.\n \"\"\"\n return self.mob_matrix\n\n def get_entries(self):\n \"\"\"Return the individual entries of the matrix\n\n Returns\n --------\n :class:`~.VGroup`\n VGroup containing entries of the matrix\n \"\"\"\n return VGroup(*self.get_mob_matrix().flatten())\n\n def get_brackets(self):\n \"\"\"Return the bracket mobjects\n\n Returns\n --------\n List[:class:`~.VGroup`]\n Each VGroup contains a bracket\n \"\"\"\n return self.brackets\n\n\nclass DecimalMatrix(Matrix):\n \"\"\"A mobject that displays a matrix with decimal entries on the screen.\"\"\"\n\n def __init__(\n self,\n matrix,\n element_to_mobject=DecimalNumber,\n element_to_mobject_config={\"num_decimal_places\": 1},\n **kwargs,\n ):\n \"\"\"\n Will round/truncate the decimal places as per the provided config.\n\n Parameters\n ----------\n matrix : :class:`typing.Iterable`\n A numpy 2d array or list of lists\n element_to_mobject : :class:`~.Mobject`, optional\n Mobject to use, by default DecimalNumber\n element_to_mobject_config : Dict[:class:`str`, :class:`~.Mobject`], optional\n Config for the desired mobject, by default {\"num_decimal_places\": 1}\n \"\"\"\n Matrix.__init__(\n self,\n matrix,\n element_to_mobject=element_to_mobject,\n element_to_mobject_config=element_to_mobject_config,\n **kwargs,\n )\n\n\nclass IntegerMatrix(Matrix):\n \"\"\"A mobject that displays a matrix with integer entries on the screen.\"\"\"\n\n def __init__(self, matrix, element_to_mobject=Integer, **kwargs):\n \"\"\"\n Note- Will round if there are decimal entries in the matrix.\n\n Parameters\n ----------\n matrix : :class:`typing.Iterable`\n A numpy 2d array or list of lists\n element_to_mobject : :class:`~.Mobject`, optional\n Mobject to use, by default Integer\n \"\"\"\n Matrix.__init__(self, matrix, element_to_mobject=element_to_mobject, **kwargs)\n\n\nclass MobjectMatrix(Matrix):\n \"\"\"A mobject that displays a matrix of mobject entries on the screen.\"\"\"\n\n def __init__(self, matrix, element_to_mobject=lambda m: m, **kwargs):\n Matrix.__init__(self, matrix, element_to_mobject=element_to_mobject, **kwargs)\n\n\ndef get_det_text(\n matrix, determinant=None, background_rect=False, initial_scale_factor=2\n):\n r\"\"\"Helper function to create determinant\n\n Parameters\n ----------\n matrix : :class:`~.Matrix`\n The matrix whose determinant is to be created\n\n determinant : :class:`int|str`\n The value of the determinant of the matrix\n\n background_rect : :class:`bool`\n The background rectangle\n\n initial_scale_factor : :class:`float`\n The scale of the text `det` w.r.t the matrix\n\n Returns\n --------\n :class:`~.VGroup`\n A VGroup containing the determinant\n\n Examples\n --------\n\n .. 
manim:: DeterminantOfAMatrix\n :save_last_frame:\n\n class DeterminantOfAMatrix(Scene):\n def construct(self):\n matrix = Matrix([\n [2, 0],\n [-1, 1]\n ])\n\n # scaling down the `det` string\n det = get_det_text(matrix,\n determinant=3,\n initial_scale_factor=1)\n\n # must add the matrix\n self.add(matrix)\n self.add(det)\n\n \"\"\"\n parens = MathTex(\"(\", \")\")\n parens.scale(initial_scale_factor)\n parens.stretch_to_fit_height(matrix.height)\n l_paren, r_paren = parens.split()\n l_paren.next_to(matrix, LEFT, buff=0.1)\n r_paren.next_to(matrix, RIGHT, buff=0.1)\n det = Tex(\"det\")\n det.scale(initial_scale_factor)\n det.next_to(l_paren, LEFT, buff=0.1)\n if background_rect:\n det.add_background_rectangle()\n det_text = VGroup(det, l_paren, r_paren)\n if determinant is not None:\n eq = MathTex(\"=\")\n eq.next_to(r_paren, RIGHT, buff=0.1)\n result = MathTex(str(determinant))\n result.next_to(eq, RIGHT, buff=0.2)\n det_text.add(eq, result)\n return det_text\n" ]
[ [ "numpy.array", "numpy.vectorize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xchen034/dyson_sphere_kg
[ "d9c6f28f5ae01f9087ecc4462e0d247640576f3e" ]
[ "dyson_search/read_tuples.py" ]
[ "import numpy as np\nimport pandas as pd\nimport re\nimport math\nfrom typing import Union\n\ndef get_all_entity(tuples: list) -> list:\n entities = []\n for _tuple in tuples:\n if _tuple[1] in [\"类型\", \"产地\", \"生产时间(s)\"]:\n if not \"公式\" in _tuple[0]:\n entities.append(_tuple[0])\n else:\n entities.append(_tuple[0])\n entities.append(_tuple[-1])\n return list(set(entities))\n\ndef get_category_list(tuples:list) -> list:\n category_list = []\n entity_list = []\n for _tuple in tuples:\n if _tuple[1] == \"类型\":\n category_list.append(_tuple)\n entity_list.append(_tuple[0])\n return category_list, entity_list\n\nclass DysonTuples:\n def __init__(self, txt_path:str, extra_resources:list) -> None:\n self.txt_path = txt_path\n self.extra_resources = extra_resources\n self.dyson_tuples = self.read_tuples_txt(txt_path)\n self._all_formulas = self.get_all_formulas()\n self._all_formulas_dict = {}\n self._all_raw_products = self.get_all_raw_products(self.dyson_tuples)\n for formula in self._all_formulas:\n if formula in [\"X射线裂解公式\",'重氢公式(2)']:\n continue\n self._all_formulas_dict[formula] = self.get_formula_dict(formula)\n self.remove_special_formula()\n self._all_products_formulas_dict = self.get_all_products_formulas_dict(self._all_formulas_dict)\n self._all_products_formulas_dict[\"氢\"].remove(\"反物质公式\")\n self._all_products_formulas_dict[\"氢\"].remove(\"等离子精炼公式\")\n\n\n def check_extra_resources(self):\n for resource in self.extra_resources:\n extra_resources = [\"硫酸\",\"光栅石\",\"分型硅石\",\"单级磁石\",\"刺笋结晶\",\"金伯利矿石\",\"可燃冰\"]\n if resource not in extra_resources:\n raise ValueError(\"extra_resource must in {}, but {} received\".format(extra_resources, resource))\n\n def read_tuples_txt(self, txt_path: str) -> list:\n with open(txt_path, encoding=\"utf-8\") as f:\n contents = f.readlines()\n tuples = []\n for content in contents:\n head, rel, tail = re.split(\"\\t|\\n\", content)[:-1]\n tuples.append((head, rel, tail))\n return tuples\n\n def get_all_products_formulas_dict(self, formulas_dict:dict) -> dict:\n product_formula_dict = {}\n for formula in formulas_dict.keys():\n products = list(formulas_dict[formula]['产物'].keys())\n for _product in products:\n if _product not in product_formula_dict.keys():\n product_formula_dict[_product] = [formula]\n else:\n product_formula_dict[_product].append(formula)\n return product_formula_dict\n\n def get_all_raw_products(self, tuples:list) -> list:\n raw_products = []\n for _tuple in tuples:\n if _tuple[2] == \"原料\":\n raw_products.append(_tuple[0])\n return raw_products\n\n def get_all_formulas(self) -> list:\n formulas = []\n for _tuple in self.dyson_tuples:\n if _tuple[-1] == \"生产公式\":\n formulas.append(_tuple[0])\n return list(set(formulas))\n \n def get_formula_dict(self, formula:str) -> dict:\n temp_list = []\n for _tuple in self.dyson_tuples:\n if formula in _tuple:\n temp_list.append(_tuple)\n formula_dict = self._get_formula_dict_from_list(formula, temp_list)\n return formula_dict\n\n def _get_formula_dict_from_list(self, formula:str, formula_list:list) -> dict:\n temp_dict = {'名称': formula,'原料':{},'产物':{}}\n for _tuple in formula_list:\n try:\n count = int(_tuple[1])\n if _tuple[0] == formula:\n temp_dict['产物'][_tuple[-1]] = count\n else:\n temp_dict['原料'][_tuple[0]] = count \n except:\n try:\n temp_dict[_tuple[1]] = int(_tuple[-1])\n except:\n temp_dict[_tuple[1]] = _tuple[-1]\n return temp_dict\n\n def find_method(self, target_product:str) -> list:\n '''\n count: nums/s\n '''\n methods = []\n for _tuple in self.dyson_tuples:\n if _tuple[-1] == 
target_product:\n methods.append(_tuple[0])\n return methods\n\n def remove_special_formula(self):\n self.all_raw_products.append(\"硅石\")\n self.all_formulas_dict.pop(\"石材公式\")\n if \"硫酸\" in self.extra_resources:\n self.all_formulas_dict.pop(\"硫酸公式\")\n self.all_raw_products.append(\"硫酸\")\n if \"光栅石\" in self.extra_resources:\n self.all_formulas_dict.pop(\"光子合并器公式\")\n self.all_formulas_dict.pop(\"卡西米尔晶体公式\")\n else:\n self.all_formulas_dict.pop(\"光子合并器公式(高效)\")\n self.all_formulas_dict.pop(\"卡西米尔晶体公式(高效)\")\n\n if \"分型硅石\" in self.extra_resources:\n self.all_formulas_dict.pop(\"晶格硅公式\")\n else:\n self.all_formulas_dict.pop(\"晶格硅公式(高效)\")\n\n if \"单级磁石\" in self.extra_resources:\n self.all_formulas_dict.pop(\"粒子容器公式\")\n else:\n self.all_formulas_dict.pop(\"粒子容器公式(高效)\")\n if \"刺笋结晶\" in self.extra_resources:\n self.all_formulas_dict.pop(\"碳纳米管公式\")\n else:\n self.all_formulas_dict.pop(\"碳纳米管公式(高效)\")\n if \"金伯利矿石\" in self.extra_resources:\n self.all_formulas_dict.pop(\"金刚石公式\")\n else:\n self.all_formulas_dict.pop(\"金刚石公式(高效)\")\n if \"可燃冰\" in self.extra_resources:\n self.all_formulas_dict.pop(\"石墨烯公式\")\n else:\n self.all_formulas_dict.pop(\"石墨烯公式(高效)\")\n\n @property\n def all_formulas(self):\n return self._all_formulas\n\n @property\n def all_formulas_dict(self):\n return self._all_formulas_dict\n\n @property\n def all_raw_products(self):\n return self._all_raw_products\n\n @property\n def all_products_formulas_dict(self):\n return self._all_products_formulas_dict\n\n\nclass DysonTuplesAnalysis(DysonTuples):\n def __init__(self, txt_path, extra_resources:list) -> None:\n super(DysonTuplesAnalysis, self).__init__(txt_path, extra_resources)\n \n def __call__(self, target_product:str, count:float, filter_station_num:int=np.inf):\n plan_list = []\n all_list = self._analysis_method(target_product, count, plan_list)\n for i in range(len(all_list)):\n # all_list[i] = self.analysis_result(all_list[i], filter_station_num)\n all_list[i] = self.analysis_result_pivot_table(all_list[i])\n return all_list\n\n def analysis_result_pivot_table(self, plan_list:list):\n plan_dict = {\"公式\":[],\"产地\":[],\"数量\":[]}\n extra_dict = {\"额外产物\":[],\"数量/s\":[]}\n for _plan in plan_list:\n plan_dict[\"公式\"].append(_plan[\"公式\"])\n plan_dict[\"产地\"].append(_plan[\"产地\"])\n plan_dict[\"数量\"].append(_plan[\"数量\"])\n if \"额外产物\" in _plan.keys():\n for _extra_product in _plan[\"额外产物\"]:\n product_name = list(_extra_product.keys())[0]\n extra_dict[\"额外产物\"].append(product_name)\n extra_dict[\"数量/s\"].append(_extra_product[product_name])\n plan_df = pd.DataFrame(plan_dict)\n plan_pt = pd.pivot_table(plan_df,index=[\"公式\",\"产地\"],values=[\"数量\"],aggfunc=np.sum)\n extra_df = pd.DataFrame(extra_dict)\n # extra_df[\"数量/min\"] = extra_df[\"数量/min\"].apply(lambda x: x*60)\n extra_pt = pd.pivot_table(extra_df,index=[\"额外产物\"],values=[\"数量/s\"],aggfunc=np.sum)\n return (plan_pt, extra_pt)\n\n def analysis_result(self, plan_list:list, station_num_filter:int=np.inf) -> dict:\n plan_dict = {\"额外产物\": {}}\n max_station_num = 0\n for _plan in plan_list:\n if _plan['公式'] not in plan_dict:\n plan_dict[_plan['公式']] = {'产地':_plan['产地'], '数量':_plan['数量']}\n else:\n plan_dict[_plan['公式']]['数量'] += _plan['数量']\n max_station_num = max(max_station_num, plan_dict[_plan['公式']]['数量'])\n if \"额外产物\" in _plan.keys():\n for _extra_product in _plan[\"额外产物\"]:\n product_name = list(_extra_product.keys())[0]\n if product_name not in plan_dict[\"额外产物\"]:\n plan_dict[\"额外产物\"][product_name] = _extra_product[product_name]\n else:\n 
plan_dict[\"额外产物\"][product_name] += _extra_product[product_name]\n if max_station_num > station_num_filter:\n plan_dict = {}\n return plan_dict\n\n def update_dict(self, _dict, dict_list):\n for _d in dict_list:\n key = list(_d.keys())[0]\n if key not in _dict:\n _dict[key] = _d[key]\n else:\n _dict[key] += _d[key]\n return _dict\n\n def _analysis_product_extra(self, product:str, product_count:Union[int, float], extra_products:dict):\n cost_extra_product = {}\n if product in extra_products:\n if product_count < extra_products[product]:\n extra_products[product] = extra_products[product] - product_count\n product_count = 0\n cost_extra_product = {product: -1*product_count}\n else:\n product_count = product_count - extra_products[product_count]\n cost_extra_product = {product: -1*extra_products[product_count]}\n extra_products.pop(product)\n return product_count, extra_products, cost_extra_product\n\n def _analysis_method(self, target_product:str, count:float, plan_list:list) -> list:\n target_plan_list = self.all_products_formulas_dict[target_product]\n all_plan_list = []\n for _plan in target_plan_list:\n if _plan in plan_list:\n continue\n raw_products = self.all_formulas_dict[_plan]['原料']\n raw_dict = {}\n products = self.all_formulas_dict[_plan]['产物']\n _plan_extra_products = []\n station = self.all_formulas_dict[_plan]['产地']\n product_time = self.all_formulas_dict[_plan]['生产时间(s)']\n station_nums = math.ceil(count*product_time/products[target_product])\n extra_nums = station_nums*products[target_product]/product_time - count\n for _product in products.keys():\n if _product == target_product and extra_nums:\n _plan_extra_products.append({_product:extra_nums})\n elif _product != target_product:\n _plan_extra_products.append({_product:station_nums*products[_product]/product_time})\n temp_plan_list = plan_list + [_plan]\n for _product in raw_products:\n if _product not in self.all_raw_products:\n raw_count = station_nums*raw_products[_product]/product_time\n raw_plan = self._analysis_method(_product, raw_count, temp_plan_list)\n raw_dict[_product] = raw_plan\n raw_methods = self.find_all_raw_mehods(raw_dict)\n if _plan_extra_products:\n _plan_dict = {\"公式\": _plan, \"产地\": station, \"数量\": station_nums, \"额外产物\":_plan_extra_products}\n else:\n _plan_dict = {\"公式\": _plan, \"产地\": station, \"数量\": station_nums}\n if len(raw_methods):\n for i in range(len(raw_methods)):\n raw_methods[i] += [_plan_dict]\n else:\n raw_methods = [[_plan_dict]]\n all_plan_list+=raw_methods\n return all_plan_list\n\n def find_all_raw_mehods(self, raw_dict) -> list:\n def find_all_probs(x:list,y:list)->list:\n all_list = []\n for i in range(len(x)):\n if isinstance(x[i], list):\n temp = x[i]\n else:\n temp = [x[i]]\n for j in range(len(y)):\n if isinstance(y[j], list):\n all_list.append(temp+y[j])\n else:\n all_list.append(temp+[y[i]])\n return all_list\n temp_list = []\n if len(raw_dict) == 1:\n return raw_dict[list(raw_dict.keys())[0]]\n for key in raw_dict.keys():\n if not temp_list:\n temp_list = raw_dict[key]\n else:\n temp_list = find_all_probs(temp_list, raw_dict[key])\n return temp_list\n\nif __name__ == \"__main__\":\n dyson_file_path = \"/Users/chenxi/projects/dyson_sphere_kg/dyson_sphere_tuples.txt\"\n dyson = DysonTuplesAnalysis(dyson_file_path, [\"硫酸\",\"刺笋结晶\",\"可燃冰\",\"光栅石\"])\n # print(dyson.all_formulas_dict)\n # for key in dyson.all_products_formulas_dict.keys():\n # print(\"{}: {}\".format(key, dyson.all_products_formulas_dict[key]))\n # print(dyson.all_raw_products)\n\n for plan in 
[\"氘核燃料棒\"]:#[\"电动机\",\"卡西米尔晶体\",\"氘核燃料棒\",\"碳纳米管\",\"小型运载火箭\",\"太阳帆\"]:\n plans = dyson(plan, 1)\n # print(len(plans))\n for _plan in plans:\n temp_plan = _plan[0]\n temp_extra = _plan[1]\n print(temp_plan)\n print(temp_extra)\n # for key in _plan.keys():\n # print(\"{}: {}\".format(key, _plan[key]))\n # print(\"\\n\\n\")" ]
[ [ "pandas.DataFrame", "pandas.pivot_table" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
biocore/microsetta-public-api
[ "1ec4c31e11127a8f480e4921b71ad36aa7d39c76" ]
[ "microsetta_public_api/resources.py" ]
[ "import os\nimport pandas as pd\nimport biom\nfrom copy import deepcopy\nfrom microsetta_public_api.exceptions import ConfigurationError\nfrom skbio.stats.ordination import OrdinationResults\nfrom qiime2.core.type.grammar import TypeExp\nfrom qiime2 import Artifact, Metadata\nfrom qiime2.metadata.io import MetadataFileError\nfrom q2_types.sample_data import AlphaDiversity, SampleData\nfrom q2_types.feature_table import FeatureTable, Frequency\nfrom q2_types.feature_data import FeatureData, Taxonomy\nfrom q2_types.ordination import PCoAResults\n\nfrom microsetta_public_api._logging import timeit\nfrom microsetta_public_api.models._taxonomy import Taxonomy as TaxonomyModel\n\n\n@timeit('_dict_of_literals_to_dict')\ndef _dict_of_literals_to_dict(dict_of_detail, resource_name):\n # passthrough\n return dict_of_detail\n\n\n@timeit('_dict_of_paths_to_alpha_data')\ndef _dict_of_paths_to_alpha_data(dict_of_qza_paths, resource_name):\n _validate_dict_of_paths(dict_of_qza_paths,\n resource_name)\n new_resource = _replace_paths_with_qza(dict_of_qza_paths,\n SampleData[AlphaDiversity],\n view_type=pd.Series)\n return new_resource\n\n\n@timeit('_dict_of_paths_to_pcoa')\ndef _dict_of_paths_to_pcoa(dict_of_qza_paths, resource_name):\n _validate_dict_of_paths(dict_of_qza_paths,\n resource_name)\n new_resource = _replace_paths_with_qza(dict_of_qza_paths,\n PCoAResults,\n view_type=OrdinationResults)\n return new_resource\n\n\n@timeit('_dict_of_dict_of_paths_to_pcoa')\ndef _dict_of_dict_of_paths_to_pcoa(dict_of_dict_of_qza_paths, resource_name):\n new_resource = dict()\n for key, value in dict_of_dict_of_qza_paths.items():\n new_resource[key] = _dict_of_paths_to_pcoa(value, resource_name)\n return new_resource\n\n\n@timeit('_transform_dict_of_table')\ndef _transform_dict_of_table(dict_, resource_name):\n if not isinstance(dict_, dict):\n raise TypeError(f\"Expected field '{resource_name}' to contain a \"\n f\"dictionary. Got {dict_}.\")\n new_resource = dict()\n for table_name, attributes in dict_.items():\n res = _transform_single_table(attributes, table_name)\n new_resource[table_name] = res\n return new_resource\n\n\n@timeit('_transform_single_table')\ndef _transform_single_table(dict_, resource_name):\n taxonomy = {'feature-data-taxonomy': dict_.pop('feature-data-taxonomy',\n None)}\n supported_table_types = {'qza', 'biom'}\n table_type = dict_.get('table-format', 'qza')\n if table_type not in supported_table_types:\n raise ValueError(f\"'table-format'={table_type} not in supported table \"\n f\"types: {supported_table_types}.\")\n\n _validate_dict_of_paths(dict_, resource_name, allow_none=True,\n required_fields=['table'],\n non_ext_entries=['q2-type', 'table-format',\n 'cache-taxonomy'],\n allow_extras=True,\n extensions=['.' 
+ table_type]\n )\n _validate_dict_of_paths(taxonomy, resource_name,\n allow_none=True,\n )\n\n if taxonomy['feature-data-taxonomy'] is not None:\n dict_.update(taxonomy)\n\n semantic_types = {\n 'feature-data-taxonomy': FeatureData[Taxonomy],\n }\n biom_kws = set()\n if table_type == 'qza':\n semantic_types.update({\n 'table': dict_.get('table-type', FeatureTable[Frequency]),\n 'variances': FeatureTable[Frequency],\n })\n elif table_type == 'biom':\n biom_kws.update({'table', 'variances'})\n else:\n # shouldn't happen because error check earlier but seems better than\n # silently ignoring....\n raise ValueError(f\"'table-type'={table_type} not in supported table \"\n f\"types: {supported_table_types}.\")\n\n views = {\n 'table': biom.Table,\n 'feature-data-taxonomy': pd.DataFrame,\n 'variances': biom.Table,\n }\n new_resource = deepcopy(dict_)\n for key, value in dict_.items():\n if key in semantic_types:\n new_resource[key] = _parse_q2_data(value,\n semantic_types[key],\n view_type=views.get(key, None),\n )\n elif key in biom_kws:\n new_resource[key] = biom.load_table(value)\n\n cache_taxonomy = new_resource.get('cache-taxonomy', True)\n if 'feature-data-taxonomy' in new_resource and cache_taxonomy:\n table = new_resource['table']\n taxonomy = new_resource['feature-data-taxonomy']\n variances = new_resource.get('variances', None)\n\n # rank_level=5 -> genus\n model = TaxonomyModel(table, taxonomy, variances, rank_level=5)\n new_resource['model'] = model\n\n return new_resource\n\n\n@timeit('_parse_q2_data')\ndef _parse_q2_data(filepath, semantic_type, view_type=None,\n ignore_predicate=True):\n try:\n data = _q2_load(filepath)\n except ValueError as e:\n raise ConfigurationError(*e.args)\n\n data_type = data.type\n if ignore_predicate:\n data_type = TypeExp(data_type.template, fields=data_type.fields)\n\n if data_type != semantic_type:\n raise ConfigurationError(f\"Expected QZA '{filepath}' to have type \"\n f\"'{semantic_type}'. \"\n f\"Received '{data.type}'.\")\n if view_type is not None:\n data = _q2_view(data, view_type)\n\n return data\n\n\n@timeit('_q2_view')\ndef _q2_view(data, view_type):\n data = data.view(view_type=view_type)\n return data\n\n\n@timeit('_q2_load')\ndef _q2_load(filepath):\n data = Artifact.load(filepath)\n return data\n\n\n@timeit('_validate_dict_of_paths')\ndef _validate_dict_of_paths(dict_of_paths, name, allow_none=False,\n required_fields=None, allow_extras=False,\n non_ext_entries=None, extensions=None,\n ):\n if extensions is None:\n extensions = ['.qza']\n if non_ext_entries is None:\n non_ext_entries = []\n if not isinstance(dict_of_paths, dict):\n raise ValueError(f\"Expected '{name}' field to contain a dict. 
\"\n f\"Got {type(dict_of_paths).__name__}\")\n if required_fields:\n for field in required_fields:\n if field not in dict_of_paths:\n raise ValueError(f\"Did not get required field '{field}'.\")\n if not allow_extras:\n allowed_keys = set(required_fields) | set(non_ext_entries)\n extra_keys = list(filter(lambda x: x not in allowed_keys,\n dict_of_paths.keys()))\n if extra_keys:\n raise ValueError(f\"Extra keys: {extra_keys} not allowed.\")\n\n for key, value in dict_of_paths.items():\n if key in non_ext_entries:\n continue\n has_ext = isinstance(value, str) and value.endswith(tuple(extensions))\n exists = isinstance(value, str) and os.path.exists(value)\n is_none = value is None\n value_is_existing_qza_path = (has_ext and exists) or \\\n (is_none and allow_none)\n\n if not value_is_existing_qza_path:\n exp_ext = extensions[0] if len(extensions) == 1 else extensions\n raise ValueError('Expected existing path with {} '\n 'extension. Got: {}'.format(exp_ext, value))\n\n\n@timeit('_replace_paths_with_qza')\ndef _replace_paths_with_qza(dict_of_qza_paths, semantic_type, view_type=None):\n new_resource = dict()\n for key, value in dict_of_qza_paths.items():\n new_resource[key] = _parse_q2_data(value,\n semantic_type,\n view_type=view_type,\n )\n return new_resource\n\n\n@timeit('_load_q2_metadata')\ndef _load_q2_metadata(metadata_path, name):\n try:\n new_resource = Metadata.load(metadata_path)\n except TypeError:\n # if metadata_path is some type that does not have '+' method with\n # str, e.g., dict then q2 metadata will get a type error. Except this\n # error and give a MetadataFileError, which is more informative\n raise MetadataFileError(str(metadata_path))\n return new_resource.to_dataframe()\n\n\n@timeit('_load_neighbors_tsv')\ndef _load_neighbors_tsv(dict_of_paths, name):\n new_resource = dict()\n for key, value in dict_of_paths.items():\n new_resource[key] = pd.read_csv(value, sep='\\t',\n dtype=str).set_index('sample_id')\n return new_resource\n\n\nclass ResourceManager(dict):\n\n transformers = {\n 'alpha_resources': _dict_of_paths_to_alpha_data,\n 'table_resources': _transform_dict_of_table,\n 'pcoa': _dict_of_dict_of_paths_to_pcoa,\n 'metadata': _load_q2_metadata,\n }\n\n def update(self, *args, **kwargs):\n \"\"\"\n Updates the managers resources.\n\n Parameters\n ----------\n other : optional dict\n Resource identifier to resource mapping. 'alpha_resources' is\n reserved for a dictionary. The values in 'alpha_resources' must be\n existing file paths with a .qza extension, they will be read\n into a python QZA.\n kwargs : dict\n kwargs for dict.update. Similar to `other`, but can be passed as\n keywords.\n\n Returns\n -------\n NoneType\n\n Examples\n --------\n >>> resources = ResourceManager(\n ... alpha_resources={\n ... 'faith_pd': '/path/to/some.qza',\n ... 'chao1': '/another/path/to/a.qza',\n ... },\n ... table_resources={\n ... 'greengenes_13.8_insertion': {\n ... 'table': '/path/to/feature-table.biom',\n ... 'feature-data-taxonomy': '/a/feat-data-taxonomy.qza',\n ... 'variances': '/a/variance/feature-table.qza',\n ... 'table-format': 'biom'\n ... },\n ... 'some_other_feature_table': {\n ... 'table': '/another/path/tofeature-table.qza',\n ... 'variances': '/a/variance/feature-table.qza',\n ... 'q2-type': FeatureTable[Frequency],\n ... },\n ... },\n ... pcoa={\n ... 'fecal': {\n ... 'unifrac': '/a/pcoa/path1.qza',\n ... 'jaccard': '/another/pcoa/path2.qza',\n ... },\n ... 'all_samples': {\n ... 'unifrac': '/a/path/to/all_samples/pcoa.qza',\n ... }\n ... },\n ... 
metadata='/path/to/some/metadata.txt',\n ... some_other_resource='here is a string resource',\n ... )\n\n \"\"\"\n to_add = dict()\n if len(args) == 1 and isinstance(args[0], dict):\n other = args[0]\n elif len(args) == 0:\n other = dict()\n else:\n raise TypeError(f'update expected at most 1 positional argument '\n f'that is a dict. Got {args}')\n\n to_add.update(other, **kwargs)\n\n for resource_name, transformer in self.transformers.items():\n if resource_name in other:\n new_resource = transformer(other[resource_name],\n resource_name)\n to_add.update({resource_name: new_resource})\n if resource_name in kwargs:\n new_resource = transformer(kwargs[resource_name],\n resource_name)\n to_add.update({resource_name: new_resource})\n\n return dict.update(self, to_add, **kwargs)\n\n\nresources = ResourceManager()\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Maurynho/tcc_ciencia_dados
[ "f92c773da118f530d48159d38ea606307a65ba9c" ]
[ "codigo_fonte_python/dados_idh.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#Estabelecendo conexão e requisitando a página que onde os dados estão\nimport urllib3\nurl = 'https://www.br.undp.org/content/brazil/pt/home/idh0/rankings/idhm-uf-2010.html'\nconexao = urllib3.PoolManager()\nretorno = conexao.request('GET',url)\n\n#Iniciando a manipulação dos dados da página\nfrom bs4 import BeautifulSoup \npagina = BeautifulSoup(retorno.data, 'html.parser')\n\n\n# In[2]:\n\n\n#Recuperando apenas a estrutura de tabela no HTML\ntabela = pagina.find_all('table', class_ = 'tableizer-table')\ntabela\n\n\n# In[3]:\n\n\ndado = []\n\nfor celulas in tabela:\n celula = celulas.find_all('td')\n for dados in celula:\n dado.append(dados.find(text=True))\n\ndado\n\n\n# In[4]:\n\n\n#Importando biblioteca Pandas para converter a lista em Dataframe\nimport pandas as pd\ndados_uf = pd.DataFrame(dado)\n\n#Importando biblioteca numpy para ajustar os dados em uma tabela com 27 linhas por 6 colunas\nimport numpy as np\nmatriz_np = np.array(dados_uf)\nmatriz_ajustada = np.reshape(matriz_np, (27,6)) \n\n#Criando o dataframe final inserindo os títulos das colunas\nestados_idh = pd.DataFrame(matriz_ajustada,columns=['rank','nome','idh_geral','idh_renda','idh_logenvidade','idh_educacao'])\nestados_idh\n\n\n# In[5]:\n\n\nestados_idh.to_csv('C://fontes_dados///bases_tratadas/dados_idh.csv',sep=';')\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.reshape", "numpy.array", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
prasoongoyal/pixl2r
[ "b0691be6b27e705a62534b58f97ff7b8b6655c7d", "b0691be6b27e705a62534b58f97ff7b8b6655c7d" ]
[ "metaworld/metaworld/envs/mujoco/mujoco_env.py", "metaworld/metaworld/core/wrapper_env.py" ]
[ "import os\n\nfrom gym import error, spaces\nfrom gym.utils import seeding\nimport numpy as np\nfrom os import path\nimport gym\n\ntry:\n\timport mujoco_py\nexcept ImportError as e:\n\traise error.DependencyNotInstalled(\"{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)\".format(e))\n\nDEFAULT_SIZE = 50\n\nclass MujocoEnv(gym.Env):\n\t\"\"\"\n\tThis is a simplified version of the gym MujocoEnv class.\n\n\tSome differences are:\n\t - Do not automatically set the observation/action space.\n\t\"\"\"\n\tdef __init__(self, model_path, frame_skip, device_id=-1, automatically_set_spaces=False):\n\t\tfullpath = model_path\n\t\t# if model_path.startswith(\"/\"):\n\t\t# \tfullpath = model_path\n\t\t# else:\n\t\t# \tfullpath = os.path.join(os.path.dirname(__file__), \"assets\", model_path)\n\t\tif not path.exists(fullpath):\n\t\t\traise IOError(\"File %s does not exist\" % fullpath)\n\t\tself.frame_skip = frame_skip\n\t\tself.model = mujoco_py.load_model_from_path(fullpath)\n\t\tself.sim = mujoco_py.MjSim(self.model)\n\t\tself.data = self.sim.data\n\t\tself.viewer = None\n\t\tself._viewers = {}\n\n\t\tself.metadata = {\n\t\t\t'render.modes': ['human', 'rgb_array'],\n\t\t\t'video.frames_per_second': int(np.round(1.0 / self.dt))\n\t\t}\n\t\tif device_id == -1 and 'gpu_id' in os.environ:\n\t\t\tdevice_id =int(os.environ['gpu_id'])\n\t\tself.device_id = device_id\n\t\tself.init_qpos = self.sim.data.qpos.ravel().copy()\n\t\tself.init_qvel = self.sim.data.qvel.ravel().copy()\n\t\tif automatically_set_spaces:\n\t\t\tobservation, _reward, done, _info = self.step(np.zeros(self.model.nu))\n\t\t\tassert not done\n\t\t\tself.obs_dim = observation.size\n\n\t\t\tbounds = self.model.actuator_ctrlrange.copy()\n\t\t\tlow = bounds[:, 0]\n\t\t\thigh = bounds[:, 1]\n\t\t\tself.action_space = spaces.Box(low=low, high=high)\n\n\t\t\thigh = np.inf*np.ones(self.obs_dim)\n\t\t\tlow = -high\n\t\t\tself.observation_space = spaces.Box(low, high)\n\n\t\tself.seed()\n\n\tdef seed(self, seed=None):\n\t\tself.np_random, seed = seeding.np_random(seed)\n\t\treturn [seed]\n\n\t# methods to override:\n\t# ----------------------------\n\n\tdef reset_model(self):\n\t\t\"\"\"\n\t\tReset the robot degrees of freedom (qpos and qvel).\n\t\tImplement this in each subclass.\n\t\t\"\"\"\n\t\traise NotImplementedError\n\n\tdef viewer_setup(self):\n\t\t\"\"\"\n\t\tThis method is called when the viewer is initialized and after every reset\n\t\tOptionally implement this method, if you need to tinker with camera position\n\t\tand so forth.\n\t\t\"\"\"\n\t\tpass\n\n\t# -----------------------------\n\n\tdef reset(self):\n\t\tself.sim.reset()\n\t\tob = self.reset_model()\n\t\tif self.viewer is not None:\n\t\t\tself.viewer_setup()\n\t\treturn ob\n\n\tdef reset_to_idx(self, idx):\n\t\tself.sim.reset()\n\t\tob = self.reset_model_to_idx(idx)\n\t\tif self.viewer is not None:\n\t\t\tself.viewer_setup()\n\t\treturn ob\n\n\tdef set_state(self, qpos, qvel):\n\t\tassert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)\n\t\told_state = self.sim.get_state()\n\t\tnew_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,\n\t\t\t\t\t\t\t\t\t\t old_state.act, old_state.udd_state)\n\t\tself.sim.set_state(new_state)\n\t\tself.sim.forward()\n\n\t@property\n\tdef dt(self):\n\t\treturn self.model.opt.timestep * self.frame_skip\n\n\tdef do_simulation(self, ctrl, n_frames=None):\n\t\tif n_frames is None:\n\t\t\tn_frames = self.frame_skip\n\t\tif self.sim.data.ctrl is not None 
and ctrl is not None:\n\t\t\tself.sim.data.ctrl[:] = ctrl\n\t\tfor _ in range(n_frames):\n\t\t\tself.sim.step()\n\n\tdef render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE, depth=False):\n\t\tif 'rgb_array' in mode:\n\t\t\tself._get_viewer(mode).render(width, height)\n\t\t\t# window size used for old mujoco-py:\n\t\t\tdata = self._get_viewer(mode).read_pixels(width, height, depth=depth)\n\t\t\t# original image is upside-down, so flip it\n\t\t\tif not depth:\n\t\t\t\treturn data[::-1, :, :]\n\t\t\telse:\n\t\t\t\treturn data[0][::-1, :, :], data[1][::-1, :]\n\t\telif mode == 'human':\n\t\t\tself._get_viewer(mode).render()\n\n\tdef close(self):\n\t\tif self.viewer is not None:\n\t\t\tself.viewer.finish()\n\t\t\tself.viewer = None\n\n\tdef _get_viewer(self, mode):\n\t\tself.viewer = self._viewers.get(mode)\n\t\tif self.viewer is None:\n\t\t\tif mode == 'human':\n\t\t\t\tself.viewer = mujoco_py.MjViewer(self.sim)\n\t\t\telif 'rgb_array' in mode:\n\t\t\t\tself.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)\n\t\t\tself.viewer_setup()\n\t\t\tself._viewers[mode] = self.viewer\n\t\t# if mode == 'rgb_array_y':\n\t\t# self.viewer_setup(view_angle='y')\n\t\t# else:\n\t\t# self.viewer_setup(view_angle='x')\n\t\tself.viewer_setup()\n\t\treturn self.viewer\n\n\tdef get_body_com(self, body_name):\n\t\treturn self.data.get_body_xpos(body_name)\n\n\tdef state_vector(self):\n\t\treturn np.concatenate([\n\t\t\tself.sim.data.qpos.flat,\n\t\t\tself.sim.data.qvel.flat\n\t\t])\n\n\tdef get_image(self, width=84, height=84, camera_name=None):\n\t\treturn self.sim.render(\n\t\t\twidth=width,\n\t\t\theight=height,\n\t\t\tcamera_name=camera_name,\n\t\t)\n\n\tdef initialize_camera(self, init_fctn):\n\t\tsim = self.sim\n\t\tviewer = mujoco_py.MjRenderContextOffscreen(sim, device_id=self.device_id)\n\t\t# viewer = mujoco_py.MjViewer(sim)\n\t\tinit_fctn(viewer.cam)\n\t\tsim.add_render_context(viewer)\n", "from gym.spaces import Box\n\nfrom metaworld.core.serializable import Serializable\nimport numpy as np\n\nclass ProxyEnv(Serializable):\n def __init__(self, wrapped_env):\n self.quick_init(locals())\n self._wrapped_env = wrapped_env\n\n @property\n def wrapped_env(self):\n return self._wrapped_env\n\n def __getattr__(self, attrname):\n if attrname == '_serializable_initialized':\n return None\n return getattr(self._wrapped_env, attrname)\n\n def __str__(self):\n return '{}({})'.format(type(self).__name__, self.wrapped_env)\n\n\nclass NormalizedBoxEnv(ProxyEnv, Serializable):\n \"\"\"\n Normalize action to in [-1, 1].\n\n Optionally normalize observations.\n \"\"\"\n def __init__(\n self,\n env,\n obs_means=None,\n obs_stds=None,\n obs_to_normalize_keys=['observation'],\n ):\n # self._wrapped_env needs to be called first because\n # Serializable.quick_init calls getattr, on this class. And the\n # implementation of getattr (see below) calls self._wrapped_env.\n # Without setting this first, the call to self._wrapped_env would call\n # getattr again (since it's not set yet) and therefore loop forever.\n # Or else serialization gets delegated to the wrapped_env. 
Serialize\n # this env separately from the wrapped_env.\n self._wrapped_env = env\n Serializable.quick_init(self, locals())\n ProxyEnv.__init__(self, env)\n self._should_normalize = not (obs_means is None and obs_stds is None)\n num_obs_types = len(obs_to_normalize_keys)\n # set this before the loops below that read it; assigning it only at\n # the end would send those lookups through __getattr__ to the wrapped env\n self.obs_to_normalize_keys = obs_to_normalize_keys\n if self._should_normalize:\n if obs_means is None:\n obs_means = dict()\n for key in self.obs_to_normalize_keys:\n obs_means[key] = np.zeros_like(env.observation_space[key].low)\n else:\n # copy the provided values instead of clobbering them with an\n # empty dict and then reading the keys back (KeyError)\n obs_means = {key: np.array(obs_means[key])\n for key in self.obs_to_normalize_keys}\n if obs_stds is None:\n obs_stds = dict()\n for key in self.obs_to_normalize_keys:\n obs_stds[key] = np.zeros_like(env.observation_space[key].low)\n else:\n obs_stds = {key: np.array(obs_stds[key])\n for key in self.obs_to_normalize_keys}\n self._obs_means = obs_means\n self._obs_stds = obs_stds\n ub = np.ones(self._wrapped_env.action_space.shape)\n self.action_space = Box(-1 * ub, ub)\n\n def estimate_obs_stats(self, obs_batch, override_values=False):\n raise NotImplementedError()\n\n def _apply_normalize_obs(self, obs):\n for key in self.obs_to_normalize_keys:\n obs[key] = (obs[key] - self._obs_means[key]) / (self._obs_stds[key] + 1e-8)\n # return the dict so callers that rebind the result (see step) do not\n # end up with None\n return obs\n\n def __getstate__(self):\n d = Serializable.__getstate__(self)\n # Add these explicitly in case they were modified\n d[\"_obs_means\"] = self._obs_means\n d[\"_obs_stds\"] = self._obs_stds\n return d\n\n def __setstate__(self, d):\n Serializable.__setstate__(self, d)\n self._obs_means = d[\"_obs_means\"]\n self._obs_stds = d[\"_obs_stds\"]\n\n def step(self, action):\n lb = self._wrapped_env.action_space.low\n ub = self._wrapped_env.action_space.high\n scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)\n scaled_action = np.clip(scaled_action, lb, ub)\n wrapped_step = self._wrapped_env.step(scaled_action)\n next_obs, reward, done, info = wrapped_step\n if self._should_normalize:\n next_obs = self._apply_normalize_obs(next_obs)\n return next_obs, reward, done, info\n\n def __str__(self):\n return \"Normalized: %s\" % self._wrapped_env\n" ]
[ [ "numpy.concatenate", "numpy.round", "numpy.zeros", "numpy.ones" ], [ "numpy.array", "numpy.zeros_like", "numpy.clip", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xchange11/bundesterminator
[ "d7e92bfa8ffda54821364c74ac33a48ffa5f51b9", "d7e92bfa8ffda54821364c74ac33a48ffa5f51b9" ]
[ "bundestag/bundestrainer.py", "bundestag/trainer.py" ]
[ "import os\nimport pickle\n\nfrom google.cloud import storage\nfrom bundestag import data, utils\nfrom bundestag.bundes_w2v import BundesW2V\n\nimport pandas as pd\nimport numpy as np\n\nfrom tensorflow import keras\nfrom tensorflow.keras import Sequential, layers\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n\nimport mlflow\nfrom mlflow.tracking import MlflowClient\nfrom memoized_property import memoized_property\n\n\n\n# Google Cloud Platform Data\nGCP_BUCKET_NAME = \"\" # NEEDS TO BE PROVIDED HERE IN CODE\nGCP_BUCKET_DATA_FOLDER = 'trained'\n\n# MLFLOW server address\nMLFLOW_URL = \"\" # NEEDS TO BE PROVIDED HERE IN CODE\n\n\nclass Bundestrainer():\n model = None\n loss = None\n optimizer = None\n metrics = None\n lstm_nodes = None\n keras_dense_layers = None\n last_layer_nodes = None\n batch_size = None\n patience = None\n epochs = None\n validation_split = None\n X = None\n y = None\n X_train = None\n X_test = None\n y_train = None\n y_test = None\n speech_data = None\n bio_data = None\n balance_treshold = None\n w2v_model = None\n pad_len = None\n party_mapping = None\n\n def __init__(self,\n loss=\"categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=['accuracy'],\n lstm_nodes=20,\n keras_dense_layers={15: 'relu'},\n last_layer_nodes=5,\n batch_size=32,\n patience=3,\n epochs=10,\n validation_split=0.3,\n balance_treshold=500_000,\n pad_len=300,\n experiment_name=\"\"): # NEEDS TO BE PROVIDED HERE IN CODE\n self.loss = loss\n self.optimizer = optimizer\n self.metrics = metrics\n self.lstm_nodes = lstm_nodes\n self.keras_dense_layers = keras_dense_layers\n self.last_layer_nodes = last_layer_nodes\n self.batch_size = batch_size\n self.patience = patience\n self.epochs = epochs\n self.validation_split = validation_split\n self.balance_treshold = balance_treshold\n self.pad_len = pad_len\n self.experiment_name = experiment_name\n\n def get_data(self):\n all_data = data.get_data()\n self.speech_data = all_data['speech_segments'][[\"text\", \"party\",\n \"speech_id\",\n \"speaker_id\"]]\n self.bio_data = all_data['bio_data']\n\n def preprocess_dataframe(self):\n self.speech_data = utils.impute_party(self.speech_data, self.bio_data)\n self.speech_data = utils.remove_non_party(self.speech_data)\n self.speech_data = self.speech_data.dropna()\n self.speech_data[\"text\"] = self.speech_data[\"text\"].map(utils.basic_preprocess)\n self.speech_data = self.speech_data.dropna()\n self.speech_data = utils.balance(self.speech_data,\n self.balance_treshold)\n\n\n def prepare_data_for_training(self):\n self.X = self.speech_data[\"text\"]\n self.y = self.speech_data[\"party\"]\n\n self.encode_target() # labeling and cat generation\n self.split() # train-test-split and assign X_train etc. 
to instance prop\n self.init_w2v() # create w2v dict with X_train\n\n #Prepare X\n self.X_train = self.preprocess_text(self.X_train)\n self.X_test = self.preprocess_text(self.X_test)\n\n def preprocess_text(self, document_series):\n documents = document_series.to_list()\n documents = self.w2v_model.embedding(documents)\n documents = pad_sequences(documents,\n dtype='float32',\n padding='post',\n maxlen=self.pad_len)\n return documents\n\n def encode_target(self):\n party_df = pd.DataFrame()\n party_df[\"party\"] = self.y\n party_df[\"party_encoded\"] = LabelEncoder().fit_transform(\n party_df[\"party\"])\n party_mapping = party_df.groupby(\"party_encoded\").first()\n party_mapping = list(party_mapping[\"party\"])\n self.party_mapping = party_mapping\n self.y = to_categorical(party_df[\"party_encoded\"],\n num_classes=len(party_mapping),\n dtype=\"int32\")\n\n def split(self):\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(self.X, self.y, test_size=0.3, random_state=42)\n\n def init_w2v(self):\n self.w2v_model = BundesW2V()\n self.w2v_model.init_model(self.X_train)\n\n def init_model(self):\n self.model = Sequential()\n self.model.add(layers.Masking())\n self.model.add(layers.LSTM(self.lstm_nodes, activation='tanh'))\n\n # Create dense layers based on user input.\n # Custom amount of neurons + different activation functions possible.\n for nodes, act in self.keras_dense_layers.items():\n self.model.add(layers.Dense(nodes, activation=act))\n\n # Try grabbing the correct number of last layer nodes\n # from the amount of present parties\n try:\n self.model.add(\n layers.Dense(len(self.party_mapping), activation='softmax'))\n except:\n self.model.add(\n layers.Dense(self.last_layer_nodes, activation='softmax'))\n\n self.model.compile(loss=self.loss,\n optimizer=self.optimizer,\n metrics=self.metrics)\n\n def fit_model(self):\n es = EarlyStopping(patience=self.patience, restore_best_weights=True)\n self.model.fit(self.X_train,\n self.y_train,\n batch_size=self.batch_size,\n epochs=self.epochs,\n validation_split=self.validation_split,\n callbacks=[es])\n\n # MLFLOW Logging\n # Log the parameters\n self.mlflow_log_param('loss', self.loss)\n self.mlflow_log_param('optimizer', self.optimizer)\n self.mlflow_log_param('lstm_nodes', self.lstm_nodes)\n\n # Log the dense layers of the model\n for i, (nodes, act) in enumerate(self.keras_dense_layers.items(), 1):\n self.mlflow_log_param(f'dense_{i}_nodes', nodes)\n self.mlflow_log_param(f'dense_{i}_activation', act)\n\n self.mlflow_log_param('last_layer_nodes', self.last_layer_nodes)\n self.mlflow_log_param('batch_size', self.batch_size)\n self.mlflow_log_param('patience', self.patience)\n self.mlflow_log_param('epochs', self.epochs)\n self.mlflow_log_param('validation_split', self.validation_split)\n self.mlflow_log_param('balance_treshold', self.balance_treshold)\n self.mlflow_log_param('pad_len', self.pad_len)\n\n # Evaluate and log the metrics\n evaluation = self.model.evaluate(self.X_test, self.y_test, verbose=0)\n for metric, value in zip(self.model.metrics_names, evaluation):\n try:\n self.mlflow_log_metric(metric, value)\n except:\n print(f\"Metric :{metric} can't be logged. 
Does it even exist?\")\n\n def get_init_fit(self):\n self.get_data()\n self.preprocess_dataframe()\n self.prepare_data_for_training()\n self.init_model()\n self.fit_model()\n\n def predict_party_by_string(self, text_string):\n processed_string = utils.basic_preprocess(text_string)\n processed_string_as_list = [processed_string]\n processed_string_as_series = pd.Series(processed_string_as_list)\n vectorized_list = self.preprocess_text(processed_string_as_series)\n predicted_party_as_classes = self.model.predict_classes(vectorized_list)\n predicted_party_as_class = predicted_party_as_classes[0]\n predicted_party_as_string = self.party_mapping[predicted_party_as_class]\n return predicted_party_as_string\n\n def save_model(self, name):\n '''Save the trained model and upload to Google Cloud Platform'''\n filename = os.path.join(name)\n self.model.save(filename)\n self.upload_file_to_gcp(filename)\n\n def load_model(self, path):\n self.model = keras.models.load_model(path)\n\n def upload_file_to_gcp(self, location):\n '''Upload a file to the Google Cloud Platform'''\n client = storage.Client()\n bucket = client.bucket(GCP_BUCKET_NAME)\n blob = bucket.blob(location)\n blob.upload_from_filename(location)\n\n def save_w2v(self, name):\n '''Save Word2vec model and also upload it to the Google Cloud'''\n filename = os.path.join(name)\n self.w2v_model.save(filename)\n self.upload_file_to_gcp(filename)\n\n def load_w2c(self, path):\n self.w2v_model = BundesW2V()\n self.w2v_model.load(path)\n\n def save_party_mapping(self, path):\n with open(path, \"wb\") as f:\n pickle.dump(self.party_mapping, f)\n self.upload_file_to_gcp(path)\n\n def load_party_mapping(self, path):\n with open(path, \"rb\") as f:\n self.party_mapping = pickle.load(f)\n\n def save_speech_data(self, path):\n with open(path, \"wb\") as f:\n pickle.dump(self.speech_data, f)\n\n def load_speech_data(self, path):\n with open(path, 'rb') as f:\n self.speech_data = pickle.load(f)\n\n # MLFLOW\n @memoized_property\n def mlflow_client(self):\n mlflow.set_tracking_uri(MLFLOW_URL)\n return MlflowClient()\n\n @memoized_property\n def mlflow_experiment_id(self):\n try:\n return self.mlflow_client.create_experiment(self.experiment_name)\n except BaseException:\n return self.mlflow_client.get_experiment_by_name(self.experiment_name).experiment_id\n\n @memoized_property\n def mlflow_run(self):\n return self.mlflow_client.create_run(self.mlflow_experiment_id)\n\n def mlflow_log_param(self, key, value):\n self.mlflow_client.log_param(self.mlflow_run.info.run_id, key, value)\n\n def mlflow_log_metric(self, key, value):\n self.mlflow_client.log_metric(self.mlflow_run.info.run_id, key, value)\n\n\nif __name__ == '__main__':\n # Hire the trainer\n trainer = Bundestrainer()\n\n # Train those bastards\n trainer.get_init_fit()\n\n # Save the result\n trainer.save_w2v('model2.w2v')\n trainer.save_model('model2.tf')\n trainer.save_party_mapping('model2.pm')\n", "from bundestag.data import get_data, clean_data\nfrom bundestag.utils import impute_party\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\nimport joblib\nimport os\nfrom tempfile import mkdtemp\nfrom google.cloud import storage\n\n\n\n# Google Cloud Platform Data\nGCP_BUCKET_NAME = \"\" # NEEDS TO BE PROVIDED HERE IN CODE\nGCP_BUCKET_DATA_FOLDER = 'trained'\n\n\nclass Trainer():\n def __init__(self, 
X, y):\n self.pipeline = None\n self.X = X\n self.y = y\n\n def set_pipeline(self):\n preprocess = Pipeline([('vectorize', CountVectorizer())])\n\n pipe = Pipeline([('prep', preprocess),\n ('multinb_model', MultinomialNB())])\n\n self.pipeline = pipe\n\n def run(self):\n self.set_pipeline()\n self.pipeline.fit(self.X, self.y)\n\n def evaluate(self, X_test, y_test):\n '''evaluates the pipeline'''\n prediction = self.pipeline.predict(X_test)\n out = []\n for i, pred in enumerate(prediction):\n out.append(\n f'pred: {pred.ljust(25)} truth: {y_test.ravel()[i].ljust(25)}')\n # truth = y_test\n return '\\n'.join(out[:30])\n # return y_test.ravel()\n\n def save_model(self, name):\n \"\"\"Save the model into a .joblib format and upload to gcloud BUCKET!\"\"\"\n filename = os.path.join(name)\n joblib.dump(self.pipeline, filename)\n self.upload_model_to_gcp(filename)\n\n def upload_model_to_gcp(self, location):\n client = storage.Client()\n bucket = client.bucket(GCP_BUCKET_NAME)\n blob = bucket.blob(location)\n blob.upload_from_filename(location)\n\n\nif __name__ == '__main__':\n # Grab the data\n data = get_data()\n\n df = data['speech_segments']\n bio = data['bio_data']\n\n # Impute missing party values\n df = impute_party(df, bio)\n\n # Clean the data\n df = clean_data(df)\n\n # X and y\n X = df.drop('party', axis=1)\n y = df.party\n\n # Train Test Split\n X_train, X_test, y_train, y_test = \\\n train_test_split(X.text, y, test_size=0.2)\n\n # train\n trainer = Trainer(X_train, y_train)\n trainer.run()\n\n # Save model\n trainer.save_model('model.joblib')\n\n # evaluate\n # print(trainer.evaluate(X_test, y_test))\n" ]
[ [ "tensorflow.keras.models.load_model", "pandas.Series", "tensorflow.keras.layers.Masking", "tensorflow.keras.layers.Dense", "tensorflow.keras.Sequential", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.preprocessing.LabelEncoder", "tensorflow.keras.layers.LSTM", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.preprocessing.sequence.pad_sequences" ], [ "sklearn.feature_extraction.text.CountVectorizer", "sklearn.naive_bayes.MultinomialNB", "sklearn.model_selection.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wangyunjeff/yolov3
[ "9d8fc6c6a028d92a9e1f761c086a28ba1c392b40" ]
[ "utils/dataloader.py" ]
[ "from random import shuffle\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport math\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import Dataset\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\nfrom nets.yolo_training import Generator\n\n\nclass YoloDataset(Dataset):\n def __init__(self, train_lines, image_size):\n super(YoloDataset, self).__init__()\n\n self.train_lines = train_lines\n self.train_batches = len(train_lines)\n self.image_size = image_size\n\n def __len__(self):\n return self.train_batches\n\n def rand(self, a=0, b=1):\n return np.random.rand() * (b - a) + a\n\n def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5):\n \"\"\"实时数据增强的随机预处理\"\"\"\n line = annotation_line.split()\n image = Image.open(line[0])\n iw, ih = image.size\n h, w = input_shape\n box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])\n\n # 调整图片大小\n new_ar = w / h * self.rand(1 - jitter, 1 + jitter) / self.rand(1 - jitter, 1 + jitter)\n scale = self.rand(.25, 2)\n if new_ar < 1:\n nh = int(scale * h)\n nw = int(nh * new_ar)\n else:\n nw = int(scale * w)\n nh = int(nw / new_ar)\n image = image.resize((nw, nh), Image.BICUBIC)\n\n # 放置图片\n dx = int(self.rand(0, w - nw))\n dy = int(self.rand(0, h - nh))\n new_image = Image.new('RGB', (w, h),\n (np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)))\n new_image.paste(image, (dx, dy))\n image = new_image\n\n # 是否翻转图片\n flip = self.rand() < .5\n if flip:\n image = image.transpose(Image.FLIP_LEFT_RIGHT)\n\n # 色域变换\n hue = self.rand(-hue, hue)\n sat = self.rand(1, sat) if self.rand() < .5 else 1 / self.rand(1, sat)\n val = self.rand(1, val) if self.rand() < .5 else 1 / self.rand(1, val)\n x = rgb_to_hsv(np.array(image) / 255.)\n x[..., 0] += hue\n x[..., 0][x[..., 0] > 1] -= 1\n x[..., 0][x[..., 0] < 0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x > 1] = 1\n x[x < 0] = 0\n image_data = hsv_to_rgb(x) * 255 # numpy array, 0 to 1\n\n # 调整目标框坐标\n box_data = np.zeros((len(box), 5))\n if len(box) > 0:\n np.random.shuffle(box)\n box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx\n box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy\n if flip:\n box[:, [0, 2]] = w - box[:, [2, 0]]\n box[:, 0:2][box[:, 0:2] < 0] = 0\n box[:, 2][box[:, 2] > w] = w\n box[:, 3][box[:, 3] > h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w > 1, box_h > 1)] # 保留有效框\n box_data = np.zeros((len(box), 5))\n box_data[:len(box)] = box\n if len(box) == 0:\n return image_data, []\n\n if (box_data[:, :4] > 0).any():\n return image_data, box_data\n else:\n return image_data, []\n\n def __getitem__(self, index):\n if index == 0:\n shuffle(self.train_lines)\n lines = self.train_lines\n n = self.train_batches\n index = index % n\n img, y = self.get_random_data(lines[index], self.image_size[0:2])\n if len(y) != 0:\n # 从坐标转换成0~1的百分比\n boxes = np.array(y[:, :4], dtype=np.float32)\n boxes[:, 0] = boxes[:, 0] / self.image_size[1]\n boxes[:, 1] = boxes[:, 1] / self.image_size[0]\n boxes[:, 2] = boxes[:, 2] / self.image_size[1]\n boxes[:, 3] = boxes[:, 3] / self.image_size[0]\n\n boxes = np.maximum(np.minimum(boxes, 1), 0)\n boxes[:, 2] = boxes[:, 2] - boxes[:, 0]\n boxes[:, 3] = boxes[:, 3] - boxes[:, 1]\n\n boxes[:, 0] = boxes[:, 0] + boxes[:, 2] / 2\n boxes[:, 1] = boxes[:, 1] + boxes[:, 3] / 2\n y = np.concatenate([boxes, y[:, -1:]], 
axis=-1)\n\n img = np.array(img, dtype=np.float32)\n\n tmp_inp = np.transpose(img / 255.0, (2, 0, 1))\n tmp_targets = np.array(y, dtype=np.float32)\n return tmp_inp, tmp_targets\n\n\n# Used as the collate_fn for the DataLoader\ndef yolo_dataset_collate(batch):\n images = []\n bboxes = []\n for img, box in batch:\n images.append(img)\n bboxes.append(box)\n images = np.array(images)\n bboxes = np.array(bboxes)\n return images, bboxes\n\n" ]
[ [ "numpy.minimum", "numpy.logical_and", "matplotlib.colors.hsv_to_rgb", "numpy.random.shuffle", "numpy.concatenate", "numpy.random.rand", "numpy.transpose", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luissen/SSDT-A-single-shot-detector-for-PCB--defects
[ "d15c355e89fdde1f11c72fb5a5a68eb59fea6818" ]
[ "models/MOD_vgg_1125_2.py" ]
[ "'''\nMicro Object Detector Net\nthe author:Luis\ndate : 11.25\n'''\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom layers import *\nfrom .base_models import vgg, vgg_base\n\n\nclass BasicConv(nn.Module):\n\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True,\n bn=False, bias=True, up_size=0):\n super(BasicConv, self).__init__()\n self.out_channels = out_planes\n self.in_channels = in_planes\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding,\n dilation=dilation, groups=groups, bias=bias)\n self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None\n self.relu = nn.ReLU(inplace=True) if relu else None\n self.up_size = up_size\n self.up_sample = nn.Upsample(size=(up_size, up_size), mode='bilinear') if up_size != 0 else None\n\n def forward(self, x):\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n if self.relu is not None:\n x = self.relu(x)\n if self.up_size > 0:\n x = self.up_sample(x)\n return x\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc=nn.Sequential(\n nn.Linear(channel, channel // reduction),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction,channel),\n nn.Sigmoid()\n )\n \n def forward(self,x):\n b,c,_,_ = x.size()\n y = self.avg_pool(x).view(b,c)\n y = self.fc(y).view(b,c,1,1)\n return x*y\n \nclass MOD(nn.Module):\n def __init__(self, base, extras, upper,upper2 ,head,num_classes,size):\n super(MOD, self).__init__()\n self.num_classes = num_classes\n self.extras = nn.ModuleList(extras)\n self.size = size\n self.base = nn.ModuleList(base)\n #self.L2Norm = nn.ModuleList(extras)\n self.upper = nn.ModuleList(upper)\n self.upper2 = nn.ModuleList(upper2)\n self.loc = nn.ModuleList(head[0])\n self.conf = nn.ModuleList(head[1])\n self.softmax = nn.Softmax()\n \n def forward(self, x,test=False):\n \n scale_source = []\n upper_source = []\n loc = []\n conf = []\n mid_trans = []\n #get the F.T of conv4\n for k in range(23):\n x = self.base[k](x)\n scale_source.append(x)\n for k in range(23,len(self.base)):\n x = self.base[k](x)\n scale_source.append(x)\n for k,v in enumerate(self.extras):\n x = F.relu(v(x),inplace=True)\n if k%2 == 1:\n scale_source.append(x)\n upper_source = scale_source\n lenscale = len(scale_source)\n orgin = x\n for k in range(len(self.upper)-1):\n #bn = nn.BatchNorm2d(self.upper[lenscale-k-2].in_channels,affine=True)\n #print(self.upper[lenscale-k-2].in_channels)\n #print(self.upper[lenscale-k-1].out_channels)\n #print(scale_source[lenscale-k-2].size())\n se = SELayer(self.upper[lenscale-k-1].out_channels,16)\n #upper_source[0] =upper_source[0]+ se(self.upper[lenscale-k-1](upper_source[lenscale-k-1]))\n upper_source[0] =torch.cat([upper_source[0],se(self.upper[lenscale-k-1](upper_source[lenscale-k-1]))],1)\n #print(upper_source[0].shape)\n for k in range(len(self.upper)-2):\n se = SELayer(self.upper2[lenscale-k-1].out_channels,16)\n #upper_source[1] = upper_source[1] + se(self.upper2[lenscale-k-1](upper_source[lenscale-k-1]))\n upper_source[1] =torch.cat([upper_source[1],se(self.upper2[lenscale-k-1](upper_source[lenscale-k-1]))],1)\n #print(upper_source[1].shape)\n bn = nn.BatchNorm2d(2816,affine = True)\n upper_source[0] = bn(upper_source[0])\n #bn1 = nn.BatchNorm2d(1024,affine = True)\n #upper_source[1] = bn1(upper_source[1])\n for (x, l, c) in 
zip(upper_source, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n #print(loc.size())\n #print(conf.size())\n if test:\n output = (\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n )\n \n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n )\n #print(loc.size())\n #print(conf.size())\n return output\n\ndef low_pooling(vgg, extracts, size):\n if size == 300:\n up_size = layer_size('300')[k]\n elif size ==512:\n up_size = layer_size('512')[k]\n layers = []\n \n \ndef upper_deconv(vgg, extracts, size): \n layers = []\n layers2 = []\n if size == 300:\n layers.append(BasicConv(512, 512, kernel_size=1, padding=0))\n layers+=[(BasicConv(vgg[-2].out_channels,1024,kernel_size=1,padding=0,up_size = 38))]\n layers.append(BasicConv(extracts[1].out_channels,256,kernel_size=1,padding=0,up_size = 38))\n layers.append(BasicConv(extracts[3].out_channels,256,kernel_size=1,padding=0,up_size = 38))\n layers.append(BasicConv(extracts[5].out_channels,256,kernel_size=1,padding=0,up_size = 38))\n layers.append(BasicConv(extracts[7].out_channels,256,kernel_size=1,padding=0,up_size = 38))\n\n layers2.append(BasicConv(512, 128*4, kernel_size=1, padding=0))\n layers2+=[(BasicConv(vgg[-2].out_channels,1024,kernel_size=1,padding=0,up_size = 19))]\n layers2.append(BasicConv(extracts[1].out_channels,256,kernel_size=1,padding=0,up_size = 19))\n layers2.append(BasicConv(extracts[3].out_channels,256,kernel_size=1,padding=0,up_size = 19))\n layers2.append(BasicConv(extracts[5].out_channels,256,kernel_size=1,padding=0,up_size = 19))\n layers2.append(BasicConv(extracts[7].out_channels,256,kernel_size=1,padding=0,up_size = 19))\n\n elif size ==512:\n layers.append(BasicConv(512, 512, kernel_size=1, padding=0))\n layers.append(BasicConv(vgg[-2].out_channels,1024,kernel_size=1,padding=0,up_size = 64))\n layers.append(BasicConv(extracts[1].out_channels,256,kernel_size=1,padding=0,up_size = 64))\n layers.append(BasicConv(extracts[3].out_channels,256,kernel_size=1,padding=0,up_size = 64))\n layers.append(BasicConv(extracts[5].out_channels,256,kernel_size=1,padding=0,up_size = 64))\n layers.append(BasicConv(extracts[7].out_channels,256,kernel_size=1,padding=0,up_size = 64))\n layers.append(BasicConv(extracts[9].out_channels,256,kernel_size=1,padding=0,up_size = 64))\n\n layers2.append(BasicConv(512, 128*4, kernel_size=1, padding=0))\n layers2.append(BasicConv(vgg[-2].out_channels,1024,kernel_size=1,padding=0,up_size = 32))\n layers2.append(BasicConv(extracts[1].out_channels,256,kernel_size=1,padding=0,up_size = 32))\n layers2.append(BasicConv(extracts[3].out_channels,256,kernel_size=1,padding=0,up_size = 32))\n layers2.append(BasicConv(extracts[5].out_channels,256,kernel_size=1,padding=0,up_size = 32))\n layers2.append(BasicConv(extracts[7].out_channels,256,kernel_size=1,padding=0,up_size = 32))\n layers2.append(BasicConv(extracts[9].out_channels,256,kernel_size=1,padding=0,up_size = 32))\n\n return vgg, extracts,layers,layers2\n\n\ndef add_extras(cfg, i, batch_norm=False, size=300):\n # Extra layers added to VGG for feature scaling\n layers = []\n in_channels = i\n flag = False\n for k, v in enumerate(cfg):\n if in_channels != 'S':\n if v == 'S':\n layers += [nn.Conv2d(in_channels, cfg[k + 1],\n kernel_size=(1, 3)[flag], 
stride=2, padding=1)]\n else:\n layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]\n flag = not flag\n in_channels = v\n if size == 512:\n layers.append(nn.Conv2d(in_channels, 128, kernel_size=1, stride=1))\n layers.append(nn.Conv2d(128, 256, kernel_size=4, stride=1, padding=1))\n #print(len(layers))\n return layers\n\ndef multibox(vgg,extra_layers, upper,upper2,cfg, num_classes):\n loc_layers = []\n conf_layers = []\n vgg_source = [24,-2]\n loc_layers += [nn.Conv2d(2816,cfg[0] * 4,kernel_size=3,padding=1)]\n conf_layers += [nn.Conv2d(2816,cfg[0]*num_classes,kernel_size=3,padding=1)]\n loc_layers += [nn.Conv2d(2304,cfg[1] * 4,kernel_size=3,padding=1)]\n conf_layers += [nn.Conv2d(2304,cfg[1]*num_classes,kernel_size=3,padding=1)]\n for k,v in enumerate(upper):\n if k ==0 or k==1:\n continue\n loc_layers += [nn.Conv2d(v.in_channels,cfg[k] * 4,kernel_size=3,padding=1)]\n conf_layers += [nn.Conv2d(v.in_channels,cfg[k]*num_classes,kernel_size=3,padding=1)]\n '''\n for k, v in enumerate(vgg_source):\n loc_layers += [nn.Conv2d(vgg[v].out_channels,\n cfg[k] * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(vgg[v].out_channels,\n cfg[k] * num_classes, kernel_size=3, padding=1)]\n for k, v in enumerate(extra_layers[1::2], 2):\n loc_layers += [nn.Conv2d(v.out_channels, cfg[k]\n * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(v.out_channels, cfg[k]\n * num_classes, kernel_size=3, padding=1)]\n '''\n return vgg, extra_layers, upper ,upper2 , (loc_layers, conf_layers)\n\nlayer_size = {\n '300':[38,19,10,5,3,1],\n '512':[64,32,16,8,4,2,1],\n}\nextras = {\n '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n '512': [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256],\n}\nmbox = {\n '300': [6, 6, 6, 6, 4, 4], # number of boxes per feature map location\n '512': [6, 6, 6, 6, 6, 4, 4],\n}\ndef build_net(size=300, num_classes=21):\n if size != 300 and size != 512:\n print(\"Error: Sorry only SSD300 and SSD512 is supported currently!\")\n return\n\n return MOD(*multibox(*upper_deconv(vgg(vgg_base[str(size)], 3),\n add_extras(extras[str(size)] ,1024, size=size),size),\n mbox[str(size)], num_classes), num_classes=num_classes,size=size)\n\n" ]
[ [ "torch.nn.Softmax", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Upsample", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
janvdvegt/scikit-lego
[ "774e557c4d19f67ef54f3f0d1622c64ef9903b63" ]
[ "tests/conftest.py" ]
[ "import itertools as it\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom sklearn.utils import estimator_checks\n\nn_vals = (10, 100, 5000)\nk_vals = (1, 2, 5)\nnp_types = (np.int32, np.float32, np.float64)\n\ntransformer_checks = (\n estimator_checks.check_transformer_data_not_an_array,\n estimator_checks.check_transformer_general,\n estimator_checks.check_transformers_unfitted,\n)\n\ngeneral_checks = (\n estimator_checks.check_fit2d_predict1d,\n estimator_checks.check_methods_subset_invariance,\n estimator_checks.check_fit2d_1sample,\n estimator_checks.check_fit2d_1feature,\n estimator_checks.check_fit1d,\n estimator_checks.check_get_params_invariance,\n estimator_checks.check_set_params,\n estimator_checks.check_dict_unchanged,\n estimator_checks.check_dont_overwrite_parameters,\n)\n\nnonmeta_checks = (\n estimator_checks.check_estimators_dtypes,\n estimator_checks.check_fit_score_takes_y,\n estimator_checks.check_dtype_object,\n estimator_checks.check_sample_weights_pandas_series,\n estimator_checks.check_sample_weights_list,\n estimator_checks.check_sample_weights_invariance,\n estimator_checks.check_estimators_fit_returns_self,\n estimator_checks.check_complex_data,\n estimator_checks.check_estimators_empty_data_messages,\n estimator_checks.check_pipeline_consistency,\n estimator_checks.check_estimators_nan_inf,\n estimator_checks.check_estimators_overwrite_params,\n estimator_checks.check_estimator_sparse_data,\n estimator_checks.check_estimators_pickle,\n)\n\nclassifier_checks = (\n estimator_checks.check_classifier_data_not_an_array,\n estimator_checks.check_classifiers_one_label,\n estimator_checks.check_classifiers_classes,\n estimator_checks.check_estimators_partial_fit_n_features,\n estimator_checks.check_classifiers_train,\n estimator_checks.check_supervised_y_2d,\n estimator_checks.check_supervised_y_no_nan,\n estimator_checks.check_estimators_unfitted,\n estimator_checks.check_non_transformer_estimators_n_iter,\n estimator_checks.check_decision_proba_consistency,\n)\n\nregressor_checks = (\n estimator_checks.check_regressors_train,\n estimator_checks.check_regressor_data_not_an_array,\n estimator_checks.check_estimators_partial_fit_n_features,\n estimator_checks.check_regressors_no_decision_function,\n estimator_checks.check_supervised_y_2d,\n estimator_checks.check_supervised_y_no_nan,\n estimator_checks.check_regressors_int,\n estimator_checks.check_estimators_unfitted,\n)\n\noutlier_checks = (\n estimator_checks.check_outliers_fit_predict,\n estimator_checks.check_outliers_train,\n estimator_checks.check_classifier_data_not_an_array,\n estimator_checks.check_estimators_unfitted,\n)\n\n\[email protected](scope=\"module\", params=[_ for _ in it.product(n_vals, k_vals, np_types)])\ndef random_xy_dataset_regr(request):\n n, k, np_type = request.param\n np.random.seed(42)\n X = np.random.normal(0, 2, (n, k)).astype(np_type)\n y = np.random.normal(0, 2, (n,))\n return X, y\n\n\[email protected](scope=\"module\", params=[_ for _ in it.product(n_vals, k_vals, np_types)])\ndef random_xy_dataset_clf(request):\n n, k, np_type = request.param\n np.random.seed(42)\n X = np.random.normal(0, 2, (n, k)).astype(np_type)\n y = np.random.normal(0, 2, (n,)) > 0.0\n return X, y\n\n\[email protected](scope=\"module\", params=[_ for _ in it.product(n_vals, k_vals, np_types)])\ndef random_xy_dataset_multiclf(request):\n n, k, np_type = request.param\n np.random.seed(42)\n X = np.random.normal(0, 2, (n, k)).astype(np_type)\n y = pd.cut(np.random.normal(0, 2, (n,)), 3).codes\n 
return X, y\n\n\[email protected]\ndef sensitive_classification_dataset():\n df = pd.DataFrame({\"x1\": [1, 0, 1, 0, 1, 0, 1, 1],\n \"x2\": [0, 0, 0, 0, 0, 1, 1, 1],\n \"y\": [1, 1, 1, 0, 1, 0, 0, 0]})\n\n return df[['x1', 'x2']], df['y']\n\n\[email protected]\ndef sensitive_multiclass_classification_dataset():\n df = pd.DataFrame({\n 'x1': [1, 0, 1, 0, 1, 0, 1, 1, -2, -2, -2, -2],\n 'x2': [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1],\n 'y': [1, 1, 1, 0, 1, 0, 0, 0, 2, 2, 0, 0],\n })\n return df[['x1', 'x2']], df['y']\n\n\ndef id_func(param):\n \"\"\"Returns the repr of an object for usage in pytest parametrize\"\"\"\n return repr(param)\n" ]
[ [ "numpy.random.normal", "numpy.random.seed", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
cevans216/yt
[ "c19c3c615b996c8a6e418362ffea9041a616d673", "c19c3c615b996c8a6e418362ffea9041a616d673", "c19c3c615b996c8a6e418362ffea9041a616d673", "c19c3c615b996c8a6e418362ffea9041a616d673" ]
[ "yt/visualization/profile_plotter.py", "yt/frontends/enzo_p/data_structures.py", "yt/frontends/enzo/tests/test_outputs.py", "yt/visualization/volume_rendering/tests/test_composite.py" ]
[ "import base64\nimport builtins\nimport os\nfrom collections import OrderedDict\nfrom distutils.version import LooseVersion\nfrom functools import wraps\n\nimport matplotlib\nimport numpy as np\n\nfrom yt.data_objects.data_containers import YTSelectionContainer\nfrom yt.data_objects.profiles import create_profile, sanitize_field_tuple_keys\nfrom yt.data_objects.static_output import Dataset\nfrom yt.frontends.ytdata.data_structures import YTProfileDataset\nfrom yt.funcs import ensure_list, get_image_suffix, iterable, matplotlib_style_context\nfrom yt.utilities.exceptions import YTNotInsideNotebook\nfrom yt.utilities.logger import ytLogger as mylog\n\nfrom .base_plot_types import ImagePlotMPL, PlotMPL\nfrom .plot_container import (\n ImagePlotContainer,\n get_log_minorticks,\n invalidate_plot,\n linear_transform,\n log_transform,\n validate_plot,\n)\n\nMPL_VERSION = LooseVersion(matplotlib.__version__)\n\n\ndef get_canvas(name):\n from . import _mpl_imports as mpl\n\n suffix = get_image_suffix(name)\n\n if suffix == \"\":\n suffix = \".png\"\n if suffix == \".png\":\n canvas_cls = mpl.FigureCanvasAgg\n elif suffix == \".pdf\":\n canvas_cls = mpl.FigureCanvasPdf\n elif suffix in (\".eps\", \".ps\"):\n canvas_cls = mpl.FigureCanvasPS\n else:\n mylog.warning(\"Unknown suffix %s, defaulting to Agg\", suffix)\n canvas_cls = mpl.FigureCanvasAgg\n return canvas_cls\n\n\ndef invalidate_profile(f):\n @wraps(f)\n def newfunc(*args, **kwargs):\n rv = f(*args, **kwargs)\n args[0]._profile_valid = False\n return rv\n\n return newfunc\n\n\nclass PlotContainerDict(OrderedDict):\n def __missing__(self, key):\n plot = PlotMPL((10, 8), [0.1, 0.1, 0.8, 0.8], None, None)\n self[key] = plot\n return self[key]\n\n\nclass FigureContainer(OrderedDict):\n def __init__(self, plots):\n self.plots = plots\n super(FigureContainer, self).__init__()\n\n def __missing__(self, key):\n self[key] = self.plots[key].figure\n return self[key]\n\n def __iter__(self):\n return iter(self.plots)\n\n\nclass AxesContainer(OrderedDict):\n def __init__(self, plots):\n self.plots = plots\n self.ylim = {}\n self.xlim = (None, None)\n super(AxesContainer, self).__init__()\n\n def __missing__(self, key):\n self[key] = self.plots[key].axes\n return self[key]\n\n def __setitem__(self, key, value):\n super(AxesContainer, self).__setitem__(key, value)\n self.ylim[key] = (None, None)\n\n\ndef sanitize_label(label, nprofiles):\n label = ensure_list(label)\n\n if len(label) == 1:\n label = label * nprofiles\n\n if len(label) != nprofiles:\n raise RuntimeError(\"Number of labels must match number of profiles\")\n\n for l in label:\n if l is not None and not isinstance(l, str):\n raise RuntimeError(\"All labels must be None or a string\")\n\n return label\n\n\ndef data_object_or_all_data(data_source):\n if isinstance(data_source, Dataset):\n data_source = data_source.all_data()\n\n if not isinstance(data_source, YTSelectionContainer):\n raise RuntimeError(\"data_source must be a yt selection data object\")\n\n return data_source\n\n\nclass ProfilePlot:\n r\"\"\"\n Create a 1d profile plot from a data source or from a list\n of profile objects.\n\n Given a data object (all_data, region, sphere, etc.), an x field,\n and a y field (or fields), this will create a one-dimensional profile\n of the average (or total) value of the y field in bins of the x field.\n\n This can be used to create profiles from given fields or to plot\n multiple profiles created from\n `yt.data_objects.profiles.create_profile`.\n\n Parameters\n ----------\n data_source : 
YTSelectionContainer Object\n The data object to be profiled, such as all_data, region, or\n sphere. If a dataset is passed in instead, an all_data data object\n is generated internally from the dataset.\n x_field : str\n The binning field for the profile.\n y_fields : str or list\n The field or fields to be profiled.\n weight_field : str\n The weight field for calculating weighted averages. If None,\n the profile values are the sum of the field values within the bin.\n Otherwise, the values are a weighted average.\n Default : \"cell_mass\".\n n_bins : int\n The number of bins in the profile.\n Default: 64.\n accumulation : bool\n If True, the profile values for a bin N are the cumulative sum of\n all the values from bin 0 to N.\n Default: False.\n fractional : If True the profile values are divided by the sum of all\n the profile data such that the profile represents a probability\n distribution function.\n label : str or list of strings\n If a string, the label to be put on the line plotted. If a list,\n this should be a list of labels for each profile to be overplotted.\n Default: None.\n plot_spec : dict or list of dicts\n A dictionary or list of dictionaries containing plot keyword\n arguments. For example, dict(color=\"red\", linestyle=\":\").\n Default: None.\n x_log : bool\n Whether the x_axis should be plotted with a logarithmic\n scaling (True), or linear scaling (False).\n Default: True.\n y_log : dict or bool\n A dictionary containing field:boolean pairs, setting the logarithmic\n property for that field. May be overridden after instantiation using\n set_log\n A single boolean can be passed to signify all fields should use\n logarithmic (True) or linear scaling (False).\n Default: True.\n\n Examples\n --------\n\n This creates profiles of a single dataset.\n\n >>> import yt\n >>> ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n >>> ad = ds.all_data()\n >>> plot = yt.ProfilePlot(ad, \"density\", [\"temperature\", \"velocity_x\"],\n ... weight_field=\"cell_mass\",\n ... plot_spec=dict(color='red', linestyle=\"--\"))\n >>> plot.save()\n\n This creates profiles from a time series object.\n\n >>> es = yt.simulation(\"AMRCosmology.enzo\", \"Enzo\")\n >>> es.get_time_series()\n\n >>> profiles = []\n >>> labels = []\n >>> plot_specs = []\n >>> for ds in es[-4:]:\n ... ad = ds.all_data()\n ... profiles.append(create_profile(ad, [\"density\"],\n ... fields=[\"temperature\",\n ... \"velocity_x\"]))\n ... labels.append(ds.current_redshift)\n ... plot_specs.append(dict(linestyle=\"--\", alpha=0.7))\n >>>\n >>> plot = yt.ProfilePlot.from_profiles(profiles, labels=labels,\n ... 
plot_specs=plot_specs)\n >>> plot.save()\n\n Use set_line_property to change line properties of one or all profiles.\n\n \"\"\"\n\n x_log = None\n y_log = None\n x_title = None\n y_title = None\n _plot_valid = False\n\n def __init__(\n self,\n data_source,\n x_field,\n y_fields,\n weight_field=\"cell_mass\",\n n_bins=64,\n accumulation=False,\n fractional=False,\n label=None,\n plot_spec=None,\n x_log=True,\n y_log=True,\n ):\n\n data_source = data_object_or_all_data(data_source)\n y_fields = ensure_list(y_fields)\n logs = {x_field: bool(x_log)}\n if isinstance(y_log, bool):\n y_log = {y_field: y_log for y_field in y_fields}\n\n if isinstance(data_source.ds, YTProfileDataset):\n profiles = [data_source.ds.profile]\n else:\n profiles = [\n create_profile(\n data_source,\n [x_field],\n n_bins=[n_bins],\n fields=y_fields,\n weight_field=weight_field,\n accumulation=accumulation,\n fractional=fractional,\n logs=logs,\n )\n ]\n\n if plot_spec is None:\n plot_spec = [dict() for p in profiles]\n if not isinstance(plot_spec, list):\n plot_spec = [plot_spec.copy() for p in profiles]\n\n ProfilePlot._initialize_instance(self, profiles, label, plot_spec, y_log)\n\n @validate_plot\n def save(self, name=None, suffix=None, mpl_kwargs=None):\n r\"\"\"\n Saves a 1d profile plot.\n\n Parameters\n ----------\n name : str\n The output file keyword.\n suffix : string\n Specify the image type by its suffix. If not specified, the output\n type will be inferred from the filename. Defaults to PNG.\n mpl_kwargs : dict\n A dict of keyword arguments to be passed to matplotlib.\n \"\"\"\n if not self._plot_valid:\n self._setup_plots()\n unique = set(self.plots.values())\n if len(unique) < len(self.plots):\n iters = zip(range(len(unique)), sorted(unique))\n else:\n iters = self.plots.items()\n if not suffix:\n suffix = \"png\"\n suffix = \".%s\" % suffix\n fullname = False\n if name is None:\n if len(self.profiles) == 1:\n prefix = self.profiles[0].ds\n else:\n prefix = \"Multi-data\"\n name = \"%s%s\" % (prefix, suffix)\n else:\n sfx = get_image_suffix(name)\n if sfx != \"\":\n suffix = sfx\n prefix = name[: name.rfind(suffix)]\n fullname = True\n else:\n prefix = name\n xfn = self.profiles[0].x_field\n if isinstance(xfn, tuple):\n xfn = xfn[1]\n fns = []\n for uid, plot in iters:\n if isinstance(uid, tuple):\n uid = uid[1]\n if fullname:\n fns.append(\"%s%s\" % (prefix, suffix))\n else:\n fns.append(\"%s_1d-Profile_%s_%s%s\" % (prefix, xfn, uid, suffix))\n mylog.info(\"Saving %s\", fns[-1])\n with matplotlib_style_context():\n plot.save(fns[-1], mpl_kwargs=mpl_kwargs)\n return fns\n\n @validate_plot\n def show(self):\n r\"\"\"This will send any existing plots to the IPython notebook.\n\n If yt is being run from within an IPython session, and it is able to\n determine this, this function will send any existing plots to the\n notebook for display.\n\n If yt can't determine if it's inside an IPython session, it will raise\n YTNotInsideNotebook.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>> pp = ProfilePlot(ds.all_data(), 'density', 'temperature')\n >>> pp.show()\n\n \"\"\"\n if \"__IPYTHON__\" in dir(builtins):\n from IPython.display import display\n\n display(self)\n else:\n raise YTNotInsideNotebook\n\n @validate_plot\n def _repr_html_(self):\n \"\"\"Return an html representation of the plot object. 
Will display as a\n png for each WindowPlotMPL instance in self.plots\"\"\"\n ret = \"\"\n unique = set(self.plots.values())\n if len(unique) < len(self.plots):\n iters = zip(range(len(unique)), sorted(unique))\n else:\n iters = self.plots.items()\n for uid, plot in iters:\n with matplotlib_style_context():\n img = plot._repr_png_()\n img = base64.b64encode(img).decode()\n ret += (\n r'<img style=\"max-width:100%%;max-height:100%%;\" '\n r'src=\"data:image/png;base64,{0}\"><br>'.format(img)\n )\n return ret\n\n def _setup_plots(self):\n if self._plot_valid:\n return\n for f in self.axes:\n self.axes[f].cla()\n if f in self._plot_text:\n self.plots[f].axes.text(\n self._text_xpos[f],\n self._text_ypos[f],\n self._plot_text[f],\n fontproperties=self._font_properties,\n **self._text_kwargs[f],\n )\n\n for i, profile in enumerate(self.profiles):\n for field, field_data in profile.items():\n self.axes[field].plot(\n np.array(profile.x),\n np.array(field_data),\n label=self.label[i],\n **self.plot_spec[i],\n )\n\n for profile in self.profiles:\n for fname in profile.keys():\n axes = self.axes[fname]\n xscale, yscale = self._get_field_log(fname, profile)\n xtitle, ytitle = self._get_field_title(fname, profile)\n\n axes.set_xscale(xscale)\n axes.set_yscale(yscale)\n\n axes.set_ylabel(ytitle)\n axes.set_xlabel(xtitle)\n\n axes.set_ylim(*self.axes.ylim[fname])\n axes.set_xlim(*self.axes.xlim)\n\n if fname in self._plot_title:\n axes.set_title(self._plot_title[fname])\n\n if any(self.label):\n axes.legend(loc=\"best\")\n self._set_font_properties()\n self._plot_valid = True\n\n @classmethod\n def _initialize_instance(cls, obj, profiles, labels, plot_specs, y_log):\n obj._plot_title = {}\n obj._plot_text = {}\n obj._text_xpos = {}\n obj._text_ypos = {}\n obj._text_kwargs = {}\n\n from matplotlib.font_manager import FontProperties\n\n obj._font_properties = FontProperties(family=\"stixgeneral\", size=18)\n obj._font_color = None\n obj.profiles = ensure_list(profiles)\n obj.x_log = None\n obj.y_log = sanitize_field_tuple_keys(y_log, obj.profiles[0].data_source) or {}\n obj.y_title = {}\n obj.x_title = None\n obj.label = sanitize_label(labels, len(obj.profiles))\n if plot_specs is None:\n plot_specs = [dict() for p in obj.profiles]\n obj.plot_spec = plot_specs\n obj.plots = PlotContainerDict()\n obj.figures = FigureContainer(obj.plots)\n obj.axes = AxesContainer(obj.plots)\n obj._setup_plots()\n return obj\n\n @classmethod\n def from_profiles(cls, profiles, labels=None, plot_specs=None, y_log=None):\n r\"\"\"\n Instantiate a ProfilePlot object from a list of profiles\n created with :func:`~yt.data_objects.profiles.create_profile`.\n\n Parameters\n ----------\n profiles : a profile or list of profiles\n A single profile or list of profile objects created with\n :func:`~yt.data_objects.profiles.create_profile`.\n labels : list of strings\n A list of labels for each profile to be overplotted.\n Default: None.\n plot_specs : list of dicts\n A list of dictionaries containing plot keyword\n arguments. For example, [dict(color=\"red\", linestyle=\":\")].\n Default: None.\n\n Examples\n --------\n\n >>> from yt import simulation\n >>> es = simulation(\"AMRCosmology.enzo\", \"Enzo\")\n >>> es.get_time_series()\n\n >>> profiles = []\n >>> labels = []\n >>> plot_specs = []\n >>> for ds in es[-4:]:\n ... ad = ds.all_data()\n ... profiles.append(create_profile(ad, [\"Density\"],\n ... fields=[\"Temperature\",\n ... \"x-velocity\"]))\n ... labels.append(ds.current_redshift)\n ... 
plot_specs.append(dict(linestyle=\"--\", alpha=0.7))\n >>>\n >>> plot = ProfilePlot.from_profiles(profiles, labels=labels,\n ... plot_specs=plot_specs)\n >>> plot.save()\n\n \"\"\"\n if labels is not None and len(profiles) != len(labels):\n raise RuntimeError(\"Profiles list and labels list must be the same size.\")\n if plot_specs is not None and len(plot_specs) != len(profiles):\n raise RuntimeError(\n \"Profiles list and plot_specs list must be the same size.\"\n )\n obj = cls.__new__(cls)\n return cls._initialize_instance(obj, profiles, labels, plot_specs, y_log)\n\n @invalidate_plot\n def set_line_property(self, property, value, index=None):\n r\"\"\"\n Set properties for one or all lines to be plotted.\n\n Parameters\n ----------\n property : str\n The line property to be set.\n value : str, int, float\n The value to set for the line property.\n index : int\n The index of the profile in the list of profiles to be\n changed. If None, change all plotted lines.\n Default : None.\n\n Examples\n --------\n\n Change all the lines in a plot\n plot.set_line_property(\"linestyle\", \"-\")\n\n Change a single line.\n plot.set_line_property(\"linewidth\", 4, index=0)\n\n \"\"\"\n if index is None:\n specs = self.plot_spec\n else:\n specs = [self.plot_spec[index]]\n for spec in specs:\n spec[property] = value\n return self\n\n @invalidate_plot\n def set_log(self, field, log):\n \"\"\"set a field to log or linear.\n\n Parameters\n ----------\n field : string\n the field to set a transform\n log : boolean\n Log on/off.\n \"\"\"\n if field == \"all\":\n self.x_log = log\n for field in list(self.profiles[0].field_data.keys()):\n self.y_log[field] = log\n else:\n (field,) = self.profiles[0].data_source._determine_fields([field])\n if field == self.profiles[0].x_field:\n self.x_log = log\n elif field in self.profiles[0].field_data:\n self.y_log[field] = log\n else:\n raise KeyError(\"Field %s not in profile plot!\" % (field))\n return self\n\n @invalidate_plot\n def set_ylabel(self, field, label):\n \"\"\"Sets a new ylabel for the specified fields\n\n Parameters\n ----------\n field : string\n The name of the field that is to be changed.\n\n label : string\n The label to be placed on the y-axis\n \"\"\"\n if field == \"all\":\n for field in self.profiles[0].field_data:\n self.y_title[field] = label\n else:\n (field,) = self.profiles[0].data_source._determine_fields([field])\n if field in self.profiles[0].field_data:\n self.y_title[field] = label\n else:\n raise KeyError(\"Field %s not in profile plot!\" % (field))\n\n return self\n\n @invalidate_plot\n def set_xlabel(self, label):\n \"\"\"Sets a new xlabel for all profiles\n\n Parameters\n ----------\n label : string\n The label to be placed on the x-axis\n \"\"\"\n self.x_title = label\n\n return self\n\n @invalidate_plot\n def set_unit(self, field, unit):\n \"\"\"Sets a new unit for the requested field\n\n Parameters\n ----------\n field : string\n The name of the field that is to be changed.\n\n new_unit : string or Unit object\n The name of the new unit.\n \"\"\"\n fd = self.profiles[0].data_source._determine_fields(field)[0]\n for profile in self.profiles:\n if fd == profile.x_field:\n profile.set_x_unit(unit)\n elif fd[1] in self.profiles[0].field_map:\n profile.set_field_unit(field, unit)\n else:\n raise KeyError(\"Field %s not in profile plot!\" % (field))\n return self\n\n @invalidate_plot\n def set_xlim(self, xmin=None, xmax=None):\n \"\"\"Sets the limits of the bin field\n\n Parameters\n ----------\n\n xmin : float or None\n The new x 
minimum. Defaults to None, which leaves the xmin\n unchanged.\n\n xmax : float or None\n The new x maximum. Defaults to None, which leaves the xmax\n unchanged.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>> pp = yt.ProfilePlot(ds.all_data(), 'density', 'temperature')\n >>> pp.set_xlim(1e-29, 1e-24)\n >>> pp.save()\n\n \"\"\"\n self.axes.xlim = (xmin, xmax)\n for i, p in enumerate(self.profiles):\n if xmin is None:\n xmi = p.x_bins.min()\n else:\n xmi = xmin\n if xmax is None:\n xma = p.x_bins.max()\n else:\n xma = xmax\n extrema = {p.x_field: ((xmi, str(p.x.units)), (xma, str(p.x.units)))}\n units = {p.x_field: str(p.x.units)}\n if self.x_log is None:\n logs = None\n else:\n logs = {p.x_field: self.x_log}\n for field in p.field_map.values():\n units[field] = str(p.field_data[field].units)\n self.profiles[i] = create_profile(\n p.data_source,\n p.x_field,\n n_bins=len(p.x_bins) - 1,\n fields=list(p.field_map.values()),\n weight_field=p.weight_field,\n accumulation=p.accumulation,\n fractional=p.fractional,\n logs=logs,\n extrema=extrema,\n units=units,\n )\n return self\n\n @invalidate_plot\n def set_ylim(self, field, ymin=None, ymax=None):\n \"\"\"Sets the plot limits for the specified field we are binning.\n\n Parameters\n ----------\n\n field : string or field tuple\n\n The field that we want to adjust the plot limits for.\n\n ymin : float or None\n The new y minimum. Defaults to None, which leaves the ymin\n unchanged.\n\n ymax : float or None\n The new y maximum. Defaults to None, which leaves the ymax\n unchanged.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>> pp = yt.ProfilePlot(ds.all_data(), 'density', ['temperature', 'x-velocity'])\n >>> pp.set_ylim('temperature', 1e4, 1e6)\n >>> pp.save()\n\n \"\"\"\n if field == \"all\":\n fields = list(self.axes.keys())\n else:\n fields = ensure_list(field)\n for profile in self.profiles:\n for field in profile.data_source._determine_fields(fields):\n if field in profile.field_map:\n field = profile.field_map[field]\n self.axes.ylim[field] = (ymin, ymax)\n # Continue on to the next profile.\n break\n return self\n\n def _set_font_properties(self):\n for f in self.plots:\n self.plots[f]._set_font_properties(self._font_properties, self._font_color)\n\n def _get_field_log(self, field_y, profile):\n yfi = profile.field_info[field_y]\n if self.x_log is None:\n x_log = profile.x_log\n else:\n x_log = self.x_log\n y_log = self.y_log.get(field_y, yfi.take_log)\n scales = {True: \"log\", False: \"linear\"}\n return scales[x_log], scales[y_log]\n\n def _get_field_label(self, field, field_info, field_unit, fractional=False):\n field_unit = field_unit.latex_representation()\n field_name = field_info.display_name\n if isinstance(field, tuple):\n field = field[1]\n if field_name is None:\n field_name = r\"$\\rm{\" + field.replace(\"_\", r\"\\ \").title() + r\"}$\"\n elif field_name.find(\"$\") == -1:\n field_name = field_name.replace(\" \", r\"\\ \")\n field_name = r\"$\\rm{\" + field_name + r\"}$\"\n if fractional:\n label = field_name + r\"$\\rm{\\ Probability\\ Density}$\"\n elif field_unit is None or field_unit == \"\":\n label = field_name\n else:\n label = field_name + r\"$\\ \\ (\" + field_unit + r\")$\"\n return label\n\n def _get_field_title(self, field_y, profile):\n field_x = profile.x_field\n xfi = profile.field_info[field_x]\n yfi = profile.field_info[field_y]\n x_unit = 
profile.x.units\n y_unit = profile.field_units[field_y]\n fractional = profile.fractional\n x_title = self.x_title or self._get_field_label(field_x, xfi, x_unit)\n y_title = self.y_title.get(field_y, None) or self._get_field_label(\n field_y, yfi, y_unit, fractional\n )\n\n return (x_title, y_title)\n\n @invalidate_plot\n def annotate_title(self, title, field=\"all\"):\n r\"\"\"Set a title for the plot.\n\n Parameters\n ----------\n title : str\n The title to add.\n field : str or list of str\n The field name for which title needs to be set.\n\n Examples\n --------\n >>> # To set title for all the fields:\n >>> plot.annotate_title(\"This is a Profile Plot\")\n\n >>> # To set title for specific fields:\n >>> plot.annotate_title(\"Profile Plot for Temperature\", \"temperature\")\n\n >>> # Setting same plot title for both the given fields\n >>> plot.annotate_title(\"Profile Plot: Temperature-Dark Matter Density\",\n [\"temperature\", \"dark_matter_density\"])\n\n \"\"\"\n if field == \"all\":\n fields = list(self.axes.keys())\n else:\n fields = ensure_list(field)\n for profile in self.profiles:\n for field in profile.data_source._determine_fields(fields):\n if field in profile.field_map:\n field = profile.field_map[field]\n self._plot_title[field] = title\n return self\n\n @invalidate_plot\n def annotate_text(self, xpos=0.0, ypos=0.0, text=None, field=\"all\", **text_kwargs):\n r\"\"\"Allow the user to insert text onto the plot\n\n The x-position and y-position must be given as well as the text string.\n Add *text* to plot at location *xpos*, *ypos* in plot coordinates for\n the given fields or by default for all fields.\n (see example below).\n\n Parameters\n ----------\n xpos : float\n Position on plot in x-coordinates.\n ypos : float\n Position on plot in y-coordinates.\n text : str\n The text to insert onto the plot.\n field : str or tuple\n The name of the field to add text to.\n text_kwargs : dict\n Dictionary of text keyword arguments to be passed to matplotlib\n\n >>> import yt\n >>> from yt.units import kpc\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>> my_galaxy = ds.disk(ds.domain_center, [0.0, 0.0, 1.0], 10*kpc, 3*kpc)\n >>> plot = yt.ProfilePlot(my_galaxy, \"density\", [\"temperature\"])\n\n >>> # Annotate text for all the fields\n >>> plot.annotate_text(1e-26, 1e5, \"This is annotated text in the plot area.\")\n >>> plot.save()\n\n >>> # Annotate text for a given field\n >>> plot.annotate_text(1e-26, 1e5, \"Annotated text\", \"Temperature\")\n >>> plot.save()\n\n >>> # Annotate text for multiple fields\n >>> fields = [\"temperature\", \"density\"]\n >>> plot.annotate_text(1e-26, 1e5, \"Annotated text\", fields)\n >>> plot.save()\n\n \"\"\"\n if field == \"all\":\n fields = list(self.axes.keys())\n else:\n fields = ensure_list(field)\n for profile in self.profiles:\n for field in profile.data_source._determine_fields(fields):\n if field in profile.field_map:\n field = profile.field_map[field]\n self._plot_text[field] = text\n self._text_xpos[field] = xpos\n self._text_ypos[field] = ypos\n self._text_kwargs[field] = text_kwargs\n return self\n\n\nclass PhasePlot(ImagePlotContainer):\n r\"\"\"\n Create a 2d profile (phase) plot from a data source or from\n profile object created with\n `yt.data_objects.profiles.create_profile`.\n\n Given a data object (all_data, region, sphere, etc.), an x field,\n y field, and z field (or fields), this will create a two-dimensional\n profile of the average (or total) value of the z field in bins of the\n x and y fields.\n\n 
Parameters\n ----------\n data_source : YTSelectionContainer Object\n The data object to be profiled, such as all_data, region, or\n sphere. If a dataset is passed in instead, an all_data data object\n is generated internally from the dataset.\n x_field : str\n The x binning field for the profile.\n y_field : str\n The y binning field for the profile.\n z_fields : str or list\n The field or fields to be profiled.\n weight_field : str\n The weight field for calculating weighted averages. If None,\n the profile values are the sum of the field values within the bin.\n Otherwise, the values are a weighted average.\n Default : \"cell_mass\".\n x_bins : int\n The number of bins in x field for the profile.\n Default: 128.\n y_bins : int\n The number of bins in y field for the profile.\n Default: 128.\n accumulation : bool or list of bools\n If True, the profile values for a bin n are the cumulative sum of\n all the values from bin 0 to n. If -True, the sum is reversed so\n that the value for bin n is the cumulative sum from bin N (total bins)\n to n. A list of values can be given to control the summation in each\n dimension independently.\n Default: False.\n fractional : If True the profile values are divided by the sum of all\n the profile data such that the profile represents a probability\n distribution function.\n fontsize : int\n Font size for all text in the plot.\n Default: 18.\n figure_size : int\n Size in inches of the image.\n Default: 8 (8x8)\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n >>> ad = ds.all_data()\n >>> plot = yt.PhasePlot(ad, \"density\", \"temperature\", [\"cell_mass\"],\n ... weight_field=None)\n >>> plot.save()\n\n >>> # Change plot properties.\n >>> plot.set_cmap(\"cell_mass\", \"jet\")\n >>> plot.set_zlim(\"cell_mass\", 1e8, 1e13)\n >>> plot.annotate_title(\"This is a phase plot\")\n\n \"\"\"\n x_log = None\n y_log = None\n plot_title = None\n _plot_valid = False\n _profile_valid = False\n _plot_type = \"Phase\"\n _xlim = (None, None)\n _ylim = (None, None)\n\n def __init__(\n self,\n data_source,\n x_field,\n y_field,\n z_fields,\n weight_field=\"cell_mass\",\n x_bins=128,\n y_bins=128,\n accumulation=False,\n fractional=False,\n fontsize=18,\n figure_size=8.0,\n ):\n\n data_source = data_object_or_all_data(data_source)\n\n if isinstance(data_source.ds, YTProfileDataset):\n profile = data_source.ds.profile\n else:\n profile = create_profile(\n data_source,\n [x_field, y_field],\n ensure_list(z_fields),\n n_bins=[x_bins, y_bins],\n weight_field=weight_field,\n accumulation=accumulation,\n fractional=fractional,\n )\n\n type(self)._initialize_instance(\n self, data_source, profile, fontsize, figure_size\n )\n\n @classmethod\n def _initialize_instance(cls, obj, data_source, profile, fontsize, figure_size):\n obj.plot_title = {}\n obj.z_log = {}\n obj.z_title = {}\n obj._initfinished = False\n obj.x_log = None\n obj.y_log = None\n obj._plot_text = {}\n obj._text_xpos = {}\n obj._text_ypos = {}\n obj._text_kwargs = {}\n obj._profile = profile\n obj._profile_valid = True\n obj._xlim = (None, None)\n obj._ylim = (None, None)\n super(PhasePlot, obj).__init__(data_source, figure_size, fontsize)\n obj._setup_plots()\n obj._initfinished = True\n return obj\n\n def _get_field_title(self, field_z, profile):\n field_x = profile.x_field\n field_y = profile.y_field\n xfi = profile.field_info[field_x]\n yfi = profile.field_info[field_y]\n zfi = profile.field_info[field_z]\n x_unit = profile.x.units\n y_unit = profile.y.units\n 
z_unit = profile.field_units[field_z]\n fractional = profile.fractional\n x_label, y_label, z_label = self._get_axes_labels(field_z)\n x_title = x_label or self._get_field_label(field_x, xfi, x_unit)\n y_title = y_label or self._get_field_label(field_y, yfi, y_unit)\n z_title = z_label or self._get_field_label(field_z, zfi, z_unit, fractional)\n return (x_title, y_title, z_title)\n\n def _get_field_label(self, field, field_info, field_unit, fractional=False):\n field_unit = field_unit.latex_representation()\n field_name = field_info.display_name\n if isinstance(field, tuple):\n field = field[1]\n if field_name is None:\n field_name = r\"$\\rm{\" + field.replace(\"_\", r\"\\ \").title() + r\"}$\"\n elif field_name.find(\"$\") == -1:\n field_name = field_name.replace(\" \", r\"\\ \")\n field_name = r\"$\\rm{\" + field_name + r\"}$\"\n if fractional:\n label = field_name + r\"$\\rm{\\ Probability\\ Density}$\"\n elif field_unit is None or field_unit == \"\":\n label = field_name\n else:\n label = field_name + r\"$\\ \\ (\" + field_unit + r\")$\"\n return label\n\n def _get_field_log(self, field_z, profile):\n zfi = profile.field_info[field_z]\n if self.x_log is None:\n x_log = profile.x_log\n else:\n x_log = self.x_log\n if self.y_log is None:\n y_log = profile.y_log\n else:\n y_log = self.y_log\n if field_z in self.z_log:\n z_log = self.z_log[field_z]\n else:\n z_log = zfi.take_log\n scales = {True: \"log\", False: \"linear\"}\n return scales[x_log], scales[y_log], scales[z_log]\n\n def _recreate_frb(self):\n # needed for API compatibility with PlotWindow\n pass\n\n @property\n def profile(self):\n if not self._profile_valid:\n self._recreate_profile()\n return self._profile\n\n @property\n def fields(self):\n return list(self.plots.keys())\n\n def _setup_plots(self):\n if self._plot_valid:\n return\n for f, data in self.profile.items():\n fig = None\n axes = None\n cax = None\n draw_colorbar = True\n draw_axes = True\n zlim = (None, None)\n xlim = self._xlim\n ylim = self._ylim\n if f in self.plots:\n draw_colorbar = self.plots[f]._draw_colorbar\n draw_axes = self.plots[f]._draw_axes\n zlim = (self.plots[f].zmin, self.plots[f].zmax)\n if self.plots[f].figure is not None:\n fig = self.plots[f].figure\n axes = self.plots[f].axes\n cax = self.plots[f].cax\n\n x_scale, y_scale, z_scale = self._get_field_log(f, self.profile)\n x_title, y_title, z_title = self._get_field_title(f, self.profile)\n\n if zlim == (None, None):\n if z_scale == \"log\":\n positive_values = data[data > 0.0]\n if len(positive_values) == 0:\n mylog.warning(\n \"Profiled field %s has no positive \"\n \"values. 
Max = %f.\" % (f, np.nanmax(data))\n )\n mylog.warning(\"Switching to linear colorbar scaling.\")\n zmin = np.nanmin(data)\n z_scale = \"linear\"\n self._field_transform[f] = linear_transform\n else:\n zmin = positive_values.min()\n self._field_transform[f] = log_transform\n else:\n zmin = np.nanmin(data)\n self._field_transform[f] = linear_transform\n zlim = [zmin, np.nanmax(data)]\n\n font_size = self._font_properties.get_size()\n f = self.profile.data_source._determine_fields(f)[0]\n\n # if this is a Particle Phase Plot AND if we using a single color,\n # override the colorbar here.\n splat_color = getattr(self, \"splat_color\", None)\n if splat_color is not None:\n cmap = matplotlib.colors.ListedColormap(splat_color, \"dummy\")\n else:\n cmap = self._colormaps[f]\n\n self.plots[f] = PhasePlotMPL(\n self.profile.x,\n self.profile.y,\n data,\n x_scale,\n y_scale,\n z_scale,\n cmap,\n zlim,\n self.figure_size,\n font_size,\n fig,\n axes,\n cax,\n )\n\n self.plots[f]._toggle_axes(draw_axes)\n self.plots[f]._toggle_colorbar(draw_colorbar)\n\n self.plots[f].axes.xaxis.set_label_text(x_title)\n self.plots[f].axes.yaxis.set_label_text(y_title)\n self.plots[f].cax.yaxis.set_label_text(z_title)\n\n self.plots[f].axes.set_xlim(xlim)\n self.plots[f].axes.set_ylim(ylim)\n\n color = self._background_color[f]\n\n if MPL_VERSION < LooseVersion(\"2.0.0\"):\n self.plots[f].axes.set_axis_bgcolor(color)\n else:\n self.plots[f].axes.set_facecolor(color)\n\n if f in self._plot_text:\n self.plots[f].axes.text(\n self._text_xpos[f],\n self._text_ypos[f],\n self._plot_text[f],\n fontproperties=self._font_properties,\n **self._text_kwargs[f],\n )\n\n if f in self.plot_title:\n self.plots[f].axes.set_title(self.plot_title[f])\n\n # x-y axes minorticks\n if f not in self._minorticks:\n self._minorticks[f] = True\n if self._minorticks[f]:\n self.plots[f].axes.minorticks_on()\n else:\n self.plots[f].axes.minorticks_off()\n\n # colorbar minorticks\n if f not in self._cbar_minorticks:\n self._cbar_minorticks[f] = True\n if self._cbar_minorticks[f]:\n if self._field_transform[f] == linear_transform:\n self.plots[f].cax.minorticks_on()\n elif MPL_VERSION < LooseVersion(\"3.0.0\"):\n # before matplotlib 3 log-scaled colorbars internally used\n # a linear scale going from zero to one and did not draw\n # minor ticks. Since we want minor ticks, calculate\n # where the minor ticks should go in this linear scale\n # and add them manually.\n vmin = np.float64(self.plots[f].cb.norm.vmin)\n vmax = np.float64(self.plots[f].cb.norm.vmax)\n mticks = self.plots[f].image.norm(get_log_minorticks(vmin, vmax))\n self.plots[f].cax.yaxis.set_ticks(mticks, minor=True)\n else:\n self.plots[f].cax.minorticks_off()\n\n self._set_font_properties()\n\n # if this is a particle plot with one color only, hide the cbar here\n if hasattr(self, \"use_cbar\") and not self.use_cbar:\n self.plots[f].hide_colorbar()\n\n self._plot_valid = True\n\n @classmethod\n def from_profile(cls, profile, fontsize=18, figure_size=8.0):\n r\"\"\"\n Instantiate a PhasePlot object from a profile object created\n with :func:`~yt.data_objects.profiles.create_profile`.\n\n Parameters\n ----------\n profile : An instance of :class:`~yt.data_objects.profiles.ProfileND`\n A single profile object.\n fontsize : float\n The fontsize to use, in points.\n figure_size : float\n The figure size to use, in inches.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>> extrema = {\n ... 'density': (1e-31, 1e-24),\n ... 
'temperature': (1e1, 1e8),\n ... 'cell_mass': (1e-6, 1e-1),\n ... }\n >>> profile = yt.create_profile(ds.all_data(), ['density', 'temperature'],\n ... fields=['cell_mass'], extrema=extrema,\n ... fractional=True)\n >>> ph = yt.PhasePlot.from_profile(profile)\n >>> ph.save()\n \"\"\"\n obj = cls.__new__(cls)\n data_source = profile.data_source\n return cls._initialize_instance(\n obj, data_source, profile, fontsize, figure_size\n )\n\n def annotate_text(self, xpos=0.0, ypos=0.0, text=None, **text_kwargs):\n r\"\"\"\n Allow the user to insert text onto the plot.\n The x-position and y-position must be given as well as the text string.\n Add *text* to plot at location *xpos*, *ypos* in plot coordinates\n (see example below).\n\n Parameters\n ----------\n xpos : float\n Position on plot in x-coordinates.\n ypos : float\n Position on plot in y-coordinates.\n text : str\n The text to insert onto the plot.\n text_kwargs : dict\n Dictionary of text keyword arguments to be passed to matplotlib\n\n >>> plot.annotate_text(1e-15, 5e4, \"Hello YT\")\n\n \"\"\"\n for f in self.data_source._determine_fields(list(self.plots.keys())):\n if self.plots[f].figure is not None and text is not None:\n self.plots[f].axes.text(\n xpos,\n ypos,\n text,\n fontproperties=self._font_properties,\n **text_kwargs,\n )\n self._plot_text[f] = text\n self._text_xpos[f] = xpos\n self._text_ypos[f] = ypos\n self._text_kwargs[f] = text_kwargs\n return self\n\n @validate_plot\n def save(self, name=None, suffix=None, mpl_kwargs=None):\n r\"\"\"\n Saves a 2d profile plot.\n\n Parameters\n ----------\n name : str\n The output file keyword.\n suffix : string\n Specify the image type by its suffix. If not specified, the output\n type will be inferred from the filename. Defaults to PNG.\n mpl_kwargs : dict\n A dict of keyword arguments to be passed to matplotlib.\n\n >>> plot.save(mpl_kwargs={'bbox_inches':'tight'})\n\n \"\"\"\n names = []\n if not self._plot_valid:\n self._setup_plots()\n if mpl_kwargs is None:\n mpl_kwargs = {}\n if name is None:\n name = str(self.profile.ds)\n name = os.path.expanduser(name)\n xfn = self.profile.x_field\n yfn = self.profile.y_field\n if isinstance(xfn, tuple):\n xfn = xfn[1]\n if isinstance(yfn, tuple):\n yfn = yfn[1]\n for f in self.profile.field_data:\n _f = f\n if isinstance(f, tuple):\n _f = _f[1]\n middle = \"2d-Profile_%s_%s_%s\" % (xfn, yfn, _f)\n splitname = os.path.split(name)\n if splitname[0] != \"\" and not os.path.isdir(splitname[0]):\n os.makedirs(splitname[0])\n if os.path.isdir(name) and name != str(self.profile.ds):\n prefix = name + (os.sep if name[-1] != os.sep else \"\")\n prefix += str(self.profile.ds)\n else:\n prefix = name\n if suffix is None:\n suffix = get_image_suffix(name)\n if suffix != \"\":\n for k, v in self.plots.items():\n names.append(v.save(name, mpl_kwargs))\n return names\n else:\n suffix = \"png\"\n fn = \"%s_%s.%s\" % (prefix, middle, suffix)\n names.append(fn)\n self.plots[f].save(fn, mpl_kwargs)\n return names\n\n @invalidate_plot\n def set_font(self, font_dict=None):\n \"\"\"\n\n Set the font and font properties.\n\n Parameters\n ----------\n\n font_dict : dict\n A dict of keyword parameters to be passed to\n :class:`matplotlib.font_manager.FontProperties`.\n\n Possible keys include:\n\n * family - The font family. Can be 'serif', 'sans-serif', 'cursive',\n 'fantasy', or 'monospace'.\n * style - The font style. 
Either normal, italic or oblique.\n * color - A valid color string like 'r', 'g', 'red', 'cobalt',\n and 'orange'.\n * variant - Either normal or small-caps.\n * size - Either a relative value of xx-small, x-small, small,\n medium, large, x-large, xx-large or an absolute font size, e.g. 12\n * stretch - A numeric value in the range 0-1000 or one of\n ultra-condensed, extra-condensed, condensed, semi-condensed,\n normal, semi-expanded, expanded, extra-expanded or ultra-expanded\n * weight - A numeric value in the range 0-1000 or one of ultralight,\n light, normal, regular, book, medium, roman, semibold, demibold,\n demi, bold, heavy, extra bold, or black\n\n See the matplotlib font manager API documentation for more details.\n https://matplotlib.org/api/font_manager_api.html\n\n Notes\n -----\n\n Mathtext axis labels will only obey the `size` and `color` keywords.\n\n Examples\n --------\n\n This sets the font to be 24-pt, blue, sans-serif, italic, and\n bold-face.\n\n >>> prof = ProfilePlot(ds.all_data(), 'density', 'temperature')\n >>> prof.set_font({'family':'sans-serif', 'style':'italic',\n ... 'weight':'bold', 'size':24, 'color':'blue'})\n\n \"\"\"\n from matplotlib.font_manager import FontProperties\n\n if font_dict is None:\n font_dict = {}\n if \"color\" in font_dict:\n self._font_color = font_dict.pop(\"color\")\n # Set default values if the user does not explicitly set them.\n # this prevents reverting to the matplotlib defaults.\n font_dict.setdefault(\"family\", \"stixgeneral\")\n font_dict.setdefault(\"size\", 18)\n self._font_properties = FontProperties(**font_dict)\n return self\n\n @invalidate_plot\n def set_title(self, field, title):\n \"\"\"Set a title for the plot.\n\n Parameters\n ----------\n field : str\n The z field of the plot to add the title.\n title : str\n The title to add.\n\n Examples\n --------\n\n >>> plot.set_title(\"cell_mass\", \"This is a phase plot\")\n\n \"\"\"\n self.plot_title[self.data_source._determine_fields(field)[0]] = title\n return self\n\n @invalidate_plot\n def annotate_title(self, title):\n \"\"\"Set a title for the plot.\n\n Parameters\n ----------\n title : str\n The title to add.\n\n Examples\n --------\n\n >>> plot.annotate_title(\"This is a phase plot\")\n\n \"\"\"\n for f in self._profile.field_data:\n if isinstance(f, tuple):\n f = f[1]\n self.plot_title[self.data_source._determine_fields(f)[0]] = title\n return self\n\n @invalidate_plot\n def reset_plot(self):\n self.plots = {}\n return self\n\n @invalidate_plot\n def set_log(self, field, log):\n \"\"\"set a field to log or linear.\n\n Parameters\n ----------\n field : string\n the field to set a transform\n log : boolean\n Log on/off.\n \"\"\"\n p = self._profile\n if field == \"all\":\n self.x_log = log\n self.y_log = log\n for field in p.field_data:\n self.z_log[field] = log\n self._profile_valid = False\n else:\n (field,) = self.profile.data_source._determine_fields([field])\n if field == p.x_field:\n self.x_log = log\n self._profile_valid = False\n elif field == p.y_field:\n self.y_log = log\n self._profile_valid = False\n elif field in p.field_data:\n self.z_log[field] = log\n else:\n raise KeyError(\"Field %s not in phase plot!\" % (field))\n return self\n\n @invalidate_plot\n def set_unit(self, field, unit):\n \"\"\"Sets a new unit for the requested field\n\n Parameters\n ----------\n field : string\n The name of the field that is to be changed.\n\n new_unit : string or Unit object\n The name of the new unit.\n \"\"\"\n fd = self.data_source._determine_fields(field)[0]\n if fd 
== self.profile.x_field:\n self.profile.set_x_unit(unit)\n elif fd == self.profile.y_field:\n self.profile.set_y_unit(unit)\n elif fd in self.profile.field_data.keys():\n self.profile.set_field_unit(field, unit)\n self.plots[field].zmin, self.plots[field].zmax = (None, None)\n else:\n raise KeyError(\"Field %s not in phase plot!\" % (field))\n return self\n\n @invalidate_plot\n @invalidate_profile\n def set_xlim(self, xmin=None, xmax=None):\n \"\"\"Sets the limits of the x bin field\n\n Parameters\n ----------\n\n xmin : float or None\n The new x minimum in the current x-axis units. Defaults to None,\n which leaves the xmin unchanged.\n\n xmax : float or None\n The new x maximum in the current x-axis units. Defaults to None,\n which leaves the xmax unchanged.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>> pp = yt.PhasePlot(ds.all_data(), 'density', 'temperature', 'cell_mass')\n >>> pp.set_xlim(1e-29, 1e-24)\n >>> pp.save()\n\n \"\"\"\n p = self._profile\n if xmin is None:\n xmin = p.x_bins.min()\n elif not hasattr(xmin, \"units\"):\n xmin = self.ds.quan(xmin, p.x_bins.units)\n if xmax is None:\n xmax = p.x_bins.max()\n elif not hasattr(xmax, \"units\"):\n xmax = self.ds.quan(xmax, p.x_bins.units)\n self._xlim = (xmin, xmax)\n return self\n\n @invalidate_plot\n @invalidate_profile\n def set_ylim(self, ymin=None, ymax=None):\n \"\"\"Sets the plot limits for the y bin field.\n\n Parameters\n ----------\n\n ymin : float or None\n The new y minimum in the current y-axis units. Defaults to None,\n which leaves the ymin unchanged.\n\n ymax : float or None\n The new y maximum in the current y-axis units. Defaults to None,\n which leaves the ymax unchanged.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n >>> pp = yt.PhasePlot(ds.all_data(), 'density', 'temperature', 'cell_mass')\n >>> pp.set_ylim(1e4, 1e6)\n >>> pp.save()\n\n \"\"\"\n p = self._profile\n if ymin is None:\n ymin = p.y_bins.min()\n elif not hasattr(ymin, \"units\"):\n ymin = self.ds.quan(ymin, p.y_bins.units)\n if ymax is None:\n ymax = p.y_bins.max()\n elif not hasattr(ymax, \"units\"):\n ymax = self.ds.quan(ymax, p.y_bins.units)\n self._ylim = (ymin, ymax)\n return self\n\n def _recreate_profile(self):\n p = self._profile\n units = {p.x_field: str(p.x.units), p.y_field: str(p.y.units)}\n zunits = dict((field, str(p.field_units[field])) for field in p.field_units)\n extrema = {p.x_field: self._xlim, p.y_field: self._ylim}\n if self.x_log is not None or self.y_log is not None:\n logs = {}\n else:\n logs = None\n if self.x_log is not None:\n logs[p.x_field] = self.x_log\n if self.y_log is not None:\n logs[p.y_field] = self.y_log\n deposition = getattr(p, \"deposition\", None)\n additional_kwargs = {\n \"accumulation\": p.accumulation,\n \"fractional\": p.fractional,\n \"deposition\": deposition,\n }\n self._profile = create_profile(\n p.data_source,\n [p.x_field, p.y_field],\n list(p.field_map.values()),\n n_bins=[len(p.x_bins) - 1, len(p.y_bins) - 1],\n weight_field=p.weight_field,\n units=units,\n extrema=extrema,\n logs=logs,\n **additional_kwargs,\n )\n for field in zunits:\n self._profile.set_field_unit(field, zunits[field])\n self._profile_valid = True\n\n\nclass PhasePlotMPL(ImagePlotMPL):\n \"\"\"A container for a single matplotlib figure and axes for a PhasePlot\"\"\"\n\n def __init__(\n self,\n x_data,\n y_data,\n data,\n x_scale,\n y_scale,\n z_scale,\n cmap,\n zlim,\n figure_size,\n fontsize,\n figure,\n 
axes,\n cax,\n ):\n self._initfinished = False\n self._draw_colorbar = True\n self._draw_axes = True\n self._figure_size = figure_size\n\n # Compute layout\n fontscale = float(fontsize) / 18.0\n if fontscale < 1.0:\n fontscale = np.sqrt(fontscale)\n\n if iterable(figure_size):\n self._cb_size = 0.0375 * figure_size[0]\n else:\n self._cb_size = 0.0375 * figure_size\n self._ax_text_size = [1.1 * fontscale, 0.9 * fontscale]\n self._top_buff_size = 0.30 * fontscale\n self._aspect = 1.0\n\n size, axrect, caxrect = self._get_best_layout()\n\n super(PhasePlotMPL, self).__init__(\n size, axrect, caxrect, zlim, figure, axes, cax\n )\n\n self._init_image(x_data, y_data, data, x_scale, y_scale, z_scale, zlim, cmap)\n\n self._initfinished = True\n\n def _init_image(\n self, x_data, y_data, image_data, x_scale, y_scale, z_scale, zlim, cmap\n ):\n \"\"\"Store output of imshow in image variable\"\"\"\n if z_scale == \"log\":\n norm = matplotlib.colors.LogNorm(zlim[0], zlim[1])\n elif z_scale == \"linear\":\n norm = matplotlib.colors.Normalize(zlim[0], zlim[1])\n self.image = None\n self.cb = None\n self.image = self.axes.pcolormesh(\n np.array(x_data),\n np.array(y_data),\n np.array(image_data.T),\n norm=norm,\n cmap=cmap,\n )\n\n self.axes.set_xscale(x_scale)\n self.axes.set_yscale(y_scale)\n self.cb = self.figure.colorbar(self.image, self.cax)\n if z_scale == \"linear\":\n self.cb.formatter.set_scientific(True)\n self.cb.formatter.set_powerlimits((-2, 3))\n self.cb.update_ticks()\n", "import io as io\nimport os\nimport warnings\n\nimport numpy as np\n\nfrom yt.data_objects.grid_patch import AMRGridPatch\nfrom yt.data_objects.static_output import Dataset\nfrom yt.fields.field_info_container import NullFunc\nfrom yt.frontends.enzo.misc import cosmology_get_units\nfrom yt.frontends.enzo_p.fields import EnzoPFieldInfo\nfrom yt.frontends.enzo_p.misc import (\n get_block_info,\n get_child_index,\n get_root_block_id,\n get_root_blocks,\n is_parent,\n nested_dict_get,\n)\nfrom yt.funcs import ensure_tuple, get_pbar, setdefaultattr\nfrom yt.geometry.grid_geometry_handler import GridIndex\nfrom yt.utilities.cosmology import Cosmology\nfrom yt.utilities.logger import ytLogger as mylog\nfrom yt.utilities.on_demand_imports import _h5py as h5py, _libconf as libconf\n\n\nclass EnzoPGrid(AMRGridPatch):\n \"\"\"\n Class representing a single EnzoP Grid instance.\n \"\"\"\n\n _id_offset = 0\n _refine_by = 2\n\n def __init__(self, id, index, block_name, filename=None):\n \"\"\"\n Returns an instance of EnzoPGrid with *id*, associated with\n *filename* and *index*.\n \"\"\"\n # All of the field parameters will be passed to us as needed.\n AMRGridPatch.__init__(self, id, filename=filename, index=index)\n self.block_name = block_name\n self._children_ids = None\n self._parent_id = -1\n self.Level = -1\n\n def __repr__(self):\n return \"EnzoPGrid_%04d\" % self.id\n\n def _prepare_grid(self):\n \"\"\"Copies all the appropriate attributes from the index.\"\"\"\n h = self.index # cache it\n my_ind = self.id - self._id_offset\n self.ActiveDimensions = h.grid_dimensions[my_ind]\n self.LeftEdge = h.grid_left_edge[my_ind]\n self.RightEdge = h.grid_right_edge[my_ind]\n\n def get_parent_id(self, desc_block_name):\n if self.block_name == desc_block_name:\n raise RuntimeError(\"Child and parent are the same!\")\n dim = self.ds.dimensionality\n d_block = desc_block_name[1:].replace(\":\", \"\")\n parent = self\n\n while True:\n a_block = parent.block_name[1:].replace(\":\", \"\")\n gengap = (len(d_block) - len(a_block)) / dim\n if 
gengap <= 1:\n return parent.id\n cid = get_child_index(a_block, d_block)\n parent = self.index.grids[parent._children_ids[cid]]\n\n def add_child(self, child):\n if self._children_ids is None:\n self._children_ids = -1 * np.ones(\n self._refine_by ** self.ds.dimensionality, dtype=np.int64\n )\n\n a_block = self.block_name[1:].replace(\":\", \"\")\n d_block = child.block_name[1:].replace(\":\", \"\")\n cid = get_child_index(a_block, d_block)\n self._children_ids[cid] = child.id\n\n _particle_count = None\n\n @property\n def particle_count(self):\n if self._particle_count is None:\n with h5py.File(self.filename, mode=\"r\") as f:\n fnstr = \"%s/%s\" % (\n self.block_name,\n self.ds.index.io._sep.join([\"particle\", \"%s\", \"%s\"]),\n )\n self._particle_count = dict(\n (ptype, f.get(fnstr % (ptype, pfield)).size)\n for ptype, pfield in self.ds.index.io.sample_pfields.items()\n )\n return self._particle_count\n\n _total_particles = None\n\n @property\n def total_particles(self):\n if self._total_particles is None:\n self._total_particles = sum(self.particle_count.values())\n return self._total_particles\n\n @property\n def Parent(self):\n if self._parent_id == -1:\n return None\n return self.index.grids[self._parent_id]\n\n @property\n def Children(self):\n if self._children_ids is None:\n return []\n return [self.index.grids[cid] for cid in self._children_ids]\n\n\nclass EnzoPHierarchy(GridIndex):\n\n _strip_path = False\n grid = EnzoPGrid\n _preload_implemented = True\n\n def __init__(self, ds, dataset_type):\n\n self.dataset_type = dataset_type\n self.directory = os.path.dirname(ds.parameter_filename)\n self.index_filename = ds.parameter_filename\n if os.path.getsize(self.index_filename) == 0:\n raise IOError(-1, \"File empty\", self.index_filename)\n\n GridIndex.__init__(self, ds, dataset_type)\n self.dataset.dataset_type = self.dataset_type\n\n def _count_grids(self):\n fblock_size = 32768\n f = open(self.ds.parameter_filename, \"r\")\n f.seek(0, 2)\n file_size = f.tell()\n nblocks = np.ceil(float(file_size) / fblock_size).astype(np.int64)\n f.seek(0)\n offset = f.tell()\n ngrids = 0\n for ib in range(nblocks):\n my_block = min(fblock_size, file_size - offset)\n buff = f.read(my_block)\n ngrids += buff.count(\"\\n\")\n offset += my_block\n f.close()\n self.num_grids = ngrids\n self.dataset_type = \"enzo_p\"\n\n def _parse_index(self):\n self.grids = np.empty(self.num_grids, dtype=\"object\")\n\n pbar = get_pbar(\"Parsing Hierarchy\", self.num_grids)\n f = open(self.ds.parameter_filename, \"r\")\n fblock_size = 32768\n f.seek(0, 2)\n file_size = f.tell()\n nblocks = np.ceil(float(file_size) / fblock_size).astype(np.int64)\n f.seek(0)\n offset = f.tell()\n lstr = \"\"\n # place child blocks after the root blocks\n rbdim = self.ds.root_block_dimensions\n nroot_blocks = rbdim.prod()\n child_id = nroot_blocks\n\n last_pid = None\n for ib in range(nblocks):\n fblock = min(fblock_size, file_size - offset)\n buff = lstr + f.read(fblock)\n bnl = 0\n for inl in range(buff.count(\"\\n\")):\n nnl = buff.find(\"\\n\", bnl)\n line = buff[bnl:nnl]\n block_name, block_file = line.split()\n\n # Handling of the B, B_, and B__ blocks is consistent with\n # other unrefined blocks\n level, left, right = get_block_info(block_name)\n rbindex = get_root_block_id(block_name)\n rbid = (\n rbindex[0] * rbdim[1:].prod()\n + rbindex[1] * rbdim[2:].prod()\n + rbindex[2]\n )\n\n # There are also blocks at lower level than the\n # real root blocks. 
These can be ignored.\n if level == 0:\n check_root = get_root_blocks(block_name).prod()\n if check_root < nroot_blocks:\n level = -1\n\n if level == -1:\n grid_id = child_id\n parent_id = -1\n child_id += 1\n elif level == 0:\n grid_id = rbid\n parent_id = -1\n else:\n grid_id = child_id\n # Try the last parent_id first\n if last_pid is not None and is_parent(\n self.grids[last_pid].block_name, block_name\n ):\n parent_id = last_pid\n else:\n parent_id = self.grids[rbid].get_parent_id(block_name)\n last_pid = parent_id\n child_id += 1\n\n my_grid = self.grid(\n grid_id,\n self,\n block_name,\n filename=os.path.join(self.directory, block_file),\n )\n my_grid.Level = level\n my_grid._parent_id = parent_id\n\n self.grids[grid_id] = my_grid\n self.grid_levels[grid_id] = level\n self.grid_left_edge[grid_id] = left\n self.grid_right_edge[grid_id] = right\n self.grid_dimensions[grid_id] = self.ds.active_grid_dimensions\n\n if level > 0:\n self.grids[parent_id].add_child(my_grid)\n\n bnl = nnl + 1\n pbar.update(1)\n lstr = buff[bnl:]\n offset += fblock\n\n f.close()\n pbar.finish()\n\n slope = self.ds.domain_width / self.ds.arr(np.ones(3), \"code_length\")\n self.grid_left_edge = self.grid_left_edge * slope + self.ds.domain_left_edge\n self.grid_right_edge = self.grid_right_edge * slope + self.ds.domain_left_edge\n\n def _populate_grid_objects(self):\n for g in self.grids:\n g._prepare_grid()\n g._setup_dx()\n self.max_level = self.grid_levels.max()\n\n def _setup_derived_fields(self):\n super(EnzoPHierarchy, self)._setup_derived_fields()\n for fname, field in self.ds.field_info.items():\n if not field.particle_type:\n continue\n if isinstance(fname, tuple):\n continue\n if field._function is NullFunc:\n continue\n\n def _get_particle_type_counts(self):\n return dict(\n (ptype, sum([g.particle_count[ptype] for g in self.grids]))\n for ptype in self.ds.particle_types_raw\n )\n\n def _detect_output_fields(self):\n self.field_list = []\n # Do this only on the root processor to save disk work.\n if self.comm.rank in (0, None):\n # Just check the first grid.\n grid = self.grids[0]\n field_list, ptypes = self.io._read_field_names(grid)\n mylog.debug(\"Grid %s has: %s\", grid.id, field_list)\n else:\n field_list = None\n ptypes = None\n self.field_list = list(self.comm.mpi_bcast(field_list))\n self.dataset.particle_types = list(self.comm.mpi_bcast(ptypes))\n self.dataset.particle_types_raw = self.dataset.particle_types[:]\n\n\nclass EnzoPDataset(Dataset):\n \"\"\"\n Enzo-P-specific output, set at a fixed time.\n \"\"\"\n\n refine_by = 2\n _index_class = EnzoPHierarchy\n _field_info_class = EnzoPFieldInfo\n _suffix = \".block_list\"\n particle_types = None\n particle_types_raw = None\n\n def __init__(\n self,\n filename,\n dataset_type=None,\n file_style=None,\n parameter_override=None,\n conversion_override=None,\n storage_filename=None,\n units_override=None,\n unit_system=\"cgs\",\n ):\n \"\"\"\n This class is a stripped down class that simply reads and parses\n *filename* without looking at the index. *dataset_type* gets passed\n to the index to pre-determine the style of data-output. However,\n it is not strictly necessary. 
Optionally you may specify a\n *parameter_override* dictionary that will override anything in the\n parameter file and a *conversion_override* dictionary that consists\n of {fieldname : conversion_to_cgs} that will override the #DataCGS.\n \"\"\"\n self.fluid_types += (\"enzop\",)\n if parameter_override is None:\n parameter_override = {}\n self._parameter_override = parameter_override\n if conversion_override is None:\n conversion_override = {}\n self._conversion_override = conversion_override\n self.storage_filename = storage_filename\n Dataset.__init__(\n self,\n filename,\n dataset_type,\n file_style=file_style,\n units_override=units_override,\n unit_system=unit_system,\n )\n warnings.warn(\n \"The Enzo-P file format is still under development and may \"\n + \"change. If loading fails, simulation data will need to be \"\n + \"re-generated.\"\n )\n\n def _parse_parameter_file(self):\n \"\"\"\n Parses the parameter file and establishes the various\n dictionaries.\n \"\"\"\n\n f = open(self.parameter_filename, \"r\")\n # get dimension from first block name\n b0, fn0 = f.readline().strip().split()\n level0, left0, right0 = get_block_info(b0, min_dim=0)\n root_blocks = get_root_blocks(b0)\n f.close()\n self.dimensionality = left0.size\n self.periodicity = ensure_tuple(np.ones(self.dimensionality, dtype=bool))\n\n lcfn = self.parameter_filename[: -len(self._suffix)] + \".libconfig\"\n if os.path.exists(lcfn):\n with io.open(lcfn, \"r\") as lf:\n self.parameters = libconf.load(lf)\n cosmo = nested_dict_get(self.parameters, (\"Physics\", \"cosmology\"))\n if cosmo is not None:\n self.cosmological_simulation = 1\n co_pars = [\n \"hubble_constant_now\",\n \"omega_matter_now\",\n \"omega_lambda_now\",\n \"comoving_box_size\",\n \"initial_redshift\",\n ]\n co_dict = dict(\n (\n attr,\n nested_dict_get(\n self.parameters, (\"Physics\", \"cosmology\", attr)\n ),\n )\n for attr in co_pars\n )\n for attr in [\"hubble_constant\", \"omega_matter\", \"omega_lambda\"]:\n setattr(self, attr, co_dict[\"%s_now\" % attr])\n\n # Current redshift is not stored, so it's not possible\n # to set all cosmological units yet.\n # Get the time units and use that to figure out redshift.\n k = cosmology_get_units(\n self.hubble_constant,\n self.omega_matter,\n co_dict[\"comoving_box_size\"],\n co_dict[\"initial_redshift\"],\n 0,\n )\n setdefaultattr(self, \"time_unit\", self.quan(k[\"utim\"], \"s\"))\n co = Cosmology(\n hubble_constant=self.hubble_constant,\n omega_matter=self.omega_matter,\n omega_lambda=self.omega_lambda,\n )\n else:\n self.cosmological_simulation = 0\n else:\n self.cosmological_simulation = 0\n\n fh = h5py.File(os.path.join(self.directory, fn0), \"r\")\n self.domain_left_edge = fh.attrs[\"lower\"]\n self.domain_right_edge = fh.attrs[\"upper\"]\n\n # all blocks are the same size\n ablock = fh[list(fh.keys())[0]]\n self.current_time = ablock.attrs[\"time\"][0]\n gsi = ablock.attrs[\"enzo_GridStartIndex\"]\n gei = ablock.attrs[\"enzo_GridEndIndex\"]\n self.ghost_zones = gsi[0]\n self.root_block_dimensions = root_blocks\n self.active_grid_dimensions = gei - gsi + 1\n self.grid_dimensions = ablock.attrs[\"enzo_GridDimension\"]\n self.domain_dimensions = root_blocks * self.active_grid_dimensions\n fh.close()\n\n if self.cosmological_simulation:\n self.current_redshift = co.z_from_t(self.current_time * self.time_unit)\n\n self.periodicity += (False,) * (3 - self.dimensionality)\n self.gamma = nested_dict_get(self.parameters, (\"Field\", \"gamma\"))\n\n def _set_code_unit_attributes(self):\n if 
self.cosmological_simulation:\n box_size = self.parameters[\"Physics\"][\"cosmology\"][\"comoving_box_size\"]\n k = cosmology_get_units(\n self.hubble_constant,\n self.omega_matter,\n box_size,\n self.parameters[\"Physics\"][\"cosmology\"][\"initial_redshift\"],\n self.current_redshift,\n )\n # Now some CGS values\n setdefaultattr(self, \"length_unit\", self.quan(box_size, \"Mpccm/h\"))\n setdefaultattr(\n self,\n \"mass_unit\",\n self.quan(k[\"urho\"], \"g/cm**3\") * (self.length_unit.in_cgs()) ** 3,\n )\n setdefaultattr(self, \"velocity_unit\", self.quan(k[\"uvel\"], \"cm/s\"))\n else:\n p = self.parameters\n for d, u in zip((\"length\", \"time\"), (\"cm\", \"s\")):\n val = nested_dict_get(p, (\"Units\", d), default=1)\n setdefaultattr(self, \"%s_unit\" % d, self.quan(val, u))\n mass = nested_dict_get(p, (\"Units\", \"mass\"))\n if mass is None:\n density = nested_dict_get(p, (\"Units\", \"density\"))\n if density is not None:\n mass = density * self.length_unit ** 3\n else:\n mass = 1\n setdefaultattr(self, \"mass_unit\", self.quan(mass, \"g\"))\n setdefaultattr(self, \"velocity_unit\", self.length_unit / self.time_unit)\n\n magnetic_unit = np.sqrt(\n 4 * np.pi * self.mass_unit / (self.time_unit ** 2 * self.length_unit)\n )\n magnetic_unit = np.float64(magnetic_unit.in_cgs())\n setdefaultattr(self, \"magnetic_unit\", self.quan(magnetic_unit, \"gauss\"))\n\n def __repr__(self):\n return self.basename[: -len(self._suffix)]\n\n @classmethod\n def _is_valid(cls, *args, **kwargs):\n fn = args[0]\n ddir = os.path.dirname(fn)\n if not fn.endswith(cls._suffix):\n return False\n try:\n with open(fn, \"r\") as f:\n block, block_file = f.readline().strip().split()\n get_block_info(block)\n if not os.path.exists(os.path.join(ddir, block_file)):\n return False\n except Exception:\n return False\n return True\n", "import numpy as np\n\nfrom yt.frontends.enzo.api import EnzoDataset\nfrom yt.frontends.enzo.fields import NODAL_FLAGS\nfrom yt.testing import (\n assert_allclose_units,\n assert_almost_equal,\n assert_array_equal,\n assert_equal,\n requires_file,\n units_override_check,\n)\nfrom yt.utilities.answer_testing.framework import (\n big_patch_amr,\n data_dir_load,\n requires_ds,\n small_patch_amr,\n)\nfrom yt.visualization.plot_window import SlicePlot\n\n_fields = (\"temperature\", \"density\", \"velocity_magnitude\", \"velocity_divergence\")\n\ntwo_sphere_test = \"ActiveParticleTwoSphere/DD0011/DD0011\"\nactive_particle_cosmology = \"ActiveParticleCosmology/DD0046/DD0046\"\necp = \"enzo_cosmology_plus/DD0046/DD0046\"\ng30 = \"IsolatedGalaxy/galaxy0030/galaxy0030\"\nenzotiny = \"enzo_tiny_cosmology/DD0046/DD0046\"\ntoro1d = \"ToroShockTube/DD0001/data0001\"\nkh2d = \"EnzoKelvinHelmholtz/DD0011/DD0011\"\nmhdctot = \"MHDCTOrszagTang/DD0004/data0004\"\ndnz = \"DeeplyNestedZoom/DD0025/data0025\"\np3mini = \"PopIII_mini/DD0034/DD0034\"\n\n\ndef color_conservation(ds):\n species_names = ds.field_info.species_names\n dd = ds.all_data()\n dens_yt = dd[\"density\"].copy()\n # Enumerate our species here\n for s in sorted(species_names):\n if s == \"El\":\n continue\n dens_yt -= dd[\"%s_density\" % s]\n dens_yt -= dd[\"metal_density\"]\n delta_yt = np.abs(dens_yt / dd[\"density\"])\n # Now we compare color conservation to Enzo's color conservation\n dd = ds.all_data()\n dens_enzo = dd[\"Density\"].copy()\n for f in sorted(ds.field_list):\n ff = f[1]\n if not ff.endswith(\"_Density\"):\n continue\n start_strings = [\n \"Electron_\",\n \"SFR_\",\n \"Forming_Stellar_\",\n \"Dark_Matter\",\n 
\"Star_Particle_\",\n ]\n if any([ff.startswith(ss) for ss in start_strings]):\n continue\n dens_enzo -= dd[f]\n delta_enzo = np.abs(dens_enzo / dd[\"Density\"])\n np.testing.assert_almost_equal(delta_yt, delta_enzo)\n\n\ndef check_color_conservation(ds):\n species_names = ds.field_info.species_names\n dd = ds.all_data()\n dens_yt = dd[\"density\"].copy()\n # Enumerate our species here\n for s in sorted(species_names):\n if s == \"El\":\n continue\n dens_yt -= dd[\"%s_density\" % s]\n dens_yt -= dd[\"metal_density\"]\n delta_yt = np.abs(dens_yt / dd[\"density\"])\n\n # Now we compare color conservation to Enzo's color conservation\n dd = ds.all_data()\n dens_enzo = dd[\"Density\"].copy()\n for f in sorted(ds.field_list):\n ff = f[1]\n if not ff.endswith(\"_Density\"):\n continue\n start_strings = [\n \"Electron_\",\n \"SFR_\",\n \"Forming_Stellar_\",\n \"Dark_Matter\",\n \"Star_Particle_\",\n ]\n if any([ff.startswith(ss) for ss in start_strings]):\n continue\n dens_enzo -= dd[f]\n delta_enzo = np.abs(dens_enzo / dd[\"Density\"])\n return assert_almost_equal, delta_yt, delta_enzo\n\n\nm7 = \"DD0010/moving7_0010\"\n\n\n@requires_ds(m7)\ndef test_moving7():\n ds = data_dir_load(m7)\n assert_equal(str(ds), \"moving7_0010\")\n for test in small_patch_amr(m7, _fields):\n test_moving7.__name__ = test.description\n yield test\n\n\n@requires_ds(g30, big_data=True)\ndef test_galaxy0030():\n ds = data_dir_load(g30)\n yield check_color_conservation(ds)\n assert_equal(str(ds), \"galaxy0030\")\n for test in big_patch_amr(ds, _fields):\n test_galaxy0030.__name__ = test.description\n yield test\n assert_equal(ds.particle_type_counts, {\"io\": 1124453})\n\n\n@requires_ds(toro1d)\ndef test_toro1d():\n ds = data_dir_load(toro1d)\n assert_equal(str(ds), \"data0001\")\n for test in small_patch_amr(ds, ds.field_list):\n test_toro1d.__name__ = test.description\n yield test\n\n\n@requires_ds(kh2d)\ndef test_kh2d():\n ds = data_dir_load(kh2d)\n assert_equal(str(ds), \"DD0011\")\n for test in small_patch_amr(ds, ds.field_list):\n test_kh2d.__name__ = test.description\n yield test\n\n\n@requires_ds(ecp, big_data=True)\ndef test_ecp():\n ds = data_dir_load(ecp)\n # Now we test our species fields\n yield check_color_conservation(ds)\n\n\n@requires_file(enzotiny)\ndef test_units_override():\n units_override_check(enzotiny)\n\n\n@requires_ds(ecp, big_data=True)\ndef test_nuclei_density_fields():\n ds = data_dir_load(ecp)\n ad = ds.all_data()\n assert_array_equal(\n ad[\"H_nuclei_density\"], (ad[\"H_p0_number_density\"] + ad[\"H_p1_number_density\"])\n )\n assert_array_equal(\n ad[\"He_nuclei_density\"],\n (\n ad[\"He_p0_number_density\"]\n + ad[\"He_p1_number_density\"]\n + ad[\"He_p2_number_density\"]\n ),\n )\n\n\n@requires_file(enzotiny)\ndef test_EnzoDataset():\n assert isinstance(data_dir_load(enzotiny), EnzoDataset)\n\n\n@requires_file(two_sphere_test)\n@requires_file(active_particle_cosmology)\ndef test_active_particle_datasets():\n two_sph = data_dir_load(two_sphere_test)\n assert \"AccretingParticle\" in two_sph.particle_types_raw\n assert \"io\" not in two_sph.particle_types_raw\n assert \"all\" in two_sph.particle_types\n assert \"nbody\" in two_sph.particle_types\n assert_equal(len(two_sph.particle_unions), 2)\n pfields = [\n \"GridID\",\n \"creation_time\",\n \"dynamical_time\",\n \"identifier\",\n \"level\",\n \"metallicity\",\n \"particle_mass\",\n ]\n pfields += [\"particle_position_%s\" % d for d in \"xyz\"]\n pfields += [\"particle_velocity_%s\" % d for d in \"xyz\"]\n\n acc_part_fields = 
[(\"AccretingParticle\", pf) for pf in [\"AccretionRate\"] + pfields]\n\n real_acc_part_fields = sorted(\n [f for f in two_sph.field_list if f[0] == \"AccretingParticle\"]\n )\n assert_equal(acc_part_fields, real_acc_part_fields)\n\n apcos = data_dir_load(active_particle_cosmology)\n assert_equal([\"CenOstriker\", \"DarkMatter\"], apcos.particle_types_raw)\n assert \"all\" in apcos.particle_unions\n assert \"nbody\" in apcos.particle_unions\n\n apcos_fields = [(\"CenOstriker\", pf) for pf in pfields]\n\n real_apcos_fields = sorted([f for f in apcos.field_list if f[0] == \"CenOstriker\"])\n\n assert_equal(apcos_fields, real_apcos_fields)\n\n assert_equal(\n apcos.particle_type_counts, {\"CenOstriker\": 899755, \"DarkMatter\": 32768}\n )\n\n\n@requires_file(mhdctot)\ndef test_face_centered_mhdct_fields():\n ds = data_dir_load(mhdctot)\n\n ad = ds.all_data()\n grid = ds.index.grids[0]\n\n for field, flag in NODAL_FLAGS.items():\n dims = ds.domain_dimensions\n assert_equal(ad[field].shape, (dims.prod(), 2 * sum(flag)))\n assert_equal(grid[field].shape, tuple(dims) + (2 * sum(flag),))\n\n # Average of face-centered fields should be the same as cell-centered field\n assert (ad[\"BxF\"].sum(axis=-1) / 2 == ad[\"Bx\"]).all()\n assert (ad[\"ByF\"].sum(axis=-1) / 2 == ad[\"By\"]).all()\n assert (ad[\"BzF\"].sum(axis=-1) / 2 == ad[\"Bz\"]).all()\n\n\n@requires_file(dnz)\ndef test_deeply_nested_zoom():\n ds = data_dir_load(dnz)\n\n # carefully chosen to just barely miss a grid in the middle of the image\n center = [0.4915073260199302, 0.5052605316800006, 0.4905805557500548]\n\n plot = SlicePlot(ds, \"z\", \"density\", width=(0.001, \"pc\"), center=center)\n\n image = plot.frb[\"density\"]\n\n assert (image > 0).all()\n\n v, c = ds.find_max(\"density\")\n\n assert_allclose_units(v, ds.quan(0.005878286377124154, \"g/cm**3\"))\n\n c_actual = [0.49150732540021, 0.505260532936791, 0.49058055816398]\n c_actual = ds.arr(c_actual, \"code_length\")\n assert_allclose_units(c, c_actual)\n\n assert_equal(max([g[\"density\"].max() for g in ds.index.grids]), v)\n\n\n@requires_file(kh2d)\ndef test_2d_grid_shape():\n # see issue #1601\n # we want to make sure that accessing data on a grid object\n # returns a 3D array with a dummy dimension.\n ds = data_dir_load(kh2d)\n g = ds.index.grids[1]\n assert g[\"density\"].shape == (128, 100, 1)\n\n\n@requires_file(p3mini)\ndef test_nonzero_omega_radiation():\n \"\"\"\n Test support for non-zero omega_radiation cosmologies.\n \"\"\"\n ds = data_dir_load(p3mini)\n\n assert_equal(ds.omega_radiation, ds.cosmology.omega_radiation)\n\n tratio = ds.current_time / ds.cosmology.t_from_z(ds.current_redshift)\n assert_almost_equal(\n tratio,\n 1,\n 4,\n err_msg=\"Simulation time not consistent with cosmology calculator.\",\n )\n", "import os\nimport shutil\nimport tempfile\nfrom unittest import TestCase\n\nimport numpy as np\n\nfrom yt.data_objects.api import ImageArray\nfrom yt.testing import fake_random_ds\nfrom yt.visualization.volume_rendering.api import (\n BoxSource,\n LineSource,\n Scene,\n VolumeSource,\n)\n\n\ndef setup():\n \"\"\"Test specific setup.\"\"\"\n from yt.config import ytcfg\n\n ytcfg[\"yt\", \"__withintesting\"] = \"True\"\n\n\nclass CompositeVRTest(TestCase):\n # This toggles using a temporary directory. 
Turn off to examine images.\n use_tmpdir = True\n\n def setUp(self):\n np.random.seed(0)\n if self.use_tmpdir:\n self.curdir = os.getcwd()\n # Perform I/O in safe place instead of yt main dir\n self.tmpdir = tempfile.mkdtemp()\n os.chdir(self.tmpdir)\n else:\n self.curdir, self.tmpdir = None, None\n\n def tearDown(self):\n if self.use_tmpdir:\n os.chdir(self.curdir)\n shutil.rmtree(self.tmpdir)\n\n def test_composite_vr(self):\n ds = fake_random_ds(64)\n dd = ds.sphere(ds.domain_center, 0.45 * ds.domain_width[0])\n ds.field_info[ds.field_list[0]].take_log = False\n\n sc = Scene()\n cam = sc.add_camera(ds)\n cam.resolution = (512, 512)\n vr = VolumeSource(dd, field=ds.field_list[0])\n vr.transfer_function.clear()\n vr.transfer_function.grey_opacity = True\n vr.transfer_function.map_to_colormap(0.0, 1.0, scale=3.0, colormap=\"Reds\")\n sc.add_source(vr)\n\n cam.set_width(1.8 * ds.domain_width)\n cam.lens.setup_box_properties(cam)\n\n # DRAW SOME LINES\n npoints = 100\n vertices = np.random.random([npoints, 2, 3])\n colors = np.random.random([npoints, 4])\n colors[:, 3] = 0.10\n\n box_source = BoxSource(\n ds.domain_left_edge, ds.domain_right_edge, color=[1.0, 1.0, 1.0, 1.0]\n )\n sc.add_source(box_source)\n\n LE = ds.domain_left_edge + np.array([0.1, 0.0, 0.3]) * ds.domain_left_edge.uq\n RE = ds.domain_right_edge - np.array([0.1, 0.2, 0.3]) * ds.domain_left_edge.uq\n color = np.array([0.0, 1.0, 0.0, 0.10])\n box_source = BoxSource(LE, RE, color=color)\n sc.add_source(box_source)\n\n line_source = LineSource(vertices, colors)\n sc.add_source(line_source)\n\n im = sc.render()\n im = ImageArray(im.d)\n im.write_png(\"composite.png\")\n return im\n" ]
[ [ "numpy.nanmax", "matplotlib.colors.LogNorm", "numpy.sqrt", "numpy.nanmin", "matplotlib.font_manager.FontProperties", "matplotlib.colors.Normalize", "matplotlib.colors.ListedColormap", "numpy.float64", "numpy.array" ], [ "numpy.sqrt", "numpy.empty", "numpy.ones" ], [ "numpy.testing.assert_almost_equal", "numpy.abs" ], [ "numpy.array", "numpy.random.random", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
stargazerSwc/SRGAN
[ "eef41597a95660cf3dc08937edeeaf905ee563d4" ]
[ "kaggle_train2.py" ]
[ "import argparse\nimport os\nfrom math import log10\n\nimport pandas as pd\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.utils as utils\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nimport pytorch_ssim\nfrom data_utils import TrainDatasetFromFolder, ValDatasetFromFolder, display_transform\nfrom loss import GeneratorLoss\nfrom model import Generator, Discriminator\n\nparser = argparse.ArgumentParser(description='Train Super Resolution Models')\nparser.add_argument('--crop_size', default=88, type=int, help='training images crop size')\nparser.add_argument('--upscale_factor', default=4, type=int, choices=[2, 4, 8],\n help='super resolution upscale factor')\nparser.add_argument('--num_epochs', default=1, type=int, help='train epoch number')\n\n\nif __name__ == '__main__':\n opt = parser.parse_args()\n \n CROP_SIZE = opt.crop_size\n UPSCALE_FACTOR = opt.upscale_factor\n NUM_EPOCHS = opt.num_epochs\n \n # train_set = TrainDatasetFromFolder('data/DIV2K_train_HR', crop_size=CROP_SIZE, upscale_factor=UPSCALE_FACTOR)\n # val_set = ValDatasetFromFolder('data/DIV2K_valid_HR', upscale_factor=UPSCALE_FACTOR)\n\t# train_loader = DataLoader(dataset=train_set, num_workers=4, batch_size=64, shuffle=True)\n # val_loader = DataLoader(dataset=val_set, num_workers=4, batch_size=1, shuffle=False)\n train_set = TrainDatasetFromFolder('../input/sr-test/VOC2012/VOC2012/train', crop_size=CROP_SIZE, upscale_factor=UPSCALE_FACTOR)\n val_set = ValDatasetFromFolder('../input/sr-test/VOC2012/VOC2012/val', upscale_factor=UPSCALE_FACTOR)\n train_loader = DataLoader(dataset=train_set, num_workers=4, batch_size=64, shuffle=True)\n val_loader = DataLoader(dataset=val_set, num_workers=4, batch_size=1, shuffle=False)\n \n netG = Generator(UPSCALE_FACTOR)\n print('# generator parameters:', sum(param.numel() for param in netG.parameters()))\n netD = Discriminator()\n print('# discriminator parameters:', sum(param.numel() for param in netD.parameters()))\n \n generator_criterion = GeneratorLoss()\n \n if torch.cuda.is_available():\n netG.cuda()\n netD.cuda()\n generator_criterion.cuda()\n \n optimizerG = optim.Adam(netG.parameters())\n optimizerD = optim.Adam(netD.parameters())\n \n results = {'d_loss': [], 'g_loss': [], 'd_score': [], 'g_score': [], 'psnr': [], 'ssim': []}\n \n for epoch in range(1, NUM_EPOCHS + 1):\n train_bar = tqdm(train_loader)\n running_results = {'batch_sizes': 0, 'd_loss': 0, 'g_loss': 0, 'd_score': 0, 'g_score': 0}\n \n netG.train()\n netD.train()\n for data, target in train_bar:\n g_update_first = True\n batch_size = data.size(0)\n running_results['batch_sizes'] += batch_size\n \n ############################\n # (1) Update D network: maximize D(x)-1-D(G(z))\n ###########################\n real_img = Variable(target)\n if torch.cuda.is_available():\n real_img = real_img.cuda()\n z = Variable(data)\n if torch.cuda.is_available():\n z = z.cuda()\n fake_img = netG(z)\n \n netD.zero_grad()\n real_out = netD(real_img).mean()\n fake_out = netD(fake_img).mean()\n d_loss = 1 - real_out + fake_out\n d_loss.backward(retain_graph=True)\n optimizerD.step()\n \n ############################\n # (2) Update G network: minimize 1-D(G(z)) + Perception Loss + Image Loss + TV Loss\n ###########################\n netG.zero_grad()\n g_loss = generator_criterion(fake_out, fake_img, real_img)\n g_loss.backward()\n \n fake_img = netG(z)\n fake_out = netD(fake_img).mean()\n \n \n optimizerG.step()\n\n # loss for current batch before 
optimization \n running_results['g_loss'] += g_loss.item() * batch_size\n running_results['d_loss'] += d_loss.item() * batch_size\n running_results['d_score'] += real_out.item() * batch_size\n running_results['g_score'] += fake_out.item() * batch_size\n \n train_bar.set_description(desc='[%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f' % (\n epoch, NUM_EPOCHS, running_results['d_loss'] / running_results['batch_sizes'],\n running_results['g_loss'] / running_results['batch_sizes'],\n running_results['d_score'] / running_results['batch_sizes'],\n running_results['g_score'] / running_results['batch_sizes']))\n \n netG.eval()\n out_path = 'training_results/SRF_' + str(UPSCALE_FACTOR) + '/'\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n \n with torch.no_grad():\n val_bar = tqdm(val_loader)\n valing_results = {'mse': 0, 'ssims': 0, 'psnr': 0, 'ssim': 0, 'batch_sizes': 0}\n val_images = []\n for val_lr, val_hr_restore, val_hr in val_bar:\n batch_size = val_lr.size(0)\n valing_results['batch_sizes'] += batch_size\n lr = val_lr\n hr = val_hr\n if torch.cuda.is_available():\n lr = lr.cuda()\n hr = hr.cuda()\n sr = netG(lr)\n \n batch_mse = ((sr - hr) ** 2).data.mean()\n valing_results['mse'] += batch_mse * batch_size\n batch_ssim = pytorch_ssim.ssim(sr, hr).item()\n valing_results['ssims'] += batch_ssim * batch_size\n valing_results['psnr'] = 10 * log10((hr.max()**2) / (valing_results['mse'] / valing_results['batch_sizes']))\n valing_results['ssim'] = valing_results['ssims'] / valing_results['batch_sizes']\n val_bar.set_description(\n desc='[converting LR images to SR images] PSNR: %.4f dB SSIM: %.4f' % (\n valing_results['psnr'], valing_results['ssim']))\n \n val_images.extend(\n [display_transform()(val_hr_restore.squeeze(0)), display_transform()(hr.data.cpu().squeeze(0)),\n display_transform()(sr.data.cpu().squeeze(0))])\n val_images = torch.stack(val_images)\n val_images = torch.chunk(val_images, val_images.size(0) // 15)\n if epoch == NUM_EPOCHS:\n val_save_bar = tqdm(val_images, desc='[saving training results]')\n index = 1\n for image in val_save_bar:\n image = utils.make_grid(image, nrow=3, padding=5)\n utils.save_image(image, out_path + 'epoch_%d_index_%d.png' % (epoch, index), padding=5)\n index += 1\n \n # save model parameters\n if epoch % 10 == 0 and epoch != 0:\n torch.save(netG.state_dict(), 'epochs/netG_epoch_%d_%d.pth' % (UPSCALE_FACTOR, epoch))\n torch.save(netD.state_dict(), 'epochs/netD_epoch_%d_%d.pth' % (UPSCALE_FACTOR, epoch))\n # save loss\\scores\\psnr\\ssim\n results['d_loss'].append(running_results['d_loss'] / running_results['batch_sizes'])\n results['g_loss'].append(running_results['g_loss'] / running_results['batch_sizes'])\n results['d_score'].append(running_results['d_score'] / running_results['batch_sizes'])\n results['g_score'].append(running_results['g_score'] / running_results['batch_sizes'])\n results['psnr'].append(valing_results['psnr'])\n results['ssim'].append(valing_results['ssim'])\n \n if epoch % 10 == 0 and epoch != 0:\n out_path = 'statistics/'\n data_frame = pd.DataFrame(\n data={'Loss_D': results['d_loss'], 'Loss_G': results['g_loss'], 'Score_D': results['d_score'],\n 'Score_G': results['g_score'], 'PSNR': results['psnr'], 'SSIM': results['ssim']},\n index=range(1, epoch + 1))\n data_frame.to_csv(out_path + 'srf_' + str(UPSCALE_FACTOR) + '_train_results.csv', index_label='Epoch')\n" ]
[ [ "torch.utils.data.DataLoader", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mutterer/pycro-manager
[ "3404e4c968fcef5bf9cb57927f4921f203c7bbe0" ]
[ "pycromanager/data.py" ]
[ "\"\"\"\nLibrary for reading multiresolution micro-magellan\n\"\"\"\nimport os\nimport mmap\nimport numpy as np\nimport sys\nimport json\nimport platform\nimport dask.array as da\nimport dask\nimport warnings\nfrom pycromanager.core import Bridge\nimport struct\nfrom pycromanager.legacy_data import Legacy_NDTiff_Dataset\nimport threading\n\n\nclass _MultipageTiffReader:\n \"\"\"\n Class corresponsing to a single multipage tiff file in a Micro-Magellan dataset.\n Pass the full path of the TIFF to instantiate and call close() when finished\n \"\"\"\n\n # file format constants\n SUMMARY_MD_HEADER = 2355492\n EIGHT_BIT = 0\n SIXTEEN_BIT = 1\n EIGHT_BIT_RGB = 2\n UNCOMPRESSED = 0\n\n def __init__(self, tiff_path):\n self.tiff_path = tiff_path\n self.file = open(tiff_path, \"rb\")\n if platform.system() == \"Windows\":\n self.mmap_file = mmap.mmap(self.file.fileno(), 0, access=mmap.ACCESS_READ)\n else:\n self.mmap_file = mmap.mmap(self.file.fileno(), 0, prot=mmap.PROT_READ)\n self.summary_md, self.first_ifd_offset = self._read_header()\n self.mmap_file.close()\n\n def close(self):\n \"\"\" \"\"\"\n self.file.close()\n\n def _read_header(self):\n \"\"\"\n Returns\n -------\n summary metadata : dict\n byte offsets : nested dict\n The byte offsets of TIFF Image File Directories with keys [channel_index][z_index][frame_index][position_index]\n first_image_byte_offset : int\n int byte offset of first image IFD\n \"\"\"\n # read standard tiff header\n if self._read(0,2) == b\"\\x4d\\x4d\":\n # Big endian\n if sys.byteorder != \"big\":\n raise Exception(\"Potential issue with mismatched endian-ness\")\n elif self._read(0,2) == b\"\\x49\\x49\":\n # little endian\n if sys.byteorder != \"little\":\n raise Exception(\"Potential issue with mismatched endian-ness\")\n else:\n raise Exception(\"Endian type not specified correctly\")\n if np.frombuffer(self._read(2,4), dtype=np.uint16)[0] != 42:\n raise Exception(\"Tiff magic 42 missing\")\n first_ifd_offset = np.frombuffer(self._read(4,8), dtype=np.uint32)[0]\n\n # read custom stuff: header, summary md\n # int.from_bytes(self.mmap_file[24:28], sys.byteorder) # should be equal to 483729 starting in version 1\n self._major_version = int.from_bytes(self._read(12,16), sys.byteorder)\n\n summary_md_header, summary_md_length = np.frombuffer(self._read(16,24), dtype=np.uint32)\n if summary_md_header != self.SUMMARY_MD_HEADER:\n raise Exception(\"Summary metadata header wrong\")\n summary_md = json.loads(self._read(24, 24 + summary_md_length))\n return summary_md, first_ifd_offset\n\n def _read(self, start, end):\n \"\"\"\n convert to python ints\n \"\"\"\n self.file.seek(int(start), 0)\n return self.file.read(end - start)\n # return self.np_memmap[int(start) : int(end)].tobytes()\n\n def read_metadata(self, index):\n return json.loads(\n self._read(\n index[\"metadata_offset\"], index[\"metadata_offset\"] + index[\"metadata_length\"]\n )\n )\n\n def read_image(self, index, memmapped=False):\n if index[\"pixel_type\"] == self.EIGHT_BIT_RGB:\n bytes_per_pixel = 3\n dtype = np.uint8\n elif index[\"pixel_type\"] == self.EIGHT_BIT:\n bytes_per_pixel = 1\n dtype = np.uint8\n elif index[\"pixel_type\"] == self.SIXTEEN_BIT:\n bytes_per_pixel = 2\n dtype = np.uint16\n else:\n raise Exception(\"unrecognized pixel type\")\n width = index[\"image_width\"]\n height = index[\"image_height\"]\n\n if memmapped:\n np_memmap = np.memmap(self.file, dtype=np.uint8, mode=\"r\")\n image = np.reshape(\n np_memmap[\n index[\"pixel_offset\"] : index[\"pixel_offset\"] + width * height * 
bytes_per_pixel\n ].view(dtype),\n [height, width, 3] if bytes_per_pixel == 3 else [height, width],\n )\n else:\n image = np.reshape(\n np.frombuffer(self._read(\n index[\"pixel_offset\"], index[\"pixel_offset\"] + width * height * bytes_per_pixel)\n , dtype=dtype),\n [height, width, 3] if bytes_per_pixel == 3 else [height, width],\n )\n return image\n\n\nclass _ResolutionLevel:\n def __init__(self, path=None, count=None, max_count=None, remote=False, summary_metadata=False):\n \"\"\"\n Open all tiff files in directory, keep them in a list, and a tree based on image indices\n\n Parameters\n ----------\n path : str\n count : int\n max_count : int\n\n \"\"\"\n self.path_root = path + (\"\" if path[-1] == os.sep else os.sep)\n if remote:\n self.summary_metadata = summary_metadata\n self.index = {}\n self._readers_by_filename = {}\n else:\n self.index = self.read_index(path)\n tiff_names = [\n os.path.join(path, tiff) for tiff in os.listdir(path) if tiff.endswith(\".tif\")\n ]\n self._readers_by_filename = {}\n # populate list of readers and tree mapping indices to readers\n for tiff in tiff_names:\n print(\"\\rOpening file {} of {}...\".format(count + 1, max_count), end=\"\")\n count += 1\n self._readers_by_filename[tiff.split(os.sep)[-1]] = _MultipageTiffReader(tiff)\n self.summary_metadata = list(self._readers_by_filename.values())[0].summary_md\n\n def has_image(self, axes):\n key = frozenset(axes.items())\n return key in self.index\n\n def add_index_entry(self, data):\n \"\"\"\n Manually add a single index entry\n :param data: bytes object of a single index entry\n \"\"\"\n _, axes, index_entry = self.read_single_index_entry(data, self.index)\n\n if index_entry[\"filename\"] not in self._readers_by_filename:\n self._readers_by_filename[index_entry[\"filename\"]] = _MultipageTiffReader(\n self.path_root + index_entry[\"filename\"]\n )\n return axes, index_entry\n\n def read_single_index_entry(self, data, entries, position=0):\n index_entry = {}\n (axes_length,) = struct.unpack(\"I\", data[position : position + 4])\n if axes_length == 0:\n warnings.warn(\n \"Index appears to not have been properly terminated (the dataset may still work)\"\n )\n return None\n axes_str = data[position + 4 : position + 4 + axes_length].decode(\"utf-8\")\n axes = json.loads(axes_str)\n position += axes_length + 4\n (filename_length,) = struct.unpack(\"I\", data[position : position + 4])\n index_entry[\"filename\"] = data[position + 4 : position + 4 + filename_length].decode(\n \"utf-8\"\n )\n position += 4 + filename_length\n (\n index_entry[\"pixel_offset\"],\n index_entry[\"image_width\"],\n index_entry[\"image_height\"],\n index_entry[\"pixel_type\"],\n index_entry[\"pixel_compression\"],\n index_entry[\"metadata_offset\"],\n index_entry[\"metadata_length\"],\n index_entry[\"metadata_compression\"],\n ) = struct.unpack(\"IIIIIIII\", data[position : position + 32])\n position += 32\n entries[frozenset(axes.items())] = index_entry\n return position, axes, index_entry\n\n def read_index(self, path):\n print(\"\\rReading index... \", end=\"\")\n with open(path + os.sep + \"NDTiff.index\", \"rb\") as index_file:\n data = index_file.read()\n entries = {}\n position = 0\n while position < len(data):\n print(\n \"\\rReading index... 
{:.1f}% \".format(\n 100 * (1 - (len(data) - position) / len(data))\n ),\n end=\"\",\n )\n position, axes, index_entry = self.read_single_index_entry(data, entries, position)\n if position is None:\n break\n\n print(\"\\rFinshed reading index \", end=\"\")\n return entries\n\n def read_image(\n self,\n axes,\n memmapped=False,\n ):\n \"\"\"\n\n Parameters\n ----------\n axes : dict\n memmapped : bool\n (Default value = False)\n\n Returns\n -------\n image :\n \"\"\"\n # determine which reader contains the image\n key = frozenset(axes.items())\n if key not in self.index:\n raise Exception(\"image with keys {} not present in data set\".format(key))\n index = self.index[key]\n reader = self._readers_by_filename[index[\"filename\"]]\n return reader.read_image(index, memmapped)\n\n def read_metadata(self, axes):\n \"\"\"\n\n Parameters\n ----------\n axes : dict\n\n Returns\n -------\n image_metadata\n \"\"\"\n key = frozenset(axes.items())\n if key not in self.index:\n raise Exception(\"image with keys {} not present in data set\".format(key))\n index = self.index[key]\n reader = self._readers_by_filename[index[\"filename\"]]\n return reader.read_metadata(index)\n\n def close(self):\n for reader in self._readers_by_filename.values():\n reader.close()\n\n\n### This function outside class to prevent problems with pickling when running them in differnet process\n\n\ndef _storage_monitor_fn(\n dataset, storage_monitor_push_port, connected_event, callback_fn, debug=False\n):\n #TODO: might need to add in support for doing this on a different port, if Acquistiion/bridge is not on default port\n with Bridge(debug=debug) as bridge:\n monitor_socket = bridge._connect_pull(storage_monitor_push_port)\n\n connected_event.set()\n\n while True:\n message = monitor_socket.receive()\n\n if \"finished\" in message:\n # Poison, time to shut down\n monitor_socket.close()\n return\n\n index_entry = message[\"index_entry\"]\n axes = dataset._add_index_entry(index_entry)\n\n if callback_fn is not None:\n callback_fn(axes)\n\n\nclass Dataset:\n \"\"\"Class that opens a single NDTiffStorage dataset\"\"\"\n\n _POSITION_AXIS = \"position\"\n _ROW_AXIS = \"row\"\n _COLUMN_AXIS = \"column\"\n _Z_AXIS = \"z\"\n _TIME_AXIS = \"time\"\n _CHANNEL_AXIS = \"channel\"\n\n def __new__(cls, dataset_path=None, full_res_only=True, remote_storage_monitor=None):\n if dataset_path is None:\n return super(Dataset, cls).__new__(Dataset)\n # Search for Full resolution dir, check for index\n res_dirs = [\n dI for dI in os.listdir(dataset_path) if os.path.isdir(os.path.join(dataset_path, dI))\n ]\n if \"Full resolution\" not in res_dirs:\n raise Exception(\n \"Couldn't find full resolution directory. 
Is this the correct path to a dataset?\"\n )\n fullres_path = (\n dataset_path + (\"\" if dataset_path[-1] == os.sep else os.sep) + \"Full resolution\"\n )\n if \"NDTiff.index\" in os.listdir(fullres_path):\n return super(Dataset, cls).__new__(Dataset)\n else:\n obj = Legacy_NDTiff_Dataset.__new__(Legacy_NDTiff_Dataset)\n obj.__init__(dataset_path, full_res_only, remote_storage=None)\n return obj\n\n def __init__(self, dataset_path=None, full_res_only=True, remote_storage_monitor=None):\n \"\"\"\n Creat a Object providing access to and NDTiffStorage dataset, either one currently being acquired or one on disk\n\n Parameters\n ----------\n dataset_path : str\n Abosolute path of top level folder of a dataset on disk\n full_res_only : bool\n One open the full resolution data, if it is multi-res\n remote_storage_monitor : JavaObjectShadow\n Object that allows callbacks from remote NDTiffStorage\n \"\"\"\n self._tile_width = None\n self._tile_height = None\n self._lock = threading.Lock()\n if remote_storage_monitor is not None:\n # this dataset is a view of an active acquisiiton. The storage exists on the java side\n self._remote_storage_monitor = remote_storage_monitor\n self.summary_metadata = self._remote_storage_monitor.get_summary_metadata()\n if \"GridPixelOverlapX\" in self.summary_metadata.keys():\n self._tile_width = (\n self.summary_metadata[\"Width\"] - self.summary_metadata[\"GridPixelOverlapX\"]\n )\n self._tile_height = (\n self.summary_metadata[\"Height\"] - self.summary_metadata[\"GridPixelOverlapY\"]\n )\n\n dataset_path = remote_storage_monitor.get_disk_location()\n dataset_path += \"\" if dataset_path[-1] == os.sep else os.sep\n full_res_path = dataset_path + \"Full resolution\"\n with self._lock:\n self.res_levels = {\n 0: _ResolutionLevel(\n remote=True, summary_metadata=self.summary_metadata, path=full_res_path\n )\n }\n self.axes = {}\n return\n else:\n self._remote_storage_monitor = None\n\n self.path = dataset_path\n res_dirs = [\n dI for dI in os.listdir(dataset_path) if os.path.isdir(os.path.join(dataset_path, dI))\n ]\n # map from downsample factor to datset\n with self._lock:\n self.res_levels = {}\n if \"Full resolution\" not in res_dirs:\n raise Exception(\n \"Couldn't find full resolution directory. 
Is this the correct path to a dataset?\"\n )\n num_tiffs = 0\n count = 0\n for res_dir in res_dirs:\n for file in os.listdir(os.path.join(dataset_path, res_dir)):\n if file.endswith(\".tif\"):\n num_tiffs += 1\n for res_dir in res_dirs:\n if full_res_only and res_dir != \"Full resolution\":\n continue\n res_dir_path = os.path.join(dataset_path, res_dir)\n res_level = _ResolutionLevel(res_dir_path, count, num_tiffs)\n if res_dir == \"Full resolution\":\n with self._lock:\n self.res_levels[0] = res_level\n # get summary metadata and index tree from full resolution image\n self.summary_metadata = res_level.summary_metadata\n\n self.overlap = (\n np.array(\n [\n self.summary_metadata[\"GridPixelOverlapY\"],\n self.summary_metadata[\"GridPixelOverlapX\"],\n ]\n )\n if \"GridPixelOverlapY\" in self.summary_metadata\n else None\n )\n\n self.axes = {}\n for axes_combo in res_level.index.keys():\n for axis, position in axes_combo:\n if axis not in self.axes.keys():\n self.axes[axis] = set()\n self.axes[axis].add(position)\n\n # figure out the mapping of channel name to position by reading image metadata\n print(\"\\rReading channel names...\", end=\"\")\n self._read_channel_names()\n print(\"\\rFinished reading channel names\", end=\"\")\n\n # remove axes with no variation\n # single_axes = [axis for axis in self.axes if len(self.axes[axis]) == 1]\n # for axis in single_axes:\n # del self.axes[axis]\n\n else:\n with self._lock:\n self.res_levels[int(np.log2(int(res_dir.split(\"x\")[1])))] = res_level\n\n # get information about image width and height, assuming that they are consistent for whole dataset\n # (which isn't strictly neccesary)\n with self._lock:\n first_index = list(self.res_levels[0].index.values())[0]\n if first_index[\"pixel_type\"] == _MultipageTiffReader.EIGHT_BIT_RGB:\n self.bytes_per_pixel = 3\n self.dtype = np.uint8\n elif first_index[\"pixel_type\"] == _MultipageTiffReader.EIGHT_BIT:\n self.bytes_per_pixel = 1\n self.dtype = np.uint8\n elif first_index[\"pixel_type\"] == _MultipageTiffReader.SIXTEEN_BIT:\n self.bytes_per_pixel = 2\n self.dtype = np.uint16\n\n self.image_width = first_index[\"image_width\"]\n self.image_height = first_index[\"image_height\"]\n if \"GridPixelOverlapX\" in self.summary_metadata:\n self._tile_width = self.image_width - self.summary_metadata[\"GridPixelOverlapX\"]\n self._tile_height = self.image_height - self.summary_metadata[\"GridPixelOverlapY\"]\n\n print(\"\\rDataset opened \")\n\n def _read_channel_names(self):\n if self._CHANNEL_AXIS in self.axes.keys():\n self._channel_names = {}\n for key in self.res_levels[0].index.keys():\n axes = {axis: position for axis, position in key}\n if (\n self._CHANNEL_AXIS in axes.keys()\n and axes[self._CHANNEL_AXIS] not in self._channel_names.values()\n ):\n channel_name = self.res_levels[0].read_metadata(axes)[\"Channel\"]\n self._channel_names[channel_name] = axes[self._CHANNEL_AXIS]\n if len(self._channel_names.values()) == len(self.axes[self._CHANNEL_AXIS]):\n break\n\n def _add_index_entry(self, index_entry):\n \"\"\"\n Add entry for a image that has been recieved and is now on disk\n \"\"\"\n with self._lock:\n axes, index_entry = self.res_levels[0].add_index_entry(index_entry)\n\n # update the axes that have been seen\n for axis_name in axes.keys():\n if axis_name not in self.axes.keys():\n self.axes[axis_name] = set()\n self.axes[axis_name].add(axes[axis_name])\n\n # update the map of channel names to channel indices\n self._read_channel_names()\n\n return axes\n\n def _add_storage_monitor_fn(self, 
callback_fn=None, debug=False):\n \"\"\"\n Add a callback function that gets called whenever a new image is writtern to disk (for acquisitions in\n progress only)\n\n Parameters\n ----------\n callback_fn : Callable\n callable with that takes 1 argument, the axes dict of the image just written\n \"\"\"\n if self._remote_storage_monitor is None:\n raise Exception(\"Only valid for datasets with writing in progress\")\n\n connected_event = threading.Event()\n\n push_port = self._remote_storage_monitor.get_port()\n monitor_thread = threading.Thread(\n target=_storage_monitor_fn,\n args=(\n self,\n push_port,\n connected_event,\n callback_fn,\n debug,\n ),\n name=\"ImageProcessor\",\n )\n\n monitor_thread.start()\n\n # not sure if this is neccesary, copied from acq hook\n connected_event.wait() # wait for push/pull sockets to connect\n\n # start pushing out all the image written events (including ones that have already accumulated)\n self._remote_storage_monitor.start()\n return monitor_thread\n\n def get_index_keys(self, res_level=0):\n \"\"\"\n Return a list of every combination of axes that has a image in this dataset\n \"\"\"\n frozen_set_list = list(self.res_levels[res_level].index.keys())\n # convert to dict\n return [{axis_name: position for axis_name, position in key} for key in frozen_set_list]\n\n def as_array(self, axes=None, stitched=False, verbose=True, **kwargs):\n \"\"\"\n Read all data image data as one big Dask array with last two axes as y, x and preceeding axes depending on data.\n The dask array is made up of memory-mapped numpy arrays, so the dataset does not need to be able to fit into RAM.\n If the data doesn't fully fill out the array (e.g. not every z-slice collected at every time point), zeros will\n be added automatically.\n\n To convert data into a numpy array, call np.asarray() on the returned result. However, doing so will bring the\n data into RAM, so it may be better to do this on only a slice of the array at a time.\n\n Parameters\n ----------\n axes : list\n list of axes names over which to iterate and merge into a stacked array. If None, all axes will be used\n stitched : bool\n If true and tiles were acquired in a grid, lay out adjacent tiles next to one another (Default value = False)\n verbose : bool\n If True print updates on progress loading the image\n **kwargs :\n names and integer positions of axes on which to slice data\n Returns\n -------\n dataset : dask array\n \"\"\"\n\n w = self.image_width if not stitched else self._tile_width\n h = self.image_height if not stitched else self._tile_height\n self._empty_tile = (\n np.zeros((h, w), self.dtype)\n if self.bytes_per_pixel != 3\n else np.zeros((h, w, 3), self.dtype)\n )\n self._count = 1\n total = np.prod([len(v) for v in self.axes.values()])\n\n def recurse_axes(loop_axes, point_axes):\n \"\"\"\n Used to create a nested list of images, with each nesting level corresponding to a particular axis.\n Each time this function is recursively called, it will descend one level deeper. The recursive calls\n can be thought of as a tree structure, where each depth level of the tree is one axis, and it has a\n branch (i.e. a subsequent call of recurse_axes) corresponding to every value of the the next axis.\n\n :param loop_axes: The remaining axes that need to be looped over (i.e. 
the innermost ones)\n :param point_axes: The axes that have been assigned values already by a previous call of this function\n\n :return: Nested list of images\n \"\"\"\n if len(loop_axes.values()) == 0:\n # There are no more axes over which to loop (i.e. we're at the maximum depth), so return\n # the image defined by point_axes, or a blank image if it is undefined (so that the full\n # nested list will have the expected rectangular shape)\n if verbose:\n print(\"\\rAdding data chunk {} of {}\".format(self._count, total), end=\"\")\n self._count += 1\n if None not in point_axes.values() and self.has_image(**point_axes):\n recurse_axes.empty = False # track that actual data was read\n if stitched:\n img = self.read_image(**point_axes, memmapped=True)\n if self.half_overlap[0] != 0:\n img = img[\n self.half_overlap[0] : -self.half_overlap[0],\n self.half_overlap[1] : -self.half_overlap[1],\n ]\n return img\n else:\n return self.read_image(**point_axes, memmapped=True)\n else:\n # return np.zeros((self.image_height, self.image_width), self.dtype)\n return self._empty_tile\n else:\n # Still have axes over which to loop\n # do row and col first because it makes stitching faster\n if \"row\" in loop_axes.keys() and stitched:\n axis = \"row\"\n elif \"column\" in loop_axes.keys() and stitched:\n axis = \"column\"\n else:\n # Take the next axis in the list that needs to be looped over\n axis = list(loop_axes.keys())[0]\n\n # copy so multiple calls dont collide on the same data structure\n remaining_loop_axes = loop_axes.copy()\n if (axis == \"row\" or axis == \"column\") and stitched:\n # do these both at once\n del remaining_loop_axes[\"row\"]\n del remaining_loop_axes[\"column\"]\n else:\n # remove because this axis is now being assigned a point value\n del remaining_loop_axes[axis]\n if (axis == \"row\" or axis == \"column\") and stitched:\n # Do stitching along existing axis\n # Stitch tiles acquired in a grid (i.e. data acquired by Micro-Magellan or in multi-res mode)\n self.half_overlap = (self.overlap[0] // 2, self.overlap[1] // 2)\n\n # get spatial layout of position indices\n row_values = np.array(list(self.axes[\"row\"]))\n column_values = np.array(list(self.axes[\"column\"]))\n\n # make nested list of rows and columns\n blocks = []\n for row in row_values:\n blocks.append([])\n for column in column_values:\n valed_axes = point_axes.copy()\n if verbose:\n print(\n \"\\rAdding data chunk {} of {}\".format(self._count, total),\n end=\"\",\n )\n valed_axes[\"row\"] = row\n valed_axes[\"column\"] = column\n\n blocks[-1].append(\n da.stack(recurse_axes(remaining_loop_axes, valed_axes))\n )\n\n rgb = self.bytes_per_pixel == 3 and self.dtype == np.uint8\n if rgb:\n stitched_array = np.concatenate(\n [\n np.concatenate(row, axis=len(blocks[0][0].shape) - 2)\n for row in blocks\n ],\n axis=len(blocks[0][0].shape) - 3,\n )\n else:\n stitched_array = da.block(blocks)\n return stitched_array\n else:\n # Do stacking along new axis (i.e. not stiching along exisitng)\n blocks = []\n # Loop through every value of the next axis (i.e. 
create new branches of the tree)\n for val in loop_axes[axis]:\n # Copy to avoid unexpected errors by multiple calls\n valed_axes = point_axes.copy()\n # Move this axis from one that needs to be looped over to one that has a discrete value.\n valed_axes[axis] = val\n blocks.append(recurse_axes(remaining_loop_axes, valed_axes))\n return blocks\n\n if axes is None:\n axes = self.axes.keys()\n axes_to_slice = kwargs\n axes_to_stack_or_stitch = {key: self.axes[key] for key in axes if key not in kwargs.keys()}\n\n recurse_axes.empty = True\n blocks = recurse_axes(axes_to_stack_or_stitch, axes_to_slice)\n if recurse_axes.empty:\n # No actual data in any of the tiles\n return None\n\n if verbose:\n print(\n \"\\rStacking tiles... \"\n ) # extra space otherwise there is no space after the \"Adding data chunk {} {}\"\n # import time\n # s = time.time()\n array = da.stack(blocks, allow_unknown_chunksizes=False)\n # e = time.time()\n # print(e - s)\n if verbose:\n print(\"\\rDask array opened\")\n # remove singleton axes\n array = da.squeeze(array)\n return array\n\n def has_image(\n self,\n channel=0,\n z=None,\n time=None,\n position=None,\n channel_name=None,\n resolution_level=0,\n row=None,\n col=None,\n **kwargs\n ):\n \"\"\"Check if this image is present in the dataset\n\n Parameters\n ----------\n channel : int\n index of the channel, if applicable (Default value = None)\n z : int\n index of z slice, if applicable (Default value = None)\n time : int\n index of the time point, if applicable (Default value = None)\n position : int\n index of the XY position, if applicable (Default value = None)\n channel_name : str\n Name of the channel. Overrides channel index if supplied (Default value = None)\n row : int\n index of tile row for XY tiled datasets (Default value = None)\n col : int\n index of tile col for XY tiled datasets (Default value = None)\n resolution_level :\n 0 is full resolution, otherwise represents downampling of pixels\n at 2 ** (resolution_level) (Default value = 0)\n **kwargs :\n names and integer positions of any other axes\n\n Returns\n -------\n bool :\n indicating whether the dataset has an image matching the specifications\n \"\"\"\n with self._lock:\n return self.res_levels[0].has_image(\n self._consolidate_axes(channel, channel_name, z, position, time, row, col, kwargs)\n )\n\n def read_image(\n self,\n channel=0,\n z=None,\n time=None,\n position=None,\n row=None,\n col=None,\n channel_name=None,\n resolution_level=0,\n memmapped=False,\n **kwargs\n ):\n \"\"\"\n Read image data as numpy array\n\n Parameters\n ----------\n channel : int\n index of the channel, if applicable (Default value = None)\n z : int\n index of z slice, if applicable (Default value = None)\n time : int\n index of the time point, if applicable (Default value = None)\n position : int\n index of the XY position, if applicable (Default value = None)\n channel_name :\n Name of the channel. 
Overrides channel index if supplied (Default value = None)\n row : int\n index of tile row for XY tiled datasets (Default value = None)\n col : int\n index of tile col for XY tiled datasets (Default value = None)\n resolution_level :\n 0 is full resolution, otherwise represents downampling of pixels\n at 2 ** (resolution_level) (Default value = 0)\n memmapped : bool\n (Default value = False)\n **kwargs :\n names and integer positions of any other axes\n\n Returns\n -------\n image : numpy array or tuple\n image as a 2D numpy array, or tuple with image and image metadata as dict\n\n \"\"\"\n with self._lock:\n axes = self._consolidate_axes(\n channel, channel_name, z, position, time, row, col, kwargs\n )\n\n res_level = self.res_levels[resolution_level]\n return res_level.read_image(axes, memmapped)\n\n def read_metadata(\n self,\n channel=0,\n z=None,\n time=None,\n position=None,\n channel_name=None,\n row=None,\n col=None,\n resolution_level=0,\n **kwargs\n ):\n \"\"\"\n Read metadata only. Faster than using read_image to retrieve metadata\n\n Parameters\n ----------\n channel : int\n index of the channel, if applicable (Default value = None)\n z : int\n index of z slice, if applicable (Default value = None)\n time : int\n index of the time point, if applicable (Default value = None)\n position : int\n index of the XY position, if applicable (Default value = None)\n channel_name :\n Name of the channel. Overrides channel index if supplied (Default value = None)\n row : int\n index of tile row for XY tiled datasets (Default value = None)\n col : int\n index of tile col for XY tiled datasets (Default value = None)\n resolution_level :\n 0 is full resolution, otherwise represents downampling of pixels\n at 2 ** (resolution_level) (Default value = 0)\n **kwargs :\n names and integer positions of any other axes\n\n Returns\n -------\n metadata : dict\n\n \"\"\"\n with self._lock:\n axes = self._consolidate_axes(\n channel, channel_name, z, position, time, row, col, kwargs\n )\n\n res_level = self.res_levels[resolution_level]\n return res_level.read_metadata(axes)\n\n def close(self):\n with self._lock:\n for res_level in self.res_levels:\n res_level.close()\n\n def get_channel_names(self):\n with self._lock:\n return self._channel_names.keys()\n\n def _consolidate_axes(self, channel, channel_name, z, position, time, row, col, kwargs):\n axes = {}\n if channel is not None:\n axes[self._CHANNEL_AXIS] = channel\n if channel_name is not None:\n axes[self._CHANNEL_AXIS] = self._channel_names[channel_name]\n if z is not None:\n axes[self._Z_AXIS] = z\n if position is not None:\n axes[self._POSITION_AXIS] = position\n if time is not None:\n axes[self._TIME_AXIS] = time\n if row is not None:\n axes[self._ROW_AXIS] = row\n if col is not None:\n axes[self._COLUMN_AXIS] = col\n for other_axis_name in kwargs.keys():\n axes[other_axis_name] = kwargs[other_axis_name]\n return axes\n" ]
[ [ "numpy.memmap", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Fakor/HojDoj
[ "5363b38e5d145785208ef81b9033132c7ac3b2f2" ]
[ "hojdoj/DTools/tools.py" ]
[ "import numpy as np\nimport PIL\nfrom PIL import ImageTk\n\n\ndef value_to_string(value):\n if isinstance(value, str):\n return '\"{}\"'.format(value)\n return str(value)\n\n\ndef value_from_string(text):\n txt_tmp = text.strip(\" \\\"\\'\")\n if txt_tmp[0] == '(' or txt_tmp[0] == '[':\n txt_tmp = txt_tmp.strip('()[]')\n return tuple((value_from_string(el) for el in txt_tmp.split(',')))\n try:\n return int(txt_tmp)\n except ValueError:\n try:\n return float(txt_tmp)\n except ValueError:\n return txt_tmp\n\n\ndef sum_points(*points, min_value=None):\n values = [sum(el) for el in zip(*points)]\n if min_value is not None:\n values = [max(el, 0) for el in values]\n return tuple(values)\n\n\ndef count_jump_range(velocity, acceleration):\n dx, dy = velocity\n _, acc_y = acceleration\n dy = np.abs(dy)\n acc_y = -np.abs(acc_y)\n rng = 0\n pos = 0\n dy += acc_y\n while np.sign(dy) == 1 or np.abs(dy) < pos:\n pos += dy\n rng += np.hypot(dx, dy)\n dy += acc_y\n overshot = np.abs(pos/dy)\n rng += np.hypot(dx, dy)*overshot\n return rng\n\n\ndef image_replace_white(image, new):\n image = image.convert('RGBA')\n data = np.array(image)\n red, green, blue, alpha = data.T\n white_areas = (red == 255) & (blue == 255) & (green == 255)\n data[..., :-1][white_areas.T] = new\n return PIL.Image.fromarray(data)\n\n\ndef image_replace_elastic(image, elastic_image, vertical=True):\n image = image.convert('RGBA')\n data = np.array(image)\n\n if vertical:\n el_back = elastic_background_vertical(elastic_image, (data.shape[1], data.shape[0]), as_photo_image=False)\n else:\n el_back = elastic_background_horizontal(elastic_image, (data.shape[1], data.shape[0]), as_photo_image=False)\n elastic_data = np.array(el_back)\n red, green, blue, alpha = data.T\n white_areas = (red == 255) & (blue == 255) & (green == 255)\n data[..., :-1][white_areas.T] = elastic_data[..., :3][white_areas.T]\n return PIL.Image.fromarray(data)\n\n\ndef elastic_background_horizontal(elastic_image, size, as_photo_image=True):\n data = np.array(elastic_image)\n rows, cols, K = data.shape\n\n new_cols, new_rows = size\n\n new_data = np.zeros((new_rows, new_cols, K), dtype=np.uint8)\n\n current_row = 0\n current_col = 0\n\n org_col = 0\n while True:\n M = np.min((rows, new_rows - current_row))\n N = np.min((new_cols - current_col, cols-org_col))\n new_data[current_row:current_row+M,current_col:current_col+N,:] = data[0:M,org_col:org_col+N,:]\n if current_col+N == new_cols:\n if current_row + M == new_rows:\n break\n current_col = 0\n current_row = current_row + M\n else:\n current_col = current_col + N\n org_col = org_col + N\n if org_col == cols:\n org_col = 0\n img = PIL.Image.fromarray(new_data)\n\n if as_photo_image:\n return ImageTk.PhotoImage(img)\n return img\n\n\ndef elastic_background_vertical(elastic_image, size, as_photo_image=True):\n data = np.array(elastic_image)\n rows, cols, K = data.shape\n\n new_cols, new_rows = size\n\n new_data = np.zeros((new_rows, new_cols, K), dtype=np.uint8)\n\n current_row = 0\n current_col = 0\n\n org_row = 0\n while True:\n M = np.min((rows-org_row, new_rows - current_row))\n N = np.min((new_cols - current_col, cols))\n new_data[current_row:current_row+M,current_col:current_col+N,:] = data[org_row:org_row+M,0:N,:]\n if current_row+M == new_rows:\n if current_col + N == new_cols:\n break\n current_row = 0\n current_col = current_col + N\n else:\n current_row = current_row + M\n org_row = org_row + M\n if org_row == rows:\n org_row = 0\n img = PIL.Image.fromarray(new_data)\n\n if as_photo_image:\n return 
PIL.ImageTk.PhotoImage(img)\n return img\n" ]
[ [ "numpy.abs", "numpy.min", "numpy.sign", "numpy.array", "numpy.zeros", "numpy.hypot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
felixhao28/PaddleNLP
[ "e60dec5c6b2da2991d26b7c2c66e58c3c382532b" ]
[ "examples/experimental/faster_ernie/seq_cls/train.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\nimport argparse\nimport os\nimport random\nimport time\n\nimport numpy as np\nimport paddle\nimport paddle.nn.functional as F\nimport paddlenlp as ppnlp\nfrom paddlenlp.datasets import load_dataset\nfrom paddlenlp.transformers import LinearDecayWithWarmup\nfrom paddlenlp.experimental import FasterErnieForSequenceClassification, to_tensor\n\n# yapf: disable\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--save_dir\", default='./checkpoint', type=str, help=\"The output directory where the model checkpoints will be written.\")\nparser.add_argument(\"--max_seq_length\", default=128, type=int, help=\"The maximum total input sequence length after tokenization. \"\n \"Sequences longer than this will be truncated, sequences shorter will be padded.\")\nparser.add_argument(\"--batch_size\", default=32, type=int, help=\"Batch size per GPU/CPU for training.\")\nparser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\nparser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\nparser.add_argument(\"--epochs\", default=3, type=int, help=\"Total number of training epochs to perform.\")\nparser.add_argument(\"--warmup_proportion\", default=0.0, type=float, help=\"Linear warmup proption over the training process.\")\nparser.add_argument(\"--init_from_ckpt\", type=str, default=None, help=\"The path of checkpoint to be loaded.\")\nparser.add_argument(\"--seed\", type=int, default=1000, help=\"random seed for initialization\")\nparser.add_argument('--device', choices=['cpu', 'gpu', 'xpu'], default=\"gpu\", help=\"Select which device to train model, defaults to gpu.\")\nargs = parser.parse_args()\n# yapf: enable\n\n\ndef set_seed(seed):\n \"\"\"sets random seed\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n paddle.seed(seed)\n\n\[email protected]_grad()\ndef evaluate(model, criterion, metric, data_loader):\n model.eval()\n metric.reset()\n losses = []\n for batch in data_loader:\n texts, labels = batch['text'], batch['label']\n texts = to_tensor(texts, \"texts\")\n logits, predictions = model(texts)\n loss = criterion(logits, labels)\n losses.append(loss.numpy())\n correct = metric.compute(logits, labels)\n metric.update(correct)\n accu = metric.accumulate()\n print(\"eval loss: %.5f, accu: %.5f\" % (np.mean(losses), accu))\n model.train()\n metric.reset()\n\n\ndef create_dataloader(dataset, mode='train', batch_size=1):\n\n shuffle = True if mode == 'train' else False\n if mode == 'train':\n batch_sampler = paddle.io.DistributedBatchSampler(\n dataset, batch_size=batch_size, shuffle=shuffle)\n else:\n batch_sampler = paddle.io.BatchSampler(\n dataset, batch_size=batch_size, shuffle=shuffle)\n\n return paddle.io.DataLoader(dataset=dataset, batch_sampler=batch_sampler)\n\n\ndef do_train():\n paddle.set_device(args.device)\n set_seed(args.seed)\n\n 
train_ds, dev_ds = load_dataset(\"chnsenticorp\", splits=[\"train\", \"dev\"])\n\n model = FasterErnieForSequenceClassification.from_pretrained(\n 'ernie-1.0',\n num_classes=len(train_ds.label_list),\n max_seq_len=args.max_seq_length)\n\n train_data_loader = create_dataloader(\n train_ds, mode='train', batch_size=args.batch_size)\n dev_data_loader = create_dataloader(\n dev_ds, mode='dev', batch_size=args.batch_size)\n\n if args.init_from_ckpt and os.path.isfile(args.init_from_ckpt):\n state_dict = paddle.load(args.init_from_ckpt)\n model.set_dict(state_dict)\n\n num_training_steps = len(train_data_loader) * args.epochs\n\n lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,\n args.warmup_proportion)\n\n # Generate parameter names needed to perform weight decay.\n # All bias and LayerNorm parameters are excluded.\n decay_params = [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd in [\"bias\", \"norm\"])\n ]\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n parameters=model.parameters(),\n weight_decay=args.weight_decay,\n apply_decay_param_fun=lambda x: x in decay_params)\n\n criterion = paddle.nn.loss.CrossEntropyLoss()\n metric = paddle.metric.Accuracy()\n\n global_step = 0\n tic_train = time.time()\n for epoch in range(1, args.epochs + 1):\n for step, batch in enumerate(train_data_loader, start=1):\n texts, labels = batch[\"text\"], batch[\"label\"]\n texts = to_tensor(texts)\n logits, predictions = model(texts)\n loss = criterion(logits, labels)\n probs = F.softmax(logits, axis=1)\n correct = metric.compute(logits, labels)\n metric.update(correct)\n acc = metric.accumulate()\n\n global_step += 1\n if global_step % 10 == 0:\n print(\n \"global step %d, epoch: %d, batch: %d, loss: %.5f, accu: %.5f, speed: %.2f step/s\"\n % (global_step, epoch, step, loss, acc,\n 10 / (time.time() - tic_train)))\n tic_train = time.time()\n loss.backward()\n optimizer.step()\n lr_scheduler.step()\n optimizer.clear_grad()\n if global_step % 100 == 0:\n save_dir = os.path.join(args.save_dir, \"model_%d\" % global_step)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n evaluate(model, criterion, metric, dev_data_loader)\n model.save_pretrained(save_dir)\n\n\nif __name__ == \"__main__\":\n do_train()\n" ]
[ [ "numpy.mean", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
taoyilee/ml_final_project
[ "0ac5ee3938d70e9ffcae8e186e0ef1a621391980" ]
[ "models/problem_2.py" ]
[ "import numpy as np\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import StandardScaler\nfrom multiprocessing import Pool\nimport matplotlib.pyplot as plt\nimport pickle\n\n\ndef sample_and_split(raw_data, train_percentage=10, dev_percentage=10):\n np.random.shuffle(raw_data)\n n_trn_data = np.int((train_percentage / 100) * raw_data.shape[0])\n n_dev_data = np.int((dev_percentage / 100) * raw_data.shape[0])\n x_trn_raw = raw_data[:n_trn_data, :-1]\n x_dev_raw = raw_data[n_trn_data:n_trn_data + n_dev_data, :-1]\n y_trn = raw_data[:n_trn_data, -1]\n y_dev = raw_data[n_trn_data:n_trn_data + n_dev_data, -1]\n print(f\"Training with {x_trn_raw.shape[0]}({100*x_trn_raw.shape[0]/raw_data.shape[0]:.1f}%) points\")\n print(f\"Validating with {x_dev_raw.shape[0]}({100*x_dev_raw.shape[0]/raw_data.shape[0]:.1f}%) points\")\n scaler = StandardScaler()\n scaler.fit(x_trn_raw)\n x_trn_scaled = scaler.transform(x_trn_raw)\n x_dev_scaled = scaler.transform(x_dev_raw)\n return x_trn_scaled, x_dev_scaled, y_trn, y_dev\n\n\nclass Trainer(object):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __call__(self, learner):\n # print(f\"Fitting learner {learner}\")\n learner.fit(self.x, self.y)\n return learner\n\n\ndef sample_train(reg, raw_data, trn_p, dev_p):\n print(f\"Resamping...\")\n learners = [SGDClassifier(loss=\"log\", penalty=\"l2\", max_iter=500, alpha=r) for r in reg]\n x_trn_scaled, x_dev_scaled, y_trn, y_dev = sample_and_split(raw_data, train_percentage=trn_p,\n dev_percentage=dev_p)\n with Pool(4) as p:\n learners = p.map(Trainer(x_trn_scaled, y_trn), learners)\n training_auc = [roc_auc_score(y_trn, l.predict_proba(x_trn_scaled)[:, 1]) for l in learners]\n validating_auc = [roc_auc_score(y_dev, l.predict_proba(x_dev_scaled)[:, 1]) for l in learners]\n return training_auc, validating_auc\n\n\ndef plot_linear(train_pickle, dev_pickle, name=\"2a\"):\n with open(train_pickle, \"rb\") as f:\n training_auc = pickle.load(f)\n with open(dev_pickle, \"rb\") as f:\n validating_auc = pickle.load(f)\n reg = np.logspace(-4, 0, 20)\n reg = reg.round(6)\n plt.figure()\n plt.semilogx(reg, np.mean(training_auc, axis=0), marker=\"s\", label=\"Training AUC\", color=\"blue\")\n plt.semilogx(reg, np.mean(validating_auc, axis=0), marker=\"x\", label=\"Validating AUC\", color=\"red\")\n plt.fill_between(reg, np.min(training_auc, axis=0), np.max(training_auc, axis=0), color=\"blue\", alpha=0.1,\n label='Training (Max-Min)')\n plt.fill_between(reg, np.min(validating_auc, axis=0), np.max(validating_auc, axis=0), color=\"red\", alpha=0.1,\n label='Validation (Max-Min)')\n # plt.fill_between(reg, np.mean(training_auc, axis=0) - np.std(training_auc, axis=0),\n # np.mean(training_auc, axis=0) + np.std(training_auc, axis=0), color=\"blue\", alpha=0.5,\n # label='Training (1sigma)')\n # plt.fill_between(reg, np.mean(validating_auc, axis=0) - np.std(validating_auc, axis=0),\n # np.mean(validating_auc, axis=0) + np.std(validating_auc, axis=0), color=\"red\", alpha=0.5,\n # label='Validation (1sigma)')\n plt.xlabel(f\"L2 Regularization Coefficient\")\n plt.title(f\"AUROC over L2 Regularization Coefficient - {name}\")\n plt.legend()\n plt.grid()\n plt.ylabel(\"AUROC\")\n plt.savefig(f\"plot/linear_classifier_{name}.png\")\n plt.close(\"all\")\n for i in range(training_auc.shape[0]):\n plt.figure()\n plt.semilogx(reg, training_auc[i], marker=\"s\", label=\"Training AUC\", color=\"blue\")\n plt.semilogx(reg, validating_auc[i], marker=\"x\", 
label=\"Validating AUC\", color=\"red\")\n plt.title(f\"Sampling #{i+1} - {name}\")\n plt.xlabel(f\"L2 Regularization Coefficient\")\n plt.legend()\n plt.grid()\n plt.ylabel(\"AUROC\")\n plt.savefig(f\"plot/linear_classifier_{name}_{i}.png\")\n plt.close(\"all\")\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.min", "numpy.logspace", "matplotlib.pyplot.semilogx", "numpy.random.shuffle", "matplotlib.pyplot.savefig", "numpy.int", "matplotlib.pyplot.ylabel", "numpy.max", "numpy.mean", "matplotlib.pyplot.grid", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler", "sklearn.linear_model.SGDClassifier", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
carolineechen/vision
[ "07fb8ba7fad7b5b458ff862919825df4e6f60b52" ]
[ "torchvision/transforms/transforms.py" ]
[ "import math\nimport numbers\nimport random\nimport warnings\nfrom collections.abc import Sequence\nfrom typing import Tuple, List, Optional\n\nimport torch\nfrom torch import Tensor\n\ntry:\n import accimage\nexcept ImportError:\n accimage = None\n\nfrom . import functional as F\nfrom .functional import InterpolationMode, _interpolation_modes_from_int\n\n\n__all__ = [\"Compose\", \"ToTensor\", \"PILToTensor\", \"ConvertImageDtype\", \"ToPILImage\", \"Normalize\", \"Resize\", \"Scale\",\n \"CenterCrop\", \"Pad\", \"Lambda\", \"RandomApply\", \"RandomChoice\", \"RandomOrder\", \"RandomCrop\",\n \"RandomHorizontalFlip\", \"RandomVerticalFlip\", \"RandomResizedCrop\", \"RandomSizedCrop\", \"FiveCrop\", \"TenCrop\",\n \"LinearTransformation\", \"ColorJitter\", \"RandomRotation\", \"RandomAffine\", \"Grayscale\", \"RandomGrayscale\",\n \"RandomPerspective\", \"RandomErasing\", \"GaussianBlur\", \"InterpolationMode\", \"RandomInvert\", \"RandomPosterize\",\n \"RandomSolarize\", \"RandomAdjustSharpness\", \"RandomAutocontrast\", \"RandomEqualize\"]\n\n\nclass Compose:\n \"\"\"Composes several transforms together. This transform does not support torchscript.\n Please, see the note below.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n\n .. note::\n In order to script the transformations, please use ``torch.nn.Sequential`` as below.\n\n >>> transforms = torch.nn.Sequential(\n >>> transforms.CenterCrop(10),\n >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n >>> )\n >>> scripted_transforms = torch.jit.script(transforms)\n\n Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require\n `lambda` functions or ``PIL.Image``.\n\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass ToTensor:\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform does not support torchscript.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]\n if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)\n or if the numpy.ndarray has dtype = np.uint8\n\n In the other cases, tensors are returned without scaling.\n\n .. note::\n Because the input image is scaled to [0.0, 1.0], this transformation should not be used when\n transforming target image masks. See the `references`_ for implementing the transforms for image masks.\n\n .. _references: https://github.com/pytorch/vision/tree/master/references/segmentation\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image or numpy.ndarray): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass PILToTensor:\n \"\"\"Convert a ``PIL Image`` to a tensor of the same type. 
This transform does not support torchscript.\n\n Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).\n \"\"\"\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (PIL Image): Image to be converted to tensor.\n\n Returns:\n Tensor: Converted image.\n \"\"\"\n return F.pil_to_tensor(pic)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass ConvertImageDtype(torch.nn.Module):\n \"\"\"Convert a tensor image to the given ``dtype`` and scale the values accordingly\n This function does not support PIL Image.\n\n Args:\n dtype (torch.dtype): Desired data type of the output\n\n .. note::\n\n When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.\n If converted back and forth, this mismatch has no effect.\n\n Raises:\n RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as\n well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to\n overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range\n of the integer ``dtype``.\n \"\"\"\n\n def __init__(self, dtype: torch.dtype) -> None:\n super().__init__()\n self.dtype = dtype\n\n def forward(self, image):\n return F.convert_image_dtype(image, self.dtype)\n\n\nclass ToPILImage:\n \"\"\"Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript.\n\n Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape\n H x W x C to a PIL Image while preserving the value range.\n\n Args:\n mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).\n If ``mode`` is ``None`` (default) there are some assumptions made about the input data:\n - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.\n - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.\n - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.\n - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,\n ``short``).\n\n .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes\n \"\"\"\n def __init__(self, mode=None):\n self.mode = mode\n\n def __call__(self, pic):\n \"\"\"\n Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n\n Returns:\n PIL Image: Image converted to PIL Image.\n\n \"\"\"\n return F.to_pil_image(pic, self.mode)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n if self.mode is not None:\n format_string += 'mode={0}'.format(self.mode)\n format_string += ')'\n return format_string\n\n\nclass Normalize(torch.nn.Module):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n This transform does not support PIL Image.\n Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``\n channels, this transform will normalize each channel of the input\n ``torch.*Tensor`` i.e.,\n ``output[channel] = (input[channel] - mean[channel]) / std[channel]``\n\n .. 
note::\n This transform acts out of place, i.e., it does not mutate the input tensor.\n\n Args:\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n inplace(bool,optional): Bool to make this operation in-place.\n\n \"\"\"\n\n def __init__(self, mean, std, inplace=False):\n super().__init__()\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def forward(self, tensor: Tensor) -> Tensor:\n \"\"\"\n Args:\n tensor (Tensor): Tensor image to be normalized.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n return F.normalize(tensor, self.mean, self.std, self.inplace)\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\n\nclass Resize(torch.nn.Module):\n \"\"\"Resize the input image to the given size.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n .. warning::\n The output image might be different depending on its type: when downsampling, the interpolation of PIL images\n and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences\n in the performance of a network. Therefore, it is preferable to train and serve a model with the same input\n types.\n\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), output size will be matched to this. If size is an int,\n smaller edge of the image will be matched to this number.\n i.e, if height > width, then image will be rescaled to\n (size * height / width, size).\n In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and\n ``InterpolationMode.BICUBIC`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n max_size (int, optional): The maximum allowed for the longer edge of\n the resized image: if the longer edge of the image is greater\n than ``max_size`` after being resized according to ``size``, then\n the image is resized again so that the longer edge is equal to\n ``max_size``. As a result, ```size` might be overruled, i.e the\n smaller edge may be shorter than ``size``. This is only supported\n if ``size`` is an int (or a sequence of length 1 in torchscript\n mode).\n\n \"\"\"\n\n def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None):\n super().__init__()\n if not isinstance(size, (int, Sequence)):\n raise TypeError(\"Size should be int or sequence. Got {}\".format(type(size)))\n if isinstance(size, Sequence) and len(size) not in (1, 2):\n raise ValueError(\"If size is a sequence, it should have 1 or 2 values\")\n self.size = size\n self.max_size = max_size\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. 
\"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.interpolation = interpolation\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be scaled.\n\n Returns:\n PIL Image or Tensor: Rescaled image.\n \"\"\"\n return F.resize(img, self.size, self.interpolation, self.max_size)\n\n def __repr__(self):\n interpolate_str = self.interpolation.value\n return self.__class__.__name__ + '(size={0}, interpolation={1}, max_size={2})'.format(\n self.size, interpolate_str, self.max_size)\n\n\nclass Scale(Resize):\n \"\"\"\n Note: This transform is deprecated in favor of Resize.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.Scale transform is deprecated, \" +\n \"please use transforms.Resize instead.\")\n super(Scale, self).__init__(*args, **kwargs)\n\n\nclass CenterCrop(torch.nn.Module):\n \"\"\"Crops the given image at the center.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n return F.center_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass Pad(torch.nn.Module):\n \"\"\"Pad the given image on all sides with the given \"pad\" value.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,\n at most 3 leading dimensions for mode edge,\n and an arbitrary number of leading dimensions for mode constant\n\n Args:\n padding (int or sequence): Padding on each border. If a single int is provided this\n is used to pad all borders. If sequence of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a sequence of length 4 is provided\n this is the padding for the left, top, right and bottom borders respectively.\n In torchscript mode padding as single int is not supported, use a sequence of length 1: ``[padding, ]``.\n fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant.\n Only number is supported for torch Tensor.\n Only int or str or tuple value is supported for PIL Image.\n padding_mode (str): Type of padding. 
Should be: constant, edge, reflect or symmetric.\n Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value at the edge of the image,\n if input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2\n\n - reflect: pads with reflection of image without repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image repeating the last value on the edge\n\n For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def __init__(self, padding, fill=0, padding_mode=\"constant\"):\n super().__init__()\n if not isinstance(padding, (numbers.Number, tuple, list)):\n raise TypeError(\"Got inappropriate padding arg\")\n\n if not isinstance(fill, (numbers.Number, str, tuple)):\n raise TypeError(\"Got inappropriate fill arg\")\n\n if padding_mode not in [\"constant\", \"edge\", \"reflect\", \"symmetric\"]:\n raise ValueError(\"Padding mode should be either constant, edge, reflect or symmetric\")\n\n if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:\n raise ValueError(\"Padding must be an int or a 1, 2, or 4 element tuple, not a \" +\n \"{} element tuple\".format(len(padding)))\n\n self.padding = padding\n self.fill = fill\n self.padding_mode = padding_mode\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be padded.\n\n Returns:\n PIL Image or Tensor: Padded image.\n \"\"\"\n return F.pad(img, self.padding, self.fill, self.padding_mode)\n\n def __repr__(self):\n return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\\\n format(self.padding, self.fill, self.padding_mode)\n\n\nclass Lambda:\n \"\"\"Apply a user-defined lambda as a transform. This transform does not support torchscript.\n\n Args:\n lambd (function): Lambda/function to be used for transform.\n \"\"\"\n\n def __init__(self, lambd):\n if not callable(lambd):\n raise TypeError(\"Argument lambd should be callable, got {}\".format(repr(type(lambd).__name__)))\n self.lambd = lambd\n\n def __call__(self, img):\n return self.lambd(img)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass RandomTransforms:\n \"\"\"Base class for a list of transformations with randomness\n\n Args:\n transforms (sequence): list of transformations\n \"\"\"\n\n def __init__(self, transforms):\n if not isinstance(transforms, Sequence):\n raise TypeError(\"Argument transforms should be a sequence\")\n self.transforms = transforms\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError()\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomApply(torch.nn.Module):\n \"\"\"Apply randomly a list of transformations with a given probability.\n\n .. note::\n In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of\n transforms as shown below:\n\n >>> transforms = transforms.RandomApply(torch.nn.ModuleList([\n >>> transforms.ColorJitter(),\n >>> ]), p=0.3)\n >>> scripted_transforms = torch.jit.script(transforms)\n\n Make sure to use only scriptable transformations, i.e. 
that work with ``torch.Tensor``, does not require\n `lambda` functions or ``PIL.Image``.\n\n Args:\n transforms (sequence or torch.nn.Module): list of transformations\n p (float): probability\n \"\"\"\n\n def __init__(self, transforms, p=0.5):\n super().__init__()\n self.transforms = transforms\n self.p = p\n\n def forward(self, img):\n if self.p < torch.rand(1):\n return img\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += '\\n p={}'.format(self.p)\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass RandomOrder(RandomTransforms):\n \"\"\"Apply a list of transformations in a random order. This transform does not support torchscript.\n \"\"\"\n def __call__(self, img):\n order = list(range(len(self.transforms)))\n random.shuffle(order)\n for i in order:\n img = self.transforms[i](img)\n return img\n\n\nclass RandomChoice(RandomTransforms):\n \"\"\"Apply single transformation randomly picked from a list. This transform does not support torchscript.\n \"\"\"\n def __call__(self, img):\n t = random.choice(self.transforms)\n return t(img)\n\n\nclass RandomCrop(torch.nn.Module):\n \"\"\"Crop the given image at a random location.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions,\n but if non-constant padding is used, the input is expected to have at most 2 leading dimensions\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n padding (int or sequence, optional): Optional padding on each border\n of the image. Default is None. If a single int is provided this\n is used to pad all borders. If sequence of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a sequence of length 4 is provided\n this is the padding for the left, top, right and bottom borders respectively.\n In torchscript mode padding as single int is not supported, use a sequence of length 1: ``[padding, ]``.\n pad_if_needed (boolean): It will pad the image if smaller than the\n desired size to avoid raising an exception. Since cropping is done\n after padding, the padding seems to be done at a random offset.\n fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant.\n Only number is supported for torch Tensor.\n Only int or str or tuple value is supported for PIL Image.\n padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. 
Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n \"\"\"\n\n @staticmethod\n def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:\n \"\"\"Get parameters for ``crop`` for a random crop.\n\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n output_size (tuple): Expected output size of the crop.\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.\n \"\"\"\n w, h = F._get_image_size(img)\n th, tw = output_size\n\n if h + 1 < th or w + 1 < tw:\n raise ValueError(\n \"Required crop size {} is larger than input image size {}\".format((th, tw), (h, w))\n )\n\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = torch.randint(0, h - th + 1, size=(1, )).item()\n j = torch.randint(0, w - tw + 1, size=(1, )).item()\n return i, j, th, tw\n\n def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode=\"constant\"):\n super().__init__()\n\n self.size = tuple(_setup_size(\n size, error_msg=\"Please provide only two dimensions (h, w) for size.\"\n ))\n\n self.padding = padding\n self.pad_if_needed = pad_if_needed\n self.fill = fill\n self.padding_mode = padding_mode\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n PIL Image or Tensor: Cropped image.\n \"\"\"\n if self.padding is not None:\n img = F.pad(img, self.padding, self.fill, self.padding_mode)\n\n width, height = F._get_image_size(img)\n # pad the width if needed\n if self.pad_if_needed and width < self.size[1]:\n padding = [self.size[1] - width, 0]\n img = F.pad(img, padding, self.fill, self.padding_mode)\n # pad the height if needed\n if self.pad_if_needed and height < self.size[0]:\n padding = [0, self.size[0] - height]\n img = F.pad(img, padding, self.fill, self.padding_mode)\n\n i, j, h, w = self.get_params(img, self.size)\n\n return F.crop(img, i, j, h, w)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(size={0}, padding={1})\".format(self.size, self.padding)\n\n\nclass RandomHorizontalFlip(torch.nn.Module):\n \"\"\"Horizontally flip the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return F.hflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomVerticalFlip(torch.nn.Module):\n \"\"\"Vertically flip the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... 
means an arbitrary number of leading\n dimensions\n\n Args:\n p (float): probability of the image being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be flipped.\n\n Returns:\n PIL Image or Tensor: Randomly flipped image.\n \"\"\"\n if torch.rand(1) < self.p:\n return F.vflip(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomPerspective(torch.nn.Module):\n \"\"\"Performs a random perspective transformation of the given image with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.\n Default is 0.5.\n p (float): probability of the image being transformed. Default is 0.5.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n fill (sequence or number): Pixel fill value for the area outside the transformed\n image. Default is ``0``. If given a number, the value is used for all bands respectively.\n If input is PIL Image, the options is only available for ``Pillow>=5.0.0``.\n \"\"\"\n\n def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0):\n super().__init__()\n self.p = p\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. 
\"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.interpolation = interpolation\n self.distortion_scale = distortion_scale\n\n if fill is None:\n fill = 0\n elif not isinstance(fill, (Sequence, numbers.Number)):\n raise TypeError(\"Fill should be either a sequence or a number.\")\n\n self.fill = fill\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be Perspectively transformed.\n\n Returns:\n PIL Image or Tensor: Randomly transformed image.\n \"\"\"\n\n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F._get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n\n if torch.rand(1) < self.p:\n width, height = F._get_image_size(img)\n startpoints, endpoints = self.get_params(width, height, self.distortion_scale)\n return F.perspective(img, startpoints, endpoints, self.interpolation, fill)\n return img\n\n @staticmethod\n def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]:\n \"\"\"Get parameters for ``perspective`` for a random perspective transform.\n\n Args:\n width (int): width of the image.\n height (int): height of the image.\n distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.\n\n Returns:\n List containing [top-left, top-right, bottom-right, bottom-left] of the original image,\n List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.\n \"\"\"\n half_height = height // 2\n half_width = width // 2\n topleft = [\n int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),\n int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())\n ]\n topright = [\n int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),\n int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item())\n ]\n botright = [\n int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()),\n int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())\n ]\n botleft = [\n int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()),\n int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item())\n ]\n startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]\n endpoints = [topleft, topright, botright, botleft]\n return startpoints, endpoints\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomResizedCrop(torch.nn.Module):\n \"\"\"Crop a random portion of image and resize it to a given size.\n\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions\n\n A crop of the original image is made: the crop has a random area (H * W)\n and a random aspect ratio. This crop is finally resized to the given\n size. This is popularly used to train the Inception networks.\n\n Args:\n size (int or sequence): expected output size of the crop, for each edge. If size is an\n int instead of sequence like (h, w), a square output size ``(size, size)`` is\n made. 
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.\n scale (tuple of float): Specifies the lower and upper bounds for the random area of the crop,\n before resizing. The scale is defined with respect to the area of the original image.\n ratio (tuple of float): lower and upper bounds for the random aspect ratio of the crop, before\n resizing.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and\n ``InterpolationMode.BICUBIC`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n\n \"\"\"\n\n def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=InterpolationMode.BILINEAR):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n if not isinstance(scale, Sequence):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, Sequence):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. \"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.interpolation = interpolation\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(\n img: Tensor, scale: List[float], ratio: List[float]\n ) -> Tuple[int, int, int, int]:\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image or Tensor): Input image.\n scale (list): range of scale of the origin size cropped\n ratio (list): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n width, height = F._get_image_size(img)\n area = height * width\n\n log_ratio = torch.log(torch.tensor(ratio))\n for _ in range(10):\n target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(log_ratio[0], log_ratio[1])\n ).item()\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < w <= width and 0 < h <= height:\n i = torch.randint(0, height - h + 1, size=(1,)).item()\n j = torch.randint(0, width - w + 1, size=(1,)).item()\n return i, j, h, w\n\n # Fallback to central crop\n in_ratio = float(width) / float(height)\n if in_ratio < min(ratio):\n w = width\n h = int(round(w / min(ratio)))\n elif in_ratio > max(ratio):\n h = height\n w = int(round(h * max(ratio)))\n else: # whole image\n w = width\n h = height\n i = (height - h) // 2\n j = (width - w) // 2\n return i, j, h, w\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped and resized.\n\n Returns:\n PIL Image or Tensor: Randomly cropped and resized image.\n \"\"\"\n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)\n\n def __repr__(self):\n 
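# Show the interpolation enum's readable string value (e.g. bilinear) and round scale/ratio to four decimals for a compact, stable repr.\n 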
interpolate_str = self.interpolation.value\n format_string = self.__class__.__name__ + '(size={0}'.format(self.size)\n format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))\n format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))\n format_string += ', interpolation={0})'.format(interpolate_str)\n return format_string\n\n\nclass RandomSizedCrop(RandomResizedCrop):\n \"\"\"\n Note: This transform is deprecated in favor of RandomResizedCrop.\n \"\"\"\n def __init__(self, *args, **kwargs):\n warnings.warn(\"The use of the transforms.RandomSizedCrop transform is deprecated, \" +\n \"please use transforms.RandomResizedCrop instead.\")\n super(RandomSizedCrop, self).__init__(*args, **kwargs)\n\n\nclass FiveCrop(torch.nn.Module):\n \"\"\"Crop the given image into four corners and the central crop.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an ``int``\n instead of sequence like (h, w), a square crop of size (size, size) is made.\n If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n\n Example:\n >>> transform = Compose([\n >>> FiveCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n tuple of 5 images. Image can be PIL Image or Tensor\n \"\"\"\n return F.five_crop(img, self.size)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass TenCrop(torch.nn.Module):\n \"\"\"Crop the given image into four corners and the central crop plus the flipped version of\n these (horizontal flipping is used by default).\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading\n dimensions\n\n .. Note::\n This transform returns a tuple of images and there may be a mismatch in the number of\n inputs and targets your Dataset returns. See below for an example of how to deal with\n this.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made. 
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).\n vertical_flip (bool): Use vertical flipping instead of horizontal\n\n Example:\n >>> transform = Compose([\n >>> TenCrop(size), # this is a list of PIL Images\n >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor\n >>> ])\n >>> #In your test loop you can do the following:\n >>> input, target = batch # input is a 5d tensor, target is 2d\n >>> bs, ncrops, c, h, w = input.size()\n >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops\n >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops\n \"\"\"\n\n def __init__(self, size, vertical_flip=False):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n self.vertical_flip = vertical_flip\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be cropped.\n\n Returns:\n tuple of 10 images. Image can be PIL Image or Tensor\n \"\"\"\n return F.ten_crop(img, self.size, self.vertical_flip)\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)\n\n\nclass LinearTransformation(torch.nn.Module):\n \"\"\"Transform a tensor image with a square transformation matrix and a mean_vector computed\n offline.\n This transform does not support PIL Image.\n Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and\n subtract mean_vector from it which is then followed by computing the dot\n product with the transformation matrix and then reshaping the tensor to its\n original shape.\n\n Applications:\n whitening transformation: Suppose X is a column vector zero-centered data.\n Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),\n perform SVD on this matrix and pass it as transformation_matrix.\n\n Args:\n transformation_matrix (Tensor): tensor [D x D], D = C x H x W\n mean_vector (Tensor): tensor [D], D = C x H x W\n \"\"\"\n\n def __init__(self, transformation_matrix, mean_vector):\n super().__init__()\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise ValueError(\"transformation_matrix should be square. Got \" +\n \"[{} x {}] rectangular matrix.\".format(*transformation_matrix.size()))\n\n if mean_vector.size(0) != transformation_matrix.size(0):\n raise ValueError(\"mean_vector should have the same length {}\".format(mean_vector.size(0)) +\n \" as any one of the dimensions of the transformation_matrix [{}]\"\n .format(tuple(transformation_matrix.size())))\n\n if transformation_matrix.device != mean_vector.device:\n raise ValueError(\"Input tensors should be on the same device. 
Got {} and {}\"\n .format(transformation_matrix.device, mean_vector.device))\n\n self.transformation_matrix = transformation_matrix\n self.mean_vector = mean_vector\n\n def forward(self, tensor: Tensor) -> Tensor:\n \"\"\"\n Args:\n tensor (Tensor): Tensor image to be whitened.\n\n Returns:\n Tensor: Transformed image.\n \"\"\"\n shape = tensor.shape\n n = shape[-3] * shape[-2] * shape[-1]\n if n != self.transformation_matrix.shape[0]:\n raise ValueError(\"Input tensor and transformation matrix have incompatible shape.\" +\n \"[{} x {} x {}] != \".format(shape[-3], shape[-2], shape[-1]) +\n \"{}\".format(self.transformation_matrix.shape[0]))\n\n if tensor.device.type != self.mean_vector.device.type:\n raise ValueError(\"Input tensor should be on the same device as transformation matrix and mean vector. \"\n \"Got {} vs {}\".format(tensor.device, self.mean_vector.device))\n\n flat_tensor = tensor.view(-1, n) - self.mean_vector\n transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n tensor = transformed_tensor.view(shape)\n return tensor\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '(transformation_matrix='\n format_string += (str(self.transformation_matrix.tolist()) + ')')\n format_string += (\", (mean_vector=\" + str(self.mean_vector.tolist()) + ')')\n return format_string\n\n\nclass ColorJitter(torch.nn.Module):\n \"\"\"Randomly change the brightness, contrast, saturation and hue of an image.\n If the image is torch Tensor, it is expected\n to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, mode \"1\", \"L\", \"I\", \"F\" and modes with transparency (alpha channel) are not supported.\n\n Args:\n brightness (float or tuple of float (min, max)): How much to jitter brightness.\n brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]\n or the given [min, max]. Should be non negative numbers.\n contrast (float or tuple of float (min, max)): How much to jitter contrast.\n contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]\n or the given [min, max]. Should be non negative numbers.\n saturation (float or tuple of float (min, max)): How much to jitter saturation.\n saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]\n or the given [min, max]. 
Should be non-negative numbers.\n hue (float or tuple of float (min, max)): How much to jitter hue.\n hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].\n Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.\n \"\"\"\n\n def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):\n super().__init__()\n self.brightness = self._check_input(brightness, 'brightness')\n self.contrast = self._check_input(contrast, 'contrast')\n self.saturation = self._check_input(saturation, 'saturation')\n self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),\n clip_first_on_zero=False)\n\n @torch.jit.unused\n def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):\n if isinstance(value, numbers.Number):\n if value < 0:\n raise ValueError(\"If {} is a single number, it must be non-negative.\".format(name))\n value = [center - float(value), center + float(value)]\n if clip_first_on_zero:\n value[0] = max(value[0], 0.0)\n elif isinstance(value, (tuple, list)) and len(value) == 2:\n if not bound[0] <= value[0] <= value[1] <= bound[1]:\n raise ValueError(\"{} values should be between {}\".format(name, bound))\n else:\n raise TypeError(\"{} should be a single number or a list/tuple with length 2.\".format(name))\n\n # if value is 0 or (1., 1.) for brightness/contrast/saturation\n # or (0., 0.) for hue, do nothing\n if value[0] == value[1] == center:\n value = None\n return value\n\n @staticmethod\n def get_params(brightness: Optional[List[float]],\n contrast: Optional[List[float]],\n saturation: Optional[List[float]],\n hue: Optional[List[float]]\n ) -> Tuple[Tensor, Optional[float], Optional[float], Optional[float], Optional[float]]:\n \"\"\"Get the parameters for the randomized transform to be applied on image.\n\n Args:\n brightness (tuple of float (min, max), optional): The range from which the brightness_factor is chosen\n uniformly. Pass None to turn off the transformation.\n contrast (tuple of float (min, max), optional): The range from which the contrast_factor is chosen\n uniformly. Pass None to turn off the transformation.\n saturation (tuple of float (min, max), optional): The range from which the saturation_factor is chosen\n uniformly. 
Pass None to turn off the transformation.\n hue (tuple of float (min, max), optional): The range from which the hue_factor is chosen uniformly.\n Pass None to turn off the transformation.\n\n Returns:\n tuple: The parameters used to apply the randomized transform\n along with their random order.\n \"\"\"\n fn_idx = torch.randperm(4)\n\n b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1]))\n c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1]))\n s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1]))\n h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1]))\n\n return fn_idx, b, c, s, h\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Input image.\n\n Returns:\n PIL Image or Tensor: Color jittered image.\n \"\"\"\n fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \\\n self.get_params(self.brightness, self.contrast, self.saturation, self.hue)\n\n for fn_id in fn_idx:\n if fn_id == 0 and brightness_factor is not None:\n img = F.adjust_brightness(img, brightness_factor)\n elif fn_id == 1 and contrast_factor is not None:\n img = F.adjust_contrast(img, contrast_factor)\n elif fn_id == 2 and saturation_factor is not None:\n img = F.adjust_saturation(img, saturation_factor)\n elif fn_id == 3 and hue_factor is not None:\n img = F.adjust_hue(img, hue_factor)\n\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += 'brightness={0}'.format(self.brightness)\n format_string += ', contrast={0}'.format(self.contrast)\n format_string += ', saturation={0}'.format(self.saturation)\n format_string += ', hue={0})'.format(self.hue)\n return format_string\n\n\nclass RandomRotation(torch.nn.Module):\n \"\"\"Rotate the image by angle.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n degrees (sequence or number): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees).\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n expand (bool, optional): Optional expansion flag.\n If true, expands the output to make it large enough to hold the entire rotated image.\n If false or omitted, make the output image the same size as the input image.\n Note that the expand flag assumes rotation around the center and no translation.\n center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.\n Default is the center of the image.\n fill (sequence or number): Pixel fill value for the area outside the rotated\n image. Default is ``0``. If given a number, the value is used for all bands respectively.\n If input is PIL Image, the options is only available for ``Pillow>=5.2.0``.\n resample (int, optional): deprecated argument and will be removed since v0.10.0.\n Please use the ``interpolation`` parameter instead.\n\n .. 
_filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(\n self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None\n ):\n super().__init__()\n if resample is not None:\n warnings.warn(\n \"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead\"\n )\n interpolation = _interpolation_modes_from_int(resample)\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. \"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2, ))\n\n if center is not None:\n _check_sequence_input(center, \"center\", req_sizes=(2, ))\n\n self.center = center\n\n self.resample = self.interpolation = interpolation\n self.expand = expand\n\n if fill is None:\n fill = 0\n elif not isinstance(fill, (Sequence, numbers.Number)):\n raise TypeError(\"Fill should be either a sequence or a number.\")\n\n self.fill = fill\n\n @staticmethod\n def get_params(degrees: List[float]) -> float:\n \"\"\"Get parameters for ``rotate`` for a random rotation.\n\n Returns:\n float: angle parameter to be passed to ``rotate`` for random rotation.\n \"\"\"\n angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())\n return angle\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be rotated.\n\n Returns:\n PIL Image or Tensor: Rotated image.\n \"\"\"\n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F._get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n angle = self.get_params(self.degrees)\n\n return F.rotate(img, angle, self.resample, self.expand, self.center, fill)\n\n def __repr__(self):\n interpolate_str = self.interpolation.value\n format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)\n format_string += ', interpolation={0}'.format(interpolate_str)\n format_string += ', expand={0}'.format(self.expand)\n if self.center is not None:\n format_string += ', center={0}'.format(self.center)\n if self.fill is not None:\n format_string += ', fill={0}'.format(self.fill)\n format_string += ')'\n return format_string\n\n\nclass RandomAffine(torch.nn.Module):\n \"\"\"Random affine transformation of the image keeping center invariant.\n If the image is torch Tensor, it is expected\n to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n degrees (sequence or number): Range of degrees to select from.\n If degrees is a number instead of sequence like (min, max), the range of degrees\n will be (-degrees, +degrees). Set to 0 to deactivate rotations.\n translate (tuple, optional): tuple of maximum absolute fraction for horizontal\n and vertical translations. For example translate=(a, b), then horizontal shift\n is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is\n randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.\n scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is\n randomly sampled from the range a <= scale <= b. 
Will keep original scale by default.\n shear (sequence or number, optional): Range of degrees to select from.\n If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)\n will be applied. Else if shear is a sequence of 2 values a shear parallel to the x axis in the\n range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values,\n a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.\n Will not apply shear by default.\n interpolation (InterpolationMode): Desired interpolation enum defined by\n :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.\n If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.\n For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.\n fill (sequence or number): Pixel fill value for the area outside the transformed\n image. Default is ``0``. If given a number, the value is used for all bands respectively.\n If input is PIL Image, the options is only available for ``Pillow>=5.0.0``.\n fillcolor (sequence or number, optional): deprecated argument and will be removed since v0.10.0.\n Please use the ``fill`` parameter instead.\n resample (int, optional): deprecated argument and will be removed since v0.10.0.\n Please use the ``interpolation`` parameter instead.\n\n .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters\n\n \"\"\"\n\n def __init__(\n self, degrees, translate=None, scale=None, shear=None, interpolation=InterpolationMode.NEAREST, fill=0,\n fillcolor=None, resample=None\n ):\n super().__init__()\n if resample is not None:\n warnings.warn(\n \"Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead\"\n )\n interpolation = _interpolation_modes_from_int(resample)\n\n # Backward compatibility with integer value\n if isinstance(interpolation, int):\n warnings.warn(\n \"Argument interpolation should be of type InterpolationMode instead of int. \"\n \"Please, use InterpolationMode enum.\"\n )\n interpolation = _interpolation_modes_from_int(interpolation)\n\n if fillcolor is not None:\n warnings.warn(\n \"Argument fillcolor is deprecated and will be removed since v0.10.0. 
Please, use fill instead\"\n )\n fill = fillcolor\n\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2, ))\n\n if translate is not None:\n _check_sequence_input(translate, \"translate\", req_sizes=(2, ))\n for t in translate:\n if not (0.0 <= t <= 1.0):\n raise ValueError(\"translation values should be between 0 and 1\")\n self.translate = translate\n\n if scale is not None:\n _check_sequence_input(scale, \"scale\", req_sizes=(2, ))\n for s in scale:\n if s <= 0:\n raise ValueError(\"scale values should be positive\")\n self.scale = scale\n\n if shear is not None:\n self.shear = _setup_angle(shear, name=\"shear\", req_sizes=(2, 4))\n else:\n self.shear = shear\n\n self.resample = self.interpolation = interpolation\n\n if fill is None:\n fill = 0\n elif not isinstance(fill, (Sequence, numbers.Number)):\n raise TypeError(\"Fill should be either a sequence or a number.\")\n\n self.fillcolor = self.fill = fill\n\n @staticmethod\n def get_params(\n degrees: List[float],\n translate: Optional[List[float]],\n scale_ranges: Optional[List[float]],\n shears: Optional[List[float]],\n img_size: List[int]\n ) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]:\n \"\"\"Get parameters for affine transformation\n\n Returns:\n params to be passed to the affine transformation\n \"\"\"\n angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())\n if translate is not None:\n max_dx = float(translate[0] * img_size[0])\n max_dy = float(translate[1] * img_size[1])\n tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))\n ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))\n translations = (tx, ty)\n else:\n translations = (0, 0)\n\n if scale_ranges is not None:\n scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item())\n else:\n scale = 1.0\n\n shear_x = shear_y = 0.0\n if shears is not None:\n shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item())\n if len(shears) == 4:\n shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item())\n\n shear = (shear_x, shear_y)\n\n return angle, translations, scale, shear\n\n def forward(self, img):\n \"\"\"\n img (PIL Image or Tensor): Image to be transformed.\n\n Returns:\n PIL Image or Tensor: Affine transformed image.\n \"\"\"\n fill = self.fill\n if isinstance(img, Tensor):\n if isinstance(fill, (int, float)):\n fill = [float(fill)] * F._get_image_num_channels(img)\n else:\n fill = [float(f) for f in fill]\n\n img_size = F._get_image_size(img)\n\n ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size)\n\n return F.affine(img, *ret, interpolation=self.interpolation, fill=fill)\n\n def __repr__(self):\n s = '{name}(degrees={degrees}'\n if self.translate is not None:\n s += ', translate={translate}'\n if self.scale is not None:\n s += ', scale={scale}'\n if self.shear is not None:\n s += ', shear={shear}'\n if self.interpolation != InterpolationMode.NEAREST:\n s += ', interpolation={interpolation}'\n if self.fill != 0:\n s += ', fill={fill}'\n s += ')'\n d = dict(self.__dict__)\n d['interpolation'] = self.interpolation.value\n return s.format(name=self.__class__.__name__, **d)\n\n\nclass Grayscale(torch.nn.Module):\n \"\"\"Convert image to grayscale.\n If the image is torch Tensor, it is expected\n to have [..., 3, H, W] shape, where ... 
means an arbitrary number of leading dimensions\n\n Args:\n num_output_channels (int): (1 or 3) number of channels desired for output image\n\n Returns:\n PIL Image: Grayscale version of the input.\n\n - If ``num_output_channels == 1`` : returned image is single channel\n - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, num_output_channels=1):\n super().__init__()\n self.num_output_channels = num_output_channels\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Grayscaled image.\n \"\"\"\n return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)\n\n def __repr__(self):\n return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)\n\n\nclass RandomGrayscale(torch.nn.Module):\n \"\"\"Randomly convert image to grayscale with a probability of p (default 0.1).\n If the image is torch Tensor, it is expected\n to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions\n\n Args:\n p (float): probability that image should be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged\n with probability (1-p).\n - If input image is 1 channel: grayscale version is 1 channel\n - If input image is 3 channel: grayscale version is 3 channel with r == g == b\n\n \"\"\"\n\n def __init__(self, p=0.1):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be converted to grayscale.\n\n Returns:\n PIL Image or Tensor: Randomly grayscaled image.\n \"\"\"\n num_output_channels = F._get_image_num_channels(img)\n if torch.rand(1) < self.p:\n return F.rgb_to_grayscale(img, num_output_channels=num_output_channels)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={0})'.format(self.p)\n\n\nclass RandomErasing(torch.nn.Module):\n \"\"\"Randomly selects a rectangle region in a torch Tensor image and erases its pixels.\n This transform does not support PIL Image.\n 'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896\n\n Args:\n p: probability that the random erasing operation will be performed.\n scale: range of proportion of erased area against input image.\n ratio: range of aspect ratio of erased area.\n value: erasing value. Default is 0. If a single int, it is used to\n erase all pixels. If a tuple of length 3, it is used to erase\n R, G, B channels respectively.\n If a str of 'random', each pixel is erased with random values.\n inplace: boolean to make this transform inplace. 
Default set to False.\n\n Returns:\n Erased Image.\n\n Example:\n >>> transform = transforms.Compose([\n >>> transforms.RandomHorizontalFlip(),\n >>> transforms.ToTensor(),\n >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n >>> transforms.RandomErasing(),\n >>> ])\n \"\"\"\n\n def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):\n super().__init__()\n if not isinstance(value, (numbers.Number, str, tuple, list)):\n raise TypeError(\"Argument value should be either a number or str or a sequence\")\n if isinstance(value, str) and value != \"random\":\n raise ValueError(\"If value is str, it should be 'random'\")\n if not isinstance(scale, (tuple, list)):\n raise TypeError(\"Scale should be a sequence\")\n if not isinstance(ratio, (tuple, list)):\n raise TypeError(\"Ratio should be a sequence\")\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n if scale[0] < 0 or scale[1] > 1:\n raise ValueError(\"Scale should be between 0 and 1\")\n if p < 0 or p > 1:\n raise ValueError(\"Random erasing probability should be between 0 and 1\")\n\n self.p = p\n self.scale = scale\n self.ratio = ratio\n self.value = value\n self.inplace = inplace\n\n @staticmethod\n def get_params(\n img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None\n ) -> Tuple[int, int, int, int, Tensor]:\n \"\"\"Get parameters for ``erase`` for a random erasing.\n\n Args:\n img (Tensor): Tensor image to be erased.\n scale (sequence): range of proportion of erased area against input image.\n ratio (sequence): range of aspect ratio of erased area.\n value (list, optional): erasing value. If None, it is interpreted as \"random\"\n (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,\n i.e. 
``value[0]``.\n\n Returns:\n tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.\n \"\"\"\n img_c, img_h, img_w = img.shape[-3], img.shape[-2], img.shape[-1]\n area = img_h * img_w\n\n log_ratio = torch.log(torch.tensor(ratio))\n for _ in range(10):\n erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(log_ratio[0], log_ratio[1])\n ).item()\n\n h = int(round(math.sqrt(erase_area * aspect_ratio)))\n w = int(round(math.sqrt(erase_area / aspect_ratio)))\n if not (h < img_h and w < img_w):\n continue\n\n if value is None:\n v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()\n else:\n v = torch.tensor(value)[:, None, None]\n\n i = torch.randint(0, img_h - h + 1, size=(1, )).item()\n j = torch.randint(0, img_w - w + 1, size=(1, )).item()\n return i, j, h, w, v\n\n # Return original image\n return 0, 0, img_h, img_w, img\n\n def forward(self, img):\n \"\"\"\n Args:\n img (Tensor): Tensor image to be erased.\n\n Returns:\n img (Tensor): Erased Tensor image.\n \"\"\"\n if torch.rand(1) < self.p:\n\n # cast self.value to script acceptable type\n if isinstance(self.value, (int, float)):\n value = [self.value, ]\n elif isinstance(self.value, str):\n value = None\n elif isinstance(self.value, tuple):\n value = list(self.value)\n else:\n value = self.value\n\n if value is not None and not (len(value) in (1, img.shape[-3])):\n raise ValueError(\n \"If value is a sequence, it should have either a single value or \"\n \"{} (number of input channels)\".format(img.shape[-3])\n )\n\n x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)\n return F.erase(img, x, y, h, w, v, self.inplace)\n return img\n\n def __repr__(self):\n s = '(p={}, '.format(self.p)\n s += 'scale={}, '.format(self.scale)\n s += 'ratio={}, '.format(self.ratio)\n s += 'value={}, '.format(self.value)\n s += 'inplace={})'.format(self.inplace)\n return self.__class__.__name__ + s\n\n\nclass GaussianBlur(torch.nn.Module):\n \"\"\"Blurs image with randomly chosen Gaussian blur.\n If the image is torch Tensor, it is expected\n to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n kernel_size (int or sequence): Size of the Gaussian kernel.\n sigma (float or tuple of float (min, max)): Standard deviation to be used for\n creating kernel to perform blurring. If float, sigma is fixed. If it is tuple\n of float (min, max), sigma is chosen uniformly at random to lie in the\n given range.\n\n Returns:\n PIL Image or Tensor: Gaussian blurred version of the input image.\n\n \"\"\"\n\n def __init__(self, kernel_size, sigma=(0.1, 2.0)):\n super().__init__()\n self.kernel_size = _setup_size(kernel_size, \"Kernel size should be a tuple/list of two integers\")\n for ks in self.kernel_size:\n if ks <= 0 or ks % 2 == 0:\n raise ValueError(\"Kernel size value should be an odd and positive number.\")\n\n if isinstance(sigma, numbers.Number):\n if sigma <= 0:\n raise ValueError(\"If sigma is a single number, it must be positive.\")\n sigma = (sigma, sigma)\n elif isinstance(sigma, Sequence) and len(sigma) == 2:\n if not 0. 
< sigma[0] <= sigma[1]:\n raise ValueError(\"sigma values should be positive and of the form (min, max).\")\n else:\n raise ValueError(\"sigma should be a single number or a list/tuple with length 2.\")\n\n self.sigma = sigma\n\n @staticmethod\n def get_params(sigma_min: float, sigma_max: float) -> float:\n \"\"\"Choose sigma for random gaussian blurring.\n\n Args:\n sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel.\n sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel.\n\n Returns:\n float: Standard deviation to be passed to calculate kernel for gaussian blurring.\n \"\"\"\n return torch.empty(1).uniform_(sigma_min, sigma_max).item()\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"\n Args:\n img (PIL Image or Tensor): image to be blurred.\n\n Returns:\n PIL Image or Tensor: Gaussian blurred image\n \"\"\"\n sigma = self.get_params(self.sigma[0], self.sigma[1])\n return F.gaussian_blur(img, self.kernel_size, [sigma, sigma])\n\n def __repr__(self):\n s = '(kernel_size={}, '.format(self.kernel_size)\n s += 'sigma={})'.format(self.sigma)\n return self.__class__.__name__ + s\n\n\ndef _setup_size(size, error_msg):\n if isinstance(size, numbers.Number):\n return int(size), int(size)\n\n if isinstance(size, Sequence) and len(size) == 1:\n return size[0], size[0]\n\n if len(size) != 2:\n raise ValueError(error_msg)\n\n return size\n\n\ndef _check_sequence_input(x, name, req_sizes):\n msg = req_sizes[0] if len(req_sizes) < 2 else \" or \".join([str(s) for s in req_sizes])\n if not isinstance(x, Sequence):\n raise TypeError(\"{} should be a sequence of length {}.\".format(name, msg))\n if len(x) not in req_sizes:\n raise ValueError(\"{} should be a sequence of length {}.\".format(name, msg))\n\n\ndef _setup_angle(x, name, req_sizes=(2, )):\n if isinstance(x, numbers.Number):\n if x < 0:\n raise ValueError(\"If {} is a single number, it must be positive.\".format(name))\n x = [-x, x]\n else:\n _check_sequence_input(x, name, req_sizes)\n\n return [float(d) for d in x]\n\n\nclass RandomInvert(torch.nn.Module):\n \"\"\"Inverts the colors of the given image randomly with a given probability.\n If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,\n where ... means it can have an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n p (float): probability of the image being color inverted. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be inverted.\n\n Returns:\n PIL Image or Tensor: Randomly color inverted image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.invert(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomPosterize(torch.nn.Module):\n \"\"\"Posterize the image randomly with a given probability by reducing the\n number of bits for each color channel. If the image is torch Tensor, it should be of type torch.uint8,\n and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n bits (int): number of bits to keep for each channel (0-8)\n p (float): probability of the image being posterized. 
Default value is 0.5\n \"\"\"\n\n def __init__(self, bits, p=0.5):\n super().__init__()\n self.bits = bits\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be posterized.\n\n Returns:\n PIL Image or Tensor: Randomly posterized image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.posterize(img, self.bits)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(bits={},p={})'.format(self.bits, self.p)\n\n\nclass RandomSolarize(torch.nn.Module):\n \"\"\"Solarize the image randomly with a given probability by inverting all pixel\n values above a threshold. If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,\n where ... means it can have an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n threshold (float): all pixels equal to or above this value are inverted.\n p (float): probability of the image being solarized. Default value is 0.5\n \"\"\"\n\n def __init__(self, threshold, p=0.5):\n super().__init__()\n self.threshold = threshold\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be solarized.\n\n Returns:\n PIL Image or Tensor: Randomly solarized image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.solarize(img, self.threshold)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(threshold={},p={})'.format(self.threshold, self.p)\n\n\nclass RandomAdjustSharpness(torch.nn.Module):\n \"\"\"Adjust the sharpness of the image randomly with a given probability. If the image is torch Tensor,\n it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n\n Args:\n sharpness_factor (float): How much to adjust the sharpness. Can be\n any non-negative number. 0 gives a blurred image, 1 gives the\n original image while 2 increases the sharpness by a factor of 2.\n p (float): probability of the image being sharpened. Default value is 0.5\n \"\"\"\n\n def __init__(self, sharpness_factor, p=0.5):\n super().__init__()\n self.sharpness_factor = sharpness_factor\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be sharpened.\n\n Returns:\n PIL Image or Tensor: Randomly sharpened image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.adjust_sharpness(img, self.sharpness_factor)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(sharpness_factor={},p={})'.format(self.sharpness_factor, self.p)\n\n\nclass RandomAutocontrast(torch.nn.Module):\n \"\"\"Autocontrast the pixels of the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"L\" or \"RGB\".\n\n Args:\n p (float): probability of the image being autocontrasted. 
Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be autocontrasted.\n\n Returns:\n PIL Image or Tensor: Randomly autocontrasted image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.autocontrast(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nclass RandomEqualize(torch.nn.Module):\n \"\"\"Equalize the histogram of the given image randomly with a given probability.\n If the image is torch Tensor, it is expected\n to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.\n If img is PIL Image, it is expected to be in mode \"P\", \"L\" or \"RGB\".\n\n Args:\n p (float): probability of the image being equalized. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n super().__init__()\n self.p = p\n\n def forward(self, img):\n \"\"\"\n Args:\n img (PIL Image or Tensor): Image to be equalized.\n\n Returns:\n PIL Image or Tensor: Randomly equalized image.\n \"\"\"\n if torch.rand(1).item() < self.p:\n return F.equalize(img)\n return img\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n" ]
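# --- Usage sketch (illustrative; not part of the module above) ---
# A minimal, hedged example of chaining the transforms defined in this file.
# It assumes `Compose` is available from this module (it is referenced in the
# FiveCrop and RandomErasing docstring examples above), and it substitutes a
# synthetic CHW tensor for a real PIL image so the snippet is self-contained.
if __name__ == "__main__":
    img = torch.rand(3, 300, 400)  # synthetic image tensor with values in [0, 1]
    preprocess = Compose([
        Resize(256),      # match the smaller edge to 256
        CenterCrop(224),  # crop the central 224x224 patch
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    out = preprocess(img)
    print(out.shape)  # expected: torch.Size([3, 224, 224])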
[ [ "torch.mm", "torch.randint", "torch.empty", "torch.randperm", "torch.tensor", "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sourcery-ai-bot/streamlit
[ "cbfa69c8ec310a839148cfa4bac5697e6f392a79", "cbfa69c8ec310a839148cfa4bac5697e6f392a79" ]
[ "lib/streamlit/caching/hashing.py", "lib/streamlit/elements/image.py" ]
[ "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hashing for st.memo and st.singleton.\"\"\"\nimport collections\nimport functools\nimport hashlib\nimport inspect\nimport io\nimport os\nimport pickle\nimport sys\nimport tempfile\nimport threading\nimport unittest.mock\nimport weakref\nfrom typing import Any, Pattern, Optional, Dict, List\n\nfrom streamlit import type_util\nfrom streamlit import util\nfrom streamlit.logger import get_logger\nfrom streamlit.uploaded_file_manager import UploadedFile\nfrom .cache_errors import (\n CacheType,\n UnhashableTypeError,\n)\n\n_LOGGER = get_logger(__name__)\n\n\n# If a dataframe has more than this many rows, we consider it large and hash a sample.\n_PANDAS_ROWS_LARGE = 100000\n_PANDAS_SAMPLE_SIZE = 10000\n\n\n# Similar to dataframes, we also sample large numpy arrays.\n_NP_SIZE_LARGE = 1000000\n_NP_SAMPLE_SIZE = 100000\n\n\n# Arbitrary item to denote where we found a cycle in a hashed object.\n# This allows us to hash self-referencing lists, dictionaries, etc.\n_CYCLE_PLACEHOLDER = b\"streamlit-57R34ML17-hesamagicalponyflyingthroughthesky-CYCLE\"\n\n\ndef update_hash(val: Any, hasher, cache_type: CacheType) -> None:\n \"\"\"Updates a hashlib hasher with the hash of val.\n\n This is the main entrypoint to hashing.py.\n \"\"\"\n ch = _CacheFuncHasher(cache_type)\n ch.update(hasher, val)\n\n\nclass _HashStack:\n \"\"\"Stack of what has been hashed, for debug and circular reference detection.\n\n This internally keeps 1 stack per thread.\n\n Internally, this stores the ID of pushed objects rather than the objects\n themselves because otherwise the \"in\" operator inside __contains__ would\n fail for objects that don't return a boolean for \"==\" operator. 
For\n example, arr == 10 where arr is a NumPy array returns another NumPy array.\n This causes the \"in\" to crash since it expects a boolean.\n \"\"\"\n\n def __init__(self):\n self._stack: collections.OrderedDict[int, List[Any]] = collections.OrderedDict()\n\n def __repr__(self) -> str:\n return util.repr_(self)\n\n def push(self, val: Any):\n self._stack[id(val)] = val\n\n def pop(self):\n self._stack.popitem()\n\n def __contains__(self, val: Any):\n return id(val) in self._stack\n\n\nclass _HashStacks:\n \"\"\"Stacks of what has been hashed, with at most 1 stack per thread.\"\"\"\n\n def __init__(self):\n self._stacks: weakref.WeakKeyDictionary[\n threading.Thread, _HashStack\n ] = weakref.WeakKeyDictionary()\n\n def __repr__(self) -> str:\n return util.repr_(self)\n\n @property\n def current(self) -> _HashStack:\n current_thread = threading.current_thread()\n\n stack = self._stacks.get(current_thread, None)\n\n if stack is None:\n stack = _HashStack()\n self._stacks[current_thread] = stack\n\n return stack\n\n\nhash_stacks = _HashStacks()\n\n\ndef _int_to_bytes(i: int) -> bytes:\n num_bytes = (i.bit_length() + 8) // 8\n return i.to_bytes(num_bytes, \"little\", signed=True)\n\n\ndef _key(obj: Optional[Any]) -> Any:\n \"\"\"Return key for memoization.\"\"\"\n\n if obj is None:\n return None\n\n def is_simple(obj):\n return (\n isinstance(obj, bytes)\n or isinstance(obj, bytearray)\n or isinstance(obj, str)\n or isinstance(obj, float)\n or isinstance(obj, int)\n or isinstance(obj, bool)\n or obj is None\n )\n\n if is_simple(obj):\n return obj\n\n if isinstance(obj, tuple):\n if all(map(is_simple, obj)):\n return obj\n\n if isinstance(obj, list):\n if all(map(is_simple, obj)):\n return (\"__l\", tuple(obj))\n\n if (\n type_util.is_type(obj, \"pandas.core.frame.DataFrame\")\n or type_util.is_type(obj, \"numpy.ndarray\")\n or inspect.isbuiltin(obj)\n or inspect.isroutine(obj)\n or inspect.iscode(obj)\n ):\n return id(obj)\n\n return NoResult\n\n\nclass _CacheFuncHasher:\n \"\"\"A hasher that can hash objects with cycles.\"\"\"\n\n def __init__(self, cache_type: CacheType):\n self._hashes: Dict[Any, bytes] = {}\n\n # The number of the bytes in the hash.\n self.size = 0\n\n self.cache_type = cache_type\n\n def __repr__(self) -> str:\n return util.repr_(self)\n\n def to_bytes(self, obj: Any) -> bytes:\n \"\"\"Add memoization to _to_bytes and protect against cycles in data structures.\"\"\"\n tname = type(obj).__qualname__.encode()\n key = (tname, _key(obj))\n\n # Memoize if possible.\n if key[1] is not NoResult and key in self._hashes:\n return self._hashes[key]\n\n # Break recursive cycles.\n if obj in hash_stacks.current:\n return _CYCLE_PLACEHOLDER\n\n hash_stacks.current.push(obj)\n\n try:\n # Hash the input\n b = b\"%s:%s\" % (tname, self._to_bytes(obj))\n\n # Hmmm... It's possible that the size calculation is wrong. 
When we\n # call to_bytes inside _to_bytes things get double-counted.\n self.size += sys.getsizeof(b)\n\n if key[1] is not NoResult:\n self._hashes[key] = b\n\n finally:\n # In case an UnhashableTypeError (or other) error is thrown, clean up the\n # stack so we don't get false positives in future hashing calls\n hash_stacks.current.pop()\n\n return b\n\n def update(self, hasher, obj: Any) -> None:\n \"\"\"Update the provided hasher with the hash of an object.\"\"\"\n b = self.to_bytes(obj)\n hasher.update(b)\n\n def _to_bytes(self, obj: Any) -> bytes:\n \"\"\"Hash objects to bytes, including code with dependencies.\n\n Python's built-in `hash` does not produce consistent results across\n runs.\n \"\"\"\n\n if isinstance(obj, unittest.mock.Mock):\n # Mock objects can appear to be infinitely\n # deep, so we don't try to hash them at all.\n return self.to_bytes(id(obj))\n\n elif isinstance(obj, (bytes, bytearray)):\n return obj\n\n elif isinstance(obj, str):\n return obj.encode()\n\n elif isinstance(obj, float):\n return self.to_bytes(hash(obj))\n\n elif isinstance(obj, int):\n return _int_to_bytes(obj)\n\n elif isinstance(obj, (list, tuple)):\n h = hashlib.new(\"md5\")\n for item in obj:\n self.update(h, item)\n return h.digest()\n\n elif isinstance(obj, dict):\n h = hashlib.new(\"md5\")\n for item in obj.items():\n self.update(h, item)\n return h.digest()\n\n elif obj is None:\n return b\"0\"\n\n elif obj is True:\n return b\"1\"\n\n elif obj is False:\n return b\"0\"\n\n elif type_util.is_type(obj, \"pandas.core.frame.DataFrame\") or type_util.is_type(\n obj, \"pandas.core.series.Series\"\n ):\n import pandas as pd\n\n if len(obj) >= _PANDAS_ROWS_LARGE:\n obj = obj.sample(n=_PANDAS_SAMPLE_SIZE, random_state=0)\n try:\n return b\"%s\" % pd.util.hash_pandas_object(obj).sum()\n except TypeError:\n # Use pickle if pandas cannot hash the object, for example if\n # it contains unhashable objects.\n return b\"%s\" % pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)\n\n elif type_util.is_type(obj, \"numpy.ndarray\"):\n h = hashlib.new(\"md5\")\n self.update(h, obj.shape)\n\n if obj.size >= _NP_SIZE_LARGE:\n import numpy as np\n\n state = np.random.RandomState(0)\n obj = state.choice(obj.flat, size=_NP_SAMPLE_SIZE)\n\n self.update(h, obj.tobytes())\n return h.digest()\n\n elif inspect.isbuiltin(obj):\n return bytes(obj.__name__.encode())\n\n elif type_util.is_type(obj, \"builtins.mappingproxy\") or type_util.is_type(\n obj, \"builtins.dict_items\"\n ):\n return self.to_bytes(dict(obj))\n\n elif type_util.is_type(obj, \"builtins.getset_descriptor\"):\n return bytes(obj.__qualname__.encode())\n\n elif isinstance(obj, UploadedFile):\n # UploadedFile is a BytesIO (thus IOBase) but has a name.\n # It does not have a timestamp so this must come before\n # temporary files\n h = hashlib.new(\"md5\")\n self.update(h, obj.name)\n self.update(h, obj.tell())\n self.update(h, obj.getvalue())\n return h.digest()\n\n elif hasattr(obj, \"name\") and isinstance(\n obj, (io.IOBase, tempfile._TemporaryFileWrapper)\n ):\n # Hash files as name + last modification date + offset.\n # NB: we're using hasattr(\"name\") to differentiate between\n # on-disk and in-memory StringIO/BytesIO file representations.\n # That means that this condition must come *before* the next\n # condition, which just checks for StringIO/BytesIO.\n h = hashlib.new(\"md5\")\n obj_name = getattr(obj, \"name\", \"wonthappen\") # Just to appease MyPy.\n self.update(h, obj_name)\n self.update(h, os.path.getmtime(obj_name))\n self.update(h, obj.tell())\n 
return h.digest()\n\n elif isinstance(obj, Pattern):\n return self.to_bytes([obj.pattern, obj.flags])\n\n elif isinstance(obj, (io.StringIO, io.BytesIO)):\n # Hash in-memory StringIO/BytesIO by their full contents\n # and seek position.\n h = hashlib.new(\"md5\")\n self.update(h, obj.tell())\n self.update(h, obj.getvalue())\n return h.digest()\n\n elif type_util.is_type(obj, \"numpy.ufunc\"):\n # For numpy.remainder, this returns remainder.\n return bytes(obj.__name__.encode())\n\n elif inspect.ismodule(obj):\n # TODO: Figure out how to best show this kind of warning to the\n # user. In the meantime, show nothing. This scenario is too common,\n # so the current warning is quite annoying...\n # st.warning(('Streamlit does not support hashing modules. '\n # 'We did not hash `%s`.') % obj.__name__)\n # TODO: Hash more than just the name for internal modules.\n return self.to_bytes(obj.__name__)\n\n elif inspect.isclass(obj):\n # TODO: Figure out how to best show this kind of warning to the\n # user. In the meantime, show nothing. This scenario is too common,\n # (e.g. in every \"except\" statement) so the current warning is\n # quite annoying...\n # st.warning(('Streamlit does not support hashing classes. '\n # 'We did not hash `%s`.') % obj.__name__)\n # TODO: Hash more than just the name of classes.\n return self.to_bytes(obj.__name__)\n\n elif isinstance(obj, functools.partial):\n # The return value of functools.partial is not a plain function:\n # it's a callable object that remembers the original function plus\n # the values you pickled into it. So here we need to special-case it.\n h = hashlib.new(\"md5\")\n self.update(h, obj.args)\n self.update(h, obj.func)\n self.update(h, obj.keywords)\n return h.digest()\n\n else:\n # As a last resort, hash the output of the object's __reduce__ method\n h = hashlib.new(\"md5\")\n try:\n reduce_data = obj.__reduce__()\n except BaseException as e:\n raise UnhashableTypeError() from e\n\n for item in reduce_data:\n self.update(h, item)\n return h.digest()\n\n\nclass NoResult:\n \"\"\"Placeholder class for return values when None is meaningful.\"\"\"\n\n pass\n", "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Image marshalling.\"\"\"\n\nimport imghdr\nimport io\nimport mimetypes\nfrom typing import cast\nfrom urllib.parse import urlparse\nimport re\n\nimport numpy as np\nfrom PIL import Image, ImageFile\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.logger import get_logger\nfrom streamlit.in_memory_file_manager import in_memory_file_manager\nfrom streamlit.proto.Image_pb2 import ImageList as ImageListProto\n\nLOGGER = get_logger(__name__)\n\n# This constant is related to the frontend maximum content width specified\n# in App.jsx main container\n# 730 is the max width of element-container in the frontend, and 2x is for high\n# DPI.\nMAXIMUM_CONTENT_WIDTH = 2 * 730\n\n\nclass ImageMixin:\n def image(\n self,\n image,\n caption=None,\n width=None,\n 
use_column_width=None,\n clamp=False,\n channels=\"RGB\",\n output_format=\"auto\",\n ):\n \"\"\"Display an image or list of images.\n\n Parameters\n ----------\n image : numpy.ndarray, [numpy.ndarray], BytesIO, str, or [str]\n Monochrome image of shape (w,h) or (w,h,1)\n OR a color image of shape (w,h,3)\n OR an RGBA image of shape (w,h,4)\n OR a URL to fetch the image from\n OR a path of a local image file\n OR an SVG XML string like `<svg xmlns=...</svg>`\n OR a list of one of the above, to display multiple images.\n caption : str or list of str\n Image caption. If displaying multiple images, caption should be a\n list of captions (one for each image).\n width : int or None\n Image width. None means use the image width,\n but do not exceed the width of the column.\n Should be set for SVG images, as they have no default image width.\n use_column_width : 'auto' or 'always' or 'never' or bool\n If 'auto', set the image's width to its natural size,\n but do not exceed the width of the column.\n If 'always' or True, set the image's width to the column width.\n If 'never' or False, set the image's width to its natural size.\n Note: if set, `use_column_width` takes precedence over the `width` parameter.\n clamp : bool\n Clamp image pixel values to a valid range ([0-255] per channel).\n This is only meaningful for byte array images; the parameter is\n ignored for image URLs. If this is not set, and an image has an\n out-of-range value, an error will be thrown.\n channels : 'RGB' or 'BGR'\n If image is an nd.array, this parameter denotes the format used to\n represent color information. Defaults to 'RGB', meaning\n `image[:, :, 0]` is the red channel, `image[:, :, 1]` is green, and\n `image[:, :, 2]` is blue. For images coming from libraries like\n OpenCV you should set this to 'BGR', instead.\n output_format : 'JPEG', 'PNG', or 'auto'\n This parameter specifies the format to use when transferring the\n image data. Photos should use the JPEG format for lossy compression\n while diagrams should use the PNG format for lossless compression.\n Defaults to 'auto' which identifies the compression type based\n on the type and format of the image argument.\n\n Example\n -------\n >>> from PIL import Image\n >>> image = Image.open('sunrise.jpg')\n >>>\n >>> st.image(image, caption='Sunrise by the mountains')\n\n .. 
output::\n https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/charts.image.py\n height: 710px\n\n \"\"\"\n\n if use_column_width == \"auto\" or (use_column_width is None and width is None):\n width = -3\n elif use_column_width in [\"always\", True]:\n width = -2\n elif width is None:\n width = -1\n elif width <= 0:\n raise StreamlitAPIException(\"Image width must be positive.\")\n\n image_list_proto = ImageListProto()\n marshall_images(\n self.dg._get_delta_path_str(),\n image,\n caption,\n width,\n image_list_proto,\n clamp,\n channels,\n output_format,\n )\n return self.dg._enqueue(\"imgs\", image_list_proto)\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n\n\ndef _image_may_have_alpha_channel(image):\n return image.mode in (\"RGBA\", \"LA\", \"P\")\n\n\ndef _format_from_image_type(image, output_format):\n output_format = output_format.upper()\n if output_format in [\"JPEG\", \"PNG\"]:\n return output_format\n\n # We are forgiving on the spelling of JPEG\n if output_format == \"JPG\":\n return \"JPEG\"\n\n if _image_may_have_alpha_channel(image):\n return \"PNG\"\n\n return \"JPEG\"\n\n\ndef _PIL_to_bytes(image, format=\"JPEG\", quality=100):\n tmp = io.BytesIO()\n\n # User must have specified JPEG, so we must convert it\n if format == \"JPEG\" and _image_may_have_alpha_channel(image):\n image = image.convert(\"RGB\")\n\n image.save(tmp, format=format, quality=quality)\n\n return tmp.getvalue()\n\n\ndef _BytesIO_to_bytes(data):\n data.seek(0)\n return data.getvalue()\n\n\ndef _np_array_to_bytes(array, output_format=\"JPEG\"):\n img = Image.fromarray(array.astype(np.uint8))\n format = _format_from_image_type(img, output_format)\n\n return _PIL_to_bytes(img, format)\n\n\ndef _4d_to_list_3d(array):\n return [array[i, :, :, :] for i in range(array.shape[0])]\n\n\ndef _verify_np_shape(array):\n if len(array.shape) not in (2, 3):\n raise StreamlitAPIException(\"Numpy shape has to be of length 2 or 3.\")\n if len(array.shape) == 3 and array.shape[-1] not in (1, 3, 4):\n raise StreamlitAPIException(\n \"Channel can only be 1, 3, or 4 got %d. 
Shape is %s\"\n % (array.shape[-1], str(array.shape))\n )\n\n # If there's only one channel, convert it to x, y\n if len(array.shape) == 3 and array.shape[-1] == 1:\n array = array[:, :, 0]\n\n return array\n\n\ndef _normalize_to_bytes(data, width, output_format):\n image = Image.open(io.BytesIO(data))\n actual_width, actual_height = image.size\n format = _format_from_image_type(image, output_format)\n if output_format.lower() == \"auto\":\n ext = imghdr.what(None, data)\n mimetype = mimetypes.guess_type(\"image.%s\" % ext)[0]\n # if no other options, attempt to convert\n if mimetype is None:\n mimetype = f\"image/{format.lower()}\"\n else:\n mimetype = f\"image/{format.lower()}\"\n\n if width < 0 and actual_width > MAXIMUM_CONTENT_WIDTH:\n width = MAXIMUM_CONTENT_WIDTH\n\n if width > 0 and actual_width > width:\n new_height = int(1.0 * actual_height * width / actual_width)\n image = image.resize((width, new_height), resample=Image.BILINEAR)\n data = _PIL_to_bytes(image, format=format, quality=90)\n mimetype = f\"image/{format.lower()}\"\n\n return data, mimetype\n\n\ndef _clip_image(image, clamp):\n data = image\n if issubclass(image.dtype.type, np.floating):\n if clamp:\n data = np.clip(image, 0, 1.0)\n elif np.amin(image) < 0.0 or np.amax(image) > 1.0:\n raise RuntimeError(\"Data is outside [0.0, 1.0] and clamp is not set.\")\n data = data * 255\n elif clamp:\n data = np.clip(image, 0, 255)\n elif np.amin(image) < 0 or np.amax(image) > 255:\n raise RuntimeError(\"Data is outside [0, 255] and clamp is not set.\")\n return data\n\n\ndef image_to_url(\n image, width, clamp, channels, output_format, image_id, allow_emoji=False\n):\n # PIL Images\n if isinstance(image, (ImageFile.ImageFile, Image.Image)):\n format = _format_from_image_type(image, output_format)\n data = _PIL_to_bytes(image, format)\n\n elif isinstance(image, io.BytesIO):\n data = _BytesIO_to_bytes(image)\n\n elif type(image) is np.ndarray:\n data = _verify_np_shape(image)\n data = _clip_image(data, clamp)\n\n if channels == \"BGR\":\n if len(data.shape) == 3:\n data = data[:, :, [2, 1, 0]]\n else:\n raise StreamlitAPIException(\n 'When using `channels=\"BGR\"`, the input image should '\n \"have exactly 3 color channels\"\n )\n\n data = _np_array_to_bytes(data, output_format=output_format)\n\n elif isinstance(image, str):\n # If it's a url, then set the protobuf and continue\n try:\n p = urlparse(image)\n if p.scheme:\n return image\n except UnicodeDecodeError:\n pass\n\n # Finally, see if it's a file.\n try:\n with open(image, \"rb\") as f:\n data = f.read()\n except:\n if allow_emoji:\n # This might be an emoji string, so just pass it to the frontend\n return image\n else:\n # Allow OS filesystem errors to raise\n raise\n\n else:\n data = image\n\n (data, mimetype) = _normalize_to_bytes(data, width, output_format)\n this_file = in_memory_file_manager.add(data, mimetype, image_id)\n return this_file.url\n\n\ndef marshall_images(\n coordinates,\n image,\n caption,\n width,\n proto_imgs,\n clamp,\n channels=\"RGB\",\n output_format=\"auto\",\n):\n channels = channels.upper()\n\n # Turn single image and caption into one element list.\n if type(image) is list:\n images = image\n elif type(image) == np.ndarray and len(image.shape) == 4:\n images = _4d_to_list_3d(image)\n else:\n images = [image]\n\n if type(caption) is list:\n captions = caption\n elif isinstance(caption, str):\n captions = [caption]\n elif type(caption) == np.ndarray and len(caption.shape) == 1:\n captions = caption.tolist()\n elif caption is None:\n captions = 
[None] * len(images)\n else:\n captions = [str(caption)]\n\n assert type(captions) == list, \"If image is a list then caption should be as well\"\n assert len(captions) == len(images), \"Cannot pair %d captions with %d images.\" % (\n len(captions),\n len(images),\n )\n\n proto_imgs.width = width\n # Each image in an image list needs to be kept track of at its own coordinates.\n for coord_suffix, (image, caption) in enumerate(zip(images, captions)):\n proto_img = proto_imgs.imgs.add()\n if caption is not None:\n proto_img.caption = str(caption)\n\n # We use the index of the image in the input image list to identify this image inside\n # InMemoryFileManager. For this, we just add the index to the image's \"coordinates\".\n image_id = \"%s-%i\" % (coordinates, coord_suffix)\n\n is_svg = False\n if isinstance(image, str):\n # Unpack local SVG image file to an SVG string\n if image.endswith(\".svg\") and not image.startswith((\"http://\", \"https://\")):\n with open(image) as textfile:\n image = textfile.read()\n\n # Following regex allows svg image files to start either via a \"<?xml...>\" tag eventually followed by a \"<svg...>\" tag or directly starting with a \"<svg>\" tag\n if re.search(r\"(^\\s?(<\\?xml[\\s\\S]*<svg )|^\\s?<svg )\", image):\n proto_img.markup = f\"data:image/svg+xml,{image}\"\n is_svg = True\n if not is_svg:\n proto_img.url = image_to_url(\n image, width, clamp, channels, output_format, image_id\n )\n" ]
[ [ "pandas.util.hash_pandas_object", "numpy.random.RandomState" ], [ "numpy.amin", "numpy.amax", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xxdreck/google-research
[ "dac724bc2b9362d65c26747a8754504fe4c615f8", "dac724bc2b9362d65c26747a8754504fe4c615f8", "7cee4b22b925581d912e8d993625c180da2a5a4f", "dac724bc2b9362d65c26747a8754504fe4c615f8", "7cee4b22b925581d912e8d993625c180da2a5a4f", "7cee4b22b925581d912e8d993625c180da2a5a4f", "7cee4b22b925581d912e8d993625c180da2a5a4f", "dac724bc2b9362d65c26747a8754504fe4c615f8", "7cee4b22b925581d912e8d993625c180da2a5a4f", "7cee4b22b925581d912e8d993625c180da2a5a4f", "7cee4b22b925581d912e8d993625c180da2a5a4f", "7cee4b22b925581d912e8d993625c180da2a5a4f", "7cee4b22b925581d912e8d993625c180da2a5a4f", "7cee4b22b925581d912e8d993625c180da2a5a4f" ]
[ "graph_embedding/simulations/sbm_simulator.py", "non_semantic_speech_benchmark/data_prep/augmentation.py", "basisnet/personalization/centralized_so_nwp/trainer_so.py", "dedal/models/encoders.py", "proxy_rewards/train.py", "pde_preconditioner/unet.py", "linear_eval/linear_eval_test.py", "aqt/jax/compute_cost_utils.py", "autoregressive_diffusion/experiments/language/input_pipeline.py", "latent_programmer/train_lib.py", "dedal/data/serialization.py", "autoregressive_diffusion/utils/util_fns.py", "blur/genome_util.py", "spin_spherical_cnns/spherical_mnist/spherical_mnist.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Library for stochastic block models (SBMs) with node features.\n\nSimulateSbm, SimulateFeatures, and SimulateEdgeFeatures are top-level library\nfunctions used by GenerateStochasticBlockModel in simulations.py. You can call\nthese separately to generate various parts of an SBM with features.\n\"\"\"\nimport collections\nimport dataclasses\nimport enum\nimport math\nimport random\nfrom typing import Dict, List, Optional, Sequence, Tuple\n\nimport graph_tool\nfrom graph_tool import generation\nimport networkx as nx\nimport numpy as np\n\nfrom graph_embedding.simulations import heterogeneous_sbm_utils as hsu\n\n# pylint: disable=g-explicit-length-test\n\n\nclass MatchType(enum.Enum):\n \"\"\"Indicates type of feature/graph membership matching to do.\n\n RANDOM: feature memberships are generated randomly.\n NESTED: for # feature groups >= # graph groups. Each feature cluster is a\n sub-cluster of a graph cluster. Multiplicity of sub-clusters per\n graph cluster is kept as uniform as possible.\n GROUPED: for # feature groups <= # graph groups. Each graph cluster is a\n sub-cluster of a feature cluster. Multiplicity of sub-clusters per\n feature cluster is kept as uniform as possible.\n \"\"\"\n RANDOM = 1\n NESTED = 2\n GROUPED = 3\n\n\[email protected]\nclass EdgeProbabilityProfile:\n \"\"\"Stores p-to-q ratios for Stochastic Block Model.\n\n Attributes:\n p_to_q_ratio1: Probability of in-cluster edges divided by probability of\n out-cluster edges, for type 1 nodes. If the SBM is homogeneous, this\n is the global p_to_q_ratio.\n p_to_q_ratio2: Probability of in-cluster edges divided by probability of\n out-cluster edges, for type 2 nodes.\n p_to_q_ratio_cross: Probability of in-cluster edges divided by probability\n of out-cluster edges, for node clusters that are linked across-type.\n \"\"\"\n p_to_q_ratio1: float = Ellipsis\n p_to_q_ratio2: Optional[float] = 0.0\n p_to_q_ratio_cross: Optional[float] = 0.0\n\n\[email protected]\nclass StochasticBlockModel:\n \"\"\"Stores data for stochastic block model (SBM) graphs with features.\n\n This class supports heterogeneous SBMs, in which each node is assumed to be\n exactly one of two types. In this model, the following extra fields are used:\n * type1_clusters: list of cluster indices for type 1 nodes. (For single-type\n graphs, this contains the list of all cluster indices.)\n * type2_clusters: list of cluster indices for type 2 nodes.\n * cross_links: tuples of cluster indices that are linked cross-type.\n * node_features2: features for type 2 nodes. (node_features1 is used as the\n sole feature field for single-type SBM.)\n\n Attributes:\n graph: graph-tool Graph object.\n graph_memberships: list of integer node classes.\n node_features1: numpy array of node features for nodes of type 1. Features\n for node with index i is in row i.\n node_features2: numpy array of node features for nodes of type 2. 
Features\n for node with index i is in row i - (# of nodes of type 1).\n feature_memberships: list of integer node feature classes.\n edge_features: map from edge tuple to numpy array. Only stores undirected\n edges, i.e. (0, 1) will be in the map, but (1, 0) will not be.\n cross_links: list of 2-tuples, each tuple a pair of cluster indices which\n are cross-correlated between the types. (i, j) included in this list means\n the i-th cluster from type 1 is correlated with the j-th cluster from type\n 2.\n type1_clusters: list of the indices of type 1 clusters.\n type2_clusters: list of the indices of type 2 clusters.\n cross_links: list of cluster index pairs, each pair coding that the clusters\n are linked across types.\n \"\"\"\n graph: graph_tool.Graph = Ellipsis\n graph_memberships: np.ndarray = Ellipsis\n node_features1: np.ndarray = Ellipsis\n node_features2: Optional[np.ndarray] = Ellipsis\n feature_memberships: np.ndarray = Ellipsis\n edge_features: Dict[Tuple[int, int], np.ndarray] = Ellipsis\n type1_clusters: Optional[List[int]] = Ellipsis\n type2_clusters: Optional[List[int]] = Ellipsis\n cross_links: Optional[List[Tuple[int, int]]] = Ellipsis\n\n\ndef _GetNestingMap(large_k, small_k):\n \"\"\"Given two group sizes, computes a \"nesting map\" between groups.\n\n This function will produce a bipartite map between two sets of \"group nodes\"\n that will be used downstream to partition nodes in a bigger graph. The map\n encodes which groups from the larger set are nested in certain groups from\n the smaller set.\n\n As currently implemented, nesting is assigned as evenly as possible. If\n large_k is an integer multiple of small_k, each smaller-set group will be\n mapped to exactly (large_k/small_k) larger-set groups. If there is a\n remainder r, the first r smaller-set groups will each have one extra nested\n larger-set group.\n\n\n Args:\n large_k: (int) size of the larger group set\n small_k: (int) size of the smaller group set\n\n Returns:\n nesting_map: (dict) map from larger group set indices to lists of\n smaller group set indices\n\n \"\"\"\n min_multiplicity = int(math.floor(large_k / small_k))\n max_bloated_group_index = large_k - small_k * min_multiplicity - 1\n nesting_map = collections.defaultdict(list)\n pos = 0\n for i in range(small_k):\n for _ in range(min_multiplicity + int(i <= max_bloated_group_index)):\n nesting_map[i].append(pos)\n pos += 1\n return nesting_map\n\n\ndef _GenerateFeatureMemberships(\n graph_memberships,\n num_groups = None,\n match_type = MatchType.RANDOM):\n \"\"\"Generates a feature membership assignment.\n\n Args:\n graph_memberships: (list) the integer memberships for the graph SBM\n num_groups: (int) number of groups. 
If None, defaults to number of unique\n values in graph_memberships.\n match_type: (MatchType) see the enum class description.\n\n Returns:\n memberships: an int list - index i contains feature group of node i.\n \"\"\"\n # Parameter checks\n if num_groups is not None and num_groups == 0:\n raise ValueError(\"argument num_groups must be None or positive\")\n graph_num_groups = len(set(graph_memberships))\n if num_groups is None:\n num_groups = graph_num_groups\n\n # Compute memberships\n memberships = []\n if match_type == MatchType.GROUPED:\n if num_groups > graph_num_groups:\n raise ValueError(\n \"for match type GROUPED, must have num_groups <= graph_num_groups\")\n nesting_map = _GetNestingMap(graph_num_groups, num_groups)\n # Creates deterministic map from (smaller) graph clusters to (larger)\n # feature clusters.\n reverse_nesting_map = {}\n for feature_cluster, graph_cluster_list in nesting_map.items():\n for cluster in graph_cluster_list:\n reverse_nesting_map[cluster] = feature_cluster\n for cluster in graph_memberships:\n memberships.append(reverse_nesting_map[cluster])\n elif match_type == MatchType.NESTED:\n if num_groups < graph_num_groups:\n raise ValueError(\n \"for match type NESTED, must have num_groups >= graph_num_groups\")\n nesting_map = _GetNestingMap(num_groups, graph_num_groups)\n # Creates deterministic map from (smaller) feature clusters to (larger)\n # graph clusters.\n for graph_cluster_id, feature_cluster_ids in nesting_map.items():\n sorted_feature_cluster_ids = sorted(feature_cluster_ids)\n num_feature_groups = len(sorted_feature_cluster_ids)\n feature_pi = np.ones(num_feature_groups) / num_feature_groups\n num_graph_cluster_nodes = np.sum(\n [i == graph_cluster_id for i in graph_memberships])\n sub_memberships = _GenerateNodeMemberships(num_graph_cluster_nodes,\n feature_pi)\n sub_memberships = [sorted_feature_cluster_ids[i] for i in sub_memberships]\n memberships.extend(sub_memberships)\n else: # MatchType.RANDOM\n memberships = random.choices(range(num_groups), k=len(graph_memberships))\n return np.array(sorted(memberships))\n\n\ndef _ComputeExpectedEdgeCounts(num_edges, num_vertices,\n pi,\n prop_mat):\n \"\"\"Computes expected edge counts within and between communities.\n\n Args:\n num_edges: expected number of edges in the graph.\n num_vertices: number of nodes in the graph\n pi: iterable of non-zero community size proportions. Must sum to 1.0, but\n this check is left to the caller of this internal function.\n prop_mat: square, symmetric matrix of community edge count rates. Entries\n must be non-negative, but this check is left to the caller.\n\n Returns:\n symmetric matrix with shape prop_mat.shape giving expected edge counts.\n \"\"\"\n scale = np.matmul(pi, np.matmul(prop_mat, pi)) * num_vertices**2\n prob_mat = prop_mat * num_edges / scale\n return np.outer(pi, pi) * prob_mat * num_vertices**2\n\n\ndef _ComputeCommunitySizes(num_vertices, pi):\n \"\"\"Helper function of GenerateNodeMemberships to compute group sizes.\n\n Args:\n num_vertices: number of nodes in graph.\n pi: iterable of non-zero community size proportions.\n\n Returns:\n community_sizes: np vector of group sizes. If num_vertices * pi[i] is a\n whole number (up to machine precision), community_sizes[i] will be that\n number. Otherwise, this function accounts for rounding errors by making\n group sizes as balanced as possible (i.e. 
increasing smallest groups by\n 1 or decreasing largest groups by 1 if needed).\n \"\"\"\n community_sizes = [int(x * num_vertices) for x in pi]\n if sum(community_sizes) != num_vertices:\n size_order = np.argsort(community_sizes)\n delta = sum(community_sizes) - num_vertices\n adjustment = np.sign(delta)\n if adjustment == 1:\n size_order = np.flip(size_order)\n for i in range(int(abs(delta))):\n community_sizes[size_order[i]] -= adjustment\n return community_sizes\n\n\ndef _GenerateNodeMemberships(num_vertices,\n pi):\n \"\"\"Gets node memberships for sbm.\n\n Args:\n num_vertices: number of nodes in graph.\n pi: iterable of non-zero community size proportions. Must sum to 1.0, but\n this check is left to the caller of this internal function.\n\n Returns:\n np vector of ints representing community indices.\n \"\"\"\n community_sizes = _ComputeCommunitySizes(num_vertices, pi)\n memberships = np.zeros(num_vertices, dtype=int)\n node = 0\n for i in range(len(pi)):\n memberships[range(node, node + community_sizes[i])] = i\n node += community_sizes[i]\n return memberships\n\n\ndef SimulateSbm(sbm_data,\n num_vertices,\n num_edges,\n pi,\n prop_mat,\n out_degs = None,\n num_vertices2 = 0,\n pi2 = None):\n \"\"\"Generates a stochastic block model, storing data in sbm_data.graph.\n\n This function uses graph_tool.generate_sbm. Refer to that\n documentation for more information on the model and parameters.\n\n This function can generate a heterogeneous SBM graph, meaning each node is\n exactly one of two types (and both types are present). To generate a\n heterogeneous SBM graph, both `num_vertices2` and `pi2` must be non-zero and\n supplied (respectively). When this happens, additional fields of `sbm_data`\n are filled. See the StochasticBlockModel dataclass for full details.\n\n Args:\n sbm_data: StochasticBlockModel dataclass to store result data.\n num_vertices: (int) number of nodes in the graph.\n num_edges: (int) expected number of edges in the graph.\n pi: iterable of non-zero community size proportions. Must sum to 1.0.\n prop_mat: square, symmetric matrix of community edge count rates.\n out_degs: Out-degree propensity for each node. If not provided, a constant\n value will be used. Note that the values will be normalized inside each\n group, if they are not already so.\n num_vertices2: If simulating a heterogeneous SBM, this is the number of\n vertices of type 2.\n pi2: If simulating a heterogeneous SBM, this is the pi vector for the\n vertices of type 2. Must sum to 1.0.\n Returns: (none)\n \"\"\"\n if ((num_vertices2 == 0 and pi2 is not None) or\n (num_vertices2 > 0 and pi2 is None)):\n raise ValueError(\n \"num_vertices2 and pi2 must be either both supplied or both None\")\n if num_vertices2 == 0:\n pi2 = []\n # Equivalent to assertAlmostEqual(np.sum(pi), 1.0, places=12)\n # https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertNotAlmostEqual\n #\n # Some leniency is required here because some theoretically-valid ways to\n # programmatically compute a simplex vector suffer from precision errors. One\n # example of this is in the simulate_sbm_community_sizes_seven_groups test\n # from sbm_simulator_test.py. 
Places>=12 covers known similar cases (to date).\n if round(abs(np.sum(pi) - 1.0), 12) != 0:\n raise ValueError(\"entries of pi must sum to 1.0\")\n if len(pi2) > 0 and round(abs(np.sum(pi2) - 1.0), 12) != 0:\n raise ValueError(\"entries of pi2 must sum to 1.0\")\n k1, k2 = len(pi), len(pi2)\n pi = np.array(list(pi) + list(pi2))\n pi /= np.sum(pi)\n if prop_mat.shape[0] != len(pi) or prop_mat.shape[1] != len(pi):\n raise ValueError(\"prop_mat must be k x k; k = len(pi1) + len(pi2)\")\n sbm_data.graph_memberships = _GenerateNodeMemberships(\n num_vertices + num_vertices2, pi)\n sbm_data.type1_clusters = sorted(list(set(sbm_data.graph_memberships)))\n if num_vertices2 > 0:\n sbm_data.cross_links = hsu.GetCrossLinks(k1, k2)\n type1_clusters, type2_clusters = zip(*sbm_data.cross_links)\n sbm_data.type1_clusters = sorted(list(set(type1_clusters)))\n sbm_data.type2_clusters = sorted(list(set(type2_clusters)))\n edge_counts = _ComputeExpectedEdgeCounts(\n num_edges, num_vertices + num_vertices2, pi, prop_mat)\n sbm_data.graph = generation.generate_sbm(sbm_data.graph_memberships,\n edge_counts, out_degs)\n graph_tool.stats.remove_self_loops(sbm_data.graph)\n graph_tool.stats.remove_parallel_edges(sbm_data.graph)\n sbm_data.graph.reindex_edges()\n\n\ndef _GetFeatureCenters(num_groups, center_var, feature_dim):\n \"\"\"Helper function to generate multivariate Normal feature centers.\n\n Args:\n num_groups: number of centers to generate.\n center_var: diagonal element of the covariance matrix (off-diagonals = 0).\n feature_dim: the dimension of each center.\n Returns:\n centers: numpy array with feature group centers as rows.\n \"\"\"\n centers = np.random.multivariate_normal(\n np.zeros(feature_dim), np.identity(feature_dim) * center_var,\n num_groups)\n return centers\n\n\ndef SimulateFeatures(sbm_data,\n center_var,\n feature_dim,\n num_groups = None,\n match_type = MatchType.RANDOM,\n cluster_var = 1.0,\n center_var2 = 0.0,\n feature_dim2 = 0,\n type_correlation = 0.0,\n type_center_var = 0.0):\n \"\"\"Generates node features using multivariate normal mixture model.\n\n This function does nothing and throws a warning if\n sbm_data.graph_memberships is empty. Run SimulateSbm to fill that field.\n\n Feature data is stored as an attribute of sbm_data named 'node_features1'.\n\n If the `type2_clusters` field in the input `sbm_data` is filled, this function\n produces node features for a heterogeneous SBM. Specifically:\n * Handling differing # graph clusters and # feature clusters is not\n implemented for heterogeneous SBMs. `num_groups` must equal the\n length of sbm_data.type1_clusters (raises RuntimeWarning if not).\n * The node_features{1,2} fields of the input sbm_data will store the features\n generated for type {1,2} nodes.\n\n Args:\n sbm_data: StochasticBlockModel dataclass to store result data.\n center_var: (float) variance of feature cluster centers. When this is 0.0,\n the signal-to-noise ratio is 0.0. When equal to cluster_var, SNR is 1.0.\n feature_dim: (int) dimension of the multivariate normal.\n num_groups: (int) number of centers. Generated by a multivariate normal with\n mean zero and covariance matrix center_var * I_{feature_dim}. This is\n ignored if the input sbm_data is heterogeneous. Feature cluster counts\n will be set equal to the graph cluster counts. 
If left as default (None),\n and input sbm_data is homogeneous, set to len(sbm_data.type1_clusters).\n match_type: (MatchType) see sbm_simulator.MatchType for details.\n cluster_var: (float) variance of feature clusters around their centers.\n center_var2: (float) center_var for nodes of type 2. Not needed if sbm_data\n is not heterogeneous (see above).\n feature_dim2: (int) feature_dim for nodes of type 2. Not needed if sbm_data\n is not heterogeneous (see above).\n type_correlation: (float) proportion of each cluster's center vector that\n is shared with other clusters linked across types. Not needed if sbm_data\n is not heterogeneous (see above).\n type_center_var: (float) center_var for center vectors that are shared with\n clusters linked across types. Not used if input sbm_data is not\n heterogeneous.\n\n Raises:\n RuntimeWarning:\n * if sbm_data has no graph, graph_memberships, or type1_clusters fields.\n * if len(sbm_data.type2_clusters) > 0 and sbm_data.cross_links is not a\n list.\n \"\"\"\n if sbm_data.graph is None or sbm_data.graph is Ellipsis:\n raise RuntimeWarning(\"No graph found: no features generated. \"\n \"Run SimulateSbm to generate a graph.\")\n if sbm_data.graph_memberships is None or sbm_data.graph_memberships is Ellipsis:\n raise RuntimeWarning(\"No graph_memberships found: no features generated. \"\n \"Run SimulateSbm to generate graph_memberships.\")\n if sbm_data.type1_clusters is None or sbm_data.type1_clusters is Ellipsis:\n raise RuntimeWarning(\"No type1_clusters found: no features generated. \"\n \"Run SimulateSbm to generate type1_clusters.\")\n if num_groups is None:\n num_groups = len(sbm_data.type1_clusters)\n centers = list(_GetFeatureCenters(num_groups, center_var, feature_dim))\n num_groups2 = (0 if sbm_data.type2_clusters is Ellipsis\n else len(sbm_data.type2_clusters))\n if num_groups2 > 0:\n # The SBM is heterogeneous. 
Check input and adjust variables.\n if not isinstance(sbm_data.cross_links, list):\n raise RuntimeWarning(\n (\"len(sbm_data.type2_clusters) > 0, implying heterogeneous SBM, but \"\n \"heterogeneous data `cross_links` is unfilled.\"))\n\n # Generate heterogeneous feature centers.\n centers += list(_GetFeatureCenters(num_groups2, center_var2, feature_dim2))\n correspondence_graph = nx.Graph()\n correspondence_graph.add_edges_from(sbm_data.cross_links)\n connected_components = list(\n nx.algorithms.connected_components(correspondence_graph))\n cross_type_feature_dim = min(feature_dim, feature_dim2)\n component_center_cov = np.identity(cross_type_feature_dim) * type_center_var\n for component in connected_components:\n component_center = np.random.multivariate_normal(\n np.zeros(cross_type_feature_dim), component_center_cov, 1)[0]\n for cluster_index in component:\n centers[cluster_index][:cross_type_feature_dim] = (\n component_center * type_correlation\n + centers[cluster_index][:cross_type_feature_dim] *\n (1 - type_correlation))\n\n # Get memberships\n sbm_data.feature_memberships = _GenerateFeatureMemberships(\n graph_memberships=sbm_data.graph_memberships,\n num_groups=num_groups,\n match_type=match_type)\n cluster_indices = sbm_data.feature_memberships\n if num_groups2 > 0:\n cluster_indices = sbm_data.graph_memberships\n\n features1 = []\n features2 = []\n cluster_cov1 = np.identity(feature_dim) * cluster_var\n cluster_cov2 = np.identity(feature_dim2) * cluster_var\n for cluster_index in cluster_indices:\n cluster_cov = cluster_cov1\n if num_groups2 > 0 and cluster_index in sbm_data.type2_clusters:\n cluster_cov = cluster_cov2\n feature = np.random.multivariate_normal(centers[cluster_index], cluster_cov,\n 1)[0]\n if cluster_index in sbm_data.type1_clusters:\n features1.append(feature)\n else:\n features2.append(feature)\n sbm_data.node_features1 = np.array(features1)\n if num_groups2 > 0:\n sbm_data.node_features2 = np.array(features2)\n\n\ndef SimulateEdgeFeatures(sbm_data,\n feature_dim,\n center_distance = 0.0,\n cluster_variance = 1.0):\n \"\"\"Generates edge feature distribution via inter-class vs intra-class.\n\n Edge feature data is stored as an sbm_data attribute named `edge_feature`, a\n dict from 2-tuples of node IDs to numpy vectors.\n\n Edge features have two centers: one at (0, 0, ....) and one at\n (center_distance, center_distance, ....) for inter-class and intra-class\n edges (respectively). They are generated from a multivariate normal with\n covariance matrix = cluster_variance * I_d.\n\n Requires non-None `graph` and `graph_memberships` attributes in sbm_data.\n Use SimulateSbm to generate them. Throws warning if either are None.\n\n Args:\n sbm_data: StochasticBlockModel dataclass to store result data.\n feature_dim: (int) dimension of the multivariate normal.\n center_distance: (float) per-dimension distance between the intra-class and\n inter-class means. 
Increasing this makes the edge feature signal stronger.\n cluster_variance: (float) variance of clusters around their centers.\n\n Raises:\n RuntimeWarning: if simulator has no graph or a graph with no nodes.\n \"\"\"\n if sbm_data.graph is None:\n raise RuntimeWarning(\"SbmSimulator has no graph: no features generated.\")\n if sbm_data.graph.num_vertices() == 0:\n raise RuntimeWarning(\"graph has no nodes: no features generated.\")\n if sbm_data.graph_memberships is None:\n raise RuntimeWarning(\"graph has no memberships: no features generated.\")\n\n center0 = np.zeros(shape=(feature_dim,))\n center1 = np.ones(shape=(feature_dim,)) * center_distance\n covariance = np.identity(feature_dim) * cluster_variance\n sbm_data.edge_features = {}\n for edge in sbm_data.graph.edges():\n vertex1 = int(edge.source())\n vertex2 = int(edge.target())\n edge_tuple = tuple(sorted((vertex1, vertex2)))\n if (sbm_data.graph_memberships[vertex1] ==\n sbm_data.graph_memberships[vertex2]):\n center = center1\n else:\n center = center0\n sbm_data.edge_features[edge_tuple] = np.random.multivariate_normal(\n center, covariance, 1)[0]\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Augmentation library for spec augment for keras transform.\n\"\"\"\n\nimport abc\nimport enum\n\nfrom lingvo.core import spectrum_augmenter\nimport tensorflow as tf # We want TF2.\n\n\nclass AugmentationMode(enum.Enum):\n \"\"\"The supported augmentation modes.\"\"\"\n DISABLED = 'disabled'\n TRAIN_ONLY = 'train_only'\n TEST_ONLY = 'test_only'\n TRAIN_AND_TEST = 'train_and_test'\n\n\nclass Augmentation(tf.keras.Model, abc.ABC):\n \"\"\"Abstract base class for augmentation.\"\"\"\n\n def __init__(self,\n augment_mode = AugmentationMode.TRAIN_ONLY):\n \"\"\"Builds Augmentation.\n\n Args:\n augment_mode: the augmentation mode.\n \"\"\"\n super().__init__()\n self.augment_mode = augment_mode\n\n def _should_augment(self, training = False):\n return (training and self.augment_mode in [\n AugmentationMode.TRAIN_ONLY, AugmentationMode.TRAIN_AND_TEST\n ]) or (not training and self.augment_mode in [\n AugmentationMode.TEST_ONLY, AugmentationMode.TRAIN_AND_TEST\n ])\n\n def call(self, inputs, training = False):\n if self._should_augment(training):\n return self.apply_augmentation(inputs)\n else:\n return inputs\n\n @abc.abstractmethod\n def apply_augmentation(self, inputs):\n pass\n\n\nclass SpecAugment(Augmentation):\n \"\"\"A wrapper around lingvo.core.spectrum_augmenter.SpectrumAugmenter.\n\n SpecAugment is a data augmentation that combines three transformations:\n - a time warping of up to max(time_warp_max_frames,\n time_warp_max_ratio*input_length) frames.\n - a masking of sampled frequencies with zeros along the entire time axis\n (freq_mask)\n - a masking of sampled timesteps with zeros along the entire frequency axis\n (time_mask)\n For the frequency mask, freq_mask_max_bins is the maximum number of\n consecutive frequency bins to be masked, freq_mask_count is the 
number of\n masks to apply to a signal. Same for time_mask.\n\n Note: SpecAugment takes mel spectrograms as input.\n \"\"\"\n\n def __init__(self,\n freq_mask_max_bins,\n freq_mask_count,\n time_mask_max_frames,\n time_mask_count,\n time_mask_max_ratio,\n time_warp_max_frames,\n time_warp_max_ratio,\n use_input_dependent_random_seed = True,\n augment_mode = AugmentationMode.TRAIN_ONLY):\n \"\"\"Builds SpecAugment.\n\n Args:\n freq_mask_max_bins: max number of consecutive mel bins to mask in a band.\n freq_mask_count: number of frequency bands to mask.\n time_mask_max_frames: max number of consecutive time frames to mask.\n time_mask_count: number of time bands to mask.\n time_mask_max_ratio: max time mask ratio.\n time_warp_max_frames: max number of time frames to warp.\n time_warp_max_ratio: max ratio of the time warp.\n use_input_dependent_random_seed: If true, uses stateless random TensorFlow\n ops, with seeds determined by the input features.\n augment_mode: the augmentation mode.\n \"\"\"\n super().__init__(augment_mode)\n spec_augment_params = spectrum_augmenter.SpectrumAugmenter.Params()\n spec_augment_params.freq_mask_max_bins = freq_mask_max_bins\n spec_augment_params.freq_mask_count = freq_mask_count\n spec_augment_params.time_mask_max_frames = time_mask_max_frames\n spec_augment_params.time_mask_count = time_mask_count\n spec_augment_params.time_warp_max_frames = time_warp_max_frames\n spec_augment_params.time_warp_max_ratio = time_warp_max_ratio\n spec_augment_params.time_mask_max_ratio = time_mask_max_ratio\n spec_augment_params.use_input_dependent_random_seed = (\n use_input_dependent_random_seed)\n spec_augment_params.name = 'SpecAugmentLayer'\n self._spec_augment_layer = spec_augment_params.Instantiate()\n\n def apply_augmentation(self, inputs):\n \"\"\"Performs SpecAugment on the inputs.\n\n Args:\n inputs: input mel spectrogram of shape (num_time_bins, num_freq_bins) or\n (batch_size, num_time_bins, num_freq_bins).\n\n Returns:\n Augmented mel spectrogram of shape (num_time_bins, num_freq_bins) or\n (batch_size, num_time_bins, num_freq_bins).\n \"\"\"\n if inputs.shape.ndims == 2:\n inputs = inputs[None, :, :, None]\n squeeze_axis = [0, 3]\n elif inputs.shape.ndims == 3:\n inputs = inputs[:, :, :, None]\n squeeze_axis = 3\n else:\n raise ValueError('Input shape must have 2 or 3 dimensions')\n\n outputs, _ = self._spec_augment_layer.FPropDefaultTheta(\n inputs=inputs,\n paddings=tf.zeros(tf.shape(inputs)[:2])\n )\n return tf.squeeze(outputs, axis=squeeze_axis)\n\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Runs Stackoverflow next word prediction.\n\nIt first learns a global model on the training clients, then adapts to each of\nthe test clients. 
It evaluates the personalized accuracy for each client on the\nindividual test sets.\n\"\"\"\nimport functools\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom basisnet.personalization.centralized_so_nwp import so_nwp_eval\nfrom basisnet.personalization.centralized_so_nwp import so_nwp_preprocessing\nfrom basisnet.personalization.centralized_so_nwp import stackoverflow_basis_models\n\n\n# Stack Overflow NWP flags\nflags.DEFINE_integer('so_nwp_vocab_size', 10000, 'Size of vocab to use.')\nflags.DEFINE_integer('so_nwp_num_oov_buckets', 1,\n 'Number of out of vocabulary buckets.')\nflags.DEFINE_integer('so_nwp_sequence_length', 20,\n 'Max sequence length to use.')\nflags.DEFINE_integer('so_nwp_max_elements_per_user', 1000, 'Max number of '\n 'training sentences to use per user.')\n\nflags.DEFINE_string('modeldir', '/tmp/basisnet/centralized_so_nwp',\n 'The dir for saving checkpoints and logs.')\n\nflags.DEFINE_integer(\n 'fine_tune_epoch', 20, 'number of epochs for fine-tuning '\n 'to use from test set for per-round validation.')\nflags.DEFINE_integer('max_num_ft_clients', 1000,\n 'number of clients for personalized evaluation.')\n\nflags.DEFINE_integer('num_basis', 1,\n 'number of bases to learn, 1 = original model.')\nflags.DEFINE_integer(\n 'num_lstm_units', -1,\n 'number of LSTM hidden size, -1 to use default value for each task.')\nflags.DEFINE_string('experiment_name', '',\n 'Experiment name string')\nflags.DEFINE_integer('fine_tune_batch_size', 20,\n 'Batch size for fine-tuning.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n tf.compat.v2.enable_v2_behavior()\n if len(argv) > 1:\n raise app.UsageError('Expected no command-line arguments, '\n 'got: {}'.format(argv))\n\n if 'debug' in FLAGS.experiment_name:\n total_iterations = 100\n else:\n total_iterations = 400000\n\n # Avoid a long line\n clientdata = tff.simulation.datasets.stackoverflow.load_data()\n (train_clientdata, valid_clientdata, _) = clientdata\n\n vocab = so_nwp_preprocessing.create_vocab(FLAGS.so_nwp_vocab_size)\n sample_client_ids = np.random.choice(valid_clientdata.client_ids,\n FLAGS.max_num_ft_clients)\n\n # id = 0 for global embedding\n ids = np.arange(\n len(train_clientdata.client_ids) + len(valid_clientdata.client_ids),\n dtype=np.int64) + 1\n str_ids = train_clientdata.client_ids + valid_clientdata.client_ids\n client_id_encodings = tf.lookup.StaticVocabularyTable(\n tf.lookup.KeyValueTensorInitializer(str_ids, ids),\n num_oov_buckets=FLAGS.so_nwp_num_oov_buckets)\n\n def to_embedding_id(client_id):\n return client_id_encodings.lookup(client_id)\n\n preprocess_fn = so_nwp_preprocessing.build_preprocess_fn(\n vocab,\n so_nwp_sequence_length=FLAGS.so_nwp_sequence_length,\n so_nwp_num_oov_buckets=FLAGS.so_nwp_num_oov_buckets,\n debug='debug' in FLAGS.experiment_name)\n\n train_dataset, val_dataset = so_nwp_preprocessing.create_centralized_datasets(\n preprocess_fn,\n to_embedding_id,\n sample_client_ids)\n\n special_tokens = so_nwp_preprocessing.get_special_tokens(\n vocab_size=FLAGS.so_nwp_vocab_size,\n num_oov_buckets=FLAGS.so_nwp_num_oov_buckets)\n\n pad_token = special_tokens.pad\n oov_tokens = special_tokens.oov\n eos_token = special_tokens.eos\n mask_vocab_id = [pad_token, eos_token] + oov_tokens\n\n # Create train set, split train set, test set for personalization.\n # Create train/test sets by dates.\n per_tuples_by_date = so_nwp_preprocessing.build_split_centralized_dataset(\n valid_clientdata,\n 
preprocess_fn,\n to_embedding_id,\n sample_client_ids,\n split_by='date')\n # Create train/test sets randomly.\n per_tuples_random = so_nwp_preprocessing.build_split_centralized_dataset(\n valid_clientdata,\n preprocess_fn,\n to_embedding_id,\n sample_client_ids,\n split_by='random')\n\n stackoverflow_models_fn = stackoverflow_basis_models.create_basis_recurrent_model\n model_builder = functools.partial(\n stackoverflow_models_fn,\n vocab_size=FLAGS.so_nwp_vocab_size,\n num_oov_buckets=FLAGS.so_nwp_num_oov_buckets,\n num_basis=FLAGS.num_basis)\n\n # Compile\n loss_builder = functools.partial(\n tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)\n\n metrics = [\n so_nwp_eval.SubsetInVocabAccuracy(\n name='mask_accuracy',\n non_vocabulary_classes=[pad_token, eos_token] + oov_tokens,\n masked_classes=[pad_token, eos_token])\n ]\n\n basisnet = model_builder()\n basisnet.summary()\n basisnet.compile(\n loss=loss_builder(), optimizer='adam', metrics=metrics)\n\n history = basisnet.fit(\n train_dataset, epochs=1, validation_data=val_dataset, verbose=1,\n steps_per_epoch=total_iterations,\n workers=16,\n use_multiprocessing=True)\n logging.info(history)\n\n if 'debug' not in FLAGS.experiment_name:\n basisnet.save_weights(\n FLAGS.modeldir+'/so_%s_basis_%d.ckpt' %\n (FLAGS.experiment_name, FLAGS.num_basis))\n\n # model_builder for the global embedding\n global_model_builder = functools.partial(\n stackoverflow_models_fn,\n vocab_size=FLAGS.so_nwp_vocab_size,\n num_oov_buckets=FLAGS.so_nwp_num_oov_buckets,\n num_basis=FLAGS.num_basis,\n global_embedding_only=True)\n\n # Personalization\n def online_evaluation(fix_basis=True):\n def _create_full_dataset_with_id(client_id):\n def add_id(x):\n x['client_id'] = 0\n return x\n\n # pylint: disable=protected-access\n client_ds = so_nwp_preprocessing.sort_by_date_pipe(\n valid_clientdata._create_dataset(client_id)).map(add_id)\n return client_ds\n\n all_clients_acc_before = []\n all_clients_acc = []\n for clnt_id in sample_client_ids:\n local_basisnet = model_builder()\n optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)\n\n local_basisnet.compile(\n optimizer=optimizer,\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=metrics)\n local_basisnet.set_weights(basisnet.get_weights())\n\n if fix_basis:\n # only fine-tune the embedding\n logging.info('Fix basis')\n for layer in local_basisnet.layers:\n if layer.name != 'client_embedding':\n layer.trainable = False\n\n ds = _create_full_dataset_with_id(clnt_id)\n ds = preprocess_fn(ds).unbatch().batch(FLAGS.fine_tune_batch_size)\n\n all_clients_acc_before.append(local_basisnet.evaluate(ds)[1])\n\n num_batches = so_nwp_preprocessing.count_batches(ds)\n\n all_val_acc = []\n for idx in range(1, num_batches):\n train_data_time, test_data_time = so_nwp_preprocessing.split_time(\n ds, idx)\n history = local_basisnet.fit(\n train_data_time,\n epochs=1,\n validation_data=test_data_time,\n verbose=0)\n all_val_acc.append(history.history['val_mask_accuracy'])\n all_clients_acc.append(np.mean(all_val_acc))\n\n logging.info(all_clients_acc_before)\n logging.info(np.mean(all_clients_acc_before))\n\n logging.info(all_clients_acc)\n logging.info(np.mean(all_clients_acc))\n\n logging.info('=====Start evaluation split by dates=====')\n so_nwp_eval.per_evaluation(\n basisnet,\n per_tuples_by_date,\n global_model_builder,\n model_builder,\n mask_vocab_id,\n fix_basis=FLAGS.num_basis > 1)\n logging.info('=====Start evaluation split by random=====')\n so_nwp_eval.per_evaluation(\n 
basisnet,\n per_tuples_random,\n global_model_builder,\n model_builder,\n mask_vocab_id,\n fix_basis=FLAGS.num_basis > 1)\n logging.info('=====Start online evaluation=====')\n online_evaluation(fix_basis=FLAGS.num_basis > 1)\n\nif __name__ == '__main__':\n app.run(main)\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Implements models to embed biological sequences as vector sequences.\"\"\"\n\nimport functools\nfrom typing import Optional, Tuple, Type\n\nimport gin\nimport tensorflow as tf\n\nfrom dedal import vocabulary\nfrom dedal.models import activations\nfrom dedal.models import initializers\n# TODO(oliviert): how to deal with this one?\nfrom tensorflow_models.official.nlp.modeling import layers as nlp_layers\n\n\[email protected]\nclass Encoder(tf.keras.Model):\n \"\"\"A generic sequence encoder.\"\"\"\n\n def __init__(self,\n vocab = None,\n mask_special_tokens = True,\n trainable = True,\n **kwargs):\n super().__init__(trainable=trainable, **kwargs)\n self._vocab = vocabulary.get_default() if vocab is None else vocab\n self._mask_special_tokens = mask_special_tokens\n\n def compute_mask(self,\n inputs,\n mask = None):\n \"\"\"Standard keras method.\"\"\"\n del mask\n mask = self._vocab.padding_mask(inputs)\n if self._mask_special_tokens:\n mask = tf.math.logical_and(mask, self._vocab.special_token_mask(inputs))\n return mask\n\n\[email protected]\nclass OneHotEncoder(Encoder):\n \"\"\"Embeds sequences using non-contextual, one-hot embeddings.\"\"\"\n\n def call(self, inputs):\n return tf.one_hot(inputs, len(self._vocab))\n\n\[email protected]\nclass LookupEncoder(Encoder):\n \"\"\"Encoder using a lookup.\"\"\"\n\n def __init__(\n self,\n emb_dim = 768,\n dropout = 0.0,\n use_layer_norm = False,\n use_positional_embedding = False,\n position_embed_init=initializers.HarmonicEmbeddings(\n scale_factor=1e-4, max_freq=1.0),\n train_position_embed = True,\n aaemb_init=tf.initializers.TruncatedNormal(stddev=1.0),\n aaemb_scale_factor = None,\n max_len = 1024,\n **kwargs):\n super().__init__(**kwargs)\n self._use_layer_norm = use_layer_norm\n\n if use_positional_embedding:\n self._positional_embedding = nlp_layers.PositionEmbedding(\n max_length=max_len,\n initializer=position_embed_init,\n trainable=train_position_embed,\n name='embeddings/positional')\n else:\n self._positional_embedding = None\n\n self._aa_embed = nlp_layers.OnDeviceEmbedding(\n vocab_size=len(self._vocab),\n embedding_width=emb_dim,\n initializer=aaemb_init,\n scale_factor=aaemb_scale_factor,\n name='embeddings/aminoacid')\n\n if use_layer_norm:\n self._layer_norm = tf.keras.layers.LayerNormalization(\n axis=-1, epsilon=1e-12, name='embeddings/layer_norm')\n else:\n self._layer_norm = None\n\n self._dropout = tf.keras.layers.Dropout(\n rate=dropout, name='embeddings/dropout')\n\n def call(self, inputs):\n embeddings = self._aa_embed(inputs)\n if self._positional_embedding is not None:\n pos_embeddings = self._positional_embedding(embeddings)\n 
embeddings += pos_embeddings\n if self._layer_norm is not None:\n embeddings = self._layer_norm(embeddings)\n embeddings = self._dropout(embeddings)\n return embeddings\n\n\[email protected]\nclass RecurrentEncoder(Encoder):\n """RNN based Encoder."""\n\n def __init__(\n self,\n emb_dim = 512,\n num_layers = 3,\n rnn_cls = tf.keras.layers.GRU,\n rnn_input_dropout = 0.0,\n rnn_recurrent_dropout = 0.0,\n causal = False,\n aaemb_init=tf.initializers.TruncatedNormal(stddev=1.0),\n kernel_init=tf.initializers.GlorotUniform(),\n recurrent_init=tf.initializers.Orthogonal(),\n aaemb_scale_factor = None,\n **kwargs):\n super().__init__(**kwargs)\n\n self._aaemb_layer = nlp_layers.OnDeviceEmbedding(\n vocab_size=len(self._vocab),\n embedding_width=emb_dim,\n initializer=aaemb_init,\n scale_factor=aaemb_scale_factor,\n name='embeddings/aminoacid')\n\n self._rnn_layers = []\n for i in range(num_layers):\n layer = rnn_cls(\n units=emb_dim,  # Constructor argument; this class has no `self.config`.\n kernel_initializer=kernel_init,\n recurrent_initializer=recurrent_init,\n dropout=rnn_input_dropout,\n recurrent_dropout=rnn_recurrent_dropout,\n return_sequences=True,\n name=f'RNN/layer_{i}')\n if not causal:\n layer = tf.keras.layers.Bidirectional(layer, name=f'BiRNN/layer_{i}')\n self._rnn_layers.append(layer)\n\n def call(self, inputs):\n embeddings = self._aaemb_layer(inputs)\n mask = self._vocab.padding_mask(inputs)\n for layer in self._rnn_layers:\n embeddings = layer(embeddings, mask=mask)\n return embeddings\n\n\[email protected]\nclass TransformerEncoder(Encoder):\n """Encoder with a transformer."""\n\n def __init__(\n self,\n emb_dim = 768,\n num_layers = 6,\n num_heads = 12,\n mlp_dim = 3072,\n mlp_act=activations.approximate_gelu,\n output_dropout = 0.1,\n attention_dropout = 0.1,\n mlp_dropout = 0.1,\n norm_first = True,\n norm_input = False,\n norm_output = True,\n causal = False,\n trainable_posemb = False,\n posemb_init=initializers.HarmonicEmbeddings(\n scale_factor=1e-4, max_freq=1.0),\n aaemb_init=tf.initializers.RandomNormal(stddev=1.0),\n kernel_init=tf.initializers.GlorotUniform(),\n aaemb_scale_factor = None,\n max_len = 1024,\n **kwargs):\n super().__init__(**kwargs)\n self._causal = causal\n self.posemb_layer = nlp_layers.PositionEmbedding(\n max_length=max_len,\n initializer=posemb_init,\n trainable=trainable_posemb,\n name='embeddings/positional')\n self.aaemb_layer = nlp_layers.OnDeviceEmbedding(\n vocab_size=len(self._vocab),\n embedding_width=emb_dim,\n initializer=aaemb_init,\n scale_factor=aaemb_scale_factor,\n name='embeddings/aminoacid')\n layer_norm_cls = functools.partial(\n tf.keras.layers.LayerNormalization, axis=-1, epsilon=1e-12)\n self._input_norm_layer = (\n layer_norm_cls(name='embeddings/layer_norm') if norm_input else None)\n self._output_norm_layer = (\n layer_norm_cls(name='output/layer_norm') if norm_output else None)\n self._dropout_layer = tf.keras.layers.Dropout(\n rate=output_dropout, name='embeddings/dropout')\n self._attention_mask = nlp_layers.SelfAttentionMask()\n self._transformer_layers = []\n for i in range(num_layers):\n self._transformer_layers.append(nlp_layers.TransformerEncoderBlock(\n num_attention_heads=num_heads,\n inner_dim=mlp_dim,\n inner_activation=mlp_act,\n output_dropout=output_dropout,\n attention_dropout=attention_dropout,\n inner_dropout=mlp_dropout,\n kernel_initializer=kernel_init,\n norm_first=norm_first,\n name=f'transformer/layer_{i}'))\n
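\n # Illustration (added, not from the original module): the causal mask\n # built in call() below is lower triangular. For a length-3 sequence,\n #   tf.range(3)[:, None] >= tf.range(3)[None, :]\n # evaluates to\n #   [[ True, False, False],\n #    [ True,  True, False],\n #    [ True,  True,  True]],\n # so position i may only attend to positions j <= i.\n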
\n def call(self, inputs):\n aa_embeddings = self.aaemb_layer(inputs)\n pos_embeddings = self.posemb_layer(aa_embeddings)\n embeddings = aa_embeddings + pos_embeddings\n if self._input_norm_layer is not None:\n embeddings = self._input_norm_layer(embeddings)  # pylint: disable=not-callable\n embeddings = self._dropout_layer(embeddings)\n\n mask = self._vocab.padding_mask(inputs)\n attention_mask = self._attention_mask(\n embeddings, tf.cast(mask, embeddings.dtype))\n if self._causal:\n attention_shape = tf.shape(attention_mask)\n len1, len2 = attention_shape[1], attention_shape[2]\n causal_mask = tf.range(len1)[:, None] >= tf.range(len2)[None, :]\n causal_mask = tf.cast(tf.expand_dims(causal_mask, 0), embeddings.dtype)\n attention_mask *= causal_mask\n\n for layer in self._transformer_layers:\n embeddings = layer((embeddings, attention_mask))\n\n if self._output_norm_layer is not None:\n embeddings = self._output_norm_layer(embeddings)\n\n return embeddings\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"""Experiment script for learning a proxy reward."""\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport pandas as pd\n\nfrom tensorflow.io import gfile\nfrom proxy_rewards import train_proxy\n\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer('nsteps', 1001, 'The number of steps to run training for.')\nflags.DEFINE_float('learning_rate', 1e-1, 'Hyperparameter: learning rate.')\nflags.DEFINE_float('tol', 1e-8,\n 'Hyperparameter: If grad norm less than tol, stop early.')\nflags.DEFINE_float('erm_weight', 1., 'Hyperparameter: Weight on ERM loss.')\nflags.DEFINE_float('bias_lamb', 0.,\n 'Hyperparameter: Weight on policy evaluation loss.')\nflags.DEFINE_integer('seed', 0, 'Random Seed for model initialization.')\nflags.DEFINE_integer('data_seed', 2, 'Random Seed for train/valid split.')\nflags.DEFINE_enum(\n name='bias_norm',\n default='max',\n enum_values=['max', 'l2'],\n help='Calculation of policy loss via max or weighted L2 norm over policies.'\n )\n\nDEFAULT_DATA_PATH = None\nDEFAULT_DATA_FILE = None\n\nflags.DEFINE_string('data_path', DEFAULT_DATA_PATH,\n 'Path to MovieLens Data')\nflags.DEFINE_string('data_file', DEFAULT_DATA_FILE,\n 'File name (in data_path) for the simulated interactions.')\nflags.DEFINE_string('simulation_dir', 'simulation_alt',\n 'Directory (in data_path) for simulation results')\nflags.DEFINE_string('embed_file', 'movielens_factorization.json',\n 'File name (in data_path) for embeddings')\n\n\ndef load_and_train():\n """Load data from file and return checkpoints from training."""\n simulation_path = f'{FLAGS.data_path}/{FLAGS.simulation_dir}'\n with gfile.GFile(f'{simulation_path}/{FLAGS.data_file}', 'r') as f:\n df = pd.read_csv(f)\n\n # Split this into train and validate\n rng = np.random.default_rng(FLAGS.data_seed)\n users = np.unique(df['user'])\n users = rng.permutation(users)\n\n n_users = users.shape[0]\n n_train_users = int(n_users / 2)\n\n users_train = users[:n_train_users]\n users_val = users[n_train_users:]\n assert users_val.shape[0] + users_train.shape[0] == n_users\n\n df_tr = 
df.query('user in @users_train').copy()\n df_val = df.query('user in @users_val').copy()\n\n a_tr = df_tr['rec'].to_numpy()\n m_tr = df_tr[['diversity', 'rating']].to_numpy()\n y_tr = df_tr['ltr'].to_numpy()\n t_tr = np.ones_like(a_tr)\n\n a_val = df_val['rec'].to_numpy()\n m_val = df_val[['diversity', 'rating']].to_numpy()\n y_val = df_val['ltr'].to_numpy()\n t_val = np.ones_like(a_val)\n\n model = train_proxy.LogisticReg()\n\n data_tr = {\n 'a': a_tr,\n 'm': m_tr,\n 'y': y_tr,\n 't': t_tr,\n }\n\n data_val = {\n 'a': a_val,\n 'm': m_val,\n 'y': y_val,\n 't': t_val,\n }\n\n init_params = train_proxy.initialize_params(\n model, mdim=2, seed=FLAGS.seed)\n\n loss_tr = train_proxy.make_loss_func(\n model, data_tr,\n erm_weight=FLAGS.erm_weight,\n bias_lamb=FLAGS.bias_lamb,\n bias_norm=FLAGS.bias_norm)\n loss_val = train_proxy.make_loss_func(\n model, data_val,\n erm_weight=FLAGS.erm_weight,\n bias_lamb=FLAGS.bias_lamb,\n bias_norm=FLAGS.bias_norm)\n\n _, checkpoints = train_proxy.train(\n loss_tr, init_params,\n validation_loss=loss_val,\n lr=FLAGS.learning_rate,\n nsteps=FLAGS.nsteps,\n tol=FLAGS.tol, verbose=True, log=True)\n\n return checkpoints\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: skip-file\nfrom absl import logging\nfrom jax import random\n\nfrom jax.experimental import stax\nfrom jax.experimental.stax import Dense, MaxPool, Relu, Flatten, LogSoftmax\nfrom jax.experimental import optimizers\nfrom flax import optim as flax_optim\nfrom . import equations\nfrom . import meshes\nfrom . import gmres\nfrom . 
import flax_cnn\nimport os\nimport functools\nimport jax\nimport flax\nfrom flax.deprecated import nn\nfrom jax import lax\nimport jax.numpy as np\nimport numpy as onp\nimport jax.ops\nfrom jax.tree_util import Partial\n\nrandn = stax.randn\nglorot = stax.glorot\n\n# Loss functions\n\n\[email protected](jax.vmap, in_axes=(None, None, None, None, 0, 0))\ndef losses_gmres_inf(preconditioner, params, n, new_matvec, x0, b):\n A = Partial(new_matvec)\n M = Partial(preconditioner, params)\n x_opt = gmres.gmres(A, b, x0, n=n, M=M)\n\n return np.linalg.norm(A(x_opt) - b, np.inf) * 1000 * x_opt.shape[0]\n\n\[email protected](jax.vmap, in_axes=(None, None, None, None, 0, 0))\ndef losses_gmres(preconditioner, params, n, new_matvec, x0, b):\n A = Partial(new_matvec)\n M = Partial(preconditioner, params)\n loss = gmres.gmres_training(A, b, x0, n=n, M=M)\n return loss * 10000000\n\[email protected](jax.vmap, in_axes=(None, None, None, None, 0, 0))\ndef losses_gmres_flax(preconditioner, model, n, new_matvec, x0, b):\n A = Partial(new_matvec)\n M = Partial(preconditioner, model)\n #loss = gmres.gmres_training(A, b, x0, n=n, M=M)\n x = gmres.gmres(A, b, x0, n=n, M=M)\n\n return np.linalg.norm(A(x) - b) * 10000000\n\n\[email protected](jax.jit, static_argnums=(0, 1, 2, 3))\ndef loss_gmres(preconditioner,\n n,\n shapeL,\n mesh,\n params,\n inputs,\n bs,\n x=0,\n k=0,\n aspect_ratio=1.0,\n **kwargs):\n if shapeL == 'R':\n new_matvec = lambda x: mesh.matvec_helmholtz(\n k, aspect_ratio, equations.make_mask, equations.make_mask_dual, x)\n elif shapeL == 'L':\n new_matvec = lambda x: mesh.matvec_helmholtz(\n k, aspect_ratio, equations.make_mask_L, equations.make_mask_L_dual, x)\n return np.mean(\n losses_gmres(preconditioner, params, n, new_matvec, inputs, bs))\n\n\[email protected](jax.jit, static_argnums=(0, 1, 2))\ndef loss_gmresR(preconditioner,\n n,\n mesh,\n params,\n inputs,\n bs,\n x=0,\n k=0,\n aspect_ratio=1.0,\n **kwargs):\n new_matvec = lambda y: mesh.matvec_helmholtz(\n k, aspect_ratio, equations.make_mask, equations.make_mask_dual, y)\n return np.mean(\n losses_gmres(preconditioner, params, n, new_matvec, inputs, bs))\n\[email protected](jax.jit, static_argnums=(0, 1, 2))\ndef loss_gmresR_flax(preconditioner,\n n,\n mesh,\n model,\n inputs,\n bs,\n x=0,\n k=0,\n aspect_ratio=1.0,\n **kwargs):\n new_matvec = lambda y: mesh.matvec_helmholtz(\n k, aspect_ratio, equations.make_mask, equations.make_mask_dual, y)\n return np.mean(\n losses_gmres_flax(preconditioner, model, n, new_matvec, inputs, bs))\n\n# CNN definition\n# Like the convolutions from stax, but without bias.\n\ndef GeneralUnbiasedConv(dimension_numbers,\n out_chan,\n filter_shape,\n strides=None,\n padding='SAME',\n W_init=None,\n b_init=randn(1e-6)):\n \"\"\"Layer construction function for a general convolution layer.\"\"\"\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n one = (1,) * len(filter_shape)\n strides = strides or one\n W_init = W_init or glorot(rhs_spec.index('O'), rhs_spec.index('I'))\n\n def init_fun(rng, input_shape):\n filter_shape_iter = iter(filter_shape)\n kernel_shape = [\n out_chan if c == 'O' else input_shape[lhs_spec.index('C')]\n if c == 'I' else next(filter_shape_iter) for c in rhs_spec\n ]\n output_shape = lax.conv_general_shape_tuple(input_shape, kernel_shape,\n strides, padding,\n dimension_numbers)\n W = W_init(rng, kernel_shape)\n return output_shape, W\n\n def apply_fun(params, inputs, **kwargs):\n W = params\n return lax.conv_general_dilated(inputs, W, strides, padding, one, one,\n 
dimension_numbers)\n\n return init_fun, apply_fun\n\n\nUnbiasedConv = functools.partial(GeneralUnbiasedConv, ('NHWC', 'HWIO', 'NHWC'))\n\n\ndef GeneralUnbiasedConvTranspose(dimension_numbers,\n out_chan,\n filter_shape,\n strides=None,\n padding='SAME',\n W_init=None,\n b_init=randn(1e-6)):\n \"\"\"Layer construction function for a general transposed-convolution layer.\"\"\"\n lhs_spec, rhs_spec, out_spec = dimension_numbers\n one = (1,) * len(filter_shape)\n strides = strides or one\n W_init = W_init or glorot(rhs_spec.index('O'), rhs_spec.index('I'))\n\n def init_fun(rng, input_shape):\n filter_shape_iter = iter(filter_shape)\n kernel_shape = [\n out_chan if c == 'O' else input_shape[lhs_spec.index('C')]\n if c == 'I' else next(filter_shape_iter) for c in rhs_spec\n ]\n output_shape = lax.conv_transpose_shape_tuple(input_shape, kernel_shape,\n strides, padding,\n dimension_numbers)\n W = W_init(rng, kernel_shape)\n return output_shape, W\n\n def apply_fun(params, inputs, **kwargs):\n W = params\n return lax.conv_transpose(\n inputs, W, strides, padding, dimension_numbers=dimension_numbers)\n\n return init_fun, apply_fun\n\n\nUnbiasedConvTranspose = functools.partial(GeneralUnbiasedConvTranspose,\n ('NHWC', 'HWIO', 'NHWC'))\n\n\ndef UNetBlock(filters, kernel_size, inner_block, **kwargs):\n\n def make_main(input_shape):\n return stax.serial(\n UnbiasedConv(filters, kernel_size, **kwargs),\n inner_block,\n UnbiasedConvTranspose(input_shape[3], kernel_size, **kwargs),\n )\n\n Main = stax.shape_dependent(make_main)\n return stax.serial(\n stax.FanOut(2), stax.parallel(Main, stax.Identity), stax.FanInSum)\n\n\nclass UNet:\n \"\"\"UNet that mimics 3-cycle V_cycle structure.\"\"\"\n\n def __init__(self,\n n=2**7 - 1,\n rng=None,\n channels=8,\n loss=loss_gmres,\n iter_gmres=lambda i: 10,\n training_iter=500,\n name='net',\n model_dir=None,\n lr=3e-4,\n k=0.0,\n n_test=10,\n beta1=0.9,\n beta2=0.999,\n lr_og = 3e-3,\n flaxd = False):\n self.n = n\n self.n_test = n_test\n self.mesh = meshes.Mesh(n)\n self.in_shape = (-1, n, n, 1)\n self.inner_channels = channels\n def itera(i):\n return onp.random.choice([5, 10, 10, 10, 10, 15, 15, 15, 20, 25])\n self.iter_gmres = itera\n self.training_iter = training_iter\n self.name = name\n self.k = k\n self.model_dir = model_dir\n if flaxd:\n self.test_loss = loss_gmresR_flax\n else:\n self.test_loss = loss_gmresR\n self.beta1 = beta1\n self.beta2 = beta2\n if rng is None:\n rng = random.PRNGKey(1)\n if not flaxd:\n self.net_init, self.net_apply = stax.serial(\n UNetBlock(\n 1, (3, 3),\n stax.serial(\n UnbiasedConv(self.inner_channels, (3, 3), padding='SAME'),\n UnbiasedConv(self.inner_channels, (3, 3), padding='SAME'),\n UNetBlock(\n self.inner_channels, (3, 3),\n stax.serial(\n UnbiasedConv(\n self.inner_channels, (3, 3), padding='SAME'),\n UnbiasedConv(\n self.inner_channels, (3, 3), padding='SAME'),\n UnbiasedConv(\n self.inner_channels, (3, 3), padding='SAME'),\n ),\n strides=(2, 2),\n padding='VALID'),\n UnbiasedConv(self.inner_channels, (3, 3), padding='SAME'),\n UnbiasedConv(self.inner_channels, (3, 3), padding='SAME'),\n ),\n strides=(2, 2),\n padding='VALID'),)\n out_shape, net_params = self.net_init(rng, self.in_shape)\n else:\n #import pdb;pdb.set_trace()\n model_def = flax_cnn.new_CNN.partial(\n inner_channels=self.inner_channels)\n out_shape, net_params = model_def.init_by_shape(\n rng,[(self.in_shape, np.float32)])\n self.model_def = model_def\n self.model = nn.Model(model_def, net_params)\n self.net_apply = lambda param, x: 
nn.Model(model_def,\n param)(x) #.reshape(self.in_shape))\n self.out_shape = out_shape\n self.net_params = net_params\n self.loss = loss\n self.lr_og = lr_og\n self.lr = lr\n if not flaxd:\n self.opt_init, self.opt_update, self.get_params = optimizers.adam(\n step_size=lambda i: np.where(i < 100, lr_og, lr), b1=beta1, b2=beta2)\n self.opt_state = self.opt_init(self.net_params)\n self.step = self.step_notflax\n\n if flaxd:\n self.step = self.step_flax\n self.optimizer = flax.optim.Adam(\n learning_rate= lr, beta1=beta1,\n beta2=beta2).create(self.model)\n #self.optimizer = flax.optim.Momentum(\n # learning_rate= lr, beta=beta1,\n # weight_decay=0, nesterov=False).create(self.model)\n self.alpha = lambda i: 0.0\n self.flaxd = flaxd\n if flaxd:\n self.preconditioner = self.preconditioner_flaxed\n else:\n self.preconditioner = self.preconditioner_unflaxed\n\n def preconditioner_unflaxed(self, params, x):\n return self.net_apply(params, x.reshape(1, self.n, self.n, 1)).ravel()\n\n def preconditioner_flaxed(self, model, x):\n return model(x.reshape(1, self.n, self.n, 1)).ravel()\n\n @functools.partial(jax.jit, static_argnums=(0,))\n def step_notflax(self, i, opt_state, batch, bs, solutions=None):\n params = self.get_params(opt_state)\n curr_loss, g = jax.value_and_grad(\n self.loss, argnums=3)(\n self.preconditioner,\n self.iter_gmres(i),\n self.mesh,\n params,\n batch,\n bs,\n self.alpha(i),\n self.k,\n solutions=solutions)\n return curr_loss, g, self.opt_update(i, g, opt_state)\n\n @functools.partial(jax.jit, static_argnums=(0,))\n def step_flax(self, i, optimizer, batch, bs, solutions=None):\n curr_loss, grad = jax.value_and_grad(\n self.loss, argnums=3)(\n self.preconditioner,\n self.iter_gmres(i),\n self.mesh,\n optimizer.target,\n batch,\n bs,\n self.alpha(i),\n self.k,\n solutions=solutions)\n optimizer = optimizer.apply_gradient(grad)\n return curr_loss, grad, optimizer\n\n def save(self, i=''):\n # Assumes a sibling `serialization` module providing save_params/load_params.\n from . import serialization\n if self.model_dir is None:\n serialization.save_params(self.name + 'params' + i, self.opt_params)\n else:\n serialization.save_params(self.model_dir + '/' + self.name + 'params' + i,\n self.opt_params)\n\n\n def load(self, i=''):\n from . import serialization  # See the assumption noted in save().\n directory = os.path.join(self.model_dir, self.name + 'params' + i)\n if not os.path.exists(directory):\n logging.info('still training')\n return 1\n self.opt_params = serialization.load_params(directory)\n if self.flaxd:\n self.model = nn.Model(self.model_def, self.opt_params)\n return 0\n\n\n def train(self,\n bs,\n solutions=[None],\n retrain=False,\n tensorboard_writer=None,\n work_unit=None):\n\n if not retrain and not self.flaxd:\n opt_state = self.opt_init(self.net_params)\n if retrain:\n opt_state = self.opt_init(self.opt_params)\n loss = onp.zeros(self.training_iter // 10 + 1)\n gradients = onp.zeros(self.training_iter // 10 + 1)\n if not self.flaxd:\n param = self.get_params(opt_state)\n else:\n param = self.optimizer.target\n opt_state = self.optimizer\n og_loss = self.test_loss(\n self.preconditioner, self.n_test, self.mesh,\n param, np.zeros(\n (bs.shape[1], self.n * self.n)),\n bs[0].reshape(bs.shape[1],\n self.n * self.n), 0, self.k) / 10000000\n print(og_loss)\n if work_unit is not None:\n work_unit.get_measurement_series(\n label='train/loss').create_measurement(\n objective_value=og_loss, step=0)\n for i in range(self.training_iter):\n m = bs.shape[0]\n order = random.shuffle(random.PRNGKey(i), np.arange(m))\n for _ in range(50):\n for b in bs[order]:\n current_loss, grad, opt_state = self.step(i,\n opt_state,\n np.zeros((b.shape[0],\n self.n * self.n)),\n 
b,\n solutions[min(m,\n len(solutions)\n - 1)])\n\n if i % 10 == 0:\n if not self.flaxd:\n param = self.get_params(opt_state)\n else:\n param = opt_state.target\n current_loss_test = self.test_loss(\n self.preconditioner, self.n_test, self.mesh, param, np.zeros(\n (b.shape[0], self.n * self.n)), b, 0, self.k) / 10000000\n current_loss = current_loss / 10000000\n # Average magnitude of the last leaf of the gradient pytree.\n avg_grad = onp.mean(onp.abs(jax.tree_util.tree_leaves(grad)[-1]))\n print(f'step{i: 5d}: loss { current_loss :1.5f} : avg_gradient \\\n { avg_grad :1.5f} : current_loss_test { current_loss_test :1.5f}')\n logging.info(f'step{i: 5d}: loss { current_loss :1.5f} : avg_gradient \\\n { avg_grad :1.5f} : current_loss_test { current_loss_test :1.5f}')\n loss[i // 10] = current_loss\n gradients[i // 10] = avg_grad\n if work_unit is not None:\n work_unit.get_measurement_series(\n label='train/loss').create_measurement(\n objective_value=current_loss_test, step=i)\n tensorboard_writer.scalar('train/loss', current_loss_test, step=i+1)\n work_unit.get_measurement_series(\n label='train/loss ' + str(self.iter_gmres(i))).create_measurement(\n objective_value=current_loss, step=i+1)\n tensorboard_writer.scalar(\n 'train/loss ' + str(self.iter_gmres(i)), current_loss, step=i+1)\n if i % 50 == 0:\n if self.flaxd:\n self.opt_params = opt_state.target.params\n else:\n self.opt_params = self.get_params(opt_state)\n self.save(str(i))\n if self.flaxd:\n self.optimizer = opt_state\n else:\n self.opt_params = self.get_params(opt_state)\n self.opt_state = opt_state\n if self.model_dir is None:\n self.model_dir = ''\n\n with open(os.path.join(self.model_dir, 'train_loss.np'), 'wb') as f:\n onp.save(f, loss)\n with open(os.path.join(self.model_dir, 'train_gradients.np'),\n 'wb') as f:\n onp.save(f, gradients)\n self.save()\n if work_unit is not None:\n tensorboard_writer.close()\n\n @functools.partial(jax.jit, static_argnums=(0,))\n def approximate_inverse(self, inputs):\n return self.net_apply(self.opt_params, inputs.reshape(1, self.n, self.n,\n 1)).reshape(-1)\n\n def print_layer_shape(self):\n print([array.shape for array in jax.tree_flatten(self.net_params)[0]])\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"""Tests for linear_eval."""\n\nfrom absl.testing import absltest\nimport jax\nfrom jax import numpy as jnp\nimport numpy as onp\nfrom sklearn import datasets\nfrom sklearn import linear_model\nfrom sklearn import model_selection\nfrom linear_eval import linear_eval\n\n\n# Copied from the FLAX tutorial.\ndef onehot(labels, num_classes, on_value=1.0, off_value=0.0):\n x = (labels[Ellipsis, None] == jnp.arange(num_classes)[None])\n x = jax.lax.select(\n x, jnp.full(x.shape, on_value), jnp.full(x.shape, off_value))\n return x.astype(jnp.float32)\n\n\ndef cross_entropy_loss(logits, labels, num_classes):\n return -jnp.sum(onehot(labels, num_classes=num_classes) *\n jax.nn.log_softmax(logits))\n\n\nclass LinearEvalTest(absltest.TestCase):\n\n def testTrainAndEvaluate(self):\n iris = 
datasets.load_iris()\n\n (train_embeddings, test_embeddings,\n train_labels, test_labels) = model_selection.train_test_split(\n iris.data[:, :2].astype(onp.float32), iris.target, test_size=0.25,\n random_state=0xdeadbeef)\n\n sklearn_logreg = linear_model.LogisticRegression(\n C=1e5, solver='lbfgs', multi_class='multinomial')\n sklearn_logreg.fit(train_embeddings, train_labels)\n sklearn_y_pred = sklearn_logreg.predict(test_embeddings)\n\n ((train_embeddings, train_labels),\n train_mask) = linear_eval.reshape_and_pad_data_for_devices(\n (train_embeddings, train_labels))\n ((test_embeddings, test_labels),\n test_mask) = linear_eval.reshape_and_pad_data_for_devices(\n (test_embeddings, test_labels))\n weights, biases, _ = linear_eval.train(\n train_embeddings, train_labels, train_mask, l2_regularization=1e-6)\n accuracy = linear_eval.evaluate(\n test_embeddings, test_labels, test_mask, weights, biases).astype(\n onp.float32)\n\n self.assertAlmostEqual(accuracy, onp.mean(sklearn_y_pred == test_labels),\n places=3)\n\n def testLossFunction(self):\n rng = onp.random.RandomState(1337)\n weights = rng.randn(10, 20)\n biases = rng.randn(20) / 4\n embeddings = rng.randn(100, 10)\n logits = embeddings.dot(weights) + biases[None, :]\n labels = onp.argmax(logits + onp.random.randn(*logits.shape), -1)\n mask = onp.ones((embeddings.shape[0],))\n\n params = linear_eval.weights_and_biases_to_params(weights, biases)\n loss = linear_eval.multinomial_logistic_loss(\n params, embeddings, labels, mask, 1, 0.0).astype(onp.float32)\n self.assertAlmostEqual(\n loss,\n cross_entropy_loss(logits, labels, weights.shape[-1]),\n places=3)\n\n\nif __name__ == '__main__':\n absltest.main()\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Estimation functions for compute costs of ML models.\"\"\"\n\nimport contextlib\nimport functools\nimport re\nfrom typing import Dict, Iterable, List, Optional, Tuple\nfrom absl import flags\nfrom jax._src.lax import lax\nimport numpy as onp\n\nfrom aqt.jax import hlo_utils\n\n# pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.compiler.xla.service import hlo_pb2\n# pylint: enable=g-direct-tensorflow-import\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_bool(\n 'metadata_enabled',\n default=False,\n help=('Whether to annotate quantization info in HLO metadata.'))\n\n# We intend to use quantization information to estimate the compute cost of\n# our ML model. However, currently JAX does not support transferring high-level\n# information from the model to its HLO representation, e.g. via metadata.\n# Therefore, using the context managers below, this info is appended to the name\n# of the op in the original primitives in lax via monkey-patching. 
After the\n# op is instantiated, the original value of the primitive is restored.\n# This is not a permanent solution.\n\n\n# Based on: https://jax.readthedocs.io/en/latest/jax.lax.html\nclass DotMetadataMonkeyPatch(contextlib.ContextDecorator):\n \"\"\"Context for passing quantization data to the dot operation.\"\"\"\n\n def __init__(self, *, lhs_prec, rhs_prec,\n rhs_is_weight):\n annotation = _quantization_annotation(lhs_prec, rhs_prec, rhs_is_weight)\n self._op_name = 'dot_general_quant' + annotation\n\n def __enter__(self):\n # pylint: disable=protected-access\n # The following primitive accepts a name argument which is passed into\n # the HLO metadata field. Here, it is the only argument changed from\n # the original lax implementation.\n self._dot_general_p_original = lax.dot_general_p\n lax.dot_general_p = lax.standard_primitive(\n shape_rule=lax._dot_general_shape_rule,\n dtype_rule=lax._dot_general_dtype_rule,\n name=self._op_name,\n translation_rule=lax._dot_general_translation_rule)\n lax.ad.defbilinear(lax.dot_general_p, lax._dot_general_transpose_lhs,\n lax._dot_general_transpose_rhs)\n lax.batching.primitive_batchers[\n lax.dot_general_p] = lax._dot_general_batch_rule\n lax.masking.masking_rules[lax.dot_general_p] = lax._dot_general_masking_rule\n # pylint: enable=protected-access\n\n def __exit__(self, *exc):\n # Restore original primitive\n lax.dot_general_p = self._dot_general_p_original\n\n\n# Based on: https://jax.readthedocs.io/en/latest/jax.lax.html\nclass ConvMetadataMonkeyPatch(contextlib.ContextDecorator):\n \"\"\"Context for passing quantization data to the conv operation.\"\"\"\n\n def __init__(self, *, weight_prec,\n act_prec):\n annotation = _quantization_annotation(\n act_prec, weight_prec, rhs_is_weight=True)\n self._op_name = 'conv_general_dilated_quant' + annotation\n\n def __enter__(self):\n # pylint: disable=protected-access\n self._conv_general_dilated_p_original = lax.conv_general_dilated_p\n # The following primitive accepts a name argument which is passed into\n # the HLO metadata field. 
Here, it is the only argument changed from\n # the original lax implementation.\n lax.conv_general_dilated_p = lax.standard_primitive(\n shape_rule=lax._conv_general_dilated_shape_rule,\n dtype_rule=lax._conv_general_dilated_dtype_rule,\n name=self._op_name,\n translation_rule=functools.partial(\n lax._conv_general_dilated_translation_rule,\n expand_complex_convolutions=False))\n lax.xla.register_translation(\n lax.conv_general_dilated_p,\n functools.partial(\n lax._conv_general_dilated_translation_rule,\n expand_complex_convolutions=True),\n platform='cpu')\n lax.xla.register_translation(\n lax.conv_general_dilated_p,\n functools.partial(\n lax._conv_general_dilated_translation_rule,\n expand_complex_convolutions=True),\n platform='gpu')\n lax.ad.defbilinear(lax.conv_general_dilated_p,\n lax._conv_general_dilated_transpose_lhs,\n lax._conv_general_dilated_transpose_rhs)\n lax.batching.primitive_batchers[\n lax.conv_general_dilated_p] = lax._conv_general_dilated_batch_rule\n lax.masking.masking_rules[\n lax.conv_general_dilated_p] = lax._conv_general_dilated_masking_rule\n # pylint: enable=protected-access\n\n def __exit__(self, *exc):\n # Restore original primitive\n lax.conv_general_dilated_p = self._conv_general_dilated_p_original\n\n\n# TODO(abdolrashidi): Add support for QuantOps.FloatQuant for cost estimation.\ndef _quantization_annotation(lhs_prec, rhs_prec,\n rhs_is_weight):\n \"\"\"Returns an annotation to be appended to the name of the quantizable op.\"\"\"\n bfloat16_prec = 'bf16'\n\n def _replace_with_bf16_if_prec_is_none(prec):\n return prec if prec is not None else bfloat16_prec\n\n lhs_prec = _replace_with_bf16_if_prec_is_none(lhs_prec)\n rhs_prec = _replace_with_bf16_if_prec_is_none(rhs_prec)\n\n quant_annotation = '_lhs{}_rhs{}_lw{}'.format(lhs_prec, rhs_prec,\n int(rhs_is_weight))\n return quant_annotation\n\n\ndef _find_lhs_shape(instr,\n computations):\n \"\"\"Find the lhs shape of an instruction in HLO computations.\"\"\"\n for computation in computations:\n for i in computation.instructions:\n # instr.operand_ids[0] contains the input shape.\n if i.id == instr.operand_ids[0]:\n # Input and output batch sizes must match\n assert i.shape.dimensions[0] == instr.shape.dimensions[0]\n return i.shape.dimensions\n return None\n\n\ndef _find_rhs_shape(instr,\n computations):\n \"\"\"Find the weight shape of an instruction in HLO computations.\"\"\"\n for computation in computations:\n for i in computation.instructions:\n # instr.operand_ids[1] contains the weight shape.\n if i.id == instr.operand_ids[1]:\n return i.shape.dimensions\n return None\n\n\ndef _estimate_weights(\n instr,\n computations):\n \"\"\"Estimate the number of weights in a conv or dot instruction.\"\"\"\n weight_shape = _find_rhs_shape(instr, computations)\n assert weight_shape is not None\n return onp.prod(weight_shape)\n\n\ndef _estimate_conv_mults(\n instr,\n computations):\n \"\"\"Estimate the number of multiplications in a convolution instruction.\"\"\"\n lhs_shape = _find_lhs_shape(instr, computations)\n assert lhs_shape is not None\n input_channels = lhs_shape[-1]\n kernel_size = onp.prod([dim.size for dim in instr.window.dimensions])\n output_conv_dims = [\n instr.shape.dimensions[i]\n for i in instr.convolution_dimension_numbers.output_spatial_dimensions\n ]\n output_image_size = onp.prod(output_conv_dims)\n output_channels = instr.shape.dimensions[\n instr.convolution_dimension_numbers.output_feature_dimension]\n return output_image_size * kernel_size * input_channels * output_channels\n\n\ndef 
_estimate_dot_mults(\n instr,\n computations):\n """Estimate the number of multiplications in a dot instruction."""\n lhs_shape = _find_lhs_shape(instr, computations)\n assert lhs_shape is not None\n input_channels = lhs_shape[-1]\n output_channels = instr.shape.dimensions[-1]\n return input_channels * output_channels\n\n\ndef _extract_quant_info(\n instr):\n """Extracts lhs and rhs quantization precision from op metadata."""\n if instr.opcode not in _get_supported_ops():\n raise NotImplementedError('Unexpected op detected')\n # If annotated, metadata.op_type has the following format:\n # '[original op name]_quant_lhs[lhs prec]_rhs[rhs prec]_lw[1 if rhs is weight]'\n if 'quant' not in instr.metadata.op_type:\n raise NotImplementedError('Unable to parse {}'.format(\n instr.metadata.op_type))\n [(lhs_prec_str, rhs_prec_str, rhs_is_weight_str)\n ] = re.findall('_lhs(.*)_rhs(.*)_lw(.*)', instr.metadata.op_type)\n\n def _extract_prec(prec_str):\n if prec_str.startswith('bf'):\n prec_str = prec_str[2:]\n if prec_str.isnumeric():\n prec = int(prec_str)\n else:\n raise NotImplementedError('Unable to parse {}'.format(\n instr.metadata.op_type))\n return prec\n\n lhs_prec = _extract_prec(lhs_prec_str)\n rhs_prec = _extract_prec(rhs_prec_str)\n rhs_is_weight = bool(int(rhs_is_weight_str))\n if lhs_prec <= 0 or rhs_prec <= 0:\n raise ValueError('HLO metadata precision annotation must be a positive '\n 'integer.')\n return lhs_prec, rhs_prec, rhs_is_weight\n\n\ndef _list_supported_ops_from_hlo(\n hlo_proto):\n """Gather and return a list of supported quantizable ops in the HLO."""\n supported_ops = _get_supported_ops()\n target_instructions = []\n computations = hlo_proto.computations\n for computation in computations:\n for instr in computation.instructions:\n if instr.opcode in supported_ops:\n target_instructions.append(instr)\n return target_instructions\n\n\ndef _get_supported_ops():\n """Get the supported ops for compute and memory cost estimation."""\n # Output dictionary key is HLO instruction opcode.\n # 'estimate_instr_mult': function used to estimate the number of matrix\n # multiplications.\n # 'estimate_instr_weights': function used to estimate the number of weights.\n return {\n 'convolution': {\n 'estimate_instr_mult': _estimate_conv_mults,\n 'estimate_instr_weights': _estimate_weights,\n },\n 'dot': {\n 'estimate_instr_mult': _estimate_dot_mults,\n 'estimate_instr_weights': _estimate_weights,\n },\n }\n
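\n\n# A minimal sketch (added for illustration; not part of the original module) of\n# how an annotation produced by _quantization_annotation() is parsed back by\n# _extract_quant_info(). For lhs_prec=8, rhs_prec=4 and rhs_is_weight=True the\n# op name becomes 'dot_general_quant_lhs8_rhs4_lw1', and\n#\n#   re.findall('_lhs(.*)_rhs(.*)_lw(.*)', 'dot_general_quant_lhs8_rhs4_lw1')\n#\n# returns [('8', '4', '1')], i.e. lhs_prec=8, rhs_prec=4, rhs_is_weight=True.\n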
\n\ndef estimate_compute_cost(\n hlo_proto):\n """Estimates the compute cost for the input HLO proto.\n\n Args:\n hlo_proto: the model's HLO representation (e.g. derived from\n hlo_utils.load_hlo_proto()). It contains all the instructions used in the\n model.\n\n Returns:\n a dictionary with the following key-value pairs:\n 'compute_cost': The sum of all compute costs for each layer\n 'compute_cost_ratio_to_bfloat16': The ratio of the estimated compute cost\n to the cost in the case of no quantization (bfloat16)\n 'compute_cost_linear': As 'compute_cost', but under a linear (rather\n than quadratic) cost model in the operand precisions\n 'compute_cost_ratio_to_bfloat16_linear': The corresponding ratio under\n the linear cost model\n\n """\n # To estimate the overall compute cost of ops, we should multiply the\n # number of multiplications by bits_weights * bits_acts in each layer.\n\n # Gather the supported quantizable ops and their quantization parameters\n target_instructions = _list_supported_ops_from_hlo(hlo_proto)\n\n # Begin compute cost calculation\n compute_cost_quadratic = 0\n compute_cost_linear = 0\n bfloat16_cost_quadratic = 0  # computed as reference for comparison purposes\n bfloat16_cost_linear = 0\n\n supported_ops = _get_supported_ops()\n for instr in target_instructions:\n # Estimate multiplications in the op\n opcode = instr.opcode\n multiplication_count = supported_ops[opcode]['estimate_instr_mult'](\n instr, hlo_proto.computations)\n\n # For the model's compute cost, we use the number of multiplications used\n # in the quantizable layers used in the model, which dominate the cost\n # compared to that of other operations used in them, such as additions.\n # Also, the cost of each multiplication is proportional to the number of\n # bits in each operand, which are subject to change due to quantization.\n # Therefore, we use the weight and activation precisions in our estimation\n # as well.\n\n bfloat16_lhs_prec = 16\n bfloat16_rhs_prec = 16\n\n lhs_prec, rhs_prec, _ = _extract_quant_info(instr)\n if lhs_prec > 16:\n raise ValueError(f'Unexpected lhs precision {lhs_prec}.')\n if rhs_prec > 16:\n raise ValueError(f'Unexpected rhs precision {rhs_prec}.')\n\n bfloat16_cost_quadratic += multiplication_count * (\n bfloat16_lhs_prec * bfloat16_rhs_prec)\n compute_cost_quadratic += multiplication_count * lhs_prec * rhs_prec\n\n # Nvidia A100 only supports 16x16, 8x8 and 4x4\n # Cost of 8x8 is half of 16x16\n # Cost of 4x4 is 1/4 of 16x16\n bfloat16_cost_linear += multiplication_count * max(\n bfloat16_lhs_prec, bfloat16_rhs_prec)\n\n prec = max(lhs_prec, rhs_prec)\n if prec <= 4:\n prec = 4\n elif prec <= 8:\n prec = 8\n elif prec <= 16:\n prec = 16\n\n compute_cost_linear += multiplication_count * prec\n # Return the results\n cost_ratio_to_bfloat16_quadratic = compute_cost_quadratic / bfloat16_cost_quadratic\n cost_ratio_to_bfloat16_linear = compute_cost_linear / bfloat16_cost_linear\n\n result_dict = {\n # quadratic compute cost. We didn't rename the keys to\n # compute_cost_quadratic to preserve backwards compatibility.\n 'compute_cost':\n float(compute_cost_quadratic),\n 'compute_cost_ratio_to_bfloat16':\n float(cost_ratio_to_bfloat16_quadratic),\n # Linear compute cost\n 'compute_cost_linear':\n float(compute_cost_linear),\n 'compute_cost_ratio_to_bfloat16_linear':\n float(cost_ratio_to_bfloat16_linear),\n }\n\n return result_dict\n
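\n\n# Worked example (illustrative numbers, not from the original module): for an\n# op with N multiplications annotated with lhs_prec=8 and rhs_prec=4,\n# quadratic cost = N * 8 * 4 = 32N versus bfloat16 cost = N * 16 * 16 = 256N,\n# so compute_cost_ratio_to_bfloat16 = 32N / 256N = 0.125. Under the linear\n# model, max(8, 4) = 8 falls in the 8-bit bucket, so the cost is 8N versus 16N\n# for bfloat16, giving compute_cost_ratio_to_bfloat16_linear = 0.5.\n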
\n\ndef estimate_memory_cost(hlo_proto):\n """Estimates the memory cost for the input HLO proto.\n\n Args:\n hlo_proto: the model's HLO representation (e.g. derived from\n hlo_utils.load_hlo_proto()). It contains all the instructions used in the\n model.\n\n Returns:\n a dictionary with two key-value pairs:\n 'memory_cost': The sum of all memory costs for each layer\n 'memory_cost_ratio_to_bfloat16': The ratio of the estimated memory cost to\n the cost in the case of no quantization (bfloat16)\n\n """\n # For the memory cost, we estimate the number of bits used for the weights,\n # which would also require the quantization precision for those weights.\n\n # Gather the supported quantizable ops and their quantization parameters\n target_instructions = _list_supported_ops_from_hlo(hlo_proto)\n\n # Begin memory cost calculation\n memory_cost = 0\n bfloat16_cost = 0  # computed as reference for comparison purposes\n\n supported_ops = _get_supported_ops()\n for instr in target_instructions:\n # Estimate number of weights in the op\n opcode = instr.opcode\n weight_count = supported_ops[opcode]['estimate_instr_weights'](\n instr, hlo_proto.computations)\n\n # Multiply number of weights by the number of bits used for each.\n bfloat16_weight_prec = 16\n\n _, rhs_prec, rhs_is_weight = _extract_quant_info(instr)\n\n if rhs_prec > 16:\n raise ValueError(f'Unexpected rhs precision {rhs_prec}.')\n\n if rhs_is_weight:\n memory_cost += weight_count * rhs_prec\n bfloat16_cost += weight_count * bfloat16_weight_prec\n\n # For dynamic matmuls, memory_cost is zero. The following check is to avoid\n # division by zero. We set the ratio for 0/0 to 1.\n if bfloat16_cost == 0 and memory_cost == 0:\n cost_ratio_to_bfloat16 = 1\n else:\n cost_ratio_to_bfloat16 = memory_cost / bfloat16_cost\n\n result_dict = {\n 'memory_cost': float(memory_cost),\n 'memory_cost_ratio_to_bfloat16': float(cost_ratio_to_bfloat16)\n }\n return result_dict\n\n\ndef estimate_costs_of_dot_and_conv_ops_from_jax_fn(\n fn, *fn_args, **fn_kwargs):\n """Estimates compute and memory costs of a JAX function's dot/conv ops.\n\n Generates an HLO proto from the JAX function and calls\n estimate_compute_cost() and estimate_memory_cost() on it.\n\n Args:\n fn: the function for which the HLO is to be produced.\n *fn_args: the function's args.\n **fn_kwargs: the function's kwargs.\n\n Returns:\n A dictionary with compute cost and memory cost data.\n See the estimate_compute_cost() and estimate_memory_cost() docstrings for\n details.\n """\n FLAGS.metadata_enabled = True\n try:\n hlo_module_proto = hlo_utils.load_hlo_proto_from_jax_fn(\n fn, *fn_args, **fn_kwargs)\n cost_dict = estimate_compute_cost(hlo_module_proto)\n memory_cost_dict = estimate_memory_cost(hlo_module_proto)\n cost_dict.update(memory_cost_dict)\n finally:\n # Restore the flag even if HLO generation or estimation fails.\n FLAGS.metadata_enabled = False\n return cost_dict\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n"""Input pipeline for a text8 / enwik8 dataset."""\n\nimport os\nfrom typing import Dict\nimport zipfile\n\nimport jax\nimport ml_collections\nimport numpy as np\nimport tensorflow as tf\n\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\nFeatures = Dict[str, tf.Tensor]\n\n\ndef 
_crop(x, max_length):\n """Select (optionally random) crop from sequence."""\n # Optionally sample random starting position.\n start = tf.random.uniform(\n (), dtype=tf.int32, maxval=tf.maximum(1, tf.shape(x)[0] - max_length + 1))\n\n x = x[start:(start + max_length)]\n return x\n\n\nclass CharLevelTokenizer():\n """Tokenizes strings to a char-level embedding."""\n\n def __init__(self, raw_train):\n assert isinstance(raw_train, str)\n train_chars = sorted(set(raw_train))\n chars = [' ',\n 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n assert set(train_chars) == set(chars), f'{train_chars} != {chars}'\n\n # Do not use zero as input.\n self.i_to_s = {i+1: char for i, char in enumerate(chars)}\n self.s_to_i = {char: i+1 for i, char in enumerate(chars)}\n\n self.maximum = max(self.i_to_s.keys())\n\n def vocab_size(self):\n return len(self.i_to_s) + 1  # For the zero token.\n\n def decode_unknown(self, i):\n if i == self.maximum + 1:\n return '_'\n else:\n return str(i)\n\n def tokenize(self, string):\n return np.array([self.s_to_i[s] for s in string])\n\n def detokenize(self, tokens):\n return ''.join([\n self.i_to_s[i] if i in self.i_to_s else self.decode_unknown(i)\n for i in tokens\n ])\n\n\nclass ByteTokenizer():\n """Tokenizes byte strings to byte-level tokens."""\n\n def __init__(self):\n # For the zero token everything is shifted.\n self.maximum = 255 + 1\n\n def vocab_size(self):\n return self.maximum + 1  # For the zero token.\n\n def tokenize(self, string):\n # Add one for special token zero.\n return np.array(list(string), dtype=np.int32) + 1\n\n def detokenize(self, tokens):\n """Detokenizes an array of tokens to a string."""\n tokens = np.array(tokens)\n\n # Shift and deal with padding token.\n tokens = tokens - 1\n tokens[tokens < 0] = 48  # ASCII index for digit zero\n\n # Convert to bytes and create the byte string (b''.join fails on an array\n # of uint8 scalars, so use tobytes()).\n tokens = np.array(tokens, dtype=np.uint8)\n byte_string = tokens.tobytes()\n # Decode the byte string.\n string = byte_string.decode('utf-8', errors='replace')\n return string\n
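\n\n# Usage sketch (added for illustration; not part of the original file):\n#\n#   tok = ByteTokenizer()\n#   tokens = tok.tokenize(b'abc')  # -> array([98, 99, 100], dtype=int32)\n#   tok.detokenize(tokens)         # -> 'abc'\n#\n# Byte values are shifted up by one so that token zero stays reserved for\n# padding.\n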
\n\ndef prepare_eval_ds_from_tokens(config,\n tokens):\n """This function prepares an eval dataset to have the correct context.\n\n What this function does in words:\n 1. It batches the tokens data.\n 2. In case of a context, it computes which indices _precede_ the batch,\n and then those are used to retrieve the tokens preceding the batches.\n The main reason for putting this in a separate function is the context logic,\n which is a little tedious if the context length and the sequence length are\n not the same.\n\n Args:\n config: A ml_collections config.\n tokens: An np.ndarray containing integers.\n\n Returns:\n A tensorflow dataset.\n """\n length = len(tokens)\n assert length % config.seq_length == 0\n\n # Here the character indices of the datapoints are collected.\n input_idcs = np.arange(length).reshape(-1, config.seq_length)\n\n tokens_inputs = tokens[input_idcs][:, :, None]  # Add channel axis.\n\n if config.context_length > 0:\n start_idcs = input_idcs[:, 0:1]\n\n # Context idcs start at the same index, since they will be applied to a\n # shifted array of exactly context_size.\n context_idcs = start_idcs + np.arange(config.context_length)[None, :]\n\n tokens_padded = np.concatenate(\n [np.zeros(config.context_length, dtype=tokens.dtype), tokens])\n tokens_context = tokens_padded[context_idcs]\n ds = tf.data.Dataset.from_tensor_slices(\n {'inputs': tokens_inputs, 'context': tokens_context})\n else:\n ds = tf.data.Dataset.from_tensor_slices(\n {'inputs': tokens_inputs})\n\n ds = ds.batch(config.test_batch_size, drop_remainder=False)\n ds = ds.prefetch(AUTOTUNE)\n\n return ds\n\n\ndef get_datasets(config,\n *,\n shuffle_buffer_size = 1000_000):\n """Load and return dataset of batched examples for use during training."""\n assert config.batch_size % jax.process_count() == 0\n per_process_batch_size = config.batch_size // jax.process_count()\n\n if config.dataset_name == 'text8':\n path = os.path.join(config.text8_path, 'text8.zip')\n with tf.io.gfile.GFile(path, 'rb') as z:\n raw = zipfile.ZipFile(z).read('text8').decode('utf-8')\n elif config.dataset_name == 'enwik8':\n path = os.path.join(config.text8_path, 'enwik8.zip')\n with tf.io.gfile.GFile(path, 'rb') as z:\n raw = zipfile.ZipFile(z).read('enwik8')  # Do not decode, as opposed to text8.\n else:\n raise ValueError\n\n # Standard text8/enwik8 splits, both datasets have the same number of tokens.\n assert len(raw) == 100000000, f'{len(raw)} != 100000000'\n train_data = raw[:90000000]\n eval_data = raw[90000000:95000000]\n test_data = raw[95000000:]\n\n if config.dataset_name == 'text8':\n tokenizer = CharLevelTokenizer(train_data)\n elif config.dataset_name == 'enwik8':\n tokenizer = ByteTokenizer()\n else:\n raise ValueError\n\n train_tokens = tokenizer.tokenize(train_data)\n\n # Pad with zero tokens for the first batch.\n if config.context_length > 0:\n pad = np.zeros(shape=(config.context_length,), dtype=train_tokens.dtype)\n train_tokens = np.concatenate([pad, train_tokens], axis=0)\n\n chunk_size = config.seq_length + config.context_length\n\n train_ds = tf.data.Dataset.from_tensor_slices(train_tokens)\n
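\n # Concrete example of the chunking arithmetic below (illustrative numbers,\n # not from the config): with seq_length=256 and context_length=64,\n # chunk_size = 320, each batched element holds 4 * 320 = 1280 tokens, and\n # _crop() draws a start offset uniformly from [0, 1280 - 320] = [0, 960].\n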
\n # We batch sequences of 4 times chunk_size, and then crop with size\n # 1 x 'chunk_size' for the purpose of augmenting the data somewhat. Although\n # this does leave out some crops over the borders of the 4 * 'chunk_size'\n # chunks, in practice this is not really an issue and is also done by others\n # in a similar fashion.\n train_ds = train_ds.batch(4 * chunk_size, drop_remainder=True)\n\n # We do not shard here, as the data is small enough; note that we rely on\n # TensorFlow randomness to produce different orders.\n train_ds = train_ds.shuffle(shuffle_buffer_size)\n\n # We take random crops of size 'chunk_size' from the previously chunked\n # pieces of '4 x chunk_size'. This is a form of data augmentation.\n def crop(batch):\n return _crop(batch, config.seq_length + config.context_length)\n train_ds = train_ds.map(crop, num_parallel_calls=AUTOTUNE)\n\n train_ds = train_ds.batch(\n per_process_batch_size, drop_remainder=True, num_parallel_calls=AUTOTUNE)\n\n # For the training chunks, this final step is needed to separate the context\n # from the inputs.\n def prepare_inputs(batch):\n if config.context_length > 0:\n context = batch[:, :config.context_length]\n inputs = batch[:, config.context_length:, None]  # Channel axis.\n return {'inputs': inputs, 'context': context}\n else:\n return {'inputs': batch[:, :, None]}  # Channel axis.\n\n train_ds = train_ds.map(prepare_inputs, num_parallel_calls=AUTOTUNE)\n\n train_ds = train_ds.prefetch(AUTOTUNE)\n\n eval_tokens = tokenizer.tokenize(eval_data)\n eval_ds = prepare_eval_ds_from_tokens(config, eval_tokens)\n\n test_tokens = tokenizer.tokenize(test_data)\n test_ds = prepare_eval_ds_from_tokens(config, test_tokens)\n\n return train_ds, eval_ds, test_ds, tokenizer\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# python3\n"""Helper functions for training baseline seq-to-seq model."""\n\n# pytype: disable=wrong-arg-count\n# pytype: disable=attribute-error\n\nimport collections\n\nfrom flax import linen as nn\nfrom flax.training import common_utils\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\nfrom latent_programmer import decode\nfrom latent_programmer import models\n\n\ndef create_learning_rate_scheduler(\n base_learning_rate=0.5,\n factors='constant * linear_warmup * rsqrt_normalized_decay',\n warmup_steps=16000,\n decay_factor=0.5,\n steps_per_decay=50000,\n steps_per_cycle=100000):\n """Creates learning rate schedule.\n\n Interprets factors in the factors string which can consist of:\n * constant: interpreted as the constant value,\n * linear_warmup: interpreted as linear warmup until warmup_steps,\n * rsqrt_decay: divide by square root of max(step, warmup_steps)\n * rsqrt_normalized_decay: rsqrt decay rescaled by sqrt(warmup_steps), so the\n factor is 1.0 at step == warmup_steps.\n * decay_every: Every k steps decay the learning rate by decay_factor.\n * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.\n\n Args:\n base_learning_rate: float, the starting constant for the lr schedule.\n factors: a string with factors separated by '*' that defines the schedule.\n warmup_steps: how many steps to warm up for in the warmup schedule.\n decay_factor: The amount to decay the learning rate by.\n 
steps_per_decay: How often to decay the learning rate.\n steps_per_cycle: Steps per cycle when using cosine decay.\n\n Returns:\n A function learning_rate(step): float -> {'learning_rate': float}, the\n step-dependent lr.\n \"\"\"\n factors = [n.strip() for n in factors.split('*')]\n\n def step_fn(step):\n \"\"\"Step to learning rate function.\"\"\"\n ret = 1.0\n for name in factors:\n if name == 'constant':\n ret *= base_learning_rate\n elif name == 'linear_warmup':\n ret *= jnp.minimum(1.0, step / warmup_steps)\n elif name == 'rsqrt_decay':\n ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps))\n elif name == 'rsqrt_normalized_decay':\n ret *= jnp.sqrt(warmup_steps)\n ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))\n elif name == 'decay_every':\n ret *= (decay_factor**(step // steps_per_decay))\n elif name == 'cosine_decay':\n progress = jnp.maximum(0.0,\n (step - warmup_steps) / float(steps_per_cycle))\n ret *= jnp.maximum(0.0,\n 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))\n else:\n raise ValueError('Unknown factor %s.' % name)\n return jnp.asarray(ret, dtype=jnp.float32)\n\n return step_fn\n\n\ndef compute_weighted_cross_entropy(logits, targets, weights=None):\n \"\"\"Compute weighted cross entropy and entropy for log probs and targets.\n\n Args:\n logits: `[batch, length, num_classes]` float array.\n targets: categorical targets `[batch, length]` int array.\n weights: None or array of shape [batch, length, 1]\n\n Returns:\n Tuple of scalar loss and batch normalizing factor.\n \"\"\"\n if logits.ndim != targets.ndim + 1:\n raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %\n (str(logits.shape), str(targets.shape)))\n\n onehot_targets = common_utils.onehot(targets, logits.shape[-1])\n loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)\n normalizing_factor = jnp.prod(jnp.asarray(targets.shape))\n if weights is not None:\n loss = loss * weights\n normalizing_factor = weights.sum()\n\n return loss.sum(), normalizing_factor\n\n\ndef compute_weighted_accuracy(logits, targets, weights=None):\n \"\"\"Compute weighted accuracy for log probs and targets.\n\n Args:\n logits: `[batch, length, num_classes]` float array.\n targets: categorical targets `[batch, length]` int array.\n weights: None or array of shape [batch, length, 1]\n\n Returns:\n Tuple of scalar accuracy and batch normalizing factor.\n \"\"\"\n if logits.ndim != targets.ndim + 1:\n raise ValueError('Incorrect shapes. 
Got shape %s logits and %s targets' %\n (str(logits.shape), str(targets.shape)))\n acc = jnp.equal(jnp.argmax(logits, axis=-1), targets)\n normalizing_factor = jnp.prod(jnp.asarray(targets.shape))\n if weights is not None:\n acc = acc * weights\n normalizing_factor = weights.sum()\n\n return acc.sum(), normalizing_factor\n\n\ndef compute_metrics(logits, targets, weights):\n \"\"\"Compute summary metrics.\"\"\"\n loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)\n acc, _ = compute_weighted_accuracy(logits, targets, weights)\n metrics = {\n 'loss': loss,\n 'accuracy': acc,\n 'denominator': weight_sum,\n }\n metrics = jax.lax.psum(metrics, 'batch')\n return metrics\n\n\n# Train / eval / decode step functions.\n# -----------------------------------------------------------------------------\n\n\ndef train_step(optimizer,\n inputs,\n outputs,\n programs,\n learning_rate_fn,\n config,\n train_rng=None):\n \"\"\"Train on batch of program tasks.\"\"\"\n # We handle PRNG splitting inside the top pmap, rather\n # than handling it outside in the training loop - doing the\n # latter can add some stalls to the devices.\n train_rng, new_train_rng = jax.random.split(train_rng)\n\n weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)\n\n def loss_fn(params):\n \"\"\"Loss function used for training.\"\"\"\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits\n\n step = optimizer.state.step\n lr = learning_rate_fn(step)\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (_, logits), grad = grad_fn(optimizer.target)\n grad = jax.lax.pmean(grad, 'batch')\n new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)\n\n # Get metrics.\n metrics = compute_metrics(logits, programs, weights)\n metrics['learning_rate'] = lr\n return new_optimizer, metrics, new_train_rng\n\n\ndef eval_step(params, inputs, outputs, programs, config):\n weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)\n logits = models.ProgramTransformer(config).apply(\n {'params': params}, inputs, outputs, programs)\n\n return compute_metrics(logits, programs, weights)\n\n\ndef initialize_cache(inputs, outputs, programs, max_decode_len, config):\n \"\"\"Initialize a cache for a given input shape and max decode length.\"\"\"\n target_shape = (programs.shape[0], max_decode_len)\n initial_variables = models.ProgramTransformer(config).init(\n jax.random.PRNGKey(0),\n jnp.ones(inputs.shape, config.dtype),\n jnp.ones(outputs.shape, config.dtype),\n jnp.ones(target_shape, config.dtype))\n return initial_variables['cache']\n\n\ndef predict_step(params,\n inputs,\n outputs,\n cache,\n eos_token,\n max_decode_len,\n beam_size,\n config):\n \"\"\"Predict translation with fast decoding beam search on a batch.\"\"\"\n # Prepare transformer fast-decoder call for beam search: for beam search, we\n # need to set up our decoder model to handle a batch size equal to\n # batch_size * beam_size, where each batch item's data is expanded in-place\n # rather than tiled.\n flat_encoded = decode.flat_batch_beam_expand(\n models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n method=models.ProgramTransformer.encode),\n beam_size)\n\n encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)\n flat_encoded_padding_mask = decode.flat_batch_beam_expand(\n 
encoded_padding_mask, beam_size)\n\n def tokens_ids_to_logits(flat_ids, flat_cache):\n \"\"\"Token slice to logits from decoder model.\"\"\"\n # --> [batch * beam, 1, vocab]\n flat_logits, new_vars = models.ProgramTransformer(config).apply(\n {'params': params, 'cache': flat_cache},\n flat_ids,\n flat_encoded,\n flat_encoded_padding_mask,\n mutable=['cache'],\n method=models.ProgramTransformer.decode)\n new_flat_cache = new_vars['cache']\n # Remove singleton sequence-length dimension:\n # [batch * beam, 1, vocab] --> [batch * beam, vocab]\n flat_logits = flat_logits.squeeze(axis=1)\n return flat_logits, new_flat_cache\n\n # Using the above-defined single-step decoder function, run a\n # beam search over possible sequences given input encoding.\n beam_seqs, _ = decode.beam_search(\n inputs,\n cache,\n tokens_ids_to_logits,\n beam_size=beam_size,\n alpha=0.6,\n bos_token=config.bos_token,\n eos_token=eos_token,\n max_decode_len=max_decode_len)\n\n # Beam search returns [n_batch, n_beam, n_length] with beam dimension\n # sorted in increasing order of log-probability.\n return beam_seqs\n\n\n# Util functions for prediction\n# -----------------------------------------------------------------------------\n\n\ndef pad_examples(x, desired_batch_size):\n \"\"\"Expand batch to desired size by repeating last slice.\"\"\"\n batch_pad = desired_batch_size - x.shape[0]\n tile_dims = [1] * len(x.shape)\n tile_dims[0] = batch_pad\n return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)\n\n\ndef tohost(x):\n \"\"\"Collect batches from all devices to host and flatten batch dimensions.\"\"\"\n n_device, n_batch, *remaining_dims = x.shape\n return x.reshape((n_device * n_batch,) + tuple(remaining_dims))\n\n\ndef per_host_sum_pmap(in_tree):\n \"\"\"Execute psum on in_tree's leaves over one device per host.\"\"\"\n host2devices = collections.defaultdict(list)\n for d in jax.devices():\n host2devices[d.host_id].append(d)\n devices = [host2devices[k][0] for k in host2devices]\n host_psum = jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i', devices=devices)\n def pre_pmap(xs):\n return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs)\n def post_pmap(xs):\n return jax.tree_map(lambda x: x[0], xs)\n return post_pmap(host_psum(pre_pmap(in_tree)))\n\n\ndef eval_predicted(predicted, inputs, outputs, parse_beam_fn):\n \"\"\"Evaluate predicted program beams.\"\"\"\n best_p, best_score = None, -1\n\n # predicted shape [beam_size, length]\n for beam in predicted:\n try:\n p = parse_beam_fn(beam)\n p_outs = [p(inp) for inp in inputs]\n score = np.sum([p_out == out for p_out, out in zip(p_outs, outputs)])\n if score > best_score:\n best_p, best_score = p, score\n except: # pylint: disable=bare-except\n pass\n if best_score >= len(inputs): # Found solution.\n break\n return best_p, best_score\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Serializer/deserializers for biological sequence data.\n\nInspired by 
learning/brain/research/wavesplit/wavesplit_v2/serialization.py\n\nThis module defines Coder that are object that turn dictionary of features,\nkeyed by string and with tensor values, into binary strings and vice-versa.\n\nDifferent serialization protocols are implemented to perform this conversion.\n\"\"\"\n\nimport json\nfrom typing import Any, Dict, Mapping, Optional, Sequence, Tuple\n\nimport gin\nimport numpy as np\nimport tensorflow as tf\n\nFeaturesDict = Dict[str, tf.Tensor]\n\n\[email protected]\nclass JSONCoder:\n \"\"\"A JSON coder for beam.\"\"\"\n\n def encode(self, features):\n return json.dumps(features)\n\n def decode(self, line):\n return json.loads(line)\n\n\[email protected]\nclass Coder:\n \"\"\"Encodes / decodes FeaturesDict into / from strings.\"\"\"\n\n def __init__(self,\n specs = None,\n shapes = None,\n to_numpy = False):\n self._specs = specs\n self._shapes = shapes if shapes is not None else {}\n self._to_numpy = to_numpy\n\n def encode(self, features):\n raise NotImplementedError()\n\n def raw_features(self, serialized_example):\n return tf.io.parse_single_example(serialized_example, self.specs)\n\n def decode(self, serialized_example):\n \"\"\"Reads a tf.Example and turns it into a string.\"\"\"\n sparse = self.raw_features(serialized_example)\n features = {}\n for k, v in sparse.items():\n is_sparse = isinstance(self.specs.get(k, None), tf.io.VarLenFeature)\n features[k] = tf.sparse.to_dense(v) if is_sparse else v\n\n result = {}\n for k, v in features.items():\n if v.dtype == tf.string and v.shape.rank > 0 and v.shape[0] == 1:\n parsed = v[0]\n else:\n parsed = v\n parsed = parsed.numpy() if self._to_numpy else parsed\n parsed = parsed.decode() if isinstance(parsed, bytes) else parsed\n # Enforces the final shapes if possible.\n shape = self._shapes.get(k, None)\n parsed = tf.ensure_shape(parsed, shape) if shape is not None else parsed\n result[k] = parsed\n return result\n\n @property\n def specs(self):\n result = {}\n for k, v in self._specs.items():\n if isinstance(v, tf.dtypes.DType):\n v = tf.io.VarLenFeature(v)\n result[k] = v\n return result\n\n\[email protected]\nclass FlatCoder(Coder):\n \"\"\"Encode and decode strings into tf.Example with flat tensors.\"\"\"\n\n def encode(self, features):\n \"\"\"Turns a features dictionary into a serialized tf.Example.\"\"\"\n data = {}\n for k, v in features.items():\n curr_dtype = self._specs.get(k, None)\n if curr_dtype is None:\n continue\n if curr_dtype == tf.float32:\n data[k] = tf.train.Feature(float_list=tf.train.FloatList(value=v))\n elif curr_dtype == tf.int64:\n data[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))\n elif curr_dtype == tf.string:\n v = v.numpy() if isinstance(v, tf.Tensor) else v.encode()\n data[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[v]))\n example = tf.train.Example(features=tf.train.Features(feature=data))\n return example.SerializeToString()\n\n\[email protected]\nclass SequenceCoder(Coder):\n \"\"\"Use a chunked sequence serialization.\n\n To be encoded, the sequences are first chunked into subsequences of fix size.\n In case of multi channels, all the channels are interleaved into a single one.\n Decoding is done by `tf.io.parse_single_sequence_example`.\n \"\"\"\n\n def __init__(self, sequence_keys = None, **kwargs):\n super().__init__(**kwargs)\n self.sequence_keys = {} if sequence_keys is None else set(sequence_keys)\n\n def raw_features(self, serialized_example):\n \"\"\"Returns the decoded sparse features.\"\"\"\n ctx_specs = {}\n seq_specs = 
{}\n for k, v in self._specs.items():\n target_specs = seq_specs if k in self.sequence_keys else ctx_specs\n v = tf.io.VarLenFeature(v) if isinstance(v, tf.dtypes.DType) else v\n target_specs[k] = v\n context, sparse = tf.io.parse_single_sequence_example(\n serialized_example, ctx_specs, seq_specs)\n\n sparse.update(context)\n return sparse\n\n def encode(self, features):\n \"\"\"Encodes a Dict of Tensors into a string.\"\"\"\n example = tf.train.SequenceExample()\n for key, tensor in features.items():\n if key not in self._specs:\n continue\n\n spec = self.specs.get(key, tf.io.VarLenFeature(tf.float32))\n if key in self.sequence_keys:\n feature = example.feature_lists.feature_list[key].feature\n sequence = np.array(tensor) if isinstance(tensor, list) else tensor\n for i in range(sequence.shape[0]):\n if spec.dtype == tf.float32:\n if len(sequence.shape) > 1:\n feature.add().float_list.value.extend(sequence[i])\n else:\n feature.add().float_list.value.append(sequence[i])\n else:\n if len(sequence.shape) > 1:\n feature.add().int64_list.value.extend(sequence[i])\n else:\n feature.add().int64_list.value.append(sequence[i])\n\n else:\n tensor = [tensor] if isinstance(spec, tf.io.FixedLenFeature) else tensor\n if spec.dtype == tf.string:\n tensor = tensor.encode() if isinstance(tensor, str) else tensor\n tensor = tensor.numpy() if isinstance(tensor, tf.Tensor) else tensor\n example.context.feature[key].bytes_list.value.append(tensor)\n if spec.dtype == tf.int64:\n example.context.feature[key].int64_list.value.extend(tensor)\n if spec.dtype == tf.float32:\n example.context.feature[key].float_list.value.extend(tensor)\n return example.SerializeToString()\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains util functions that are needed often.\n\nThis contains often used transformations / functions that are very general, but\ncomplicated enough that they warrant an implementation in this file.\n\"\"\"\nimport io\nimport os\nfrom typing import Optional\n\nimport flax\nimport imageio\nimport jax\nimport jax.numpy as jnp\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\n\nArray = jnp.ndarray\n\n\ndef get_iterator(\n ds,\n prefetch = 3):\n \"\"\"Make a prefetch-to-device JAX iterator for the dataset.\n\n Args:\n ds: Dataset to obtain an iterator for.\n prefetch: Number of dataset entries to pre-fetch to device.\n\n Returns:\n Dataset iterator.\n \"\"\"\n # Convert to numpy.\n it = map(lambda x: jax.tree_map(lambda y: y._numpy(), x), iter(ds)) # pylint: disable=protected-access\n if prefetch:\n it = flax.jax_utils.prefetch_to_device(it, prefetch)\n return it\n\n\ndef apply_weight(x, weight):\n \"\"\"Apply weights an array. 
Broadcast if necessary.\"\"\"\n if len(x.shape) < len(weight.shape):\n raise ValueError(f'Incompatible number of dimensions for {x.shape} and '\n f'{weight.shape}.')\n for i, (dx, dw) in enumerate(zip(x.shape, weight.shape)):\n if dx != dw and dw != 1:\n raise ValueError(f'Unable to brodcast shapes {x.shape} and {weight.shape}'\n f'in dimension {i}.')\n weight = jnp.reshape(\n weight, weight.shape + (1,) * (len(x.shape) - len(weight.shape)))\n return x * weight\n\n\ndef global_norm(tree, eps=1e-10):\n return jnp.sqrt(eps + jnp.sum(jnp.asarray(\n [jnp.sum(jnp.square(x)) for x in jax.tree_leaves(tree)])))\n\n\ndef clip_by_global_norm(tree, clip_norm, eps=1e-10):\n norm = global_norm(tree)\n scale = jnp.minimum(1.0, clip_norm / norm + eps)\n return jax.tree_map(lambda x: x * scale, tree), norm\n\n\ndef batch_permute(array, permutation):\n \"\"\"Permutes an input array using permutations, batched.\n\n This function permutes the array using the permutation array as indexing.\n Importantly, this is done in a batchwise fashion, so each array has its own\n individual permutation.\n\n Args:\n array: The array to permute with size (batch_size, length, ...).\n permutation: The permutations with size (batch_size, length)\n Returns:\n The array permuted.\n \"\"\"\n assert array.shape[:2] == permutation.shape, (f'{array.shape} does not '\n f'match {permutation.shape}')\n batch_size = permutation.shape[0]\n\n return array[jnp.arange(batch_size)[:, None], permutation, Ellipsis]\n\n\ndef compute_batch_inverse_permute(permutation):\n \"\"\"Permutes an inverses of permutations, batched.\n\n Args:\n permutation: The permutations with size (batch_size, length)\n Returns:\n The inverse permutation, also with size (batch_size, length).\n \"\"\"\n batch_size, num_steps = permutation.shape\n temp = jnp.full_like(permutation, fill_value=-1)\n arange = jnp.arange(num_steps)[None, :].repeat(batch_size, axis=0)\n\n # In numpy this would read approximately as follows:\n # inv_permute[jnp.arange(batch_size)[:, None], permutation] = arange\n # and it essentially inverts the permutation operation by writing the original\n # index to the permutation destinations.\n inv_permute = temp.at[jnp.arange(batch_size)[:, None], permutation].set(\n arange)\n return inv_permute\n\n\ndef sum_except_batch(x):\n return x.reshape(x.shape[0], -1).sum(-1)\n\n\ndef batch(x, num):\n batch_size = x.shape[0]\n assert batch_size % num == 0\n\n if len(x.shape) == 1:\n return x.reshape(num, batch_size // num)\n else:\n return x.reshape(num, batch_size // num, *x.shape[1:])\n\n\ndef unbatch(x):\n return x.reshape(x.shape[0] * x.shape[1], *x.shape[2:])\n\n\ndef add_empty_axes_behind(x, number):\n new_shape = x.shape + (1,) * number\n return x.reshape(new_shape)\n\n\ndef plot_loss_components(kl_history, save_path, num_stages, max_plots=20):\n \"\"\"Plots an area plot for the KLs over time.\"\"\"\n if jax.process_index() == 0:\n assert len(kl_history) >= 1\n num_kls = len(kl_history[0])\n\n if len(kl_history) < max_plots:\n ts = range(len(kl_history))\n else:\n ts = np.asarray((np.linspace(0, len(kl_history)-1, max_plots)), np.int32)\n\n num_timesteps = len(ts)\n\n for i, t in enumerate(ts):\n kls = kl_history[t]\n linear_scale = 1. 
- (i+1) / float(num_timesteps)\n color = (0.9 * linear_scale, 0.9 * linear_scale, 1.0)\n # kls are multiplied with num_timesteps, to see there contribution better\n # in relation to average bpd values.\n plt.fill_between(\n np.arange(num_kls), kls * len(kls), color=color, alpha=0.9)\n\n if num_stages > 1:\n steps_per_stage = num_kls // num_stages\n\n for i in range(1, num_stages):\n x_value = steps_per_stage * i - 0.5\n # draw black vertical line:\n plt.plot(\n np.array([x_value, x_value]),\n np.array([0., 1.]),\n 'k--',\n linewidth=1)\n\n plt.ylim((0, 2 * len(kl_history[-1]) * np.max(kl_history[-1])))\n\n tf.io.gfile.makedirs(os.path.dirname(save_path))\n with tf.io.gfile.GFile(save_path, 'wb') as out_file:\n plt.savefig(out_file, dpi=200)\n\n plt.show()\n plt.close()\n\n\ndef plot_batch_images(batch_imgs, n_rows, n_classes):\n grid = make_grid(batch_imgs, n_rows)\n plt.imshow(grid / (n_classes - 1.))\n plt.show()\n plt.close()\n\n\ndef make_grid(batch_imgs, n_rows):\n \"\"\"Makes grid of images.\"\"\"\n batch_imgs = np.array(batch_imgs)\n assert len(batch_imgs.shape) == 4, f'Invalid shape {batch_imgs.shape}'\n\n batchsize, height, width, channels = batch_imgs.shape\n\n n_cols = (batchsize + n_rows - 1) // n_rows\n grid = np.zeros((n_rows * height, n_cols * width, channels))\n\n for i, img in enumerate(batch_imgs):\n y = i // n_cols\n x = i % n_cols\n grid[y*height:(y+1)*height, x*width:(x+1)*width, :] = img\n\n if channels == 1:\n grid = np.concatenate([grid, grid, grid], axis=-1)\n\n # Upsample if low res to avoid visualization artifacts.\n if height <= 32:\n upsample_factor = 2\n grid = grid.repeat(upsample_factor, axis=0).repeat(upsample_factor, axis=1)\n\n return grid\n\n\nclass KLTracker():\n \"\"\"Tracks KL divergences per timestep.\"\"\"\n\n def __init__(self, num_steps, momentum=0.95):\n self.history = np.zeros(num_steps)\n # Ensured int64 to avoid overflow\n self.n_updates = np.zeros(num_steps, dtype=np.int64)\n self.momentum = momentum\n\n # Bit penalty if KL unknown. Hardcoded for 8-bit images.\n self.bit_penalty = 8. 
/ num_steps\n\n def update(self, t_batch, nelbo_batch):\n \"\"\"Updates buffers with KL divergences per timestep.\"\"\"\n assert len(t_batch.shape) == 1 and len(nelbo_batch.shape) == 1\n assert len(t_batch) == len(\n nelbo_batch), f'{len(t_batch)} != {len(nelbo_batch)}'\n\n for t, nelbo in zip(t_batch, nelbo_batch):\n if self.n_updates[t] == 0:\n self.history[t] = nelbo\n else:\n self.history[t] = self.momentum * self.history[t] + (\n 1 - self.momentum) * nelbo\n\n self.n_updates[t] += 1\n\n def has_history_forall_t(self):\n return np.alltrue(self.n_updates >= 5)\n\n def get_kl_per_t(self):\n kl_per_t = self.history + np.where(self.n_updates == 0,\n self.bit_penalty,\n np.zeros_like(self.history))\n\n return kl_per_t\n\n\ndef onehot(labels, num_classes):\n x = (labels[Ellipsis, None].astype(jnp.int32) == jnp.arange(num_classes)[None])\n return x.astype(jnp.float32)\n\n\ndef save_chain_to_gif(chain, path, n_rows, max_steps=100):\n \"\"\"Saves list of batches of images to a gif.\"\"\"\n if jax.process_index() == 0:\n if len(chain) > max_steps:\n idcs = np.linspace(0, len(chain) - 1, max_steps, dtype=np.int32)\n else:\n idcs = np.arange(0, len(chain))\n\n chain_grid = [make_grid(chain[i], n_rows) for i in idcs]\n\n # Extend with last frame 10 times to see results better.\n chain_grid.extend(10 * [make_grid(chain[-1], n_rows=n_rows)])\n\n chain_grid = [np.asarray(x, dtype=np.uint8) for x in chain_grid]\n\n # Checks if dir already available and creates if not.\n tf.io.gfile.makedirs(os.path.dirname(path))\n\n with tf.io.gfile.GFile(path, 'wb') as out_file:\n io_buffer = io.BytesIO()\n imageio.mimwrite(io_buffer, chain_grid, format='gif', duration=.01)\n out_file.write(io_buffer.getvalue())\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for genome handling.\"\"\"\n\nimport dataclasses as dc\nimport functools as ft\nfrom typing import Any, Callable, Optional, Union\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom blur import blur_env\n\n\n\nTensor = Union[tf.Tensor, np.ndarray]\n\n\[email protected]\nclass NeuronGenome:\n transform: Tensor\n keep: Union[float, Tensor] = 1.0\n update: Union[float, Tensor] = 1.0\n norm_multiplier: Union[float, Tensor] = 1.0\n norm_shift: Union[float, Tensor] = 0.0\n\n\[email protected]\nclass HebbianTransform:\n pre: Tensor\n post: Tensor\n ojas_multiplier: Union[float, Tensor] = 1.0\n\n\[email protected]\nclass SynapticGenome:\n transform: HebbianTransform\n synapse_init_std: Union[float, Tensor] = 1e-1\n synapse_init_xavier_std: Union[float, Tensor] = 0.0\n keep: Union[float, Tensor] = 1.0\n update: Union[float, Tensor] = 1.0\n saturation: Union[float, Tensor] = 1\n rescale_to: Union[float, Tensor] = 1.0\n\n\[email protected]\nclass Genome:\n \"\"\"Genome.\"\"\"\n neuron: NeuronGenome\n synapse: SynapticGenome\n forward_synapse: Optional[SynapticGenome] = None\n\n def num_states_per_neuron(self):\n return get_num_states_in_genome(self)\n\n def 
num_species(self):\n return get_num_species_in_genome(self)\n\n def __post_init__(self):\n # By default we start with the same forward pass synapse genome that is\n # used on the backward pass; whether to do synaptic weight update on the\n # forward pass is decided in `network_step` based on the value of\n # `forward_synapse_update` in the network specification.\n if self.forward_synapse is None:\n self.forward_synapse = self.synapse\n\n\ndef _safe_shape(t):\n if hasattr(t, 'shape'):\n return t.shape\n else:\n return np.array(t).shape\n\n\ndef get_num_states_in_genome(g):\n return _safe_shape(g.synapse.transform.pre)[-1]\n\n\ndef transform_genome(g, map_fn, prefix=''):\n \"\"\"Applies transformation to genome using map_fn.\"\"\"\n r = {}\n for k, v in vars(g).items():\n if dc.is_dataclass(v):\n r[k] = transform_genome(v, map_fn=map_fn, prefix=f'{prefix}{k}/')\n else:\n mapped_value = map_fn(v, prefix + k)\n if mapped_value is not None:\n r[k] = mapped_value\n return dc.replace(g, **r)\n\n\ndef copy_genome(genome):\n return transform_genome(genome, lambda x, _: x)\n\n\ndef get_genome_slice(g, i):\n def fn(x, unused_name):\n # Necessary to avoid issues with tests restoring checkpoints.\n if isinstance(x, int) or isinstance(x, float):\n return x\n return x[i]\n return transform_genome(g, fn)\n\n\ndef get_genome(g, layer_index, per_layer_genome=False):\n if per_layer_genome:\n return get_genome_slice(g, layer_index)\n else:\n return g\n\n\ndef convert_genome_to_tf_variables(g, prefix=''):\n \"\"\"Converts genome to tensorflow variables with initialized to constant.\"\"\"\n\n def map_fn(v, name):\n return tf.Variable(initial_value=v, dtype=tf.float32, name=name)\n\n return transform_genome(g, map_fn, prefix=prefix)\n\n\ndef convert_genome_to_dict(g):\n res = {}\n map_fn = lambda v, name: res.update([(name, v)])\n transform_genome(g, map_fn)\n return res\n\n\ndef _assign_from_values(v, name, values, index=None, prefix='', suffix=''):\n key = prefix + name + suffix\n if key not in values:\n tf.logging.warning(f'Genome parameter \"{key}\" cannot be found in the '\n 'dictionary.')\n return None\n if hasattr(v, 'shape') and index is not None:\n return values[key][index]\n else:\n return values[key]\n\n\ndef get_num_species_in_genome(g):\n shape = _safe_shape(g.synapse.transform.pre)\n return shape[0] if len(shape) == 3 else None\n\n\ndef genome_from_dict(values, index=None, prefix='', suffix=''):\n num_states = _safe_shape(values['synapse/transform/pre'])[-1]\n transform_fn = ft.partial(\n _assign_from_values,\n values=values,\n index=index,\n prefix=prefix,\n suffix=suffix)\n return transform_genome(create_random_genome(num_states), transform_fn)\n\n\ndef replicate_across_dims(value, shared_update_params, num_species, num_layers):\n if num_species is not None and not shared_update_params:\n value = np.array([value] * num_species)\n if num_layers is not None:\n value = np.array([value] * num_layers)\n return value\n\n\ndef create_random_genome(num_states,\n num_species=None,\n shared_update_params=True,\n neuron_transform_std=1.0,\n synapse_transform_std=1.0,\n synapse_update=-1e-3,\n synapse_init_std=1e-1,\n separate_forward_synapse=False,\n num_layers=None):\n \"\"\"Creates random genome with that many species.\"\"\"\n\n species_dims = (num_species,) if num_species is not None else ()\n if num_layers is not None:\n species_dims = (num_layers, *species_dims)\n\n maybe_shared = ft.partial(replicate_across_dims,\n shared_update_params=shared_update_params,\n num_species=num_species,\n 
num_layers=num_layers)\n def _synaptic_genome(pre_transform, post_transform):\n return SynapticGenome(\n update=maybe_shared(synapse_update),\n keep=maybe_shared(1.0),\n synapse_init_std=maybe_shared(synapse_init_std),\n synapse_init_xavier_std=maybe_shared(0.0),\n saturation=maybe_shared(1.0),\n rescale_to=maybe_shared(1.0),\n transform=HebbianTransform(\n pre=pre_transform,\n post=post_transform,\n ojas_multiplier=maybe_shared(1.0)))\n\n matrix_shape = (*species_dims, num_states, num_states)\n o = np.ones(matrix_shape)\n z = np.zeros(matrix_shape)\n init_matrix = lambda: np.random.randn(*matrix_shape) * synapse_transform_std\n pre, post = init_matrix(), init_matrix()\n g = Genome(\n neuron=NeuronGenome(\n transform=(\n neuron_transform_std *\n np.random.randn(*species_dims, 2 * num_states, 2 * num_states) *\n np.block([[z, o], [o, z]])),\n update=maybe_shared(1.0),\n keep=maybe_shared(1.0),\n norm_multiplier=maybe_shared(1.0),\n norm_shift=maybe_shared(0.0)),\n synapse=_synaptic_genome(pre, post))\n if separate_forward_synapse:\n fwd_pre, fwd_post = init_matrix(), init_matrix()\n g.forward_synapse = _synaptic_genome(fwd_pre, fwd_post)\n return g\n\n\n\n\n# Neuron transformation matrix \\mu before being fed to synapse\n# Rows describe contribution of corresponding state to all outputs\n# Columns describe of all inputs to a corresponding output\n#\n# row 0: sensory(i) ('pre')\n# row 1: feedback(i)\n# row 2: sensory(j) ('post')\n# row 3: feedback(j)\n_grad_neuron_genome = np.array(\n [[0, 0, 1, 1],\n [0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0]], dtype=blur_env.NP_FLOATING_TYPE) # pyformat: disable\n\n# ΔW(i, j, o) = Σ_{k, l} n(i, k) @ pre(i, o) @ post(o, l) @ n(j, l)\n# where n(i, k) is concatenation of input and output activations.\n_grad_hebbian_genome = HebbianTransform(\n pre=np.array([[1, 0],\n [0, 1]], dtype=blur_env.NP_FLOATING_TYPE),\n post=np.array([[0, 1],\n [1, 0]], dtype=blur_env.NP_FLOATING_TYPE))\n", "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Convert Spherical MNIST dataset to tensorflow_datasets (tfds) format.\n\nThis module converts the dataset from the format used in\n\"Spin-Weighted Spherical CNNs\", NeurIPS'20 to tensorflow_datasets\n(tfds).\n\nTo build the dataset, run the following from directory containing this file:\n$ tfds build\n\"\"\"\n\nfrom typing import Any, Dict, Iterable, Tuple\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\n\n_DESCRIPTION = \"\"\"\\\nSpherical MNIST consists of MNIST digits projected on the sphere.\n\nIn \"canonical\" mode, projections are centered at the south pole.\n\nIn \"rotated\" mode, the spherical image is randomly rotated after\nprojection. Instead of sampling one rotation per image, each sampled rotation\nis applied to 500 images (chunk_size=500 in the original code).\n\"\"\"\n\n_CITATION = \"\"\"\\\n@inproceedings{DBLP:conf/iclr/CohenGKW18,\n author = {Taco S. 
Cohen and\n Mario Geiger and\n Jonas K{\\\"{o}}hler and\n Max Welling},\n title = {Spherical CNNs},\n booktitle = {6th International Conference on Learning Representations,\n {ICLR} 2018, Vancouver, BC, Canada, April 30 - May 3, 2018,\n Conference Track Proceedings},\n publisher = {OpenReview.net},\n year = {2018},\n url = {https://openreview.net/forum?id=Hkbd5xZRb},\n timestamp = {Thu, 21 Jan 2021 17:36:45 +0100},\n biburl = {https://dblp.org/rec/conf/iclr/CohenGKW18.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n\"\"\"\n\n_HOMEPAGE = 'https://github.com/jonas-koehler/s2cnn'\n\n# This is the dataset in the format used in the \"Spin-Weighted\n# Spherical CNNs\", linked in\n# https://github.com/daniilidis-group/swscnn.\n_DOWNLOAD_URL = 'https://drive.google.com/uc?id=1h7JwdjWalXZFoXCU8Ez1rLscWih8PcZ7'\n\n_IMAGE_DIMENSIONS = (64, 64, 1)\n_VALIDATION_SET_SIZE = 10_000\n\n\nclass SphericalMnist(tfds.core.GeneratorBasedBuilder):\n \"\"\"DatasetBuilder for spherical_mnist dataset. See superclass for details.\"\"\"\n\n VERSION = tfds.core.Version('1.0.0')\n RELEASE_NOTES = {\n '1.0.0': 'Initial release.',\n }\n\n def _info(self):\n \"\"\"Returns the dataset metadata.\"\"\"\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'image': tfds.features.Image(shape=_IMAGE_DIMENSIONS),\n 'label': tfds.features.ClassLabel(num_classes=10),\n }),\n # These are returned if `as_supervised=True` in `builder.as_dataset`.\n supervised_keys=('image', 'label'),\n homepage=_HOMEPAGE,\n citation=_CITATION,\n )\n\n def _split_generators(\n self, dl_manager):\n \"\"\"Returns SplitGenerators. See superclass method for details.\"\"\"\n dataset_directory = dl_manager.download_and_extract(_DOWNLOAD_URL)\n dataset_files = {\n 'train_rotated': dataset_directory / 'rr/train0.tfrecord',\n 'validation_rotated': dataset_directory / 'rr/train0.tfrecord',\n 'test_rotated': dataset_directory / 'rr/test0.tfrecord',\n 'train_canonical': dataset_directory / 'nrnr/train0.tfrecord',\n 'validation_canonical': dataset_directory / 'nrnr/train0.tfrecord',\n 'test_canonical': dataset_directory / 'nrnr/test0.tfrecord'}\n\n return {split: self._generate_examples(filename, split)\n for split, filename in dataset_files.items()}\n\n def _generate_examples(self,\n path,\n split):\n \"\"\"Dataset generator. See superclass method for details.\"\"\"\n dataset = tf.data.TFRecordDataset(path, compression_type='GZIP')\n\n for image_id, datapoint in enumerate(dataset):\n # The validation set is obtained from train, but we must make sure the ids\n # are different since one might want to combine both during training.\n if split.startswith('train') and image_id < _VALIDATION_SET_SIZE:\n continue\n if split.startswith('validation') and image_id >= _VALIDATION_SET_SIZE:\n break\n\n parsed = tf.train.Example.FromString(datapoint.numpy())\n image = np.frombuffer(parsed.features.feature['x'].bytes_list.value[0],\n dtype=np.float32).reshape(*_IMAGE_DIMENSIONS)\n label = parsed.features.feature['y'].int64_list.value[0]\n\n yield image_id, {\n 'image': image.astype('uint8'),\n 'label': label,\n }\n" ]
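A minimal stand-alone sketch of the multiplicative factor schedule implemented by the first file in the record above (the `step_fn` built from a `'constant * linear_warmup * rsqrt_decay'`-style factor string). The hyperparameter values below are illustrative assumptions, not values taken from the source.

import jax.numpy as jnp

def make_schedule(base_learning_rate=0.5, warmup_steps=1000):
  # Compose the three factors the schedule above multiplies together.
  def step_fn(step):
    ret = 1.0
    ret *= base_learning_rate                               # 'constant'
    ret *= jnp.minimum(1.0, step / warmup_steps)            # 'linear_warmup'
    ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps))  # 'rsqrt_decay'
    return jnp.asarray(ret, dtype=jnp.float32)
  return step_fn

lr = make_schedule()
# Ramp during warmup, peak at warmup_steps, inverse-sqrt decay afterwards.
print(lr(1.0), lr(1000.0), lr(10000.0))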
[ [ "numpy.random.multivariate_normal", "numpy.matmul", "numpy.ones", "numpy.sign", "numpy.identity", "numpy.flip", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.outer" ], [ "tensorflow.squeeze", "tensorflow.shape" ], [ "numpy.random.choice", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.compat.v2.enable_v2_behavior", "tensorflow.lookup.KeyValueTensorInitializer", "tensorflow.keras.optimizers.Adam", "numpy.mean" ], [ "tensorflow.keras.layers.LayerNormalization", "tensorflow.range", "tensorflow.shape", "tensorflow.initializers.RandomNormal", "tensorflow.initializers.GlorotUniform", "tensorflow.cast", "tensorflow.expand_dims", "tensorflow.keras.layers.Bidirectional", "tensorflow.initializers.Orthogonal", "tensorflow.initializers.TruncatedNormal", "tensorflow.keras.layers.Dropout" ], [ "pandas.read_csv", "numpy.ones_like", "numpy.random.default_rng", "numpy.unique" ], [ "numpy.zeros", "numpy.save", "numpy.random.choice" ], [ "sklearn.linear_model.LogisticRegression", "sklearn.datasets.load_iris", "numpy.ones", "numpy.mean", "numpy.random.randn", "numpy.random.RandomState" ], [ "numpy.prod" ], [ "tensorflow.shape", "numpy.arange", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.io.gfile.GFile", "numpy.concatenate", "numpy.array", "numpy.zeros" ], [ "numpy.tile" ], [ "tensorflow.ensure_shape", "tensorflow.sparse.to_dense", "tensorflow.io.parse_single_example", "tensorflow.train.SequenceExample", "tensorflow.io.VarLenFeature", "tensorflow.train.Features", "tensorflow.train.FloatList", "tensorflow.train.BytesList", "tensorflow.io.parse_single_sequence_example", "numpy.array", "tensorflow.train.Int64List" ], [ "matplotlib.pyplot.imshow", "numpy.asarray", "tensorflow.io.gfile.GFile", "numpy.arange", "matplotlib.pyplot.savefig", "numpy.concatenate", "numpy.max", "numpy.alltrue", "numpy.zeros_like", "matplotlib.pyplot.close", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show" ], [ "tensorflow.compat.v1.Variable", "numpy.ones", "tensorflow.compat.v1.logging.warning", "numpy.block", "numpy.random.randn", "numpy.array", "numpy.zeros" ], [ "tensorflow.data.TFRecordDataset", "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
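Before the next record, one more sketch from the utility file above: inverting a batch of permutations with a scatter, as `compute_batch_inverse_permute` does. This is a stand-alone JAX illustration with made-up permutations.

import jax.numpy as jnp

permutation = jnp.array([[2, 0, 1],
                         [1, 2, 0]])
batch_size, num_steps = permutation.shape
arange = jnp.arange(num_steps)[None, :].repeat(batch_size, axis=0)

# Writing the original index to each destination inverts the permutation:
# inv[b, permutation[b, i]] = i
inv = jnp.full_like(permutation, -1).at[
    jnp.arange(batch_size)[:, None], permutation].set(arange)
print(inv)  # [[1 2 0], [2 0 1]]

# Composing a permutation with its inverse recovers the identity ordering.
print(inv[jnp.arange(batch_size)[:, None], permutation])  # arange rows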
clementjumel/master_thesis
[ "5a39657a212f794690e7c426f60e10ba70d50da9" ]
[ "tesa/modeling/modeling_task.py" ]
[ "from modeling.ranking_task import RankingTask\nfrom modeling.utils import format_context, format_targets\n\nfrom collections import defaultdict\nfrom numpy import asarray, split, concatenate\nfrom numpy.random import seed, shuffle\nfrom pickle import dump, load\nfrom re import findall\nfrom csv import writer\nfrom os import makedirs\nfrom os.path import exists\n\n\nclass ModelingTask:\n def __init__(self, ranking_size, batch_size, context_format, targets_format, context_max_size, k_cross_validation,\n valid_proportion, test_proportion, random_seed, save, silent, results_path,\n annotation_task_results_path):\n \"\"\"\n Initializes an instance of the base ModelingTask.\n\n Args:\n ranking_size: int, number of choices to compute for each ranking task.\n batch_size: int, number of samples in each batch.\n context_format: str, version of the context format to encode the inputs in a string.\n targets_format: str, version of the targets format to encode the target choices in strings.\n context_max_size: int, maximum number of tokens in the formatted context for classifier BART.\n k_cross_validation: int, number of folds to use in k-fold cross validation (if 0, doesn't use k-fold).\n valid_proportion: float, fraction (between 0 and 1) of the data to keep in the valid set.\n test_proportion: float, fraction (between 0 and 1) of the data to keep in the test set.\n random_seed: int, the seed to use for the random processes.\n save: bool, saving option.\n silent: bool, silent option.\n results_path: str, path to the folder to save the modeling task in.\n annotation_task_results_path: str, path to the annotation results folder.\n \"\"\"\n\n self.ranking_size = ranking_size\n self.batch_size = batch_size\n self.context_format = context_format\n self.targets_format = targets_format\n self.context_max_size = context_max_size\n self.k_cross_validation = k_cross_validation\n self.valid_proportion = valid_proportion\n self.test_proportion = test_proportion\n self.save = save\n self.silent = silent\n self.results_path = results_path\n self.annotation_task_results_path = annotation_task_results_path\n\n self.train_loader = None\n self.valid_loader = None\n self.test_loader = None\n\n seed(random_seed)\n\n # region Main methods\n\n def process_data_loaders(self):\n \"\"\" Process the data of the annotations to create the data loaders. \"\"\"\n\n self.compute_ranking_tasks()\n\n self.makedirs(self.results_path)\n self.save_pkl()\n\n def process_classification_task(self, folder_path):\n \"\"\"\n Save the task set up for classification for BART finetuning.\n\n Args:\n folder_path: str, path of the folder to create, starting from <self.results_path>.\n \"\"\"\n\n folder_path = folder_path + \"classification/\" + self.class_name() + self.suffix() + \"/\"\n\n self.makedirs(folder_path)\n self.save_classification_task(folder_path)\n\n def process_generation_task(self, folder_path):\n \"\"\"\n Save the task set up for generation (summarization) for BART finetuning.\n\n Args:\n folder_path: str, path of the folder to create, starting from <self.results_path>.\n\n \"\"\"\n\n folder_path = folder_path + \"generation/\" + self.class_name() + self.suffix() + \"/\"\n\n self.makedirs(folder_path)\n self.save_generation_task(folder_path)\n\n # endregion\n\n def compute_ranking_tasks(self):\n \"\"\" Compute the RankingTasks of the ModelingTask, and split them into the data_loaders. 
\"\"\"\n\n annotations_fname = self.annotation_task_results_path + \"annotations/annotations.pkl\"\n queries_fname = self.annotation_task_results_path + \"annotations/queries.pkl\"\n\n with open(annotations_fname, 'rb') as annotations_file, open(queries_fname, 'rb') as queries_file:\n annotations = load(annotations_file)\n queries = load(queries_file)\n\n self.print(\"Annotations and queries loaded from %sannotations/.\" % self.annotation_task_results_path)\n\n annotations = self.get_reordered_annotations(queries=queries, annotations=annotations)\n\n ranking_tasks = []\n for _, sample_annotations in annotations.items():\n sample_queries_ids = sorted(set([annotation.id_ for annotation in sample_annotations]))\n sample_queries = [queries[query_id_] for query_id_ in sample_queries_ids]\n\n labelled_answers = self.get_labelled_answers(sample_queries=sample_queries,\n sample_annotations=sample_annotations,\n queries=queries,\n annotations=annotations)\n\n ranking_tasks.append(RankingTask(queries=sample_queries,\n labelled_answers=labelled_answers,\n ranking_size=self.ranking_size,\n batch_size=self.batch_size))\n\n shuffle(ranking_tasks)\n\n n = len(ranking_tasks)\n if not n:\n raise Exception(\"No data imported.\")\n\n k = self.k_cross_validation\n n_test = round(self.test_proportion * n)\n\n if not k:\n n_valid = round(self.valid_proportion * n)\n n_train = n - n_test - n_valid\n\n assert 0 <= n_train <= n and 0 <= n_valid <= n and 0 <= n_test <= n\n train_set, valid_set, test_set = split(asarray(ranking_tasks), [n_train, n_train + n_valid])\n assert n_train == train_set.shape[0] and n_valid == valid_set.shape[0] and n_test == test_set.shape[0]\n\n self.train_loader = [ranking_task.to_loader() for ranking_task in train_set]\n self.valid_loader = [ranking_task.to_loader() for ranking_task in valid_set]\n self.test_loader = [ranking_task.to_loader() for ranking_task in test_set]\n\n train_loader, valid_loader, test_loader = self.train_loader, self.valid_loader, self.test_loader\n self.print(\"Data loaders computed:\")\n\n else:\n n_test += (n - n_test) % k\n\n test_set, cross_validation_set = split(asarray(ranking_tasks), [n_test])\n cross_validation_split = split(cross_validation_set, k)\n\n train_sets, valid_sets = [], []\n n_trains, n_valids = set(), set()\n\n for i in range(k):\n train_set = concatenate([cross_validation_split[j] for j in range(k) if j != i])\n valid_set = cross_validation_split[i]\n\n train_sets.append(train_set)\n valid_sets.append(valid_set)\n\n n_trains.add(train_set.shape[0])\n n_valids.add(valid_set.shape[0])\n\n assert len(n_trains) == 1 and len(n_valids) == 1\n\n n_train, n_valid = n_trains.pop(), n_valids.pop()\n\n self.train_loader = [[ranking_task.to_loader() for ranking_task in train_set] for train_set in train_sets]\n self.valid_loader = [[ranking_task.to_loader() for ranking_task in valid_set] for valid_set in valid_sets]\n self.test_loader = [ranking_task.to_loader() for ranking_task in test_set]\n\n train_loader, valid_loader, test_loader = self.train_loader[0], self.valid_loader[0], self.test_loader\n\n self.print(\"Data loaders for %i-fold cross validation computed:\" % k)\n\n m_train = sum([len(ranking_task) for ranking_task in train_loader])\n m_valid = sum([len(ranking_task) for ranking_task in valid_loader])\n m_test = sum([len(ranking_task) for ranking_task in test_loader])\n\n self.print(\" train: %i ranking tasks (%i%%), %i batches,\\n\" % (n_train, 100 * n_train / n, m_train),\n \" valid: %i ranking tasks (%i%%), %i batches,\\n\" % (n_valid, 100 * 
n_valid / n, m_valid),\n \" test: %i ranking tasks (%i%%), %i batches.\\n\" % (n_test, 100 * n_test / n, m_test))\n\n # region Methods get_\n\n @staticmethod\n def get_reordered_annotations(queries, annotations):\n \"\"\"\n Rework the annotations for the specificity of the Task.\n\n Args:\n queries: dict of Query, Queries of the annotations.\n annotations: dict of list of Annotations, Annotations from the MT workers.\n\n Returns:\n dict of list of Annotations, Annotations from the MT workers.\n \"\"\"\n\n return annotations\n\n def get_labelled_answers(self, sample_queries, sample_annotations, queries, annotations):\n \"\"\"\n Returns the answers and their labels as a list of tuples.\n\n Args:\n sample_queries: list of queries, queries of the Sample.\n sample_annotations: list of annotations, annotations of the Sample.\n queries: dict of Query, Queries of the annotations.\n annotations: dict of Annotations, all the annotations.\n\n Returns:\n dict, answers and their labels (0 for negative answers).\n \"\"\"\n\n return dict()\n\n @staticmethod\n def get_answers_all(annotations):\n \"\"\"\n Returns a set of all the answers in the annotations.\n\n Args:\n annotations: dict of list of Annotations, Annotations from the MT workers.\n\n Returns:\n set, preprocessed answers of the annotations.\n \"\"\"\n\n answers = set()\n\n for _, annotation_list in annotations.items():\n for annotation in annotation_list:\n for answer in annotation.preprocessed_answers:\n answers.add(answer)\n\n return answers\n\n @staticmethod\n def get_answers_same_type(annotations, sample_queries, queries):\n \"\"\"\n Returns a set of the answers in the annotations with the same entities type than sample_queries.\n\n Args:\n annotations: dict of list of Annotations, Annotations from the MT workers.\n sample_queries: list, Annotations of the Sample.\n queries: dict of Query, Queries of the annotations.\n\n Returns:\n set, preprocessed answers of the annotations.\n \"\"\"\n\n assert len(set([query.entities_type_ for query in sample_queries])) == 1\n entities_type_ = sample_queries[0].entities_type_\n\n answers = set()\n\n for _, annotation_list in annotations.items():\n for annotation in annotation_list:\n if entities_type_ == queries[annotation.id_].entities_type_:\n for answer in annotation.preprocessed_answers:\n answers.add(answer)\n\n return answers\n\n @staticmethod\n def get_answers_same_tuple(annotations, sample_queries, queries):\n \"\"\"\n Returns a set of the answers in the annotations with the same entities tuple than sample_queries.\n\n Args:\n annotations: dict of list of Annotations, Annotations from the MT workers.\n sample_queries: list, Annotations of the Sample.\n queries: dict of Query, Queries of the annotations.\n\n Returns:\n set, preprocessed answers of the annotations.\n \"\"\"\n\n assert len(set([tuple(sorted(query.entities)) for query in sample_queries])) == 1\n entities_tuple_ = tuple(sorted(sample_queries[0].entities))\n\n answers = set()\n\n for _, annotation_list in annotations.items():\n for annotation in annotation_list:\n if entities_tuple_ == tuple(sorted(queries[annotation.id_].entities)):\n for answer in annotation.preprocessed_answers:\n answers.add(answer)\n\n return answers\n\n @staticmethod\n def get_answers_sample(sample_annotations):\n \"\"\"\n Returns a set of all the answers in the sample annotations.\n\n Args:\n sample_annotations: list of annotations, annotations of the Sample.\n\n Returns:\n set, preprocessed answers of the annotations.\n \"\"\"\n\n answers = set()\n\n for 
annotation in sample_annotations:\n for answer in annotation.preprocessed_answers:\n answers.add(answer)\n\n return answers\n\n def get_classification_rows(self, ranking_task):\n \"\"\"\n Returns a list of rows [sentence1, sentence2, label] for the ranking_task.\n\n Args:\n ranking_task: list of (inputs, targets) batches.\n \"\"\"\n\n rows = []\n\n sentence1 = format_context(ranking_task,\n context_format=self.context_format,\n context_max_size=self.context_max_size)\n\n for inputs, targets in ranking_task:\n for choice, target in zip(inputs['choices'], targets):\n sentence2 = choice\n label = \"aggregation\" if target else \"not_aggregation\"\n\n rows.append([sentence1, sentence2, label])\n\n return rows\n\n def get_generation_rows(self, ranking_task):\n \"\"\"\n Return two lists, for the sources and the targets, respectively, of the ranking_task.\n\n Args:\n ranking_task: list of (inputs, targets) batches.\n \"\"\"\n\n source_rows, target_rows = [], []\n\n source = format_context(ranking_task,\n context_format=self.context_format,\n context_max_size=self.context_max_size)\n\n targets = format_targets(ranking_task, targets_format=self.targets_format)\n\n for target in targets:\n source_rows.append(source), target_rows.append(target)\n\n return source_rows, target_rows\n\n # endregion\n\n # region Other methods\n\n def class_name(self):\n \"\"\" Returns the standardized name of the class. \"\"\"\n\n return \"-\".join([word.lower() for word in findall(r'[A-Z][^A-Z]*', self.__class__.__name__)])\n\n def suffix(self):\n \"\"\" Returns the standard suffix of a file_name as a string. \"\"\"\n\n train_proportion = (\"%.2f\" % (1 - self.valid_proportion - self.test_proportion)).split(\".\")[1]\n valid_proportion = (\"%.2f\" % self.valid_proportion).split(\".\")[1]\n test_proportion = (\"%.2f\" % self.test_proportion).split(\".\")[1]\n suffix = \"_\" + \"-\".join([train_proportion, valid_proportion, test_proportion])\n\n suffix += \"_rs\" + str(self.ranking_size) if self.ranking_size is not None else \"\"\n suffix += \"_bs\" + str(self.batch_size)\n suffix += \"_cf-\" + self.context_format if self.context_format is not None else \"\"\n suffix += \"_tf-\" + self.targets_format if self.targets_format is not None else \"\"\n suffix += \"_cv\" if self.k_cross_validation else \"\"\n\n return suffix\n\n def makedirs(self, folder_name):\n \"\"\"\n If necessary, creates folders to save outputs, starting from self.results_path.\n\n Args:\n folder_name: str, path of the folder to create.\n \"\"\"\n\n if self.save:\n if not exists(folder_name):\n makedirs(folder_name)\n self.print(\"Creating folder(s) %s.\" % folder_name)\n\n def save_pkl(self):\n \"\"\" Save the Task using pickle in self.results_path. 
\"\"\"\n\n file_name = self.results_path + self.class_name() + self.suffix() + '.pkl'\n\n if self.save:\n with open(file_name, 'wb') as file:\n dump(obj=self, file=file, protocol=-1)\n\n self.print(\"Task saved at %s.\\n\" % file_name)\n\n else:\n self.print(\"Not saving %s (not in save mode).\\n\" % file_name)\n\n def save_classification_task(self, path):\n \"\"\"\n Saves the task as a classification task.\n\n Args:\n path: str, full path to the folder to save in.\n \"\"\"\n\n data_loader_names = [\"train\", \"valid\", \"test\"]\n file_names = [path + file_name + \".tsv\" for file_name in [\"train\", \"dev\", \"test\"]]\n\n for i, data_loader_name in enumerate(data_loader_names):\n data_loader = getattr(self, data_loader_name + \"_loader\")\n file_name = file_names[i]\n\n all_rows = []\n for ranking_task in data_loader:\n all_rows.extend(self.get_classification_rows(ranking_task))\n\n shuffle(all_rows)\n all_rows = [[str(j)] + row for j, row in enumerate(all_rows)]\n\n if self.save:\n with open(file_name, 'wt') as file:\n tsv_writer = writer(file, delimiter='\\t')\n tsv_writer.writerow(['index', 'sentence1', 'sentence2', 'label'])\n\n for row in all_rows:\n tsv_writer.writerow(row)\n\n self.print(\"File %s saved.\" % file_name)\n\n else:\n self.print(\"File %s not saved (not in save mode).\" % file_name)\n\n def save_generation_task(self, path):\n \"\"\"\n Saves the task as a generation task.\n\n Args:\n path: str, full path to the folder to save in.\n \"\"\"\n\n data_loader_names = [\"train\", \"valid\", \"test\"]\n file_name_pairs = [[path + file_name + suffix for suffix in [\".source\", \".target\"]]\n for file_name in [\"train\", \"val\", \"test\"]]\n\n for i, data_loader_name in enumerate(data_loader_names):\n data_loader = getattr(self, data_loader_name + \"_loader\")\n file_name_pair = file_name_pairs[i]\n\n all_source_rows, all_target_rows = [], []\n for ranking_task in data_loader:\n source_rows, target_rows = self.get_generation_rows(ranking_task)\n all_source_rows.extend(source_rows), all_target_rows.extend(target_rows)\n\n all_rows = [(source_row, target_row) for source_row, target_row in zip(all_source_rows, all_target_rows)]\n shuffle(all_rows)\n all_source_rows = [source_row + '\\n' for source_row, _ in all_rows]\n all_target_rows = [target_row + '\\n' for _, target_row in all_rows]\n\n if self.save:\n with open(file_name_pair[0], 'wt') as source_file, open(file_name_pair[1], 'wt') as target_file:\n source_file.writelines(all_source_rows), target_file.writelines(all_target_rows)\n\n self.print(\"File %s and %s saved.\" % (file_name_pair[0], file_name_pair[1]))\n\n else:\n self.print(\"File %s and %s not saved (not in save mode).\" % (file_name_pair[0], file_name_pair[1]))\n\n def print(self, *args):\n \"\"\" Prints only if not in silent mode. 
\"\"\"\n\n if not self.silent:\n print(*args)\n\n # endregion\n\n\nclass ContextFree(ModelingTask):\n @staticmethod\n def get_reordered_annotations(queries, annotations):\n new_annotations = defaultdict(list)\n\n for id_, annotation_list in annotations.items():\n for annotation in annotation_list:\n entities_tuple = tuple(sorted(queries[annotation.id_].entities))\n new_annotations[entities_tuple].append(annotation)\n\n return new_annotations\n\n def get_labelled_answers(self, sample_queries, sample_annotations, queries, annotations):\n answers = self.get_answers_all(annotations=annotations)\n labelled_answers = {answer: 0 for answer in answers}\n\n answers = self.get_answers_sample(sample_annotations=sample_annotations)\n for answer in answers:\n labelled_answers[answer] = 1\n\n return labelled_answers\n\n\nclass ContextFreeSameType(ContextFree):\n def get_labelled_answers(self, sample_queries, sample_annotations, queries, annotations):\n answers = self.get_answers_same_type(annotations=annotations, sample_queries=sample_queries, queries=queries)\n labelled_answers = {answer: 0 for answer in answers}\n\n answers = self.get_answers_sample(sample_annotations=sample_annotations)\n for answer in answers:\n labelled_answers[answer] = 1\n\n return labelled_answers\n\n\nclass ContextDependent(ModelingTask):\n def get_labelled_answers(self, sample_queries, sample_annotations, queries, annotations):\n answers = self.get_answers_all(annotations=annotations)\n labelled_answers = {answer: 0 for answer in answers}\n\n answers = self.get_answers_sample(sample_annotations=sample_annotations)\n for answer in answers:\n labelled_answers[answer] = 1\n\n return labelled_answers\n\n\nclass ContextDependentSameType(ModelingTask):\n def get_labelled_answers(self, sample_queries, sample_annotations, queries, annotations):\n answers = self.get_answers_same_type(annotations=annotations, sample_queries=sample_queries, queries=queries)\n labelled_answers = {answer: 0 for answer in answers}\n\n answers = self.get_answers_sample(sample_annotations=sample_annotations)\n for answer in answers:\n labelled_answers[answer] = 1\n\n return labelled_answers\n\n\nclass FullHybrid(ModelingTask):\n def get_labelled_answers(self, sample_queries, sample_annotations, queries, annotations):\n answers = self.get_answers_all(annotations=annotations)\n labelled_answers = {answer: 0 for answer in answers}\n\n answers = self.get_answers_same_type(annotations=annotations, sample_queries=sample_queries, queries=queries)\n for answer in answers:\n labelled_answers[answer] = 1\n\n answers = self.get_answers_same_tuple(annotations=annotations, sample_queries=sample_queries, queries=queries)\n for answer in answers:\n labelled_answers[answer] = 2\n\n answers = self.get_answers_sample(sample_annotations=sample_annotations)\n for answer in answers:\n labelled_answers[answer] = 3\n\n return labelled_answers\n\n\nclass Hybrid(ModelingTask):\n def get_labelled_answers(self, sample_queries, sample_annotations, queries, annotations):\n answers = self.get_answers_all(annotations=annotations)\n labelled_answers = {answer: 0 for answer in answers}\n\n answers = self.get_answers_same_tuple(annotations=annotations, sample_queries=sample_queries, queries=queries)\n for answer in answers:\n labelled_answers[answer] = 1\n\n answers = self.get_answers_sample(sample_annotations=sample_annotations)\n for answer in answers:\n labelled_answers[answer] = 2\n\n return labelled_answers\n\n\nclass HybridSameType(ModelingTask):\n def get_labelled_answers(self, 
sample_queries, sample_annotations, queries, annotations):\n answers = self.get_answers_same_type(annotations=annotations, sample_queries=sample_queries, queries=queries)\n labelled_answers = {answer: 0 for answer in answers}\n\n answers = self.get_answers_same_tuple(annotations=annotations, sample_queries=sample_queries, queries=queries)\n for answer in answers:\n labelled_answers[answer] = 1\n\n answers = self.get_answers_sample(sample_annotations=sample_annotations)\n for answer in answers:\n labelled_answers[answer] = 2\n\n return labelled_answers\n" ]
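The `compute_ranking_tasks` method above shuffles the tasks and splits them by proportion with `numpy.split`. A minimal sketch of that split on toy data (the proportions and task stand-ins below are illustrative assumptions):

import numpy as np

tasks = np.arange(20)          # stand-ins for RankingTask objects
np.random.seed(0)
np.random.shuffle(tasks)

valid_proportion, test_proportion = 0.25, 0.25
n = len(tasks)
n_test = round(test_proportion * n)
n_valid = round(valid_proportion * n)
n_train = n - n_test - n_valid

# Split points are cumulative, as in compute_ranking_tasks.
train, valid, test = np.split(tasks, [n_train, n_train + n_valid])
assert (len(train), len(valid), len(test)) == (n_train, n_valid, n_test)
print(len(train), len(valid), len(test))   # 10 5 5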
[ [ "numpy.asarray", "numpy.random.shuffle", "numpy.random.seed", "numpy.split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
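The `suffix()` method in the record above encodes the split proportions and hyperparameters into a file-name suffix. A stand-alone reproduction of that convention, with illustrative argument values:

def suffix(valid_proportion, test_proportion, ranking_size, batch_size,
           context_format=None, targets_format=None, k_cross_validation=0):
  # Proportions become two-digit percentages joined by dashes.
  train = ("%.2f" % (1 - valid_proportion - test_proportion)).split(".")[1]
  valid = ("%.2f" % valid_proportion).split(".")[1]
  test = ("%.2f" % test_proportion).split(".")[1]
  s = "_" + "-".join([train, valid, test])
  s += "_rs" + str(ranking_size) if ranking_size is not None else ""
  s += "_bs" + str(batch_size)
  s += "_cf-" + context_format if context_format is not None else ""
  s += "_tf-" + targets_format if targets_format is not None else ""
  s += "_cv" if k_cross_validation else ""
  return s

print(suffix(0.25, 0.25, 24, 4, "v0"))   # '_50-25-25_rs24_bs4_cf-v0'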
Jerryxiaoyu/maml_rl_v2
[ "fda134dcbd87ef3e91f339ea2f836f28ec5f7784", "6091f996ff1be8e80d80331e510087868461b8e6" ]
[ "sandbox/rocky/tf/policies/maml_minimal_categorical_mlp_policy.py", "maml_examples/test_maml_cellrobot.py" ]
[ "from contextlib import contextmanager\nimport itertools\nimport numpy as np\nimport sandbox.rocky.tf.core.layers as L\nfrom rllab.core.serializable import Serializable\nfrom sandbox.rocky.tf.distributions.categorical import Categorical\nfrom sandbox.rocky.tf.policies.base import StochasticPolicy\nfrom rllab.misc import ext\nfrom sandbox.rocky.tf.misc import tensor_utils\nfrom rllab.misc.overrides import overrides\nfrom sandbox.rocky.tf.spaces.discrete import Discrete\nfrom rllab.misc import logger\nfrom rllab.misc.tensor_utils import flatten_tensors, unflatten_tensors\nimport tensorflow as tf\nfrom sandbox.rocky.tf.core.utils import make_input, _create_param, add_param, make_dense_layer, forward_dense_layer, make_param_layer, forward_param_layer\n\ntf_layers = None\nload_params = True\n\n@contextmanager\ndef suppress_params_loading():\n global load_params\n load_params = False\n yield\n load_params = True\n\n\nclass MAMLCategoricalMLPPolicy(StochasticPolicy, Serializable):\n def __init__(\n self,\n name,\n env_spec,\n hidden_sizes=(32, 32),\n hidden_nonlinearity=tf.nn.tanh,\n prob_network=None,\n grad_step_size=1.0,\n ):\n \"\"\"\n :param env_spec: A spec for the mdp.\n :param hidden_sizes: list of sizes for the fully connected hidden layers\n :param hidden_nonlinearity: nonlinearity used for each hidden layer\n :param prob_network: manually specified network for this policy, other network params\n are ignored\n :param grad_step_size: the step size taken in the learner's gradient update, sample uniformly if it is a range e.g. [0.1,1]\n :return:\n \"\"\"\n Serializable.quick_init(self, locals())\n\n assert isinstance(env_spec.action_space, Discrete)\n obs_dim = env_spec.observation_space.flat_dim\n self.action_dim = env_spec.action_space.n\n self.n_hidden = len(hidden_sizes)\n self.hidden_nonlinearity = hidden_nonlinearity\n self.input_shape = (None, obs_dim,)\n self.step_size = grad_step_size\n\n if prob_network is None:\n self.all_params = self.create_MLP(\n output_dim=self.action_dim,\n hidden_sizes=hidden_sizes,\n name=\"prob_network\",\n )\n self._l_obs, self._l_prob = self.forward_MLP('prob_network', self.all_params,\n n_hidden=len(hidden_sizes), input_shape=(obs_dim,),\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=tf.nn.softmax, reuse=None)\n\n # if you want to input your own tensor.\n self._forward_out = lambda x, params, is_train: self.forward_MLP('prob_network', params,\n n_hidden=len(hidden_sizes), hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=tf.nn.softmax, input_tensor=x, is_training=is_train)[1]\n\n\n self._init_f_prob = tensor_utils.compile_function(\n [self._l_obs],\n [self._l_prob])\n self._cur_f_prob = self._init_f_prob\n\n self._dist = Categorical(self.action_dim)\n self._cached_params = {}\n super(MAMLCategoricalMLPPolicy, self).__init__(env_spec)\n\n\n @property\n def vectorized(self):\n return True\n\n @overrides\n def dist_info_sym(self, obs_var, state_info_vars=None, all_params=None, is_training=True):\n # sym means symbolic here.\n return_params=True\n if all_params is None:\n return_params=False\n all_params = self.all_params\n output = self._forward_out(tf.cast(obs_var,tf.float32), all_params, is_training)\n if return_params:\n return dict(prob=output), all_params\n else:\n return dict(prob=output)\n\n def updated_dist_info_sym(self, task_id, surr_obj, new_obs_var, params_dict=None, is_training=True):\n \"\"\" symbolically create MAML graph, for the meta-optimization, only called at the beginning of meta-training.\n Called 
more than once if you want to do more than one grad step.\n \"\"\"\n old_params_dict = params_dict\n step_size = self.step_size\n\n if old_params_dict is None:\n old_params_dict = self.all_params\n param_keys = self.all_params.keys()\n gradients = dict(zip(param_keys, tf.gradients(surr_obj, [old_params_dict[key] for key in param_keys])))\n params_dict = dict(zip(param_keys, [old_params_dict[key] - step_size*gradients[key] for key in param_keys]))\n\n return self.dist_info_sym(new_obs_var, all_params=params_dict, is_training=is_training)\n\n @overrides\n def dist_info(self, obs, state_infos=None):\n return dict(prob=self._cur_f_prob(obs))\n\n def switch_to_init_dist(self):\n # switch cur policy distribution to pre-update policy\n self._cur_f_prob = self._init_f_prob\n self.all_param_vals = None\n\n def set_init_surr_obj(self, input_list, surr_objs_tensor):\n \"\"\" Set the surrogate objectives used to update the policy\n \"\"\"\n self.input_list_for_grad = input_list\n self.surr_objs = surr_objs_tensor\n\n def compute_updated_dists(self, samples):\n \"\"\" Compute fast gradients once and pull them out of tensorflow for sampling.\n \"\"\"\n num_tasks = len(samples)\n param_keys = self.all_params.keys()\n\n sess = tf.get_default_session()\n\n obs_list, action_list, adv_list = [], [], []\n for i in range(num_tasks):\n inputs = ext.extract(samples[i],\n 'observations', 'actions', 'advantages')\n obs_list.append(inputs[0])\n action_list.append(inputs[1])\n adv_list.append(inputs[2])\n\n inputs = obs_list + action_list + adv_list\n\n # To do a second update, replace self.all_params below with the params that were used to collect the policy.\n init_param_values = None\n if self.all_param_vals is not None:\n init_param_values = self.get_variable_values(self.all_params)\n\n step_size = self.step_size\n for i in range(num_tasks):\n if self.all_param_vals is not None:\n self.assign_params(self.all_params, self.all_param_vals[i])\n\n if 'all_fast_params_tensor' not in dir(self):\n # make computation graph once\n self.all_fast_params_tensor = []\n for i in range(num_tasks):\n gradients = dict(zip(param_keys, tf.gradients(self.surr_objs[i], [self.all_params[key] for key in param_keys])))\n fast_params_tensor = dict(zip(param_keys, [self.all_params[key] - step_size*gradients[key] for key in param_keys]))\n self.all_fast_params_tensor.append(fast_params_tensor)\n\n # pull new param vals out of tensorflow, so gradient computation only done once\n self.all_param_vals = sess.run(self.all_fast_params_tensor, feed_dict=dict(list(zip(self.input_list_for_grad, inputs))))\n\n if init_param_values is not None:\n self.assign_params(self.all_params, init_param_values)\n\n outputs = []\n inputs = tf.split(0, num_tasks, self._l_obs)\n for i in range(num_tasks):\n # TODO - use a placeholder to feed in the params, so that we don't have to recompile every time.\n task_inp = inputs[i]\n info, _ = self.dist_info_sym(task_inp, dict(), all_params=self.all_param_vals[i],\n is_training=False)\n\n outputs.append([info['prob']])\n\n self._cur_f_prob = tensor_utils.compile_function(\n inputs = [self._l_obs],\n outputs = outputs,\n )\n\n def get_variable_values(self, tensor_dict):\n sess = tf.get_default_session()\n result = sess.run(tensor_dict)\n return result\n\n def assign_params(self, tensor_dict, param_values):\n if 'assign_placeholders' not in dir(self):\n # make computation graph, if it doesn't exist; then cache it for future use.\n self.assign_placeholders = {}\n self.assign_ops = {}\n for key in tensor_dict.keys():\n 
self.assign_placeholders[key] = tf.placeholder(tf.float32)\n self.assign_ops[key] = tf.assign(tensor_dict[key], self.assign_placeholders[key])\n\n feed_dict = {self.assign_placeholders[key]:param_values[key] for key in tensor_dict.keys()}\n sess = tf.get_default_session()\n sess.run(self.assign_ops, feed_dict)\n\n\n\n\n\n # The return value is a pair. The first item is a matrix (N, A), where each\n # entry corresponds to the action value taken. The second item is a vector\n # of length N, where each entry is the density value for that action, under\n # the current policy\n @overrides\n def get_action(self, observation):\n flat_obs = self.observation_space.flatten(observation)\n prob = self._cur_f_prob([flat_obs])[0]\n action = self.action_space.weighted_sample(prob)\n return action, dict(prob=prob)\n\n def get_actions(self, observations):\n flat_obs = self.observation_space.flatten_n(observations)\n result = self._cur_f_prob(flat_obs)\n if len(result) == 1:\n probs = result[0]\n else:\n #import pdb; pdb.set_trace()\n # TODO - I think this is correct but not sure.\n probs = np.array(result)[:,0,0,:]\n actions = list(map(self.action_space.weighted_sample, probs))\n return actions, dict(prob=probs)\n\n @property\n def distribution(self):\n return self._dist\n\n\n # This makes all of the parameters.\n def create_MLP(self, name, output_dim, hidden_sizes,\n hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer,\n output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer,\n weight_normalization=False,\n ):\n input_shape = self.input_shape\n cur_shape = input_shape\n with tf.variable_scope(name):\n all_params = {}\n for idx, hidden_size in enumerate(hidden_sizes):\n W, b, cur_shape = make_dense_layer(\n cur_shape,\n num_units=hidden_size,\n name=\"hidden_%d\" % idx,\n W=hidden_W_init,\n b=hidden_b_init,\n weight_norm=weight_normalization,\n )\n all_params['W' + str(idx)] = W\n all_params['b' + str(idx)] = b\n W, b, _ = make_dense_layer(\n cur_shape,\n num_units=output_dim,\n name='output',\n W=output_W_init,\n b=output_b_init,\n weight_norm=weight_normalization,\n )\n all_params['W' + str(len(hidden_sizes))] = W\n all_params['b'+str(len(hidden_sizes))] = b\n\n return all_params\n\n def forward_MLP(self, name, all_params, input_tensor=None, input_shape=None, n_hidden=-1,\n hidden_nonlinearity=tf.identity, output_nonlinearity=tf.identity,\n batch_normalization=False, reuse=True, is_training=False):\n # is_training and reuse are for batch norm, irrelevant if batch_norm set to False\n # set reuse to False if the first time this func is called.\n with tf.variable_scope(name):\n if input_tensor is None:\n assert input_shape is not None\n l_in = make_input(shape=(None,)+input_shape, input_var=None, name='input')\n else:\n l_in = input_tensor\n l_hid = l_in\n for idx in range(n_hidden):\n l_hid = forward_dense_layer(l_hid, all_params['W'+str(idx)], all_params['b'+str(idx)],\n batch_norm=batch_normalization,\n nonlinearity=hidden_nonlinearity,\n scope=str(idx), reuse=reuse,\n is_training=is_training\n )\n output = forward_dense_layer(l_hid, all_params['W'+str(n_hidden)], all_params['b'+str(n_hidden)],\n batch_norm=False, nonlinearity=output_nonlinearity,\n )\n return l_in, output\n\n\n def get_params_internal(self, all_params=False, **tags):\n if tags.get('trainable', False):\n params = tf.trainable_variables()\n else:\n params = tf.all_variables()\n\n # TODO - this is hacky...\n params = [p for p in params if p.name.startswith('prob_network')]\n params = [p 
for p in params if 'Adam' not in p.name]\n\n return params\n\n def log_diagnostics(self, paths, prefix=''):\n pass\n\n", "from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.envs.mujoco.ant_env_rand import AntEnvRand\nfrom rllab.envs.mujoco.ant_env_oracle import AntEnvOracle\nfrom rllab.envs.normalized_env import normalize\nfrom rllab.misc.instrument import stub, run_experiment_lite\nfrom sandbox.rocky.tf.algos.vpg import VPG\nfrom sandbox.rocky.tf.algos.trpo import TRPO\nfrom sandbox.rocky.tf.policies.minimal_gauss_mlp_policy import GaussianMLPPolicy\nfrom sandbox.rocky.tf.envs.base import TfEnv\n\nimport csv\nimport joblib\nimport numpy as np\nimport os\nimport pickle\nimport tensorflow as tf\nfrom math import pi\n\nfrom rllab.envs.mujoco.cellrobot_rand_direc_env import CellRobotRandDirectEnv\nstub(globals())\n\nEXP_root_dir = '/home/drl/PycharmProjects/maml_rl-master/data/AWS_data/Cellrobot-trpo-mamldirec-500-EXP2/maml1_fbs20_mbs20_flr_0.1_mlr0.01/'\n\n\nfile1 = 'itr_50.pkl'\nfile2 = 'data/s3/posticml-trpo-maml-ant200/randenv100traj/itr_575.pkl'\nfile3 = 'data/s3/posticml-trpo-maml-ant200/oracleenv100traj/itr_550.pkl'\n\nfile1 = os.path.join(EXP_root_dir, file1)\n\nmake_video = True # generate results if False, run code to make video if True\nrun_id = 2 # for if you want to run this script in multiple terminals (need to have different ids for each run)\n\nif not make_video:\n test_num_goals = 40\n np.random.seed(1)\n goals = np.random.uniform(-pi/3, pi/3, size=(test_num_goals, ))\nelse:\n np.random.seed(1)\n test_num_goals = 1\n #goals = np.random.uniform(-pi/3, pi/3, size=(test_num_goals, ))\n goals = [-pi/4.0]\n file_ext = 'mp4' # can be mp4 or gif\nprint(goals)\n\n\ngen_name = 'Cellrobot_results_'\nnames = ['maml']#,'pretrain','random', 'oracle'\nexp_names = [gen_name + name for name in names]\n\nstep_sizes = [0.1, None, None, None]\ninitial_params_files = [file1, None, None, None]\n\n\nall_avg_returns = []\nfor step_i, initial_params_file in zip(range(len(step_sizes)), initial_params_files):\n avg_returns = []\n\n for goal in goals:\n print('goal =', goal/pi*180)\n if initial_params_file is not None and 'oracle' in initial_params_file:\n env = normalize(AntEnvOracle())\n n_itr = 1\n else:\n env = normalize(CellRobotRandDirectEnv())\n n_itr = 5\n env = TfEnv(env)\n policy = GaussianMLPPolicy( # random policy\n name='policy',\n env_spec=env.spec,\n hidden_nonlinearity=tf.nn.relu,\n output_nonlinearity=tf.nn.sigmoid,\n hidden_sizes=(64, 64),\n )\n \n\n if initial_params_file is not None:\n policy = None\n\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n algo = VPG(\n env=env,\n policy=policy,\n load_policy=initial_params_file,\n baseline=baseline,\n batch_size=400, # 2x\n max_path_length=500,\n n_itr=n_itr,\n reset_arg=goal,\n optimizer_args={'init_learning_rate': step_sizes[step_i], 'tf_optimizer_args': {'learning_rate': 0.5*step_sizes[step_i]}, 'tf_optimizer_cls': tf.train.GradientDescentOptimizer}\n )\n\n test_dir_name = 'test' + str(run_id)+'_'+str(step_i)+'_'+str(goal)\n run_experiment_lite(\n algo.train(),\n # Number of parallel workers for sampling\n n_parallel=1,\n # Only keep the snapshot parameters for the last iteration\n snapshot_mode=\"all\",\n # Specifies the seed for the experiment. 
If this is not provided, a random seed\n # will be used\n seed=1,\n exp_prefix='CellRobot-ICRA-test',\n exp_name=test_dir_name,\n #plot=True,\n )\n\n\n\n # get return from the experiment\n with open(os.path.join('../data/local/CellRobot-ICRA-test', test_dir_name+'/progress.csv'), 'r') as f:\n reader = csv.reader(f, delimiter=',')\n i = 0\n row = None\n returns = []\n for row in reader:\n i += 1\n if i == 1:\n ret_idx = row.index('AverageReturn')\n else:\n returns.append(float(row[ret_idx]))\n avg_returns.append(returns)\n\n if make_video:\n data_loc = os.path.join('../data/local/CellRobot-ICRA-test', test_dir_name+'/')\n save_loc = os.path.join(EXP_root_dir, 'monitor/')\n if not os.path.exists(save_loc):\n os.mkdir(save_loc)\n param_file = initial_params_file\n save_prefix = save_loc + names[step_i] + '_goal_' + str(goal)\n video_filename = save_prefix + 'prestep.' + file_ext\n os.system('python ../scripts/sim_policy.py ' + param_file + ' --speedup=4 --max_path_length=300 --video_filename='+video_filename)\n for itr_i in range(3):\n param_file = data_loc + 'itr_' + str(itr_i) + '.pkl'\n video_filename = save_prefix + 'step_'+str(itr_i)+'.'+file_ext\n os.system('python ../scripts/sim_policy.py ' + param_file + ' --speedup=4 --max_path_length=300 --video_filename='+video_filename)\n\n\n\n\n all_avg_returns.append(avg_returns)\n\n\n\n task_avg_returns = []\n for itr in range(len(all_avg_returns[step_i][0])):\n task_avg_returns.append([ret[itr] for ret in all_avg_returns[step_i]])\n\n if not make_video:\n results = {'task_avg_returns': task_avg_returns}\n with open(exp_names[step_i] + '.pkl', 'wb') as f:\n pickle.dump(results, f)\n\n\nfor i in range(len(initial_params_files)):\n returns = []\n std_returns = []\n # use the final-iteration return explicitly instead of relying on the leaked loop variable itr\n returns.append(np.mean([ret[-1] for ret in all_avg_returns[i]]))\n std_returns.append(np.std([ret[-1] for ret in all_avg_returns[i]]))\n print(initial_params_files[i])\n print(returns)\n print(std_returns)\n\n\n\n" ]
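For context on the assign_params caching pattern in the policy code above: building one tf.assign op per variable with a matching placeholder, then reusing those ops on every reset, keeps the graph from growing with each call. A minimal sketch, assuming TensorFlow 1.x (the Session/placeholder API used throughout this code); the variable shape and fed values are hypothetical stand-ins:

import tensorflow as tf

# One variable standing in for an entry of tensor_dict (hypothetical).
v = tf.Variable(tf.zeros([3]))
# Placeholder + assign op are built once and cached, mirroring
# self.assign_placeholders / self.assign_ops above.
ph = tf.placeholder(tf.float32, shape=[3])
assign_op = tf.assign(v, ph)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Every later parameter reset reuses the cached op with a new feed.
    sess.run(assign_op, feed_dict={ph: [1.0, 2.0, 3.0]})
    print(sess.run(v))  # [1. 2. 3.]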
[ [ "tensorflow.get_default_session", "tensorflow.all_variables", "tensorflow.cast", "tensorflow.gradients", "tensorflow.placeholder", "tensorflow.trainable_variables", "tensorflow.assign", "tensorflow.variable_scope", "tensorflow.split", "numpy.array" ], [ "numpy.random.uniform", "numpy.std", "numpy.mean", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aqui-tna/darts-UNIQ
[ "293a27b104bc0f53c6093829d1184686b788fba9", "293a27b104bc0f53c6093829d1184686b788fba9", "293a27b104bc0f53c6093829d1184686b788fba9" ]
[ "cnn/models/BaseNet.py", "cnn/MixedLayer.py", "cnn/gradEstimators/random_path.py" ]
[ "from abc import abstractmethod\nfrom pandas import DataFrame\nfrom os.path import exists\nfrom numpy import argmax\n\nfrom torch.nn import Module, Conv2d\nfrom torch.nn import functional as F\nfrom torch import load as loadModel\n\nfrom cnn.MixedFilter import MixedConvBNWithReLU as MixedConvWithReLU\nfrom cnn.uniq_loss import UniqLoss\nimport cnn.statistics\nfrom cnn.HtmlLogger import HtmlLogger\n\nfrom UNIQ.quantize import check_quantization\n\n\n# from torch import save as saveModel\n# from torch import ones, zeros, no_grad, cat, tensor\n# from torch.nn import CrossEntropyLoss\n\n\n# preForward hook for training weights phase.\n# when we train weights, we need to quantize staged layers before forward, and remove quantization after forward in order to update by gradient\n# same for noise, we need to add noise before forward, and remove noise after forward, in order to update by gradient\ndef preForward(self, input):\n deviceID = input[0].device.index\n assert (deviceID not in self.hookDevices)\n self.hookDevices.append(deviceID)\n\n assert (self.training is True)\n # update layers list to new DataParallel layers copies\n self.layersList = self.buildLayersList()\n # quantize staged layers\n self.restoreQuantizationForStagedLayers()\n\n # add noise to next to be staged layer\n if self.nLayersQuantCompleted < self.nLayers():\n layer = self.layersList[self.nLayersQuantCompleted]\n assert (layer.added_noise is True)\n for op in layer.opsList():\n assert (op.noise is True)\n op.add_noise()\n\n\ndef postForward(self, input, __):\n deviceID = input[0].device.index\n assert (deviceID in self.hookDevices)\n self.hookDevices.remove(deviceID)\n\n assert (self.training is True)\n # remove quantization from staged layers\n self.removeQuantizationFromStagedLayers()\n\n # remove noise from next to be staged layer\n if self.nLayersQuantCompleted < self.nLayers():\n layer = self.layersList[self.nLayersQuantCompleted]\n assert (layer.added_noise is True)\n for op in layer.opsList():\n assert (op.noise is True)\n op.restore_state()\n\n\nclass BaseNet(Module):\n # init bitwidth of input to model\n modelInputBitwidth = 8\n modelInputnFeatureMaps = 3\n\n # counts the entire model bops in discrete mode\n def countBopsDiscrete(self):\n totalBops = 0\n # input_bitwidth is a list of bitwidth per feature map\n input_bitwidth = [self.modelInputBitwidth] * self.modelInputnFeatureMaps\n\n for layer in self.layers:\n totalBops += layer.getBops(input_bitwidth)\n input_bitwidth = layer.getCurrentOutputBitwidth()\n\n totalBops /= 1E9\n return totalBops\n\n def countBops(self):\n # wrapper is needed because countBopsFuncs is defined outside __init__()\n return self.countBopsFunc(self)\n\n countBopsFuncs = dict(discrete=countBopsDiscrete)\n\n alphasCsvFileName = 'alphas.csv'\n\n def buildLayersList(self):\n layersList = []\n for layer in self.layers:\n layersList.extend(layer.getLayers())\n\n return layersList\n\n def __init__(self, args, initLayersParams):\n super(BaseNet, self).__init__()\n # init save folder\n saveFolder = args.save\n # init layers\n self.layers = self.initLayers(initLayersParams)\n # build mixture layers list\n self.layersList = self.buildLayersList()\n # set bops counter function\n self.countBopsFunc = self.countBopsFuncs[args.bopsCounter]\n # init statistics\n self.stats = cnn.statistics.Statistics(self.layersList, saveFolder)\n # collect learnable params (weights)\n self.learnable_params = [param for param in self.parameters() if param.requires_grad]\n # init learnable alphas\n 
self.learnable_alphas = self.getLearnableAlphas()\n # init number of layers we have completed its quantization\n self.nLayersQuantCompleted = 0\n # calc init baseline bops\n baselineBops = self.calcBaselineBops()\n args.baselineBops = baselineBops[args.baselineBits[0]]\n # plot baselines bops\n self.stats.addBaselineBopsData(args, baselineBops)\n # init criterion\n self._criterion = UniqLoss(args)\n self._criterion = self._criterion.cuda()\n\n # init hooks handlers list\n self.hooksList = []\n # set hook flag, to make sure hook happens\n # # turn it on on pre-forward hook, turn it off on post-forward hook\n # self.hookFlag = False\n self.hookDevices = []\n\n self.printToFile(saveFolder)\n # init layers permutation list\n self.layersPerm = []\n # init number of permutations counter\n self.nPerms = 1\n for layer in self.layersList:\n # add layer numOps range to permutation list\n self.layersPerm.append(list(range(len(layer.alphas))))\n self.nPerms *= len(layer.alphas)\n\n # init alphas DataFrame\n self.alphas_df = None\n self.__initAlphasDataFrame(saveFolder)\n\n @abstractmethod\n def initLayers(self, params):\n raise NotImplementedError('subclasses must override initLayers()!')\n\n @abstractmethod\n def forward(self, x):\n raise NotImplementedError('subclasses must override forward()!')\n\n @abstractmethod\n def switch_stage(self, logger=None):\n raise NotImplementedError('subclasses must override switch_stage()!')\n\n @abstractmethod\n def loadUNIQPreTrained(self, checkpoint):\n raise NotImplementedError('subclasses must override loadUNIQPreTrained()!')\n\n @abstractmethod\n def loadSingleOpPreTrained(self, checkpoint):\n raise NotImplementedError('subclasses must override loadSingleOpPreTrained()!')\n\n @abstractmethod\n def turnOnWeights(self):\n raise NotImplementedError('subclasses must override turnOnWeights()!')\n\n def nLayers(self):\n return len(self.layersList)\n\n def getLearnableAlphas(self):\n return [layer.alphas for layer in self.layersList if layer.alphas.requires_grad is True]\n\n def updateLearnableAlphas(self):\n self.learnable_alphas = self.getLearnableAlphas()\n\n def arch_parameters(self):\n return self.learnable_alphas\n\n # layer_basis is a function of filter quantization,\n # therefore we have to update its value bases on weight_max_int, which is a function of weights bitwidth\n def __updateStatistics(self, loggerFuncs=[]):\n for layer in self.layersList:\n for op in layer.opsList():\n conv = op.getModule(Conv2d)\n # update layer_basis value based on weights bitwidth\n conv.layer_basis = conv.initial_clamp_value / op.quantize.weight_max_int\n\n for f in loggerFuncs:\n f('Updated layer_basis according to bitwidth (weight_max_int)')\n\n def loadPreTrained(self, path, logger, gpu):\n # init bool flag whether we loaded ops in the same layer with equal or different weights\n loadOpsWithDifferentWeights = False\n loggerRows = []\n loadSuccess = None\n if path is not None:\n if exists(path):\n # load checkpoint\n checkpoint = loadModel(path, map_location=lambda storage, loc: storage.cuda(gpu))\n assert (checkpoint['updated_statistics'] is True)\n chckpntStateDict = checkpoint['state_dict']\n # load model state dict keys\n modelStateDictKeys = set(self.state_dict().keys())\n # compare dictionaries\n dictDiff = modelStateDictKeys.symmetric_difference(set(chckpntStateDict.keys()))\n # update flag value\n loadOpsWithDifferentWeights = len(dictDiff) == 0\n # decide how to load checkpoint state dict\n if loadOpsWithDifferentWeights:\n # load directly, keys are the same\n 
self.load_state_dict(chckpntStateDict)\n else:\n # use some function to map keys\n loadFuncs = [self.loadUNIQPreTrained, self.loadSingleOpPreTrained]\n for func in loadFuncs:\n loadSuccess = func(chckpntStateDict)\n if loadSuccess is not False:\n # update statistics if we don't load ops with different statistics\n self.__updateStatistics(loggerFuncs=[lambda msg: loggerRows.append(['Statistics update', msg])])\n break\n\n if loadSuccess is not False:\n # add info rows about checkpoint\n loggerRows.append(['Path', '{}'.format(path)])\n loggerRows.append(['Validation accuracy', '{:.5f}'.format(checkpoint['best_prec1'])])\n loggerRows.append(['checkpoint[updated_statistics]', checkpoint['updated_statistics']])\n # check if model includes stats\n modelIncludesStats = False\n for key in chckpntStateDict.keys():\n if key.endswith('.layer_basis'):\n modelIncludesStats = True\n break\n loggerRows.append(['Includes stats', '{}'.format(modelIncludesStats)])\n\n # # =============================== Load alphas & plots ==========================================================\n # self.load_alphas_state(checkpoint['alphas'])\n # loggerRows.append(['Loaded alphas distribution', 'True'])\n # p = '/home/yochaiz/F-BANNAS/cnn/results/[(2, 2), (2, 4), (3, 3), (8, 8)],[1.0],[cifar10],[20181113-212929]/plots.data'\n # stats = self.stats\n # stats.plotsData = loadModel(p)\n # stats.batchLabels = stats.plotsData['alphas_entropy over epochs']['x']\n #\n # stats.containers[stats.lossAvgKey][0] = stats.plotsData['loss_avg over epochs']['data'][0]['y']\n # stats.containers[stats.crossEntropyLossAvgKey][0] = stats.plotsData['cross_entropy_loss_avg over epochs']['data'][0]['y']\n # stats.containers[stats.bopsLossAvgKey][0] = stats.plotsData['bops_loss_avg over epochs']['data'][0]['y']\n # stats.containers[stats.lossVarianceKey][0] = stats.plotsData['loss_variance over epochs']['data'][0]['y']\n #\n # for i in range(len(stats.containers[stats.entropyKey])):\n # stats.containers[stats.entropyKey][i] = stats.plotsData['alphas_entropy over epochs']['data'][i]['y']\n #\n # for i in range(len(stats.containers[stats.alphaDistributionKey])):\n # key = 'alphas_distribution --layer:[{}]-- over epochs'.format(i)\n # for j in range(len(stats.containers[stats.alphaDistributionKey][i])):\n # stats.containers[stats.alphaDistributionKey][i][j] = stats.plotsData[key]['data'][j]['y']\n #\n # loggerRows.append(['Loaded plots data', p])\n # # =============================================================================================================\n else:\n loggerRows.append(['Path', 'Failed to load pre-trained from [{}], state_dict does not fit'.format(path)])\n else:\n loggerRows.append(['Path', 'Failed to load pre-trained from [{}], path does not exists'.format(path)])\n\n # load pre-trained model if we tried to load pre-trained\n logger.addInfoTable('Pre-trained model', loggerRows)\n\n return loadOpsWithDifferentWeights\n\n # load weights for each filter from its uniform model, i.e. load 2-bits filter weights from uniform 2-bits model\n # weights in uniform models are full-precision, i.e. 
before quantization\n def loadUniformPreTrained(self, args, logger):\n from collections import OrderedDict\n from cnn.MixedFilter import QuantizedOp\n from cnn.utils import loadCheckpoint\n from torch.nn.modules import Linear, BatchNorm2d\n\n def b(op, prefix):\n keysList = []\n\n for name, param in op._parameters.items():\n if param is not None:\n keysList.append(prefix + name)\n for name, buf in op._buffers.items():\n if buf is not None:\n keysList.append(prefix + name)\n for name, module in op._modules.items():\n if module is not None:\n keysList.extend(b(module, prefix + name + '.'))\n\n return keysList\n\n def a(model, dict, prefix=''):\n for name, module in model._modules.items():\n key = None\n if isinstance(module, QuantizedOp):\n key = module.getBitwidth()\n # elif isinstance(module, BatchNorm2d) or isinstance(module, Linear):\n elif isinstance(module, Linear):\n key = (32, 32)\n\n if key is not None:\n if key not in dict.keys():\n dict[key] = []\n\n dict[key].extend(b(module, prefix + name + '.'))\n\n else:\n a(module, dict, prefix + name + '.')\n\n modelDict = OrderedDict()\n a(self, modelDict)\n\n # transform downsamples keys\n transformMap = [[(2, None), (2, 2)], [(3, None), (3, 3)], [(4, None), (4, 4)], [(8, None), (8, 8)]]\n for srcBitwidth, dstBitwidth in transformMap:\n if srcBitwidth in modelDict.keys():\n modelDict[dstBitwidth].extend(modelDict[srcBitwidth])\n del modelDict[srcBitwidth]\n\n keysList = []\n for bitwidth, bitwidthKeysList in modelDict.items():\n keysList.extend(bitwidthKeysList)\n\n modelStateDictKeys = set(self.state_dict().keys())\n dictDiff = modelStateDictKeys.symmetric_difference(set(keysList))\n assert (len(dictDiff) == 0)\n\n stateDict = OrderedDict()\n token1 = '.ops.'\n token2 = '.op.'\n for bitwidth, bitwidthKeysList in modelDict.items():\n if bitwidth == (32, 32):\n continue\n\n checkpoint, _ = loadCheckpoint(args.dataset, args.model, bitwidth)\n assert (checkpoint is not None)\n chckpntStateDict = checkpoint['state_dict']\n for key in bitwidthKeysList:\n prefix = key[:key.index(token1)]\n suffix = key[key.rindex(token2):]\n # convert model key to checkpoint key\n chckpntKey = prefix + token1 + '0.0' + suffix\n # add value to dict\n stateDict[key] = chckpntStateDict[chckpntKey]\n\n # # load keys from (32, 32) checkpoint, no need to transform keys\n bitwidth = (8, 8)\n checkpoint, _ = loadCheckpoint(args.dataset, args.model, bitwidth) # , filename='model.updated_stats.pth.tar')\n # checkpoint = loadModel(\"/home/vista/Desktop/Architecture_Search/ZZ/cifar100/resnet_[2#2,4#3#4#8]/pre_trained_checkpoint.pth.tar\")\n assert (checkpoint is not None)\n chckpntStateDict = checkpoint['state_dict']\n # map = self.buildStateDictMap(chckpntStateDict)\n # invMap = {v: k for k, v in map.items()}\n bitwidth = (32, 32)\n for key in modelDict[bitwidth]:\n stateDict[key] = chckpntStateDict[key]\n\n # prefix = key[:key.rindex('.')]\n # suffix = key[key.rindex('.'):]\n # newKey = invMap[prefix]\n # stateDict[key] = chckpntStateDict[newKey + suffix]\n\n dictDiff = modelStateDictKeys.symmetric_difference(set(stateDict.keys()))\n assert (len(dictDiff) == 0)\n\n self.load_state_dict(stateDict)\n logger.addInfoTable('Pre-trained model', [['Loaded each filter with filter from the corresponding bitwidth uniform model']])\n\n def loss(self, logits, target):\n return self._criterion(logits, target, self.countBops())\n\n def turnOffAlphas(self):\n for layer in self.layersList:\n layer.alphas.grad = None\n\n def calcBopsRatio(self):\n return 
self._criterion.calcBopsRatio(self.countBops())\n\n def choosePathByAlphas(self, loggerFuncs=[]):\n for l in self.layers:\n l.choosePathByAlphas()\n\n logMsg = 'Model layers filters partition has been updated by alphas distribution'\n for f in loggerFuncs:\n f(logMsg)\n\n # set curr_alpha_idx to each filter by alphas values\n def setFiltersByAlphas(self, loggerFuncs=[]):\n for layer in self.layersList:\n layer.setFiltersPartitionByAlphas()\n\n logMsg = 'Model layers filters partition has been updated by alphas values'\n for f in loggerFuncs:\n f(logMsg)\n\n # returns list of layers filters partition\n def getCurrentFiltersPartition(self):\n return [layer.getCurrentFiltersPartition() for layer in self.layersList]\n\n # partition is list of int tensors\n # given a partition, set model filters accordingly\n def setFiltersByPartition(self, partition, loggerFuncs=[]):\n for layer, p in zip(self.layersList, partition):\n layer.setFiltersPartition(p)\n\n logMsg = 'Model layers filters partition has been updated by given partition'\n for f in loggerFuncs:\n f(logMsg)\n\n def isQuantized(self):\n for layerIdx, layer in enumerate(self.layersList):\n assert (layer.quantized is True)\n assert (layer.added_noise is False)\n for opIdx, op in enumerate(layer.opsList()):\n assert (check_quantization(op.getModule(Conv2d).weight) <= (2 ** op.bitwidth[0]))\n\n return True\n\n def setWeightsTrainingHooks(self):\n assert (len(self.hooksList) == 0)\n # assign pre & post forward hooks\n self.hooksList = [self.register_forward_pre_hook(preForward), self.register_forward_hook(postForward)]\n\n def removeWeightsTrainingHooks(self):\n for handler in self.hooksList:\n handler.remove()\n # clear hooks handlers list\n self.hooksList.clear()\n\n # remove quantization from staged layers before training weights\n # quantization will be set through pre-forward hook\n # we keep ActQaunt.qunatize_during_training == True\n def removeQuantizationFromStagedLayers(self):\n for layerIdx in range(self.nLayersQuantCompleted):\n layer = self.layersList[layerIdx]\n assert (layer.quantized is True)\n # remove quantization from layer ops\n for op in layer.opsList():\n op.restore_state()\n\n # restore quantization for staged layers after training weights\n # quantization will be set through pre-forward hook\n # we keep ActQaunt.qunatize_during_training == True\n def restoreQuantizationForStagedLayers(self):\n for layerIdx in range(self.nLayersQuantCompleted):\n layer = self.layersList[layerIdx]\n assert (layer.quantized is True)\n # refresh layer ops list. 
we want ops list to contain the ops DataParallel GPU copies\n # quantize layer ops\n for op in layer.opsList():\n op.quantizeFunc()\n assert (check_quantization(op.getModule(Conv2d).weight) <= (2 ** op.bitwidth[0]))\n\n def quantizeUnstagedLayers(self):\n # quantize model layers that haven't switched stage yet\n # no need to turn gradients off, since with no_grad() does it\n if self.nLayersQuantCompleted < self.nLayers():\n # turn off noise if 1st unstaged layer\n layer = self.layersList[self.nLayersQuantCompleted]\n layer.turnOffNoise(self.nLayersQuantCompleted)\n # quantize all unstaged layers\n for layerIdx, layer in enumerate(self.layersList[self.nLayersQuantCompleted:]):\n # quantize\n layer.quantize(self.nLayersQuantCompleted + layerIdx)\n\n assert (self.isQuantized() is True)\n\n def unQuantizeUnstagedLayers(self):\n # restore weights (remove quantization) of model layers that haven't switched stage yet\n if self.nLayersQuantCompleted < self.nLayers():\n for layerIdx, layer in enumerate(self.layersList[self.nLayersQuantCompleted:]):\n # remove quantization\n layer.unQuantize(self.nLayersQuantCompleted + layerIdx)\n # add noise back to 1st unstaged layer\n layer = self.layersList[self.nLayersQuantCompleted]\n layer.turnOnNoise(self.nLayersQuantCompleted)\n\n def resetForwardCounters(self):\n for layer in self.layersList:\n for filter in layer.filters:\n # reset filter counters\n filter.resetOpsForwardCounters()\n\n # apply some function on baseline models\n # baseline models are per each filter bitwidth\n # this function create a map from baseline bitwidth to func() result on baseline model\n def applyOnBaseline(self, func, applyOnAlphasDistribution=False):\n baselineBops = {}\n # save current model filters curr_alpha_idx\n modelFiltersIdx = [[filter.curr_alpha_idx for filter in layer.filters] for layer in self.layersList]\n # iterate over model layers\n for layer in self.layersList:\n # we want to iterate only over MixedConvWithReLU filters layer\n if isinstance(layer.filters[0], MixedConvWithReLU):\n # get layer filters bitwidth list\n layerBitwidths = layer.getAllBitwidths()\n # iterate over bitwidth and calc bops for their uniform model\n for idx, bitwidth in enumerate(layerBitwidths):\n # calc only for bitwidths that are not in baselineBops dictionary\n if bitwidth not in baselineBops:\n # if we need to calc bops for bitwidth uniform model, then we have to set filters curr_alpha_idx\n for layer2 in self.layersList:\n # get layer bitwidth list\n layerBitwidths2 = layer2.getAllBitwidths()\n # find target bitwidth in bitwidth list\n if bitwidth in layerBitwidths2:\n idx = layerBitwidths2.index(bitwidth)\n else:\n # if it is a MixedConv layer, then modify the bitwidth we are looking for\n modifiedBitwidth = (bitwidth[0], None)\n idx = layerBitwidths2.index(modifiedBitwidth)\n # set layers curr_alpha_idx to target bitwidth index\n for filter in layer2.filters:\n filter.curr_alpha_idx = idx\n # update bops value in dictionary\n baselineBops[bitwidth] = func()\n\n # apply on current alphas distribution\n if applyOnAlphasDistribution:\n self.setFiltersByAlphas()\n # &#945; is greek alpha symbol in HTML\n baselineBops['&#945;'] = func()\n\n # restore filters curr_alpha_idx\n for layer, layerFiltersIdx in zip(self.layersList, modelFiltersIdx):\n for filter, filterIdx in zip(layer.filters, layerFiltersIdx):\n filter.curr_alpha_idx = filterIdx\n\n return baselineBops\n\n # calc bops of uniform models, based on filters ops bitwidth\n def calcBaselineBops(self):\n return 
self.applyOnBaseline(self.countBops)\n\n # return top k operations per layer\n def topOps(self, k):\n top = []\n for layer in self.layersList:\n # calc weights from alphas and sort them\n weights = F.softmax(layer.alphas, dim=-1)\n wSorted, wIndices = weights.sort(descending=True)\n # keep only top-k\n wSorted = wSorted[:k]\n wIndices = wIndices[:k]\n # get layer bitwidths\n bitwidths = layer.getAllBitwidths()\n # add to top\n top.append([(i, w.item(), layer.alphas[i], bitwidths[i]) for w, i in zip(wSorted, wIndices)])\n\n return top\n\n # create list of tuples (layer index, layer alphas)\n def save_alphas_state(self):\n return [(i, layer.alphas) for i, layer in enumerate(self.layersList)]\n\n def load_alphas_state(self, state, loggerFuncs=[]):\n for layerIdx, alphas in state:\n layerAlphas = self.layersList[layerIdx].alphas\n device = layerAlphas.device\n layerAlphas.data = alphas.data.to(device)\n\n logMsg = 'Loaded alphas from checkpoint'\n # log message to all loggers\n for f in loggerFuncs:\n f(logMsg)\n\n def __initAlphasDataFrame(self, saveFolder):\n if saveFolder:\n # update save path if saveFolder exists\n self.alphasCsvFileName = '{}/{}'.format(saveFolder, self.alphasCsvFileName)\n # init DataFrame cols\n cols = ['Epoch', 'Batch']\n cols += ['Layer_{}'.format(i) for i in range(self.nLayers())]\n self.cols = cols\n # init DataFrame\n self.alphas_df = DataFrame([], columns=cols)\n # set init data\n data = ['init', 'init']\n # save alphas data\n self.save_alphas_to_csv(data)\n\n # save alphas values to csv\n def save_alphas_to_csv(self, data):\n if self.alphas_df is not None:\n data += [[round(e.item(), 5) for e in layer.alphas] for layer in self.layersList]\n # create new row\n d = DataFrame([data], columns=self.cols)\n # add row\n self.alphas_df = self.alphas_df.append(d)\n # save DataFrame\n self.alphas_df.to_csv(self.alphasCsvFileName)\n\n def logDominantQuantizedOp(self, k, loggerFuncs=[]):\n if (not loggerFuncs) or (len(loggerFuncs) == 0):\n return\n\n rows = [['Layer #', 'Alphas']]\n alphaCols = ['Index', 'Ratio', 'Value', 'Bitwidth']\n\n top = self.topOps(k=k)\n for i, layerTop in enumerate(top):\n layerRow = [alphaCols]\n for idx, w, alpha, bitwidth in layerTop:\n alphaRow = [idx, '{:.5f}'.format(w), '{:.5f}'.format(alpha), bitwidth]\n # add alpha data row to layer data table\n layerRow.append(alphaRow)\n # add layer data table to model table as row\n rows.append([i, layerRow])\n\n # apply loggers functions\n for f in loggerFuncs:\n f(k, rows)\n\n def printToFile(self, saveFolder):\n logger = HtmlLogger(saveFolder, 'model')\n\n layerIdxKey = 'Layer#'\n nFiltersKey = 'Filters#'\n bitwidthsKey = 'Bitwidths'\n filterArchKey = 'Filter Architecture'\n alphasKey = 'Alphas distribution'\n\n logger.createDataTable('Model architecture', [layerIdxKey, nFiltersKey, bitwidthsKey, filterArchKey])\n for layerIdx, layer in enumerate(self.layersList):\n bitwidths = layer.getAllBitwidths()\n\n dataRow = {layerIdxKey: layerIdx, nFiltersKey: layer.nFilters(), bitwidthsKey: bitwidths, filterArchKey: next(layer.opsList())}\n logger.addDataRow(dataRow)\n\n # log layers alphas distribution\n self.logDominantQuantizedOp(len(bitwidths), loggerFuncs=[lambda k, rows: logger.addInfoTable(alphasKey, rows)])\n\n def logForwardCounters(self, loggerFuncs):\n if (not loggerFuncs) or (len(loggerFuncs) == 0):\n self.resetForwardCounters()\n return\n\n rows = [['Layer #', 'Counters']]\n counterCols = ['Prev idx', 'bitwidth', 'Counter']\n\n for layerIdx, layer in enumerate(self.layersList):\n filter = 
layer.filters[0]\n # sum counters of all filters by indices\n countersByIndices = [[0] * len(filter.opsForwardCounters[0]) for _ in range(len(filter.opsForwardCounters))]\n for filter in layer.filters:\n for i, counterList in enumerate(filter.opsForwardCounters):\n for j, counter in enumerate(counterList):\n countersByIndices[i][j] += counter\n # reset filter counters\n filter.resetOpsForwardCounters()\n\n # collect layer counters to 2 arrays:\n # counters holds the counters values\n # indices holds the corresponding counter value indices\n counters, indices = [], []\n for i in range(len(countersByIndices)):\n for j in range(len(countersByIndices[0])):\n counters.append(countersByIndices[i][j])\n indices.append((i, j))\n\n # get layer bitwidths\n bitwidths = layer.getAllBitwidths()\n # for each layer, sort counters in descending order\n layerRows = [counterCols]\n countersTotal = 0\n while len(counters) > 0:\n # find max counter and print it\n maxIdx = argmax(counters)\n i, j = indices[maxIdx]\n\n # add counter as new row\n layerRows.append([i, bitwidths[j], counters[maxIdx]])\n\n # update countersTotal\n countersTotal += counters[maxIdx]\n # remove max counter from lists\n del counters[maxIdx]\n del indices[maxIdx]\n\n # add counters total row\n layerRows.append(['Total', '', countersTotal])\n # add layer row to model table\n rows.append([layerIdx, layerRows])\n\n # apply loggers functions\n for f in loggerFuncs:\n f(rows)\n\n# def calcStatistics(self, statistics_queue):\n# # prepare for collecting statistics, reset register_buffers values\n# for layer in self.layersList:\n# for op in layer.opsList:\n# conv = op.getConv()\n# # reset conv register_buffer values\n# conv.layer_b = ones(1).cuda()\n# conv.layer_basis = ones(1).cuda()\n# conv.initial_clamp_value = ones(1).cuda()\n# # get actquant\n# actQuant = op.getReLU()\n# if actQuant:\n# # reset actquant register_buffer values\n# actQuant.running_mean = zeros(1).cuda()\n# actQuant.running_std = zeros(1).cuda()\n# actQuant.clamp_val.data = zeros(1).cuda()\n# # set actquant to statistics forward\n# actQuant.forward = actQuant.statisticsForward\n#\n# # train for statistics\n# criterion = CrossEntropyLoss().cuda()\n# nBatches = 80\n# self.eval()\n# with no_grad():\n# for step, (input, target) in enumerate(statistics_queue):\n# if step >= nBatches:\n# break\n#\n# output = self(input.cuda())\n# criterion(output, target.cuda())\n#\n# # apply quantize class statistics functions\n# for layerIdx, layer in enumerate(self.layersList):\n# # concat layer feature maps together, in order to get initial_clamp_value identical to NICE\n# # because initial_clamp_value is calculated based on feature maps weights values\n# x = tensor([]).cuda()\n# for op in layer.opsList:\n# x = cat((x, op.getConv().weight), dim=0)\n#\n# for op in layer.opsList:\n# clamp_value = op.quantize.basic_clamp(x)\n# conv = op.getConv()\n# conv.initial_clamp_value = clamp_value\n# # restore actquant forward function\n# actQuant = op.getReLU()\n# # set actquant to standard forward\n# if actQuant:\n# op.quantize.get_act_max_value_from_pre_calc_stats([actQuant])\n# actQuant.forward = actQuant.standardForward\n#\n# print('Layer [{}] - initial_clamp_value:[{}]'.format(layerIdx, conv.initial_clamp_value.item()))\n#\n# # for op in layer.opsList:\n# # opModulesList = list(op.modules())\n# # op.quantize.get_act_max_value_from_pre_calc_stats(opModulesList)\n# # op.quantize.set_weight_basis(opModulesList, None)\n# #\n# # conv = op.getConv()\n# # print(conv.initial_clamp_value)\n# #\n#\n# # updates 
statistics in checkpoint, in order to avoid calculating statistics when loading model from checkpoint\n# def updateCheckpointStatistics(self, checkpoint, path, statistics_queue):\n# needToUpdate = ('updated_statistics' not in checkpoint) or (checkpoint['updated_statistics'] is not True)\n# if needToUpdate:\n# # quantize model\n# self.quantizeUnstagedLayers()\n# # change self.nLayersQuantCompleted so calcStatistics() won't quantize again\n# nLayersQuantCompletedOrg = self.nLayersQuantCompleted\n# self.nLayersQuantCompleted = self.nLayers()\n# # load checkpoint weights\n# self.load_state_dict(checkpoint['state_dict'])\n# # calc weights statistics\n# self.calcStatistics(statistics_queue)\n# # update checkpoint\n# checkpoint['state_dict'] = self.state_dict()\n# checkpoint['updated_statistics'] = True\n# # save updated checkpoint\n# saveModel(checkpoint, path)\n# # restore nLayersQuantCompleted\n# self.nLayersQuantCompleted = nLayersQuantCompletedOrg\n#\n# return needToUpdate\n\n# def __loadStatistics(self, filename):\n# if exists(filename):\n# # stats is a list of dicts per layer\n# stats = loadModel(filename)\n# print('Loading statistics')\n#\n# for i, layer in enumerate(self.layersList):\n# # get layer dict\n# layerStats = stats[i]\n# # iterate over layer filters\n# for filter in layer.filters:\n# # iterate over filter modules\n# for m in filter.modules():\n# # create module type as string\n# moduleType = '{}'.format(type(m))\n# NICEprefix = \"'NICE.\"\n# if NICEprefix in moduleType:\n# moduleType = moduleType.replace(NICEprefix, \"'\")\n#\n# # check if type string is in dict\n# if moduleType in layerStats:\n# # go over dict keys, which is the module variables\n# for varName in layerStats[moduleType].keys():\n# v = getattr(m, varName)\n# # if variable has value in dict, assign it\n# if v is not None:\n# v.data = layerStats[moduleType][varName].data\n\n# # select random alpha\n# def chooseRandomPath(self):\n# for l in self.layers:\n# l.chooseRandomPath()\n\n# # layerIdx, alphaIdx meaning: self.layersList[layerIdx].curr_alpha_idx = alphaIdx\n# # def choosePathByAlphas(self, layerIdx=None, alphaIdx=None):\n# def choosePathByAlphas(self):\n# for l in self.layers:\n# l.choosePathByAlphas()\n#\n# if (layerIdx is not None) and (alphaIdx is not None):\n# layer = self.layersList[layerIdx]\n# layer.curr_alpha_idx = alphaIdx\n\n# def evalMode(self):\n# for l in self.layers:\n# l.evalMode()\n#\n# # calc bops ratio\n# return self.calcBopsRatio()\n\n# def uniformMode(self):\n# for l in self.layersList:\n# l.uniformMode(self._criterion.baselineBits)\n#\n# # calc bops ratio\n# return self.calcBopsRatio()\n\n# def turnOffAlphas(self):\n# for layer in self.layersList:\n# # turn off alphas gradients\n# layer.alphas.requires_grad = False\n#\n# self.learnable_alphas = []\n\n# def turnOnAlphas(self):\n# self.learnable_alphas = []\n# for layer in self.layersList:\n# # turn on alphas gradients\n# layer.alphas.requires_grad = True\n# self.learnable_alphas.append(layer.alphas)\n#\n# for op in layer.getOps():\n# # turn off noise in op\n# op.noise = False\n#\n# ## ==== for tinyNet ====\n# # # set pre & post quantization hooks, from now on we want to quantize these ops\n# # op.register_forward_pre_hook(save_quant_state)\n# # op.register_forward_hook(restore_quant_state)\n\n# # convert current model to discrete, i.e. 
keep nOpsPerLayer optimal operations per layer\n# def toDiscrete(self, nOpsPerLayer=1):\n# for layer in self.layersList:\n# # calc weights from alphas and sort them\n# weights = F.softmax(layer.alphas, dim=-1)\n# _, wIndices = weights.sort(descending=True)\n# # update layer alphas\n# layer.alphas = layer.alphas[wIndices[:nOpsPerLayer]]\n# # layer.alphas = tensor(tensor(layer.alphas.tolist()).cuda(), requires_grad=True)\n# layer.alphas = tensor(tensor(layer.alphas.tolist()).cuda())\n# # take indices of ops we want to remove from layer\n# wIndices = wIndices[nOpsPerLayer:]\n# # convert to list\n# wIndices = wIndices.tolist()\n# # sort indices ascending\n# wIndices.sort()\n# # remove ops and corresponding bops from layer\n# for w in reversed(wIndices):\n# del layer.ops[w]\n# del layer.bops[w]\n\n# def loadBitwidthWeigths(self, stateDict, MaxBopsBits, bitwidth):\n# # check idx of MaxBopsBits inside bitwidths\n# maxBopsBitsIdx = bitwidth.index(MaxBopsBits)\n# maxBopsStateDict = OrderedDict()\n# opsKey = 'ops.'\n# for key in stateDict.keys():\n# # if operation is for max bops bits idx\n# if opsKey in key:\n# keyOp_num = key.split(opsKey)[1][0]\n# if int(keyOp_num) == maxBopsBitsIdx:\n# maxBopsKey = key.replace(opsKey + keyOp_num, opsKey + '0')\n# maxBopsStateDict[maxBopsKey] = stateDict[key]\n# else:\n# maxBopsStateDict[key] = stateDict[key]\n#\n# self.load_state_dict(maxBopsStateDict)\n\n# def _loss(self, input, target):\n# totalLoss = 0.0\n# nIter = min(self.nPerms, 1000)\n# for _ in range(nIter):\n# logits = self.forward(input)\n#\n# # calc alphas product\n# alphasProduct = 1.0\n# for layer in self.layersList:\n# probs = F.softmax(layer.alphas)\n# alphasProduct *= probs[layer.curr_alpha_idx]\n#\n# permLoss = alphasProduct * self._criterion(logits, target, self.countBops())\n# # permLoss = self._criterion(logits, target, self.countBops()) / nIter\n# permLoss.backward(retain_graph=True)\n#\n# totalLoss += permLoss.item()\n#\n# return totalLoss\n\n# def _loss(self, input, target):\n# # sum all paths losses * the path alphas multiplication\n# totalLoss = 0.0\n# nIter = min(self.nPerms, 1000)\n# for _ in range(nIter):\n# # for perm in product(*self.layersPerm):\n# perm = [randint(0, len(layer.alphas) - 1) for layer in self.layersList]\n# alphasProduct = 1.0\n# # set perm index in each layer\n# for i, p in enumerate(perm):\n# layer = self.layersList[i]\n# layer.curr_alpha_idx = p\n# probs = F.softmax(layer.alphas)\n# alphasProduct *= probs[p]\n#\n# logits = self.forward(input)\n# # only the alphas are changing...\n# permLoss = (alphasProduct * self._criterion(logits, target, self.countBops()))\n# permLoss.backward(retain_graph=True)\n# totalLoss += permLoss.item()\n#\n# # print('totalLoss:[{:.5f}]'.format(totalLoss))\n# return totalLoss\n\n#\n# # logits = self.forward(input)\n# # return self._criterion(logits, target, self.countBops())\n\n# def _loss(self, input, target):\n# # init how many samples per alpha\n# nSamples = self.nSamples\n# # init total loss\n# totalLoss = 0.0\n# # init loss samples list for ALL alphas\n# allLossSamples = []\n# for j, layer in enumerate(self.layersList):\n# # turn off coin toss for this layer\n# layer.alphas.requires_grad = False\n# # init layer alphas gradient\n# layerAlphasGrad = zeros(len(layer.alphas)).cuda()\n# # calc layer alphas softmax\n# probs = F.softmax(layer.alphas, dim=-1)\n#\n# for i, alpha in enumerate(layer.alphas):\n# # select the specific alpha in this layer\n# layer.curr_alpha_idx = i\n# # init loss samples list\n# alphaLossSamples = []\n# for _ in 
range(nSamples):\n# # forward through some path in model\n# logits = self(input)\n# # alphaLoss += self._criterion(logits, target, self.countBops()).detach()\n# alphaLossSamples.append(self._criterion(logits, target, self.countBops()).detach())\n#\n# # add current alpha loss samples to all loss samples list\n# allLossSamples.extend(alphaLossSamples)\n# # calc alpha average loss\n# alphaAvgLoss = sum(alphaLossSamples) / nSamples\n# layerAlphasGrad[i] = alphaAvgLoss\n# # add alpha loss to total loss\n# totalLoss += (alphaAvgLoss * probs[i])\n#\n# # calc loss samples variance\n# lossVariance = [((x - alphaAvgLoss) ** 2) for x in alphaLossSamples]\n# lossVariance = sum(lossVariance) / (nSamples - 1)\n# # add alpha loss average to statistics\n# self.stats.containers[self.stats.alphaLossAvgKey][j][i].append(alphaAvgLoss.item())\n# # add alpha loss variance to statistics\n# self.stats.containers[self.stats.alphaLossVarianceKey][j][i].append(lossVariance.item())\n#\n# # turn in coin toss for this layer\n# layer.alphas.requires_grad = True\n# # set layer alphas gradient\n# layer.alphas.grad = layerAlphasGrad\n#\n# # add gradNorm to statistics\n# self.stats.containers[self.stats.gradNormKey][j].append(layerAlphasGrad.norm().item())\n#\n# # average total loss\n# totalLoss /= self.nLayers()\n# # calc all loss samples average\n# nTotalSamples = len(allLossSamples)\n# allLossSamplesAvg = sum(allLossSamples) / nTotalSamples\n# # calc all loss samples variance\n# allLossSamples = [((x - allLossSamplesAvg) ** 2) for x in allLossSamples]\n# allLossSamplesVariance = (sum(allLossSamples) / (nTotalSamples - 1)).item()\n# # add all samples average & loss variance to statistics\n# self.stats.containers[self.stats.allSamplesLossAvgKey][0].append(allLossSamplesAvg)\n# self.stats.containers[self.stats.allSamplesLossVarianceKey][0].append(allLossSamplesVariance)\n#\n# # subtract average total loss from every alpha gradient\n# for layer in self.layersList:\n# layer.alphas.grad -= totalLoss\n# # calc layer alphas softmax\n# probs = F.softmax(layer.alphas, dim=-1)\n# # multiply each grad by its probability\n# layer.alphas.grad *= probs\n#\n# return totalLoss\n", "from itertools import groupby\nfrom abc import abstractmethod\n\nfrom torch import cat, chunk, tensor, zeros, int32\nfrom torch.nn import ModuleList, BatchNorm2d, Conv2d\nfrom torch.distributions.multinomial import Multinomial\nfrom torch.nn import functional as F\n\nfrom cnn.MixedFilter import MixedFilter\nfrom cnn.block import Block\n\nfrom UNIQ.quantize import check_quantization\nfrom NICE.quantize import ActQuant\n\n\n# collects stats from forward output\ndef collectStats(type, val):\n funcs = [(lambda x: x.argmin(), lambda x: x.min()), (lambda x: '', lambda x: sum(x) / len(x)), (lambda x: x.argmax(), lambda x: x.max())]\n\n res = [[['Filter#', filterFunc(val)], ['Value', '{:.5f}'.format(valueFunc(val))]] for filterFunc, valueFunc in funcs]\n res = [type] + res\n\n return res\n\n\ndef postForward(self, _, output):\n assert (False)\n if self.quantized is True:\n # calc mean, max value per feature map to stats\n layerMax = tensor([output.select(1, j).max() for j in range(output.size(1))])\n layerAvg = tensor([(output.select(1, j).sum() / output.select(1, j).numel()) for j in range(output.size(1))])\n # save min, avg & max values for stats\n elements = [('Avg', layerAvg), ('Max', layerMax)]\n self.forwardStats = [collectStats(type, val) for type, val in elements]\n self.forwardStats.insert(0, ['Type', 'Min', 'Avg', 'Max'])\n\n # for i, m in 
enumerate(layerMax):\n # if m.item() <= 1E-5:\n # filter = self.filters[i]\n # conv = filter.ops[0][filter.curr_alpha_idx].op[0].weight\n # self.forwardStats.append([['Filter#', i], ['MaxVal', m], ['conv weights', conv]])\n\n else:\n self.forwardStats = None\n\n\n# alphas = [[-0.55621, -0.33438, 0.99768, -0.80023], [0.29986, 0.06659, 0.44075, -1.50035], [-0.10046, 0.33549, 0.64312, -1.57129],\n# [0.4849, -0.3104, 0.74277, -1.61042], [0.78503, -0.93497, -0.94867], [0.09668, 0.11817, 0.20924, -1.11723],\n# [0.01722, 0.46502, 0.33579, -1.51118], [0.04131, -0.74829, -0.39164], [0.16032, 0.38078, 0.15881, -1.39306]]\n#\n# alphaIdx = [0]\n#\n#\n# def getAlphas():\n# res = tensor(alphas[alphaIdx[0]]).cuda()\n# alphaIdx[0] = (alphaIdx[0] + 1) % len(alphas)\n# return res\n\n\nclass MixedLayer(Block):\n def __init__(self, nFilters, createMixedFilterFunc, useResidual=False):\n super(MixedLayer, self).__init__()\n\n # create mixed filters\n self.filters = ModuleList()\n for _ in range(nFilters):\n self.filters.append(createMixedFilterFunc())\n # make sure mixed filters are subclasses of MixedFilter\n assert (isinstance(self.filters[0], MixedFilter))\n\n # init operations alphas (weights)\n self.alphas = tensor((zeros(self.numOfOps())).cuda(), requires_grad=True)\n # self.alphas = tensor(getAlphas(), requires_grad=True)\n self.alphas = self.alphas.cuda()\n\n # =========== change alphas distribution ==================\n if self.numOfOps() > 1:\n from math import log\n filter = self.filters[0]\n p = 1 / ((self.numOfOps() * 2) - 1)\n logVal = p / (1 - p) * (self.numOfOps() - 1)\n for i, op in enumerate(filter.opsList()):\n opBitwidth = op.getBitwidth()\n if opBitwidth == (8, 8) or opBitwidth == (8, None):\n self.alphas.data[i].fill_(log(logVal))\n\n # init filters current partition by alphas, i.e. 
how many filters are for each alpha, from each quantization\n self.currFiltersPartition = [0] * self.numOfOps()\n\n # # set filters distribution\n # if self.numOfOps() > 1:\n # self.setAlphas([0.3125, 0.3125, 0.1875, 0.125, 0.0625])\n # self.setFiltersPartition()\n\n # set forward function\n self.forwardFunc = self.residualForward if useResidual else self.standardForward\n\n # # register post forward hook\n # self.register_forward_hook(postForward)\n # self.forwardStats = None\n\n # set UNIQ parameters\n self.quantized = False\n self.added_noise = False\n\n def nFilters(self):\n return len(self.filters)\n\n def getLayers(self):\n return [self]\n\n def quantize(self, layerIdx):\n assert (self.added_noise is False)\n for op in self.opsList():\n assert (op.noise is False)\n assert (op.quant is False)\n op.quant = True\n\n op.quantizeFunc()\n assert (check_quantization(op.getModule(Conv2d).weight) <= (2 ** op.bitwidth[0]))\n # quantize activations during training\n for m in op.modules():\n if isinstance(m, ActQuant):\n m.qunatize_during_training = True\n\n self.quantized = True\n print('quantized layer [{}] + quantize activations during training'.format(layerIdx))\n\n def unQuantize(self, layerIdx):\n assert (self.quantized is True)\n assert (self.added_noise is False)\n\n for op in self.opsList():\n assert (op.quant is True)\n op.quant = False\n op.restore_state()\n # remove activations quantization during training\n for m in op.modules():\n if isinstance(m, ActQuant):\n m.qunatize_during_training = False\n\n self.quantized = False\n print('removed quantization in layer [{}] + removed activations quantization during training'.format(layerIdx))\n\n # just turn on op.noise flag\n # noise is being added in pre-forward hook\n def turnOnNoise(self, layerIdx):\n assert (self.quantized is False)\n for op in self.opsList():\n assert (op.noise is False)\n op.noise = True\n\n self.added_noise = True\n print('turned on noise in layer [{}]'.format(layerIdx))\n\n def turnOffNoise(self, layerIdx):\n assert (self.quantized is False)\n assert (self.added_noise is True)\n\n for op in self.opsList():\n assert (op.noise is True)\n op.noise = False\n\n self.added_noise = False\n print('turned off noise in layer [{}]'.format(layerIdx))\n\n # ratio is a list\n def setAlphas(self, ratio):\n self.alphas.data = tensor(ratio)\n\n # set filters curr_alpha_idx based on partition tensor\n # partition is IntTensor\n def setFiltersPartition(self, partition):\n assert (partition.sum().item() == self.nFilters())\n # reset current filters partition by alphas\n self.currFiltersPartition = [0] * self.numOfOps()\n # update filters curr_alpha_idx\n idx = 0\n for i, r in enumerate(partition):\n for _ in range(r):\n self.filters[idx].curr_alpha_idx = i\n self.currFiltersPartition[i] += 1\n idx += 1\n\n # set filters partition based on ratio\n # ratio is a tensor\n def __setFiltersPartitionFromRatio(self, ratio):\n # calc partition\n partition = (ratio * self.nFilters()).type(int32)\n # fix last ratio value to sum to nFilters\n if partition.sum().item() < self.nFilters():\n partition[-1] = self.nFilters() - partition[:-1].sum().item()\n\n self.setFiltersPartition(partition)\n\n # set filters partition based on alphas ratio\n def setFiltersPartitionByAlphas(self):\n probs = F.softmax(self.alphas, dim=-1)\n self.__setFiltersPartitionFromRatio(probs)\n\n def getCurrentFiltersPartition(self):\n return self.currFiltersPartition\n\n # input_bitwidth is a list of bitwidth per feature map\n def getBops(self, input_bitwidth):\n bops = 0.0\n 
# init bops map\n bopsMap = {}\n for f in self.filters:\n bops += f.getBops(input_bitwidth, bopsMap)\n\n return bops\n\n # returns filters current op bitwidth\n def getCurrentBitwidth(self):\n # collect filters current bitwidths\n bitwidths = [f.getCurrentBitwidth() for f in self.filters]\n # group bitwidths\n groups = groupby(bitwidths, lambda x: x)\n # create a list of tuples [bitwidth, number of filters]\n res = []\n for _, g in groups:\n g = list(g)\n res.append([g[0], len(g)])\n\n return res\n\n # create a list of layer output feature maps bitwidth\n def getCurrentOutputBitwidth(self):\n outputBitwidth = [f.getCurrentOutputBitwidth() for f in self.filters]\n return outputBitwidth\n\n def opsList(self):\n for filter in self.filters:\n for op in filter.opsList():\n yield op\n\n def getAllBitwidths(self):\n # it doesn't matter which filter we take, the attributes are the same in all filters\n return self.filters[0].getAllBitwidths()\n\n def numOfOps(self):\n # it doesn't matter which filter we take, the attributes are the same in all filters\n return self.filters[0].numOfOps()\n\n def outputLayer(self):\n return self\n\n # select alpha based on alphas distribution\n def choosePathByAlphas(self):\n dist = Multinomial(total_count=self.nFilters(), logits=self.alphas)\n partition = dist.sample().type(int32)\n self.setFiltersPartition(partition)\n\n @abstractmethod\n def preResidualForward(self, x):\n raise NotImplementedError('subclasses must override preResidualForward()!')\n\n # operations to perform after adding residual\n def postResidualForward(self, x):\n out = x\n # apply ReLU if exists\n if self.filters[0].postResidualForward:\n out = []\n # split out1 to chunks again\n x = chunk(x, self.nFilters(), dim=1)\n # apply selected op in each filter\n for i, f in enumerate(self.filters):\n res = f.postResidualForward(x[i])\n out.append(res)\n # concat filters output\n out = cat(out, 1)\n\n return out\n\n def forward(self, x):\n return self.forwardFunc(self, x)\n\n # standard forward\n @staticmethod\n def standardForward(layer, x):\n out = layer.preResidualForward(x)\n out = layer.postResidualForward(out)\n\n return out\n\n # forward with residual\n @staticmethod\n def residualForward(layer, input):\n x, residual = input\n out = layer.preResidualForward(x)\n # add residual\n out += residual\n out = layer.postResidualForward(out)\n\n return out\n\n\nclass MixedLayerNoBN(MixedLayer):\n def __init__(self, nFilters, createMixedFilterFunc, useResidual=False):\n super(MixedLayerNoBN, self).__init__(nFilters, createMixedFilterFunc, useResidual)\n\n # operations to perform before adding residual\n def preResidualForward(self, x):\n out = []\n # apply selected op in each filter\n for f in self.filters:\n res = f(x)\n out.append(res)\n # concat filters output\n out = cat(out, 1)\n\n return out\n\n\nclass MixedLayerWithBN(MixedLayer):\n def __init__(self, nFilters, createMixedFilterFunc, useResidual=False):\n super(MixedLayerWithBN, self).__init__(nFilters, createMixedFilterFunc, useResidual)\n\n # init batch norm\n self.bn = BatchNorm2d(nFilters)\n\n # perform the convolution operation\n def forwardConv(self, x):\n out = []\n # apply selected op in each filter\n for f in self.filters:\n res = f(x)\n out.append(res)\n # concat filters output\n out = cat(out, 1)\n\n return out\n\n # operations to perform before adding residual\n def preResidualForward(self, x):\n out = self.forwardConv(x)\n # apply batch norm\n out = self.bn(out)\n\n return out\n\n# bitwidth list is the same for all filters, therefore 
we can use the 1st filter list\n# def getOutputBitwidthList(self):\n# return self.filters[0].getOutputBitwidthList()\n\n# def evalMode(self):\n# pass\n\n# # select random alpha\n# def chooseRandomPath(self):\n# pass\n\n# # quantize activations during training\n# def quantActOnTraining(self, layerIdx):\n# assert (self.quantized is True)\n# assert (self.added_noise is False)\n#\n# for op in self.opsList:\n# for m in op.modules():\n# if isinstance(m, ActQuant):\n# m.qunatize_during_training = True\n#\n# print('turned on qunatize_during_training in layer [{}]'.format(layerIdx))\n#\n# # stop quantize activations during training\n# def turnOnGradients(self, layerIdx):\n# assert (self.quantized is False)\n# assert (self.added_noise is False)\n#\n# for op in self.opsList:\n# for m in op.modules():\n# if isinstance(m, ActQuant):\n# m.qunatize_during_training = False\n#\n# print('turned off qunatize_during_training in layer [{}]'.format(layerIdx))\n", "from torch import zeros, tensor, no_grad\nfrom torch.nn import functional as F\n\nfrom cnn.model_replicator import ModelReplicator, set_device\n\n\nclass RandomPath(ModelReplicator):\n def __init__(self, model, modelClass, args, logger):\n super(RandomPath, self).__init__(model, modelClass, args, logger)\n\n def getModel(self, args):\n return args[0]\n\n def buildArgs(self, inputPerGPU, targetPerGPU, nSamplesPerModel):\n args = ((cModel, inputPerGPU[gpu], targetPerGPU[gpu], nSamples, gpu)\n for nSamples, (cModel, gpu) in zip(nSamplesPerModel, self.replications))\n\n return args\n\n def lossPerReplication(self, args):\n cModel, input, target, nSamples, gpu = args\n # switch to process GPU\n set_device(gpu)\n assert (cModel.training is False)\n\n with no_grad():\n # init total loss\n totalLoss = 0.0\n # init loss samples list for ALL alphas\n allLossSamples = []\n # init layers alphas grad\n alphasGrad = []\n # save stats data\n gradNorm = []\n alphaLossVariance = []\n for layerIdx in layersIndices:\n layer = cModel.layersList[layerIdx]\n # turn off coin toss for this layer\n layer.alphas.requires_grad = False\n # init layer alphas gradient\n layerAlphasGrad = zeros(len(layer.alphas)).cuda(gpu)\n # calc layer alphas softmax\n probs = F.softmax(layer.alphas, dim=-1)\n\n for i, alpha in enumerate(layer.alphas):\n # # select the specific alpha in this layer\n # layer.curr_alpha_idx = i\n\n # init loss samples list\n alphaLossSamples = []\n for _ in range(nSamples):\n # choose path in model based on alphas distribution, while current layer alpha is [i]\n cModel.choosePathByAlphas(layerIdx=layerIdx, alphaIdx=i)\n # forward input in model\n logits = cModel(input)\n # alphaLoss += cModel._criterion(logits, target, cModel.countBops()).detach()\n alphaLossSamples.append(cModel._criterion(logits, target, cModel.countBops()).detach())\n\n # add current alpha loss samples to all loss samples list\n allLossSamples.extend(alphaLossSamples)\n # calc alpha average loss\n alphaAvgLoss = sum(alphaLossSamples) / nSamples\n layerAlphasGrad[i] = alphaAvgLoss\n # add alpha loss to total loss\n totalLoss += (alphaAvgLoss * probs[i])\n\n # calc loss samples variance\n lossVariance = [((x - alphaAvgLoss) ** 2) for x in alphaLossSamples]\n lossVariance = sum(lossVariance) / (nSamples - 1)\n # add alpha loss variance to statistics\n alphaLossVariance.append((layerIdx, i, alphaAvgLoss.item(), lossVariance.item()))\n\n # turn in coin toss for this layer\n layer.alphas.requires_grad = True\n # add layer alphas grad to container\n alphasGrad.append(layerAlphasGrad)\n # add gradNorm 
to statistics\n gradNorm.append((layerIdx, layerAlphasGrad.norm().item()))\n\n return alphasGrad, allLossSamples, layersIndices, totalLoss, gradNorm, alphaLossVariance\n\n def processResults(self, model, results):\n stats = model.stats\n # init total loss\n totalLoss = tensor(0.0).cuda()\n # init loss samples list for ALL alphas\n allLossSamples = []\n # process returned results\n for alphasGrad, partialLossSamples, layersIndices, partialLoss, gradNorm, alphaLossVariance in results:\n # add alphas loss samples to all loss samples list\n allLossSamples.extend(partialLossSamples)\n # calc total loss & total number of samples\n totalLoss += partialLoss.to(totalLoss.device)\n # update statistics\n for layerIdx, v in gradNorm:\n stats.containers[stats.gradNormKey][layerIdx].append(v)\n for layerIdx, j, avg, variance in alphaLossVariance:\n stats.containers[stats.alphaLossAvgKey][layerIdx][j].append(avg)\n stats.containers[stats.alphaLossVarianceKey][layerIdx][j].append(variance)\n # update layers alphas gradients\n for layerAlphasGrads, layerIdx in zip(alphasGrad, layersIndices):\n alphas = model.layersList[layerIdx].alphas\n alphas.grad = layerAlphasGrads.to(alphas.device)\n\n # average total loss\n totalLoss /= model.nLayers()\n # calc all loss samples average\n nTotalSamples = len(allLossSamples)\n allLossSamplesAvg = sum(allLossSamples) / nTotalSamples\n # calc all loss samples variance\n allLossSamples = [((x - allLossSamplesAvg) ** 2) for x in allLossSamples]\n allLossSamplesVariance = (sum(allLossSamples) / (nTotalSamples - 1))\n # add all samples loss average & variance to statistics\n stats.containers[stats.allSamplesLossAvgKey][0].append(allLossSamplesAvg)\n stats.containers[stats.allSamplesLossVarianceKey][0].append(allLossSamplesVariance)\n\n # subtract average total loss from every alpha gradient\n for layerAlphas in model.arch_parameters():\n layerAlphas.grad -= totalLoss\n # calc layer alphas softmax\n probs = F.softmax(layerAlphas, dim=-1)\n # multiply each grad by its probability\n layerAlphas.grad *= probs\n\n return totalLoss\n\n# subtract average total loss from every alpha gradient\n# for layer in model.layersList:\n# layer.alphas.grad -= totalLoss\n# # calc layer alphas softmax\n# probs = F.softmax(layer.alphas, dim=-1)\n# # multiply each grad by its probability\n# layer.alphas.grad *= probs\n" ]
[ [ "torch.nn.functional.softmax", "numpy.argmax", "pandas.DataFrame" ], [ "torch.nn.functional.softmax", "torch.cat", "torch.nn.ModuleList", "torch.tensor", "torch.nn.BatchNorm2d" ], [ "torch.nn.functional.softmax", "torch.no_grad", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Knowledge-Precipitation-Tribe/Recurrent-neural-network
[ "44faf239784d6318c986ae39a0a1982786e951fe" ]
[ "code/BinaryNumberMinus.py" ]
[ "# -*- coding: utf-8 -*-#\n'''\n# Name: BinaryNumberMinus\n# Description: \n# Author: super\n# Date: 2020/6/18\n'''\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport math\n\nfrom MiniFramework.EnumDef_6_0 import *\nfrom MiniFramework.DataReader_2_0 import *\nfrom MiniFramework.ActivationLayer import *\nfrom MiniFramework.ClassificationLayer import *\nfrom MiniFramework.LossFunction_1_1 import *\nfrom MiniFramework.TrainingHistory_3_0 import *\n\ntrain_file = \"../data/ch19.train_minus.npz\"\ntest_file = \"../data/ch19.test_minus.npz\"\n\ndef load_data():\n dr = DataReader_2_0(train_file, test_file)\n dr.ReadData()\n dr.Shuffle()\n dr.GenerateValidationSet(k=10)\n return dr\n\nclass timestep(object):\n def forward(self,x,U,V,W,prev_s):\n self.U = U\n self.V = V\n self.W = W\n self.x = x\n # 公式6\n self.h = np.dot(x, U) + np.dot(prev_s, W)\n # 公式2\n self.s = Tanh().forward(self.h)\n # 公式3\n self.z = np.dot(self.s, V)\n # 公式4\n self.a = Logistic().forward(self.z)\n\n def backward(self, y, prev_s, next_dh):\n # 公式7\n self.dz = (self.a - y)\n # 公式11\n self.dh = (np.dot(self.dz, self.V.T) + np.dot(next_dh, self.W.T)) * (1 - np.multiply(self.s, self.s))\n # 公式12\n self.dV = np.dot(self.s.T, self.dz)\n # 公式13\n self.dU = np.dot(self.x.T, self.dh)\n # 公式15\n self.dW = np.dot(prev_s.T, self.dh)\n\nclass timestep_1(timestep):\n # compare with timestep class: no h_t value from previous layer\n def forward(self,x,U,V,W):\n self.U = U\n self.V = V\n self.W = W\n self.x = x\n # 公式1\n self.h = np.dot(self.x, U)\n # 公式2\n self.s = Tanh().forward(self.h)\n # 公式3\n self.z = np.dot(self.s, V)\n # 公式4\n self.a = Logistic().forward(self.z)\n\n # for the first timestep, there has no prev_s\n def backward(self, y, next_dh):\n # 公式7\n self.dz = (self.a - y)\n # 公式11\n self.dh = (np.dot(self.dz, self.V.T) + np.dot(next_dh, self.W.T)) * (1 - np.multiply(self.s, self.s))\n # 公式12\n self.dV = np.dot(self.s.T, self.dz)\n # 公式13\n self.dU = np.dot(self.x.T, self.dh)\n # 公式14\n self.dW = 0\n\nclass timestep_4(timestep):\n # compare with timestep class: no next_dh from future layer\n def backward(self, y, prev_s):\n # 公式7\n self.dz = self.a - y\n # 公式9\n self.dh = np.dot(self.dz, self.V.T) * (1 - np.multiply(self.s, self.s))\n # 公式12\n self.dV = np.dot(self.s.T, self.dz)\n # 公式13\n self.dU = np.dot(self.x.T, self.dh)\n # 公式15\n self.dW = np.dot(prev_s.T, self.dh)\n\nclass net(object):\n def __init__(self, dr):\n self.dr = dr\n self.loss_fun = LossFunction_1_1(NetType.BinaryClassifier)\n self.loss_trace = TrainingHistory_3_0()\n self.t1 = timestep_1()\n self.t2 = timestep()\n self.t3 = timestep()\n self.t4 = timestep_4()\n\n def forward(self,X):\n self.t1.forward(X[:,0],self.U,self.V,self.W)\n self.t2.forward(X[:,1],self.U,self.V,self.W,self.t1.s)\n self.t3.forward(X[:,2],self.U,self.V,self.W,self.t2.s)\n self.t4.forward(X[:,3],self.U,self.V,self.W,self.t3.s)\n\n def backward(self,Y):\n self.t4.backward(Y[:,3], self.t3.s)\n self.t3.backward(Y[:,2], self.t2.s, self.t4.dh)\n self.t2.backward(Y[:,1], self.t1.s, self.t3.dh)\n self.t1.backward(Y[:,0], self.t2.dh)\n\n def update(self, eta):\n self.U = self.U - (self.t1.dU + self.t2.dU + self.t3.dU + self.t4.dU)*eta\n self.V = self.V - (self.t1.dV + self.t2.dV + self.t3.dV + self.t4.dV)*eta\n self.W = self.W - (self.t1.dW + self.t2.dW + self.t3.dW + self.t4.dW)*eta\n\n def check_loss(self,X,Y):\n self.forward(X)\n loss1,acc1 = self.loss_fun.CheckLoss(self.t1.a,Y[:,0:1])\n loss2,acc2 = self.loss_fun.CheckLoss(self.t2.a,Y[:,1:2])\n loss3,acc3 = 
self.loss_fun.CheckLoss(self.t3.a,Y[:,2:3])\n loss4,acc4 = self.loss_fun.CheckLoss(self.t4.a,Y[:,3:4])\n # 只有四个时间步的输出全都正确时,才算作该样本预测正确\n # 假设4个时间步输入有3个正确,不能算作75%正确,而是0%\n output = np.concatenate((self.t1.a,self.t2.a,self.t3.a,self.t4.a), axis=1)\n result = np.round(output).astype(int)\n correct = 0\n for i in range(X.shape[0]):\n if (np.allclose(result[i], Y[i])):\n correct += 1\n acc = correct/X.shape[0]\n Loss = (loss1 + loss2 + loss3 + loss4)/4\n return Loss,acc,result\n\n def train(self, batch_size, checkpoint=0.1):\n num_input = 2\n num_hidden = 4\n num_output = 1\n max_epoch = 100\n eta = 0.1\n self.U = np.random.normal(size=(num_input, num_hidden))\n self.W = np.random.normal(size=(num_hidden,num_hidden))\n self.V = np.random.normal(size=(num_hidden,num_output))\n max_iteration = math.ceil(self.dr.num_train/batch_size)\n checkpoint_iteration = (int)(math.ceil(max_iteration * checkpoint))\n for epoch in range(max_epoch):\n dr.Shuffle()\n for iteration in range(max_iteration):\n # get data\n batch_x, batch_y = self.dr.GetBatchTrainSamples(1, iteration)\n # forward\n self.forward(batch_x)\n # backward\n self.backward(batch_y)\n # update\n self.update(eta)\n # check loss\n total_iteration = epoch * max_iteration + iteration\n if (total_iteration+1) % checkpoint_iteration == 0:\n X,Y = dr.GetValidationSet()\n loss,acc,_ = self.check_loss(X,Y)\n self.loss_trace.Add(epoch, total_iteration, None, None, loss, acc, None)\n print(epoch, total_iteration)\n print(str.format(\"loss={0:6f}, acc={1:6f}\", loss, acc))\n #end if\n #enf for\n if (acc == 1.0):\n break\n #end for\n self.loss_trace.ShowLossHistory(\"Loss and Accuracy\", XCoordinate.Iteration)\n\n def test(self):\n print(\"testing...\")\n X,Y = dr.GetTestSet()\n count = X.shape[0]\n loss,acc,result = self.check_loss(X,Y)\n print(str.format(\"loss={0:6f}, acc={1:6f}\", loss, acc))\n r = np.random.randint(0,count,10)\n for i in range(10):\n idx = r[i]\n x1 = X[idx,:,0]\n x2 = X[idx,:,1]\n print(\" x1:\", reverse(x1))\n print(\"- x2:\", reverse(x2))\n print(\"------------------\")\n print(\"true:\", reverse(Y[idx]))\n print(\"pred:\", reverse(result[idx]))\n print(\"====================\")\n #end for\n\ndef reverse(a):\n l = a.tolist()\n l.reverse()\n return l\n\nif __name__=='__main__':\n dr = load_data()\n count = dr.num_train\n n = net(dr)\n n.train(batch_size=1, checkpoint=0.1)\n n.test()" ]
[ [ "numpy.dot", "numpy.allclose", "numpy.multiply", "numpy.concatenate", "numpy.round", "numpy.random.normal", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
omron-sinicx/ctrm
[ "83e7fe4abb8ad8559bfb6e64170878575a03fd20" ]
[ "src/ctrm/planner/prioritized_planning.py" ]
[ "\"\"\"implementation of a standard prioritized planning\nAuthor: Keisuke Okumura\nAffiliation: TokyoTech & OSX\n\nRef:\n- Silver, D. (2005).\n Cooperative Pathfinding.\n Aiide, 1, 117-122.\n\n- Erdmann, M., & Lozano-Perez, T. (1987).\n On multiple moving objects.\n Algorithmica, 2(1), 477-521.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport heapq\n\nimport numpy as np\n\nfrom ..environment import Instance\nfrom ..roadmap import TimedNode, TimedRoadmap\nfrom .planner import Planner\n\n\nclass PrioritizedPlanning(Planner):\n def __init__(\n self,\n ins: Instance,\n trms: list[TimedRoadmap],\n verbose: int = 0,\n **kwargs,\n ):\n super().__init__(ins, trms, verbose, **kwargs)\n self.verbose: int = verbose\n\n def get_name(self):\n return \"PrioritizedPlanning\"\n\n def _solve(self) -> None:\n T = len(self.trms[0].V) - 1 # makespan\n required_timestep = 1\n\n for agent in range(self.ins.num_agents):\n self.info(f\"agent-{agent} starts planning\")\n goal_pos = self.ins.goals[agent]\n max_speed = self.ins.max_speeds[agent]\n rad = self.ins.rads[agent]\n trm = self.trms[agent]\n\n # define search details\n\n def get_f_value(v: TimedNode) -> float:\n # Note: the value is scaled for the time axis\n return v.t + np.linalg.norm(goal_pos - v.pos) / max_speed\n\n def check_fin(v: TimedNode) -> bool:\n # the last vertex is goal\n return v.t >= required_timestep and v == trm.V[v.t][-1]\n\n def insert(v: TimedNode, OPEN: list[list]) -> None:\n # tie-break: f -> g -> random\n heapq.heappush(\n OPEN, [get_f_value(v), v.t, np.random.rand(), v]\n )\n\n def valid_successor(v_from: TimedNode, v_to: TimedNode) -> bool:\n return not any(\n [\n self.collide_dynamic_agents(\n v_from.pos,\n v_to.pos,\n rad,\n self.solution[i][v_from.t].pos,\n self.solution[i][v_to.t].pos,\n self.ins.rads[i],\n )\n for i in range(agent)\n ]\n )\n\n # perform space-time A*\n path = self.get_single_agent_path(\n agent, check_fin, insert, valid_successor\n )\n\n if path is None: # failed to solve\n self.solution.clear()\n self.info(f\"agent-{agent} failed to find paths\")\n return\n\n # update required_timestep (for collision check)\n required_timestep = max(len(path) - 1, required_timestep)\n\n # format new path, extending by goals\n path += [trm.V[t][-1] for t in range(len(path), T + 1)]\n\n # update solution\n self.solution.append(path)\n\n self.solved = True\n" ]
[ [ "numpy.random.rand", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
The-SocialLion/Cervical-Cancer-Detection-using-CNN
[ "ffbeb8b6985b67226d2eb5464e742775595689d1" ]
[ "new/test.py" ]
[ "import numpy as np\r\nimport os\r\nfrom tensorflow.keras.models import load_model\r\nfrom PIL import ImageOps\r\nfrom tensorflow.keras.preprocessing import image# used for preproccesing \r\nmodel = load_model('cc.h5')\r\nprint(\"Loaded model from disk\")\r\n\r\nclasss = { 1:\"Type-1\",\r\n 2:\"Type-2\",\r\n 3:\"Type-3\"\r\n }\r\nImg=64\r\ndef classify(img_file):\r\n test_image=image.load_img(img_file)\r\n test_image=ImageOps.grayscale(test_image)\r\n test_image = test_image.resize((64, 64))\r\n test_image = np.expand_dims(test_image, axis=0)\r\n test = np.array(test_image).reshape(-1,Img,Img,1)\r\n result = model.predict_classes(test)[0]\r\n sign = classs[result + 1]\r\n print(sign)\r\n \r\nprint(\"Obtaining Images & its Labels..............\")\r\npath='D:/python/dl programs/Cervical Cancer Detection/data/test'\r\nfiles=[]\r\nprint(\"Dataset Loaded\")\r\n# r=root,d=directories,f=files\r\nfor r,d,f in os.walk(path):\r\n for file in f:\r\n if '.jpeg' or '.jpg' or '.png' or '.JPG' in file:\r\n files.append(os.path.join(r,file))\r\nfor f in files:\r\n classify(f)\r\n print('\\n')\r\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.array", "numpy.expand_dims", "tensorflow.keras.preprocessing.image.load_img" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.4", "2.3", "2.5", "2.6" ] } ]
bmeyers/VirtualMicrogridSegmentation
[ "cd9e7ef1a2ccc438a855765e4c07904740ec12ee", "cd9e7ef1a2ccc438a855765e4c07904740ec12ee" ]
[ "virtual_microgrids/agents/actor_network.py", "virtual_microgrids/powerflow/network_generation.py" ]
[ "# Actor and Critic DNNs\n# Based on code published by Patrick Emami on his blog \"Deep\n# Deterministic Policy Gradients in TensorFlow\":\n# https://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html\n\nimport tensorflow as tf\n\nclass ActorNetwork(object):\n \"\"\"\n Input to the network is the state, output is the action\n under a deterministic policy.\n\n The output layer activation is a tanh, which is individually scaled and\n recentered for each input, to keep each input between p_min and p_max\n for the given device.\n \"\"\"\n\n def __init__(self, sess, state_dim, action_dim, tau,\n n_layers, size, min_p, max_p, batch_size):\n self.sess = sess\n self.s_dim = state_dim\n self.a_dim = action_dim\n self.tau = tau\n self.n_layers = n_layers\n self.size = size\n self.min_p = min_p\n self.max_p = max_p\n self.batch_size = batch_size\n\n self.actor_lr_placeholder = tf.placeholder(shape=None, dtype=tf.float32)\n\n # Actor Network\n self.inputs, self.out, self.scaled_out, self.in_training = self.create_actor_network()\n\n self.network_params = tf.trainable_variables()\n\n # Target Network\n self.target_inputs, self.target_out, self.target_scaled_out, self.target_in_training = self.create_actor_network()\n\n self.target_network_params = tf.trainable_variables()[\n len(self.network_params):]\n\n # Op for periodically updating target network with online network\n # weights\n self.update_target_network_params = \\\n [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) +\n tf.multiply(self.target_network_params[i], 1. - self.tau))\n for i in range(len(self.target_network_params))]\n\n extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_ops):\n # This gradient will be provided by the critic network\n self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])\n\n # Combine the gradients here\n self.unnormalized_actor_gradients = tf.gradients(\n self.scaled_out, self.network_params, -self.action_gradient)\n self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.unnormalized_actor_gradients))\n\n # Optimization Op\n self.optimize = tf.train.AdamOptimizer(self.actor_lr_placeholder). 
\\\n apply_gradients(zip(self.actor_gradients, self.network_params))\n\n self.num_trainable_vars = len(\n self.network_params) + len(self.target_network_params)\n\n def create_actor_network(self):\n\n inputs = tf.placeholder(shape=[None, self.s_dim],\n dtype=tf.float32,\n name='states')\n out = tf.layers.flatten(inputs)\n in_training_mode = tf.placeholder(tf.bool)\n for i in range(self.n_layers):\n out = tf.keras.layers.Dense(units=self.size, activation=None)(out)\n #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode)\n out = tf.keras.activations.relu(out)\n # Final layer weights are init to Uniform[-3e-3, 3e-3]\n w_init = tf.initializers.random_uniform(minval=-0.003, maxval=0.003)\n out = tf.keras.layers.Dense(units=self.a_dim, activation=None,\n kernel_initializer=w_init)(out)\n #out = tf.keras.layers.BatchNormalization()(out, training=in_training_mode)\n out = tf.keras.activations.tanh(out)\n\n centers = (self.min_p + self.max_p) / 2.0\n scales = (self.max_p -self.min_p) / 2.0\n scaled_out = tf.multiply(out, scales) + centers\n\n return inputs, out, scaled_out, in_training_mode\n\n def train(self, inputs, a_gradient, learning_rate):\n self.sess.run(self.optimize, feed_dict={\n self.inputs: inputs,\n self.action_gradient: a_gradient,\n self.actor_lr_placeholder: learning_rate,\n self.in_training: True\n })\n\n def predict(self, inputs):\n return self.sess.run(self.scaled_out, feed_dict={\n self.inputs: inputs,\n self.in_training: False\n })\n\n def predict_target(self, inputs):\n return self.sess.run(self.target_scaled_out, feed_dict={\n self.target_inputs: inputs,\n self.target_in_training: False\n })\n\n def update_target_network(self):\n self.sess.run(self.update_target_network_params)\n\n def get_num_trainable_vars(self):\n return self.num_trainable_vars\n", "import pandapower as pp\nimport numpy as np\nfrom pandapower.networks import create_synthetic_voltage_control_lv_network as mknet\n\n\ndef get_net(config):\n \"\"\"Given the configuration, call a function to create the network object.\"\"\"\n if 'Six_Bus' in config.env_name:\n return six_bus(config.vn_high, config.vn_low, config.length_km,\n config.std_type, config.battery_locations, config.init_soc,\n config.energy_capacity, config.static_feeds, config.gen_locations,\n config.gen_p_max, config.gen_p_min, config.storage_p_max,\n config.storage_p_min)\n if config.env_name in ['rural_1', 'rural_2', 'village_1', 'village_2', 'suburb_1']:\n return standard_lv(config.env_name, config.remove_q, config.static_feeds_new, config.clear_loads_sgen,\n config.clear_gen, config.battery_locations, config.percent_battery_buses,\n config.batteries_on_leaf_nodes_only, config.init_soc, config.energy_capacity,\n config.gen_locations, config.gen_p_max, config.gen_p_min, config.storage_p_max,\n config.storage_p_min)\n\n\ndef add_battery(net, bus_number, p_init, energy_capacity, init_soc=0.5,\n max_p=50, min_p=-50, eff=1.0, capital_cost=0, min_e=0.):\n \"\"\"Change the network by adding a battery / storage unit.\n\n This function creates a storage element in net, and adds two non-standard columns: efficiency and capital cost.\n\n Parameters\n ----------\n net: class\n The pandapower network model\n bus_number: int\n Where the battery will be added\n p_init: float\n The power draw / input of the battery on initialization\n init_soc: float\n The state of charge\n max_p: float\n The max rate that power can be drawn by the battery\n min_p: float\n The max rate that power can be pulled from the battery (negative).\n eff: float\n The 
efficiency\n capital_cost: float\n The capital cost of the battery\n min_e: float\n The minimum energy in the battery\n \"\"\"\n pp.create_storage(net, bus_number, p_init, energy_capacity,\n soc_percent=init_soc, max_p_kw=max_p, min_p_kw=min_p,\n min_e_kwh=min_e)\n idx = net.storage.index[-1]\n net.storage.loc[idx, 'eff'] = eff\n net.storage.loc[idx, 'cap_cost'] = capital_cost\n\n\ndef six_bus(vn_high=20, vn_low=0.4, length_km=0.03, std_type='NAYY 4x50 SE', battery_locations=[3, 6], init_soc=0.5,\n energy_capacity=20.0, static_feeds=None, gen_locations=None, gen_p_max=0.0, gen_p_min=-50.0,\n storage_p_max=50.0, storage_p_min=-50.0):\n \"\"\"This function creates the network model for the 6 bus POC network from scratch.\n\n Buses and lines are added to an empty network based on a hard-coded topology and parameters from the config file\n (seen as inputs). The only controllable storage added in this network are batteries, and the input static_feeds is\n used to add loads and static generators which are not controlled by the agent. The first value in the series is\n taken for initialization of those elements.\n \"\"\"\n net = pp.create_empty_network(name='6bus', f_hz=60., sn_kva=100.)\n # create buses\n for i in range(8):\n nm = 'bus{}'.format(i)\n if i == 0:\n pp.create_bus(net, name=nm, vn_kv=vn_high)\n elif i == 1:\n pp.create_bus(net, name=nm, vn_kv=vn_low)\n else:\n if i <= 4:\n zn = 'Side1'\n else:\n zn = 'Side2'\n pp.create_bus(net, name=nm, zone=zn, vn_kv=vn_low)\n # create grid connection\n pp.create_ext_grid(net, 0)\n # create lines\n pp.create_line(net, 0, 1, length_km=length_km, std_type=std_type,\n name='line0')\n pp.create_line(net, 1, 2, length_km=length_km, std_type=std_type,\n name='line1')\n pp.create_line(net, 2, 3, length_km=length_km, std_type=std_type,\n name='line2')\n pp.create_line(net, 2, 4, length_km=length_km, std_type=std_type,\n name='line3')\n pp.create_line(net, 1, 5, length_km=length_km, std_type=std_type,\n name='line4')\n pp.create_line(net, 5, 6, length_km=length_km, std_type=std_type,\n name='line5')\n pp.create_line(net, 5, 7, length_km=length_km, std_type=std_type,\n name='line6')\n\n # add controllable storage\n for idx, bus_number in enumerate(battery_locations):\n energy_capacity_here = energy_capacity\n init_soc_here = init_soc\n if np.size(energy_capacity) > 1:\n energy_capacity_here = energy_capacity[idx]\n if np.size(init_soc) > 1:\n init_soc_here = init_soc[idx]\n\n add_battery(net, bus_number=bus_number, p_init=0.0, energy_capacity=energy_capacity_here,\n init_soc=init_soc_here, max_p=storage_p_max, min_p=storage_p_min)\n\n # Add controllable generator\n if gen_locations is not None:\n for idx, bus_number in enumerate(gen_locations):\n pp.create_gen(net, bus_number, p_kw=0.0, min_q_kvar=0.0, max_q_kvar=0.0, min_p_kw=gen_p_min,\n max_p_kw=gen_p_max)\n\n ##### TODO : Have different limits for different generators and storage #####\n\n # add loads and static generation\n if static_feeds is None:\n print('No loads or generation assigned to network')\n else:\n if len(static_feeds) > 0:\n for key, val in static_feeds.items():\n init_flow = val[0]\n print('init_flow: ', init_flow, 'at bus: ', key)\n if init_flow > 0:\n pp.create_load(net, bus=key, p_kw=init_flow, q_kvar=0)\n else:\n pp.create_sgen(net, bus=key, p_kw=init_flow, q_kvar=0)\n\n return net\n\n\ndef standard_lv(env_name, remove_q=True, static_feeds_new=None, clear_loads_sgen=False, clear_gen=True,\n battery_locations=None, percent_battery_buses=0.5, batteries_on_leaf_nodes_only=True, 
init_soc=0.5,\n energy_capacity=20.0, gen_locations=None, gen_p_max=0.0, gen_p_min=-50.0,\n storage_p_max=50.0, storage_p_min=-50.0):\n \"\"\"This function creates a network model using the set of synthetic voltage control low voltage (LV) networks from\n pandapower.\n\n The environment name, env_name, chooses which of the models to create out of 'rural_1', 'rural_2', 'village_1',\n 'village_2', and 'suburb_1'.\n\n Then options can be triggered to remove all reactive power components from the network (as we do in this project),\n or to remove static generators, loads, and generators that come with the standard model of the network. New\n batteries and generators are added which will be used as controllable resources by the agent.\n\n Static_feeds is a dictionary used by other functions to define the state of the network as we step through time, and\n contains the power values of the non-controllable elements: static generators and loads. In this method we use\n static_feeds_new, a subset of static_feeds, to create new loads and static generators in the network that did not\n ship with the model.\n \"\"\"\n\n net = mknet(network_class=env_name)\n\n # Remove q components\n if remove_q:\n net.load.q_kvar = 0\n net.sgen.q_kvar = 0\n net.gen.q_kvar = 0\n net.gen.min_q_kvar = 0\n net.gen.max_q_kvar = 0\n net.shunt.in_service = False\n\n # Remove built in loads and generators\n if clear_loads_sgen:\n net.load.in_service = False\n net.sgen.in_service = False\n if clear_gen:\n net.gen.in_service = False\n net.storage.in_service = False\n\n # add controllable storage\n if battery_locations is not None:\n applied_battery_locations = battery_locations\n elif percent_battery_buses > 0:\n if batteries_on_leaf_nodes_only:\n leaf_nodes = []\n for i in net.line.to_bus.values:\n if i not in net.line.from_bus.values:\n leaf_nodes.append(i)\n applied_battery_locations = np.random.choice(leaf_nodes, int(percent_battery_buses * len(leaf_nodes)),\n replace=False)\n else:\n applied_battery_locations = np.random.choice(net.bus.shape[0],\n int(percent_battery_buses * net.bus.shape[0]), replace=False)\n if len(applied_battery_locations) > 0:\n num_batteries = len(applied_battery_locations)\n for idx, bus_number in enumerate(applied_battery_locations):\n energy_capacity_here = energy_capacity\n init_soc_here = init_soc\n if np.size(energy_capacity) > 1:\n energy_capacity_here = energy_capacity[0]\n if np.size(energy_capacity) == num_batteries:\n energy_capacity_here = energy_capacity[idx]\n if np.size(init_soc) > 1:\n init_soc_here = init_soc[0]\n if np.size(energy_capacity) == num_batteries:\n init_soc_here = init_soc[idx]\n add_battery(net, bus_number=bus_number, p_init=0.0, energy_capacity=energy_capacity_here,\n init_soc=init_soc_here, max_p=storage_p_max, min_p=storage_p_min)\n # Add controllable generator\n if gen_locations is not None:\n for idx, bus_number in enumerate(gen_locations):\n pp.create_gen(net, bus_number, p_kw=0.0, min_q_kvar=0.0, max_q_kvar=0.0, max_p_kw=gen_p_max,\n min_p_kw=gen_p_min)\n\n if static_feeds_new is None:\n print('No loads or generation added to network')\n else:\n if len(static_feeds_new) > 0:\n for key, val in static_feeds_new.items():\n init_flow = val[0]\n print('init_flow: ', init_flow, 'at bus: ', key)\n if init_flow > 0:\n pp.create_load(net, bus=key, p_kw=init_flow, q_kvar=0)\n else:\n pp.create_sgen(net, bus=key, p_kw=init_flow, q_kvar=0)\n\n # Name buses for plotting\n for i in range(net.bus.name.shape[0]):\n net.bus.name.at[i] = 'bus' + str(i)\n\n return net\n\n\n" ]
[ [ "tensorflow.layers.flatten", "tensorflow.initializers.random_uniform", "tensorflow.multiply", "tensorflow.keras.activations.tanh", "tensorflow.control_dependencies", "tensorflow.get_collection", "tensorflow.keras.layers.Dense", "tensorflow.gradients", "tensorflow.placeholder", "tensorflow.keras.activations.relu", "tensorflow.div", "tensorflow.train.AdamOptimizer", "tensorflow.trainable_variables" ], [ "numpy.size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xenomarz/deep-signature
[ "f831f05971727c5d00cf3b5c556b6a8b658048df", "f831f05971727c5d00cf3b5c556b6a8b658048df" ]
[ "notebooks/utils/utils.py", "deep_signature/linalg/affine_transform.py" ]
[ "# python peripherals\nimport random\n\n# scipy\nimport scipy.io\nimport scipy.stats as ss\n\n# numpy\nimport numpy\n\n# matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.collections as mcoll\nimport matplotlib.ticker as ticker\nimport matplotlib.lines\n\n# pytorch\nimport torch\n\n# pandas\nimport pandas\n\n# ipython\nfrom IPython.display import display, HTML\n\n# deep signature\nfrom deep_signature.data_manipulation import curve_sampling\nfrom deep_signature.data_manipulation import curve_processing\nfrom deep_signature.linalg import euclidean_transform\nfrom deep_signature.linalg import affine_transform\nfrom deep_signature.utils import utils\n\n# matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport matplotlib.lines\n\n\n# plotly\nfrom plotly.subplots import make_subplots\nfrom plotly import graph_objects\n\n\n# https://stackoverflow.com/questions/36074455/python-matplotlib-with-a-line-color-gradient-and-colorbar\nfrom deep_signature.stats import discrete_distribution\n\n\n# ---------------\n# PLOTLY ROUTINES\n# ---------------\ndef plot_dist_plotly(fig, row, col, dist, line_width=2, line_color='black', point_size=10, cmap='hsv'):\n x = numpy.array(range(dist.shape[0]))\n y = dist\n\n fig.add_trace(\n trace=graph_objects.Scatter(\n x=x,\n y=y,\n mode='lines+markers',\n line={\n 'color': line_color,\n 'width': line_width\n },\n marker={\n 'color': x,\n 'colorscale': cmap,\n 'size': point_size\n },\n customdata=x,\n hovertemplate='%{customdata}'),\n row=row,\n col=col)\n\n\ndef plot_curve_sample_plotly(fig, row, col, name, curve, curve_sample, color, point_size=5, color_scale='hsv'):\n x = curve_sample[:, 0]\n y = curve_sample[:, 1]\n\n index_colors = isinstance(color, (list, numpy.ndarray))\n\n fig.add_trace(\n trace=graph_objects.Scatter(\n name=name,\n x=x,\n y=y,\n mode='markers',\n marker={\n 'color': color,\n 'cmin': 0,\n 'cmax': curve.shape[0],\n 'colorscale': color_scale,\n 'size': point_size\n },\n customdata=color if index_colors else None,\n hovertemplate='%{customdata}' if index_colors else None,\n hoverinfo='skip' if not index_colors else None),\n row=row,\n col=col)\n\n\ndef plot_curve_plotly(fig, row, col, curve, line_width=2, line_color='green'):\n x = curve[:, 0]\n y = curve[:, 1]\n\n fig.add_trace(\n trace=graph_objects.Scatter(\n x=x,\n y=y,\n mode='lines+markers',\n line={\n 'color': line_color,\n 'width': line_width\n }),\n row=row,\n col=col)\n\n\ndef plot_curvature_plotly(fig, row, col, name, curvature, line_width=2, line_color='green'):\n x = numpy.array(range(curvature.shape[0]))\n y = curvature\n\n fig.add_trace(\n trace=graph_objects.Scatter(\n name=name,\n x=x,\n y=y,\n mode='lines+markers',\n line={\n 'color': line_color,\n 'width': line_width\n },\n marker={\n 'color': line_color,\n }),\n row=row,\n col=col)\n\n\ndef plot_arclength_plotly(fig, row, col, name, arclength, line_width=2, line_color='green'):\n x = numpy.array(range(arclength.shape[0]))\n y = arclength\n\n fig.add_trace(\n trace=graph_objects.Scatter(\n name=name,\n x=x,\n y=y,\n mode='lines+markers',\n line={\n 'color': line_color,\n 'width': line_width\n },\n marker={\n 'color': line_color,\n }),\n row=row,\n col=col)\n\n\ndef plot_curvature_with_cmap_plotly(fig, row, col, name, curvature, curve, indices, line_color='black', line_width=2, point_size=5, color_scale='hsv'):\n x = numpy.array(range(curvature.shape[0]))\n y = curvature\n\n fig.add_trace(\n trace=graph_objects.Scatter(\n name=name,\n x=x,\n y=y,\n mode='lines+markers',\n line={\n 
'color': line_color,\n 'width': line_width\n },\n marker={\n 'color': indices,\n 'cmin': 0,\n 'cmax': curve.shape[0],\n 'colorscale': color_scale,\n 'size': point_size\n },\n customdata=indices,\n hovertemplate='%{customdata}'),\n row=row,\n col=col)\n\n# -------------------\n# MATPLOTLIB ROUTINES\n# -------------------\ndef colorline(ax, x, y, z=None, cmap='copper', norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0):\n \"\"\"\n http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb\n http://matplotlib.org/examples/pylab_examples/multicolored_line.html\n Plot a colored line with coordinates x and y\n Optionally specify colors in the array z\n Optionally specify a colormap, a norm function and a line width\n \"\"\"\n\n # Default colors equally spaced on [0,1]:\n if z is None:\n z = numpy.linspace(0.0, 1.0, len(x))\n\n # Special case if a single number:\n # to check for numerical input -- this is a hack\n if not hasattr(z, \"__iter__\"):\n z = numpy.array([z])\n\n z = numpy.asarray(z)\n\n segments = make_segments(x, y)\n lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha)\n\n # ax = plt.gca()\n ax.add_collection(lc)\n\n return lc\n\n\ndef make_segments(x, y):\n \"\"\"\n Create list of line segments from x and y coordinates, in the correct format\n for LineCollection: an array of the form numlines x (points per line) x 2 (x\n and y) array\n \"\"\"\n\n points = numpy.array([x, y]).T.reshape(-1, 1, 2)\n segments = numpy.concatenate([points[:-1], points[1:]], axis=1)\n return segments\n\n\ndef plot_dist(ax, dist):\n x = numpy.array(range(dist.shape[0]))\n y = dist\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(y.min(), y.max())\n return colorline(ax=ax, x=x, y=y, cmap='hsv')\n\n\ndef plot_curve_sample(ax, curve, curve_sample, indices, zorder, point_size=10, alpha=1, cmap='hsv'):\n x = curve_sample[:, 0]\n y = curve_sample[:, 1]\n c = numpy.linspace(0.0, 1.0, curve.shape[0])\n\n return ax.scatter(\n x=x,\n y=y,\n c=c[indices],\n s=point_size,\n cmap=cmap,\n alpha=alpha,\n norm=plt.Normalize(0.0, 1.0),\n zorder=zorder)\n\n\ndef plot_curve_section_center_point(ax, x, y, zorder, radius=1, color='white'):\n circle = plt.Circle((x, y), radius=radius, color=color, zorder=zorder)\n return ax.add_artist(circle)\n\n\ndef plot_graph(ax, x, y, linewidth=2, color='red', alpha=1, zorder=1):\n return ax.plot(x, y, linewidth=linewidth, color=color, alpha=alpha, zorder=zorder)\n\n\ndef plot_curve(ax, curve, linewidth=2, color='red', alpha=1, zorder=1):\n x = curve[:, 0]\n y = curve[:, 1]\n return plot_graph(ax=ax, x=x, y=y, linewidth=linewidth, color=color, alpha=alpha, zorder=zorder)\n\n\ndef plot_curvature(ax, curvature, color='red', linewidth=2, alpha=1):\n x = numpy.array(range(curvature.shape[0]))\n y = curvature\n\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(y.min(), y.max())\n\n return ax.plot(x, y, color=color, linewidth=linewidth, alpha=alpha)\n\n\ndef plot_curvature_with_cmap(ax, curvature, curve, indices, linewidth=2, alpha=1, cmap='hsv'):\n x = numpy.array(range(curvature.shape[0]))\n y = curvature\n\n c = numpy.linspace(0.0, 1.0, curve.shape[0])\n z = c[indices]\n\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(y.min(), y.max())\n\n return colorline(ax=ax, x=x, y=y, z=z, cmap='hsv')\n\n\ndef plot_sample(ax, sample, color, zorder, point_size=10, alpha=1):\n x = sample[:, 0]\n y = sample[:, 1]\n\n return ax.scatter(\n x=x,\n y=y,\n s=point_size,\n color=color,\n alpha=alpha,\n zorder=zorder)\n\n\n# 
---------------------\n# GROUND TRUTH ROUTINES\n# ---------------------\ndef calculate_arclength_by_index(curve_sections, transform_type, modifier=None):\n curve = curve_sections['curve']\n full_sections = curve_sections['full_sections']\n true_arclength = numpy.zeros([len(full_sections) + 1, 2, 4])\n for i, full_section in enumerate(full_sections):\n point_index = i + 1\n for j, (indices, sample, accumulate) in enumerate(zip(full_section['indices'], full_section['samples'], full_section['accumulate'])):\n true_arclength[point_index, 0, j] = point_index\n if transform_type == 'equiaffine':\n if modifier == 'calabi':\n left_indices = numpy.mod(numpy.array([indices[0] - 1]), curve.shape[0])\n right_indices = numpy.mod(numpy.array([indices[-1] + 1]), curve.shape[0])\n segment_indices = numpy.concatenate((left_indices, indices, right_indices))\n sample = curve[segment_indices]\n else:\n left_indices = numpy.mod(numpy.array([indices[0] - 2, indices[0] - 1]), curve.shape[0])\n right_indices = numpy.mod(numpy.array([indices[-1] + 1, indices[-1] + 2]), curve.shape[0])\n segment_indices = numpy.concatenate((left_indices, indices, right_indices))\n sample = curve[segment_indices]\n\n if transform_type == 'euclidean':\n true_arclength[point_index, 1, j] = curve_processing.calculate_euclidean_arclength(curve=sample)[-1]\n elif transform_type == 'equiaffine':\n if modifier == 'calabi':\n true_arclength[point_index, 1, j] = curve_processing.calculate_equiaffine_arclength(curve=sample)[-1]\n else:\n true_arclength[point_index, 1, j] = curve_processing.calculate_equiaffine_arclength_by_euclidean_metrics(curve=sample)[-1]\n\n if accumulate is True:\n true_arclength[point_index, 1, j] = true_arclength[point_index, 1, j] + true_arclength[i, 1, j]\n\n return true_arclength\n\n\ndef calculate_curvature_by_index(curve, transform_type):\n true_curvature = numpy.zeros([curve.shape[0], 2])\n true_curvature[:, 0] = numpy.arange(curve.shape[0])\n\n if transform_type == 'euclidean':\n true_curvature[:, 1] = curve_processing.calculate_euclidean_curvature(curve=curve)\n elif transform_type == 'equiaffine':\n true_curvature[:, 1] = curve_processing.calculate_equiaffine_curvature(curve=curve)\n elif transform_type == 'affine':\n true_curvature[:, 1] = 0\n\n return true_curvature\n\n\n# -------------------\n# PREDICTION ROUTINES\n# -------------------\ndef predict_curvature_by_index(model, curve_neighborhoods):\n sampled_neighborhoods = curve_neighborhoods['sampled_neighborhoods']\n predicted_curvature = numpy.zeros([len(sampled_neighborhoods), 2])\n for point_index, sampled_neighborhood in enumerate(sampled_neighborhoods):\n for (indices, sample) in zip(sampled_neighborhood['indices'], sampled_neighborhood['samples']):\n sample = curve_processing.normalize_curve(curve=sample)\n curvature_batch_data = torch.unsqueeze(torch.unsqueeze(torch.from_numpy(sample).double(), dim=0), dim=0).cuda()\n with torch.no_grad():\n predicted_curvature[point_index, 0] = point_index\n predicted_curvature[point_index, 1] = torch.squeeze(model(curvature_batch_data), dim=0).cpu().detach().numpy()\n return predicted_curvature\n\n\ndef predict_arclength_by_index(model, curve_sections):\n sampled_sections = curve_sections['sampled_sections']\n predicted_arclength = numpy.zeros([len(sampled_sections) + 1, 2, 4])\n for i, sampled_section in enumerate(sampled_sections):\n point_index = i + 1\n for j, (indices, sample, accumulate) in enumerate(zip(sampled_section['indices'], sampled_section['samples'], sampled_section['accumulate'])):\n sample = 
curve_processing.normalize_curve(curve=sample)\n arclength_batch_data = torch.unsqueeze(torch.unsqueeze(torch.from_numpy(sample).double(), dim=0), dim=0).cuda()\n with torch.no_grad():\n predicted_arclength[point_index, 0, j] = point_index\n predicted_arclength[point_index, 1, j] = torch.squeeze(model(arclength_batch_data), dim=0).cpu().detach().numpy()\n\n if accumulate is True:\n predicted_arclength[point_index, 1, j] = predicted_arclength[point_index, 1, j] + predicted_arclength[i, 1, j]\n\n return predicted_arclength\n\n\n# --------------------------\n# RECORD GENERATION ROUTINES\n# --------------------------\ndef generate_curve_records(arclength_model, curvature_model, curves, transform_type, comparison_curves_count, sampling_ratio, anchors_ratio, step, neighborhood_supporting_points_count, section_supporting_points_count):\n curve_records = []\n factors = []\n\n for curve_index, curve in enumerate(curves):\n # comparison_curves = [curve_processing.center_curve(curve=curve)]\n comparison_curves = []\n for i in range(comparison_curves_count):\n if transform_type == 'euclidean':\n transform = euclidean_transform.generate_random_euclidean_transform_2d()\n elif transform_type == 'equiaffine':\n transform = affine_transform.generate_random_equiaffine_transform_2d()\n elif transform_type == 'affine':\n transform = affine_transform.generate_random_affine_transform_2d(max_scale=1, min_eig_value_ratio=1, max_eig_value_ratio=1.3, min_eig_value=0.5, max_eig_value=2)\n transformed_curve = curve_processing.transform_curve(curve=curve, transform=transform)\n comparison_curves.append(curve_processing.center_curve(curve=transformed_curve))\n\n curve_record = {\n 'curve': curve_processing.center_curve(curve=curve),\n 'comparisons': []\n }\n\n anchor_indices = numpy.linspace(start=0, stop=curve.shape[0], num=int(anchors_ratio * curve.shape[0]), endpoint=False, dtype=int)\n for i, comparison_curve in enumerate(comparison_curves):\n comparison_curve_points_count = comparison_curve.shape[0]\n sampling_points_count = int(sampling_ratio * comparison_curve_points_count)\n dist = discrete_distribution.random_discrete_dist(bins=comparison_curve_points_count, multimodality=60, max_density=1, count=1)[0]\n\n curve_neighborhoods = extract_curve_neighborhoods(\n curve=comparison_curve,\n dist=dist,\n sampling_points_count=sampling_points_count,\n supporting_points_count=neighborhood_supporting_points_count,\n anchor_indices=anchor_indices)\n\n curve_sections = extract_curve_sections(\n curve=comparison_curve,\n step=step,\n sample_points=section_supporting_points_count)\n\n curve_neighborhoods_from_sections = extract_curve_neighborhoods_from_curve_sections(\n curve=comparison_curve,\n curve_sections=curve_sections,\n supporting_points_count=neighborhood_supporting_points_count)\n\n true_arclength = calculate_arclength_by_index(\n curve_sections=curve_sections,\n transform_type=transform_type)\n\n predicted_arclength = predict_arclength_by_index(\n model=arclength_model,\n curve_sections=curve_sections)\n\n true_curvature = calculate_curvature_by_index(\n curve=curve,\n transform_type=transform_type)\n\n predicted_curvature = predict_curvature_by_index(\n model=curvature_model,\n curve_neighborhoods=curve_neighborhoods)\n\n predicted_curvature_signature = predict_curvature_by_index(\n model=curvature_model,\n curve_neighborhoods=curve_neighborhoods_from_sections)\n\n sampled_indices = discrete_distribution.sample_discrete_dist(dist=dist, sampling_points_count=sampling_points_count)\n sampled_curve = 
comparison_curve[sampled_indices]\n anchors = comparison_curve[anchor_indices]\n\n arclength_comparison = {\n 'curve_sections': curve_sections,\n 'true_arclength': true_arclength,\n 'predicted_arclength': predicted_arclength,\n 'predicted_arclength_original': predicted_arclength.copy()\n }\n\n curvature_comparison = {\n 'curve_neighborhoods': curve_neighborhoods,\n 'true_curvature': true_curvature,\n 'predicted_curvature': predicted_curvature,\n 'predicted_curvature_signature': predicted_curvature_signature\n }\n\n curve_record['comparisons'].append({\n 'curve': comparison_curve,\n 'sampled_curve': sampled_curve,\n 'sampled_indices': sampled_indices,\n 'anchor_indices': anchor_indices,\n 'anchors': anchors,\n 'dist': dist,\n 'arclength_comparison': arclength_comparison,\n 'curvature_comparison': curvature_comparison\n })\n\n factor = numpy.mean(true_arclength[1:, 1, 0] / predicted_arclength[1:, 1, 0])\n factors.append(factor)\n\n curve_records.append(curve_record)\n\n if transform_type != 'affine':\n factor = numpy.mean(numpy.array(factors))\n for curve_record in curve_records:\n for comparison in curve_record['comparisons']:\n comparison['arclength_comparison']['predicted_arclength'][:, 1, :] *= factor\n\n return curve_records\n\n\ndef extract_curve_neighborhoods(curve, dist, sampling_points_count, supporting_points_count, anchor_indices):\n sampled_neighborhoods = []\n indices_pool = discrete_distribution.sample_discrete_dist(dist=dist, sampling_points_count=sampling_points_count)\n for anchor_index in anchor_indices:\n sampled_indices = curve_sampling.sample_curve_neighborhood_indices(\n center_point_index=anchor_index,\n indices_pool=indices_pool,\n supporting_points_count=supporting_points_count)\n\n sampled_neighborhood = {\n 'indices': [sampled_indices],\n 'samples': [curve[sampled_indices]]\n }\n\n sampled_neighborhoods.append(sampled_neighborhood)\n\n return {\n 'sampled_neighborhoods': sampled_neighborhoods,\n 'curve': curve\n }\n\n\ndef extract_curve_neighborhoods_from_curve_sections(curve, curve_sections, supporting_points_count):\n sampled_neighborhoods = []\n sampled_sections = [curve_sections['sampled_sections'][-1]] + curve_sections['sampled_sections']\n for sampled_section in sampled_sections:\n indices12 = sampled_section['indices'][1]\n indices23 = sampled_section['indices'][2]\n indices = numpy.concatenate((indices12[-supporting_points_count-1:-1], [indices23[0]], indices23[1:supporting_points_count+1]))\n\n sampled_neighborhood = {\n 'indices': [indices],\n 'samples': [curve[indices]]\n }\n\n sampled_neighborhoods.append(sampled_neighborhood)\n\n return {\n 'sampled_neighborhoods': sampled_neighborhoods,\n 'curve': curve\n }\n\n\ndef extract_curve_sections(curve, step, sample_points):\n indices = list(range(curve.shape[0]))[::step]\n indices[-1] = 0\n indices.append(indices[1])\n\n sampled_sections = []\n full_sections = []\n\n for index1, index2, index3 in zip(indices, indices[1:], indices[2:]):\n sampled_indices1 = curve_sampling.sample_curve_section_indices_old(\n curve=curve,\n start_point_index=index1,\n end_point_index=index2,\n supporting_points_count=sample_points)\n\n sampled_indices3 = curve_sampling.sample_curve_section_indices_old(\n curve=curve,\n start_point_index=index2,\n end_point_index=index3,\n supporting_points_count=sample_points)\n\n sampled_indices4 = curve_sampling.sample_curve_section_indices_old(\n curve=curve,\n start_point_index=index1,\n end_point_index=index3,\n supporting_points_count=sample_points)\n\n sampled_section = {\n 'indices': 
[sampled_indices1, sampled_indices1, sampled_indices3, sampled_indices4],\n 'samples': [curve[sampled_indices1], curve[sampled_indices1], curve[sampled_indices3], curve[sampled_indices4]],\n 'accumulate': [True, False, False, False]\n }\n\n sampled_sections.append(sampled_section)\n\n full_indices1 = curve_sampling.sample_curve_section_indices_old(\n curve=curve,\n start_point_index=index1,\n end_point_index=index2)\n\n full_indices3 = curve_sampling.sample_curve_section_indices_old(\n curve=curve,\n start_point_index=index2,\n end_point_index=index3)\n\n full_indices4 = curve_sampling.sample_curve_section_indices_old(\n curve=curve,\n start_point_index=index1,\n end_point_index=index3)\n\n full_section = {\n 'indices': [full_indices1, full_indices1, full_indices3, full_indices4],\n 'samples': [curve[full_indices1], curve[full_indices1], curve[full_indices3], curve[full_indices4]],\n 'accumulate': [True, False, False, False]\n }\n\n full_sections.append(full_section)\n\n return {\n 'sampled_sections': sampled_sections,\n 'full_sections': full_sections,\n 'curve': curve\n }\n\n\n# -------------\n# PLOT ROUTINES\n# -------------\ndef plot_curve_curvature_comparisons(curve_records, curve_colors):\n for i, curve_record in enumerate(curve_records):\n display(HTML(f'<H1>Curve {i+1} - Curvature Comparison</H1>'))\n plot_curve_curvature_comparison(\n curve_record=curve_record,\n curve_colors=curve_colors)\n\n\ndef plot_curve_curvature_comparison(curve_record, curve_colors):\n axis_index = 0\n fontsize = 25\n axes_count = 15\n line_width = 2\n\n # ---------------------\n # PLOT CURVES TOGETHER\n # ---------------------\n fig = make_subplots(rows=1, cols=1)\n\n curve = curve_record['curve']\n plot_curve_plotly(fig=fig, row=1, col=1, curve=curve, line_width=line_width, line_color=curve_colors[-1])\n\n for i, comparison in enumerate(curve_record['comparisons']):\n curve = comparison['curve']\n plot_curve_plotly(fig=fig, row=1, col=1, curve=curve, line_width=line_width, line_color=curve_colors[i])\n\n fig.update_yaxes(\n scaleanchor=\"x\",\n scaleratio=1,\n )\n\n fig.show()\n\n # -------------------------------\n # PLOT CURVE SAMPLES SIDE BY SIDE\n # -------------------------------\n fig = make_subplots(rows=1, cols=len(curve_record['comparisons']))\n\n for i, comparison in enumerate(curve_record['comparisons']):\n sampled_curve = comparison['sampled_curve']\n curve = comparison['curve']\n\n plot_curve_sample_plotly(fig=fig, row=1, col=i+1, name=f'Sampled Curve {i+1}', curve=curve, curve_sample=sampled_curve, color=curve_colors[i], point_size=3)\n\n fig.update_yaxes(\n scaleanchor=f'x{i+1}',\n scaleratio=1,\n row=1,\n col=i+1)\n\n fig.show()\n\n # ----------------------------------------------------------------\n # PLOT CURVE SAMPLES, ANCHORS AND PREDICTED CURVATURE SIDE BY SIDE\n # ----------------------------------------------------------------\n\n button_offset = 0.1\n buttonX = 0.1\n buttonY = 1.3\n buttons_count = 2\n left_width = 0.25\n for i, comparison in enumerate(curve_record['comparisons']):\n fig = make_subplots(rows=1, cols=2, column_widths=[left_width, 1 - left_width])\n sampled_curve = comparison['sampled_curve']\n anchors = comparison['anchors']\n anchor_indices = comparison['anchor_indices']\n curve = comparison['curve']\n curvature_comparison = comparison['curvature_comparison']\n predicted_curvature = curvature_comparison['predicted_curvature']\n\n plot_curve_sample_plotly(fig=fig, row=1, col=1, name=\"Sampled Curve\", curve=curve, curve_sample=sampled_curve, color='grey')\n 
plot_curve_sample_plotly(fig=fig, row=1, col=1, name=\"Anchors\", curve=curve, curve_sample=anchors, color=anchor_indices, point_size=3)\n plot_curvature_with_cmap_plotly(fig=fig, row=1, col=2, name=\"Predicted Curvature at Anchors\", curve=curve, curvature=predicted_curvature[:, 1], indices=anchor_indices, line_color='grey', line_width=2, point_size=10, color_scale='hsv')\n\n # https://stackoverflow.com/questions/65941253/plotly-how-to-toggle-traces-with-a-button-similar-to-clicking-them-in-legend\n update_menus = [{} for _ in range(buttons_count)]\n button_labels = ['Toggle Samples', 'Toggle Anchors']\n for j in range(buttons_count):\n button = dict(method='restyle',\n label=button_labels[j],\n visible=True,\n args=[{'visible': True}, [j]],\n args2=[{'visible': False}, [j]])\n\n update_menus[j]['buttons'] = [button]\n update_menus[j]['showactive'] = False\n update_menus[j]['y'] = buttonY\n update_menus[j]['x'] = buttonX + j * button_offset\n update_menus[j]['type'] = 'buttons'\n\n fig.update_layout(\n showlegend=True,\n updatemenus=update_menus)\n\n fig.update_yaxes(\n scaleanchor=\"x\",\n scaleratio=1,\n row=1,\n col=1)\n\n fig.update_layout(\n legend=dict(\n orientation=\"v\",\n yanchor=\"bottom\",\n xanchor=\"right\"))\n\n fig.show()\n\n # ----------------------------------\n # PLOT PREDICTED CURVATURES TOGETHER\n # ----------------------------------\n fig = make_subplots(rows=1, cols=1)\n\n for i, comparison in enumerate(curve_record['comparisons']):\n curvature_comparison = comparison['curvature_comparison']\n predicted_curvature = curvature_comparison['predicted_curvature']\n\n plot_curvature_plotly(fig=fig, row=1, col=1, name=f'Predicted Curvature at Anchors {i+1}', curvature=predicted_curvature[:, 1], line_width=line_width, line_color=curve_colors[i])\n\n fig.show()\n\n\ndef plot_curve_curvature_comparison2(curve_record, curve_colors):\n fig, axes = plt.subplots(2, 1, figsize=(20,20))\n fig.patch.set_facecolor('white')\n for axis in axes:\n for label in (axis.get_xticklabels() + axis.get_yticklabels()):\n label.set_fontsize(10)\n\n axes[0].axis('equal')\n axes[0].set_xlabel('X Coordinate', fontsize=18)\n axes[0].set_ylabel('Y Coordinate', fontsize=18)\n\n for i, comparision in enumerate(curve_record['comparisons']):\n curve = comparision['curve']\n plot_curve(ax=axes[0], curve=curve, color=curve_colors[i], linewidth=3)\n\n\n # axis_index = 0\n # fontsize = 25\n # axes_count = 15\n # line_width = 2\n #\n # # ---------------------\n # # PLOT CURVES TOGETHER\n # # ---------------------\n # fig = make_subplots(rows=1, cols=1)\n #\n # for i, comparison in enumerate(curve_record['comparisons']):\n # curve = comparison['curve']\n # plot_curve_plotly(fig=fig, row=1, col=1, curve=curve, line_width=line_width, line_color=curve_colors[i])\n #\n # fig.update_yaxes(\n # scaleanchor=\"x\",\n # scaleratio=1,\n # )\n #\n # fig.show()\n\n # # -------------------------------\n # # PLOT CURVE SAMPLES SIDE BY SIDE\n # # -------------------------------\n # fig = make_subplots(rows=1, cols=len(curve_record['comparisons']))\n #\n # for i, comparison in enumerate(curve_record['comparisons']):\n # sampled_curve = comparison['sampled_curve']\n # curve = comparison['curve']\n #\n # plot_curve_sample_plotly(fig=fig, row=1, col=i+1, name=f'Sampled Curve {i+1}', curve=curve, curve_sample=sampled_curve, color=curve_colors[i], point_size=3)\n #\n # fig.update_yaxes(\n # scaleanchor=f'x{i+1}',\n # scaleratio=1,\n # row=1,\n # col=i+1)\n #\n # fig.show()\n\n # # 
----------------------------------------------------------------\n # # PLOT CURVE SAMPLES, ANCHORS AND PREDICTED CURVATURE SIDE BY SIDE\n # # ----------------------------------------------------------------\n #\n # button_offset = 0.1\n # buttonX = 0.1\n # buttonY = 1.3\n # buttons_count = 2\n # left_width = 0.25\n # for i, comparison in enumerate(curve_record['comparisons']):\n # fig = make_subplots(rows=1, cols=2, column_widths=[left_width, 1 - left_width])\n # sampled_curve = comparison['sampled_curve']\n # anchors = comparison['anchors']\n # anchor_indices = comparison['anchor_indices']\n # curve = comparison['curve']\n # curvature_comparison = comparison['curvature_comparison']\n # predicted_curvature = curvature_comparison['predicted_curvature']\n #\n # plot_curve_sample_plotly(fig=fig, row=1, col=1, name=\"Sampled Curve\", curve=curve, curve_sample=sampled_curve, color='grey')\n # plot_curve_sample_plotly(fig=fig, row=1, col=1, name=\"Anchors\", curve=curve, curve_sample=anchors, color=anchor_indices, point_size=3)\n # plot_curvature_with_cmap_plotly(fig=fig, row=1, col=2, name=\"Predicted Curvature at Anchors\", curve=curve, curvature=predicted_curvature[:, 1], indices=anchor_indices, line_color='grey', line_width=2, point_size=10, color_scale='hsv')\n #\n # # https://stackoverflow.com/questions/65941253/plotly-how-to-toggle-traces-with-a-button-similar-to-clicking-them-in-legend\n # update_menus = [{} for _ in range(buttons_count)]\n # button_labels = ['Toggle Samples', 'Toggle Anchors']\n # for j in range(buttons_count):\n # button = dict(method='restyle',\n # label=button_labels[j],\n # visible=True,\n # args=[{'visible': True}, [j]],\n # args2=[{'visible': False}, [j]])\n #\n # update_menus[j]['buttons'] = [button]\n # update_menus[j]['showactive'] = False\n # update_menus[j]['y'] = buttonY\n # update_menus[j]['x'] = buttonX + j * button_offset\n # update_menus[j]['type'] = 'buttons'\n #\n # fig.update_layout(\n # showlegend=True,\n # updatemenus=update_menus)\n #\n # fig.update_yaxes(\n # scaleanchor=\"x\",\n # scaleratio=1,\n # row=1,\n # col=1)\n #\n # fig.update_layout(\n # legend=dict(\n # orientation=\"v\",\n # yanchor=\"bottom\",\n # xanchor=\"right\"))\n #\n # fig.show()\n\n # ----------------------------------\n # PLOT PREDICTED CURVATURES TOGETHER\n # ----------------------------------\n # fig = make_subplots(rows=1, cols=1)\n\n axes[1].axis('equal')\n axes[1].set_xlabel('Index', fontsize=18)\n axes[1].set_ylabel('Curvature', fontsize=18)\n\n for i, comparison in enumerate(curve_record['comparisons']):\n curvature_comparison = comparison['curvature_comparison']\n predicted_curvature = curvature_comparison['predicted_curvature']\n\n plot_curvature(ax=axes[1], curvature=predicted_curvature[:, 1], color=curve_colors[i])\n\n # plot_curvature_plotly(fig=fig, row=1, col=1, name=f'Predicted Curvature at Anchors {i+1}', curvature=predicted_curvature[:, 1], line_width=line_width, line_color=curve_colors[i])\n\n fig.show()\n\n\ndef plot_curve_arclength_records(curve_records, true_arclength_colors, predicted_arclength_colors, sample_colors, curve_color='orange', anchor_color='blue', first_anchor_color='black', second_anchor_color='pink'):\n for i, curve_record in enumerate(curve_records):\n display(HTML(f'<H1>Curve {i + 1} - Arc-Length Comparison</H1>'))\n plot_curve_arclength_record(\n curve_arclength_record=curve_record,\n true_arclength_colors=true_arclength_colors,\n predicted_arclength_colors=predicted_arclength_colors,\n sample_colors=sample_colors,\n 
curve_color=curve_color,\n anchor_color=anchor_color,\n first_anchor_color=first_anchor_color,\n second_anchor_color=second_anchor_color)\n\n\ndef plot_curve_arclength_record(curve_arclength_record, true_arclength_colors, predicted_arclength_colors, sample_colors, curve_color, anchor_color, first_anchor_color, second_anchor_color):\n fig, axes = plt.subplots(2, 1, figsize=(20,20))\n fig.patch.set_facecolor('white')\n for axis in axes:\n for label in (axis.get_xticklabels() + axis.get_yticklabels()):\n label.set_fontsize(10)\n\n axes[0].axis('equal')\n for i, curve_comparison in enumerate(curve_arclength_record['comparisons']):\n curve_arclength = curve_comparison['arclength_comparison']\n curve_sections = curve_arclength['curve_sections']\n curve = curve_sections['curve']\n for j, sampled_section in enumerate(curve_sections['sampled_sections']):\n point_size_regular = 7\n point_size_anchor = 50\n sample = sampled_section['samples'][0]\n axes[0].set_xlabel('X Coordinate', fontsize=18)\n axes[0].set_ylabel('Y Coordinate', fontsize=18)\n plot_curve(ax=axes[0], curve=curve, color=curve_color, linewidth=3)\n plot_sample(ax=axes[0], sample=sample, point_size=point_size_regular, color=sample_colors[i], zorder=150)\n plot_sample(ax=axes[0], sample=numpy.array([[sample[0,0] ,sample[0, 1]], [sample[-1,0] ,sample[-1, 1]]]), point_size=point_size_anchor, alpha=1, color=anchor_color, zorder=200)\n if j == 0:\n plot_sample(ax=axes[0], sample=numpy.array([[sample[0, 0] ,sample[0, 1]]]), point_size=point_size_anchor, alpha=1, color=first_anchor_color, zorder=300)\n plot_sample(ax=axes[0], sample=numpy.array([[sample[-1, 0] ,sample[-1, 1]]]), point_size=point_size_anchor, alpha=1, color=second_anchor_color, zorder=300)\n\n axes[1].set_xlabel('Index', fontsize=18)\n axes[1].set_ylabel('Arc-Length', fontsize=18)\n axes[1].xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n true_arclength_legend_labels = []\n predicted_arclength_legend_labels = []\n for i, curve_comparison in enumerate(curve_arclength_record['comparisons']):\n curve_arclength = curve_comparison['arclength_comparison']\n true_arclength = curve_arclength['true_arclength']\n predicted_arclength = curve_arclength['predicted_arclength']\n\n plot_sample(ax=axes[1], sample=true_arclength[:, :, 0], point_size=40, color=true_arclength_colors[i], zorder=250)\n plot_curve(ax=axes[1], curve=true_arclength[:, :, 0], linewidth=2, color=true_arclength_colors[i], zorder=150)\n true_arclength_legend_labels.append(f'True Arclength (Curve #{i + 1})')\n\n plot_sample(ax=axes[1], sample=predicted_arclength[:, :, 0], point_size=40, color=predicted_arclength_colors[i], zorder=250)\n plot_curve(ax=axes[1], curve=predicted_arclength[:, :, 0], linewidth=2, color=predicted_arclength_colors[i], zorder=150)\n predicted_arclength_legend_labels.append(f'Predicted Arclength (Curve #{i + 1})')\n\n true_arclength_legend_lines = [matplotlib.lines.Line2D([0], [0], color=color, linewidth=3) for color in true_arclength_colors]\n predicted_arclength_legend_lines = [matplotlib.lines.Line2D([0], [0], color=color, linewidth=3) for color in predicted_arclength_colors]\n legend_labels = true_arclength_legend_labels + predicted_arclength_legend_labels\n legend_lines = true_arclength_legend_lines + predicted_arclength_legend_lines\n axes[1].legend(legend_lines, legend_labels, prop={'size': 20})\n\n for i, curve_comparison in enumerate(curve_arclength_record['comparisons']):\n curve_arclength = curve_comparison['arclength_comparison']\n true_arclength = 
curve_arclength['true_arclength']\n predicted_arclength = curve_arclength['predicted_arclength']\n predicted_arclength_original = curve_arclength['predicted_arclength_original']\n\n d = {\n 'True [i, i+1]': true_arclength[1:, 1, 1],\n 'True [i+1, i+2]': true_arclength[1:, 1, 2],\n 'True [i, i+2]': true_arclength[1:, 1, 3],\n 'True [i, i+1] + True [i+1, i+2]': true_arclength[1:, 1, 1] + true_arclength[1:, 1, 2],\n 'Pred [i, i+1]': predicted_arclength[1:, 1, 1],\n 'Pred [i+1, i+2]': predicted_arclength[1:, 1, 2],\n 'Pred [i, i+2]': predicted_arclength[1:, 1, 3],\n 'Pred [i, i+1] + Pred [i+1, i+2]': predicted_arclength[1:, 1, 1] + predicted_arclength[1:, 1, 2],\n 'Diff [i, i+2]': numpy.abs((true_arclength[1:, 1, 3] - predicted_arclength[1:, 1, 3]) / true_arclength[1:, 1, 3]) * 100,\n 'PredOrg [i, i+1]': predicted_arclength_original[1:, 1, 1],\n 'PredOrg [i+1, i+2]': predicted_arclength_original[1:, 1, 2],\n 'PredOrg [i, i+2]': predicted_arclength_original[1:, 1, 3],\n 'PredOrg [i, i+1] + PredOrg [i+1, i+2]': predicted_arclength_original[1:, 1, 1] + predicted_arclength_original[1:, 1, 2]\n }\n\n df = pandas.DataFrame(data=d)\n\n style = df.style.set_properties(**{'background-color': true_arclength_colors[i]}, subset=list(d.keys())[:4])\n style = style.set_properties(**{'background-color': predicted_arclength_colors[i]}, subset=list(d.keys())[4:8])\n style = style.set_properties(**{'color': 'white', 'border-color': 'black', 'border-style': 'solid', 'border-width': '1px'})\n\n display(HTML(style.render()))\n\n # predicted_arclength1 = curve_arclength_record[0]['predicted_arclength']\n # predicted_arclength2 = curve_arclength_record[1]['predicted_arclength']\n # display(HTML((numpy.mean(predicted_arclength1[1:, 1, 3] - predicted_arclength2[1:, 1, 3])))\n\n predicted_arclength1 = curve_arclength_record['comparisons'][0]['arclength_comparison']['predicted_arclength']\n predicted_arclength2 = curve_arclength_record['comparisons'][1]['arclength_comparison']['predicted_arclength']\n\n d = {\n 'Diff [i, i+2]': (((numpy.abs(predicted_arclength1[1:, 1, 3] - predicted_arclength2[1:, 1, 3]) / predicted_arclength1[1:, 1, 3]) + (numpy.abs(predicted_arclength1[1:, 1, 3] - predicted_arclength2[1:, 1, 3]) / predicted_arclength2[1:, 1, 3])) / 2) * 100\n }\n\n df = pandas.DataFrame(data=d)\n\n # style = df.style.set_properties(**{'background-color': true_arclength_colors[i]}, subset=list(d.keys())[:4])\n # style = style.set_properties(**{'background-color': predicted_arclength_colors[i]}, subset=list(d.keys())[4:8])\n # style = style.set_properties(**{'color': 'white', 'border-color': 'black', 'border-style': 'solid', 'border-width': '1px'})\n\n display(HTML(df.style.render()))\n\n plt.show()\n\n\ndef plot_curve_signature_comparisons(curve_records, curve_colors, sample_colors, curve_color='orange', anchor_color='blue', first_anchor_color='black', second_anchor_color='pink'):\n for i, curve_record in enumerate(curve_records):\n display(HTML(f'<H1>Curve {i+1} - Signature Comparison</H1>'))\n plot_curve_signature_comparision(\n curve_record=curve_record,\n curve_colors=curve_colors,\n sample_colors=sample_colors,\n curve_color=curve_color,\n anchor_color=anchor_color,\n first_anchor_color=first_anchor_color,\n second_anchor_color=second_anchor_color)\n\n\ndef plot_curve_signature_comparision(curve_record, curve_colors, sample_colors, curve_color, anchor_color, first_anchor_color, second_anchor_color):\n fig, axes = plt.subplots(3, 1, figsize=(20,20))\n fig.patch.set_facecolor('white')\n for axis in axes:\n for label 
in (axis.get_xticklabels() + axis.get_yticklabels()):\n label.set_fontsize(10)\n\n axes[0].axis('equal')\n axes[0].set_xlabel('X Coordinate', fontsize=18)\n axes[0].set_ylabel('Y Coordinate', fontsize=18)\n\n for i, comparision in enumerate(curve_record['comparisons']):\n curve = comparision['curve']\n plot_curve(ax=axes[0], curve=curve, color=curve_colors[i], linewidth=3)\n\n axes[1].axis('equal')\n for i, curve_comparison in enumerate(curve_record['comparisons']):\n curve_arclength = curve_comparison['arclength_comparison']\n curve_sections = curve_arclength['curve_sections']\n curve = curve_sections['curve']\n for j, sampled_section in enumerate(curve_sections['sampled_sections']):\n point_size_regular = 7\n point_size_anchor = 50\n sample = sampled_section['samples'][0]\n axes[1].set_xlabel('X Coordinate', fontsize=18)\n axes[1].set_ylabel('Y Coordinate', fontsize=18)\n plot_curve(ax=axes[1], curve=curve, color=curve_color, linewidth=3)\n plot_sample(ax=axes[1], sample=sample, point_size=point_size_regular, color=sample_colors[i], zorder=150)\n plot_sample(ax=axes[1], sample=numpy.array([[sample[0,0] ,sample[0, 1]], [sample[-1,0] ,sample[-1, 1]]]), point_size=point_size_anchor, alpha=1, color=anchor_color, zorder=200)\n if j == 0:\n plot_sample(ax=axes[1], sample=numpy.array([[sample[0, 0] ,sample[0, 1]]]), point_size=point_size_anchor, alpha=1, color=first_anchor_color, zorder=300)\n plot_sample(ax=axes[1], sample=numpy.array([[sample[-1, 0] ,sample[-1, 1]]]), point_size=point_size_anchor, alpha=1, color=second_anchor_color, zorder=300)\n\n axes[2].set_xlabel('Arc-Length', fontsize=18)\n axes[2].set_ylabel('Curvature', fontsize=18)\n axes[2].xaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n\n for i, comparision in enumerate(curve_record['comparisons']):\n arclength_comparison = comparision['arclength_comparison']\n curvature_comparison = comparision['curvature_comparison']\n predicted_arclength = arclength_comparison['predicted_arclength'][:, 1, 0]\n # predicted_arclength = numpy.concatenate((numpy.array([0]), predicted_arclength))\n predicted_curvature = curvature_comparison['predicted_curvature_signature'][:, 1]\n plot_graph(ax=axes[2], x=predicted_arclength, y=predicted_curvature, color=curve_colors[i], linewidth=3)\n\n plt.show()", "# numpy\nimport numpy\n\n\ndef _validate_eigen_values_ratio(A, min_eig_value_ratio, max_eig_value_ratio, min_eig_value, max_eig_value):\n w, v = numpy.linalg.eig(A)\n r1 = w[0] / w[1]\n r2 = w[1] / w[0]\n r = numpy.maximum(r1, r2)\n # return ((r > min_eig_value_ratio) and (r < max_eig_value_ratio))\n return ((min_eig_value <= w[0] <= max_eig_value) and (min_eig_value <= w[1] <= max_eig_value)) and ((r > min_eig_value_ratio) and (r < max_eig_value_ratio))\n\n\n# max_scale=1, min_eig_value_ratio=3, max_eig_value_ratio=8, min_eig_value=0.1, max_eig_value=8\n# def generate_random_equiaffine_transform_2d(max_scale=1, min_eig_value_ratio=3, max_eig_value_ratio=8, min_eig_value=0.1, max_eig_value=8):\ndef generate_random_equiaffine_transform_2d(max_scale=3, min_eig_value_ratio=1.3, max_eig_value_ratio=8, min_eig_value=0.1, max_eig_value=8):\n while True:\n scale = numpy.random.uniform(low=0, high=max_scale, size=2)\n coeffs = numpy.random.random(size=2)\n entries = scale * coeffs\n L = numpy.array([[1, 0], [entries[0], 1]])\n U = numpy.array([[1, entries[1]], [0, 1]])\n A = numpy.matmul(L, U)\n if _validate_eigen_values_ratio(A=A, min_eig_value_ratio=min_eig_value_ratio, max_eig_value_ratio=max_eig_value_ratio, min_eig_value=(1/max_eig_value), 
max_eig_value=max_eig_value):\n return A\n\n\ndef generate_random_affine_transform_2d(max_scale=1, min_eig_value_ratio=3, max_eig_value_ratio=8, min_eig_value=0.1, max_eig_value=8):\n while True:\n A = numpy.random.uniform(low=0, high=max_scale, size=(2,2))\n if _validate_eigen_values_ratio(A=A, min_eig_value_ratio=min_eig_value_ratio, max_eig_value_ratio=max_eig_value_ratio, min_eig_value=min_eig_value, max_eig_value=max_eig_value):\n return A\n" ]
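The `generate_random_equiaffine_transform_2d` sampler above builds the transform as a product of a unit lower-triangular and a unit upper-triangular matrix, so `det(A) = det(L) * det(U) = 1` holds for every sampled pair of entries; the rejection loop only bounds the eigenvalues and their ratio. A minimal sanity check of that property (a sketch; the import assumes the module above is saved as a hypothetical `transforms.py`):

```python
import numpy

from transforms import generate_random_equiaffine_transform_2d  # hypothetical module name

# The LU construction guarantees an area-preserving (equiaffine) map:
# det(L) = det(U) = 1, hence det(A) = 1 regardless of the sampled entries.
A = generate_random_equiaffine_transform_2d()
assert numpy.isclose(numpy.linalg.det(A), 1.0)

# The eigenvalues are real and positive here: A = [[1, b], [a, a*b + 1]] has
# trace 2 + a*b >= 2 and determinant 1, so the discriminant is non-negative.
w, _ = numpy.linalg.eig(A)
print('eigenvalues:', w, 'ratio:', numpy.max(w) / numpy.min(w))
```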
[ [ "numpy.abs", "numpy.linspace", "numpy.asarray", "matplotlib.collections.LineCollection", "numpy.arange", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.Normalize", "numpy.concatenate", "matplotlib.pyplot.Circle", "torch.from_numpy", "numpy.mean", "matplotlib.ticker.MaxNLocator", "torch.no_grad", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros" ], [ "numpy.random.random", "numpy.maximum", "numpy.linalg.eig", "numpy.matmul", "numpy.random.uniform", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lippman1125/pytorch-ssd
[ "d2c31b0d69e82ab2c46f2eecbbf9c12f3dd73309" ]
[ "eval_ssd.py" ]
[ "import torch\nfrom vision.ssd.vgg_ssd import create_vgg_ssd, create_vgg_ssd_predictor\nfrom vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor\nfrom vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite, create_mobilenetv1_ssd_lite_predictor\nfrom vision.ssd.mobilenet_v2_ssd_lite_xiaomi import create_mobilenetv2_ssd_lite_xiaomi, create_mobilenetv2_ssd_lite_predictor_xiaomi\nfrom vision.ssd.fairnas_a_ssd_lite import create_fairnas_a_ssd_lite, create_fairnas_a_ssd_lite_predictor\nfrom vision.ssd.fairnas_b_ssd_lite import create_fairnas_b_ssd_lite, create_fairnas_b_ssd_lite_predictor\nfrom vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite, create_squeezenet_ssd_lite_predictor\nfrom vision.datasets.voc_dataset import VOCDataset\nfrom vision.datasets.open_images import OpenImagesDataset\nfrom vision.datasets.coco_dataset import CocoDatasetTest\nfrom vision.utils import box_utils, measurements\nfrom vision.utils.misc import str2bool, Timer\nimport argparse\nimport pathlib\nimport numpy as np\nimport logging\nimport sys\nfrom vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite, create_mobilenetv2_ssd_lite_predictor\n\n\nparser = argparse.ArgumentParser(description=\"SSD Evaluation on VOC Dataset.\")\nparser.add_argument('--net', default=\"vgg16-ssd\",\n help=\"The network architecture, it should be of mb1-ssd, mb1-ssd-lite, mb2-ssd-lite or vgg16-ssd.\")\nparser.add_argument(\"--trained_model\", type=str)\n\nparser.add_argument(\"--dataset_type\", default=\"voc\", type=str,\n help='Specify dataset type. Currently support voc and open_images.')\nparser.add_argument(\"--dataset\", type=str, help=\"The root directory of the VOC dataset or Open Images dataset.\")\nparser.add_argument('--annfile', type=str, help='json annotation file, just for coco dataset')\nparser.add_argument(\"--label_file\", type=str, help=\"The label file path.\")\nparser.add_argument(\"--use_cuda\", type=str2bool, default=True)\nparser.add_argument(\"--use_2007_metric\", type=str2bool, default=True)\nparser.add_argument(\"--nms_method\", type=str, default=\"hard\")\nparser.add_argument(\"--iou_threshold\", type=float, default=0.5, help=\"The threshold of Intersection over Union.\")\nparser.add_argument(\"--eval_dir\", default=\"eval_results\", type=str, help=\"The directory to store evaluation results.\")\nparser.add_argument('--mb2_width_mult', default=1.0, type=float,\n help='Width Multiplifier for MobilenetV2')\nargs = parser.parse_args()\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() and args.use_cuda else \"cpu\")\n\n\ndef group_annotation_by_class(dataset):\n true_case_stat = {}\n all_gt_boxes = {}\n all_difficult_cases = {}\n for i in range(len(dataset)):\n image_id, annotation = dataset.get_annotation(i)\n gt_boxes, classes, is_difficult = annotation\n gt_boxes = torch.from_numpy(gt_boxes)\n for i, difficult in enumerate(is_difficult):\n class_index = int(classes[i])\n gt_box = gt_boxes[i]\n if not difficult:\n true_case_stat[class_index] = true_case_stat.get(class_index, 0) + 1\n\n if class_index not in all_gt_boxes:\n all_gt_boxes[class_index] = {}\n if image_id not in all_gt_boxes[class_index]:\n all_gt_boxes[class_index][image_id] = []\n all_gt_boxes[class_index][image_id].append(gt_box)\n if class_index not in all_difficult_cases:\n all_difficult_cases[class_index]={}\n if image_id not in all_difficult_cases[class_index]:\n all_difficult_cases[class_index][image_id] = []\n 
all_difficult_cases[class_index][image_id].append(difficult)\n\n for class_index in all_gt_boxes:\n for image_id in all_gt_boxes[class_index]:\n all_gt_boxes[class_index][image_id] = torch.stack(all_gt_boxes[class_index][image_id])\n for class_index in all_difficult_cases:\n for image_id in all_difficult_cases[class_index]:\n all_gt_boxes[class_index][image_id] = torch.tensor(all_gt_boxes[class_index][image_id])\n return true_case_stat, all_gt_boxes, all_difficult_cases\n\ndef compute_average_precision_per_class(num_true_cases, gt_boxes, difficult_cases,\n prediction_file, iou_threshold, use_2007_metric):\n with open(prediction_file) as f:\n image_ids = []\n boxes = []\n scores = []\n for line in f:\n t = line.rstrip().split(\" \")\n image_ids.append(t[0])\n scores.append(float(t[1]))\n box = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0)\n box -= 1.0 # convert to python format where indexes start from 0\n boxes.append(box)\n scores = np.array(scores)\n sorted_indexes = np.argsort(-scores)\n boxes = [boxes[i] for i in sorted_indexes]\n image_ids = [image_ids[i] for i in sorted_indexes]\n true_positive = np.zeros(len(image_ids))\n false_positive = np.zeros(len(image_ids))\n matched = set()\n for i, image_id in enumerate(image_ids):\n box = boxes[i]\n if image_id not in gt_boxes:\n false_positive[i] = 1\n continue\n\n gt_box = gt_boxes[image_id]\n ious = box_utils.iou_of(box, gt_box)\n max_iou = torch.max(ious).item()\n max_arg = torch.argmax(ious).item()\n if max_iou > iou_threshold:\n if difficult_cases[image_id][max_arg] == 0:\n if (image_id, max_arg) not in matched:\n true_positive[i] = 1\n matched.add((image_id, max_arg))\n else:\n false_positive[i] = 1\n else:\n false_positive[i] = 1\n\n true_positive = true_positive.cumsum()\n false_positive = false_positive.cumsum()\n precision = true_positive / (true_positive + false_positive)\n recall = true_positive / num_true_cases\n if use_2007_metric:\n return measurements.compute_voc2007_average_precision(precision, recall)\n else:\n return measurements.compute_average_precision(precision, recall)\n\n\nif __name__ == '__main__':\n eval_path = pathlib.Path(args.eval_dir)\n eval_path.mkdir(exist_ok=True)\n timer = Timer()\n class_names = [name.strip() for name in open(args.label_file).readlines()]\n\n if args.dataset_type == \"voc\":\n dataset = VOCDataset(args.dataset, is_test=True)\n elif args.dataset_type == 'open_images':\n dataset = OpenImagesDataset(args.dataset, dataset_type=\"test\")\n elif args.dataset_type == 'coco':\n dataset = CocoDatasetTest(args.dataset, args.annfile)\n\n true_case_stat, all_gb_boxes, all_difficult_cases = group_annotation_by_class(dataset)\n if args.net == 'vgg16-ssd':\n net = create_vgg_ssd(len(class_names), is_test=True)\n elif args.net == 'mb1-ssd':\n net = create_mobilenetv1_ssd(len(class_names), is_test=True)\n elif args.net == 'mb1-ssd-lite':\n net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)\n elif args.net == 'sq-ssd-lite':\n net = create_squeezenet_ssd_lite(len(class_names), is_test=True)\n elif args.net == 'mb2-ssd-lite':\n net = create_mobilenetv2_ssd_lite(len(class_names), width_mult=args.mb2_width_mult, is_test=True)\n elif args.net == 'mb2-ssd-lite-xiaomi':\n net = create_mobilenetv2_ssd_lite_xiaomi(len(class_names), width_mult=args.mb2_width_mult, is_test=True)\n elif args.net == 'fairnas-a-ssd-lite':\n net = create_fairnas_a_ssd_lite(len(class_names), is_test=True)\n elif args.net == 'fairnas-b-ssd-lite':\n net = create_fairnas_b_ssd_lite(len(class_names), is_test=True)\n 
else:\n logging.fatal(\"The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.\")\n parser.print_help(sys.stderr)\n sys.exit(1) \n\n timer.start(\"Load Model\")\n net.load(args.trained_model)\n net = net.to(DEVICE)\n print(f'It took {timer.end(\"Load Model\")} seconds to load the model.')\n if args.net == 'vgg16-ssd':\n predictor = create_vgg_ssd_predictor(net, nms_method=args.nms_method, device=DEVICE)\n elif args.net == 'mb1-ssd':\n predictor = create_mobilenetv1_ssd_predictor(net, nms_method=args.nms_method, device=DEVICE)\n elif args.net == 'mb1-ssd-lite':\n predictor = create_mobilenetv1_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)\n elif args.net == 'sq-ssd-lite':\n predictor = create_squeezenet_ssd_lite_predictor(net,nms_method=args.nms_method, device=DEVICE)\n elif args.net == 'mb2-ssd-lite':\n predictor = create_mobilenetv2_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)\n elif args.net == 'mb2-ssd-lite-xiaomi':\n predictor = create_mobilenetv2_ssd_lite_predictor_xiaomi(net, nms_method=args.nms_method, device=DEVICE)\n elif args.net == 'fairnas-a-ssd-lite':\n predictor = create_fairnas_a_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)\n elif args.net == 'fairnas-b-ssd-lite':\n predictor = create_fairnas_b_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)\n else:\n logging.fatal(\"The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.\")\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n results = []\n for i in range(len(dataset)):\n print(\"process image\", i)\n timer.start(\"Load Image\")\n image = dataset.get_image(i)\n print(\"Load Image: {:4f} seconds.\".format(timer.end(\"Load Image\")))\n timer.start(\"Predict\")\n boxes, labels, probs = predictor.predict(image)\n print(\"Prediction: {:4f} seconds.\".format(timer.end(\"Predict\")))\n indexes = torch.ones(labels.size(0), 1, dtype=torch.float32) * i\n results.append(torch.cat([\n indexes.reshape(-1, 1),\n labels.reshape(-1, 1).float(),\n probs.reshape(-1, 1),\n boxes + 1.0 # matlab's indexes start from 1\n ], dim=1))\n results = torch.cat(results)\n for class_index, class_name in enumerate(class_names):\n if class_index == 0:\n continue # ignore background\n prediction_path = eval_path / f\"det_test_{class_name}.txt\"\n with open(prediction_path, \"w\") as f:\n sub = results[results[:, 1] == class_index, :]\n for i in range(sub.size(0)):\n prob_box = sub[i, 2:].numpy()\n image_id = dataset.ids[int(sub[i, 0])]\n print(\n str(image_id) + \" \" + \" \".join([str(v) for v in prob_box]),\n file=f\n )\n aps = []\n print(\"\\n\\nAverage Precision Per-class:\")\n for class_index, class_name in enumerate(class_names):\n # coco class has _ name\n if class_index == 0:\n continue\n prediction_path = eval_path / f\"det_test_{class_name}.txt\"\n ap = compute_average_precision_per_class(\n true_case_stat[class_index],\n all_gb_boxes[class_index],\n all_difficult_cases[class_index],\n prediction_path,\n args.iou_threshold,\n args.use_2007_metric\n )\n aps.append(ap)\n print(f\"{class_name}: {ap}\")\n\n print(f\"\\nAverage Precision Across All Classes:{sum(aps)/len(aps)}\")\n\n\n\n" ]
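`compute_average_precision_per_class` above implements the standard VOC pipeline: detections sorted by descending score, cumulative TP/FP counts, precision over recall, then either the 11-point 2007 interpolation or the exact area under the curve. (One caveat: the second normalization loop in `group_annotation_by_class` iterates `all_difficult_cases` but assigns into `all_gt_boxes`, which reads as if `all_difficult_cases` were intended.) A standalone numeric sketch of the precision/recall/AP construction, with hypothetical match flags:

```python
import numpy as np

# Hypothetical outcome of matching 5 score-sorted detections against
# 3 ground-truth boxes: 1 = true positive, 0 = false positive.
true_positive = np.array([1., 1., 0., 1., 0.])
false_positive = 1.0 - true_positive
num_true_cases = 3

tp_cum = true_positive.cumsum()
fp_cum = false_positive.cumsum()
precision = tp_cum / (tp_cum + fp_cum)
recall = tp_cum / num_true_cases

# VOC2007 11-point metric: average the max precision at recall >= t.
ap = np.mean([precision[recall >= t].max() if (recall >= t).any() else 0.0
              for t in np.linspace(0, 1, 11)])
print(precision, recall, ap)
```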
[ [ "torch.max", "torch.cat", "torch.from_numpy", "torch.tensor", "torch.cuda.is_available", "torch.stack", "numpy.argsort", "numpy.array", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aglitoiu/pennylane
[ "fd99be754d55bbb919aadbbbdff70e40fbe3bcbf", "fd99be754d55bbb919aadbbbdff70e40fbe3bcbf", "fd99be754d55bbb919aadbbbdff70e40fbe3bcbf" ]
[ "pennylane/qnn/keras.py", "tests/devices/test_default_qubit_tf.py", "tests/math/test_functions.py" ]
[ "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module contains the classes and functions for integrating QNodes with the Keras Layer\nAPI.\"\"\"\nimport inspect\nfrom collections.abc import Iterable\nfrom typing import Optional\n\ntry:\n import tensorflow as tf\n from tensorflow.keras.layers import Layer\n\n CORRECT_TF_VERSION = int(tf.__version__.split(\".\")[0]) > 1\nexcept ImportError:\n # The following allows this module to be imported even if TensorFlow is not installed. Users\n # will instead see an ImportError when instantiating the KerasLayer.\n from abc import ABC\n\n Layer = ABC\n CORRECT_TF_VERSION = False\n\n\nclass KerasLayer(Layer):\n \"\"\"Converts a :func:`~.QNode` to a Keras\n `Layer <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer>`__.\n\n The result can be used within the Keras\n `Sequential <https://www.tensorflow.org/api_docs/python/tf/keras/Sequential>`__ or\n `Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ classes for\n creating quantum and hybrid models.\n\n Args:\n qnode (qml.QNode): the PennyLane QNode to be converted into a Keras Layer_\n weight_shapes (dict[str, tuple]): a dictionary mapping from all weights used in the QNode to\n their corresponding shapes\n output_dim (int): the output dimension of the QNode\n weight_specs (dict[str, dict]): An optional dictionary for users to provide additional\n specifications for weights used in the QNode, such as the method of parameter\n initialization. This specification is provided as a dictionary with keys given by the\n arguments of the `add_weight()\n <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#add_weight>`__.\n method and values being the corresponding specification.\n **kwargs: additional keyword arguments passed to the Layer_ base class\n\n **Example**\n\n First let's define the QNode that we want to convert into a Keras Layer_:\n\n .. code-block:: python\n\n n_qubits = 2\n dev = qml.device(\"default.qubit\", wires=n_qubits)\n\n @qml.qnode(dev)\n def qnode(inputs, weights_0, weight_1):\n qml.RX(inputs[0], wires=0)\n qml.RX(inputs[1], wires=1)\n qml.Rot(*weights_0, wires=0)\n qml.RY(weight_1, wires=1)\n qml.CNOT(wires=[0, 1])\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\n\n The signature of the QNode **must** contain an ``inputs`` named argument for input data,\n with all other arguments to be treated as internal weights. 
We can then convert to a Keras\n Layer_ with:\n\n >>> weight_shapes = {\"weights_0\": 3, \"weight_1\": 1}\n >>> qlayer = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=2)\n\n The internal weights of the QNode are automatically initialized within the\n :class:`~.KerasLayer` and must have their shapes specified in a ``weight_shapes`` dictionary.\n It is then easy to combine with other neural network layers from the\n `tensorflow.keras.layers <https://www.tensorflow.org/api_docs/python/tf/keras/layers>`__ module\n and create a hybrid:\n\n >>> clayer = tf.keras.layers.Dense(2)\n >>> model = tf.keras.models.Sequential([qlayer, clayer])\n\n .. UsageDetails::\n\n **QNode signature**\n\n The QNode must have a signature that satisfies the following conditions:\n\n - Contain an ``inputs`` named argument for input data.\n - All other arguments must accept an array or tensor and are treated as internal\n weights of the QNode.\n - All other arguments must have no default value.\n - The ``inputs`` argument is permitted to have a default value provided the gradient with\n respect to ``inputs`` is not required.\n - There cannot be a variable number of positional or keyword arguments, e.g., no ``*args``\n or ``**kwargs`` present in the signature.\n\n **Initializing weights**\n\n The optional ``weight_specs`` argument of :class:`~.KerasLayer` allows for a more\n fine-grained specification of the QNode weights, such as the method of initialization and\n any regularization or constraints. For example, the initialization method of the ``weights``\n argument in the example above could be specified by:\n\n .. code-block::\n\n weight_specs = {\"weights\": {\"initializer\": \"random_uniform\"}}\n\n The values of ``weight_specs`` are dictionaries with keys given by arguments of\n the Keras\n `add_weight() <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#add_weight>`__\n method. For the ``\"initializer\"`` argument, one can specify a string such as\n ``\"random_uniform\"`` or an instance of an `Initializer\n <https://www.tensorflow.org/api_docs/python/tf/keras/initializers>`__ class, such as\n `tf.keras.initializers.RandomUniform <https://www.tensorflow.org/api_docs/python/tf/random_uniform_initializer>`__.\n\n If ``weight_specs`` is not specified, weights will be added using the Keras default\n initialization and without any regularization or constraints.\n\n **Additional example**\n\n The code block below shows how a circuit composed of templates from the\n :doc:`/code/qml_templates` module can be combined with classical\n `Dense <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense>`__ layers to learn\n the two-dimensional `moons <https://scikit-learn.org/stable/modules/generated/sklearn\n .datasets.make_moons.html>`__ dataset.\n\n .. 
code-block:: python\n\n import pennylane as qml\n import tensorflow as tf\n import sklearn.datasets\n\n n_qubits = 2\n dev = qml.device(\"default.qubit\", wires=n_qubits)\n\n @qml.qnode(dev)\n def qnode(inputs, weights):\n qml.templates.AngleEmbedding(inputs, wires=range(n_qubits))\n qml.templates.StronglyEntanglingLayers(weights, wires=range(n_qubits))\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\n\n weight_shapes = {\"weights\": (3, n_qubits, 3)}\n\n qlayer = qml.qnn.KerasLayer(qnode, weight_shapes, output_dim=2)\n clayer1 = tf.keras.layers.Dense(2)\n clayer2 = tf.keras.layers.Dense(2, activation=\"softmax\")\n model = tf.keras.models.Sequential([clayer1, qlayer, clayer2])\n\n data = sklearn.datasets.make_moons()\n X = tf.constant(data[0])\n Y = tf.one_hot(data[1], depth=2)\n\n opt = tf.keras.optimizers.SGD(learning_rate=0.5)\n model.compile(opt, loss='mae')\n\n The model can be trained using:\n\n >>> model.fit(X, Y, epochs=8, batch_size=5)\n Train on 100 samples\n Epoch 1/8\n 100/100 [==============================] - 9s 90ms/sample - loss: 0.3524\n Epoch 2/8\n 100/100 [==============================] - 9s 87ms/sample - loss: 0.2441\n Epoch 3/8\n 100/100 [==============================] - 9s 87ms/sample - loss: 0.1908\n Epoch 4/8\n 100/100 [==============================] - 9s 87ms/sample - loss: 0.1832\n Epoch 5/8\n 100/100 [==============================] - 9s 88ms/sample - loss: 0.1596\n Epoch 6/8\n 100/100 [==============================] - 9s 87ms/sample - loss: 0.1637\n Epoch 7/8\n 100/100 [==============================] - 9s 86ms/sample - loss: 0.1613\n Epoch 8/8\n 100/100 [==============================] - 9s 87ms/sample - loss: 0.1474\n\n **Returning a state**\n\n If your QNode returns the state of the quantum circuit using :func:`~.state` or\n :func:`~.density_matrix`, you must immediately follow your quantum Keras Layer with a layer\n that casts to reals. For example, you could use\n `tf.keras.layers.Lambda <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Lambda>`__\n with the function ``lambda x: tf.abs(x)``. This casting is required because TensorFlow's\n Keras layers require a real input and are differentiated with respect to real parameters.\n\n .. _Layer: https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer\n \"\"\"\n\n def __init__(\n self, qnode, weight_shapes: dict, output_dim, weight_specs: Optional[dict] = None, **kwargs\n ):\n if not CORRECT_TF_VERSION:\n raise ImportError(\n \"KerasLayer requires TensorFlow version 2 or above. 
The latest \"\n \"version of TensorFlow can be installed using:\\n\"\n \"pip install tensorflow --upgrade\\nAlternatively, visit \"\n \"https://www.tensorflow.org/install for detailed instructions.\"\n )\n\n self.weight_shapes = {\n weight: (tuple(size) if isinstance(size, Iterable) else (size,) if size > 1 else ())\n for weight, size in weight_shapes.items()\n }\n\n self._signature_validation(qnode, weight_shapes)\n self.qnode = qnode\n\n dtype = tf.float32 if tf.keras.backend.floatx() == tf.float32 else tf.float64\n\n if self.qnode.diff_method != \"backprop\":\n self.qnode.to_tf(dtype=dtype)\n\n # Allows output_dim to be specified as an int or as a tuple, e.g, 5, (5,), (5, 2), [5, 2]\n # Note: Single digit values will be considered an int and multiple as a tuple, e.g [5,] or (5,)\n # are passed as integer 5 and [5, 2] will be passes as tuple (5, 2)\n if isinstance(output_dim, Iterable) and len(output_dim) > 1:\n self.output_dim = tuple(output_dim)\n else:\n self.output_dim = output_dim[0] if isinstance(output_dim, Iterable) else output_dim\n\n self.weight_specs = weight_specs if weight_specs is not None else {}\n\n self.qnode_weights = {}\n\n super().__init__(dynamic=True, **kwargs)\n\n def _signature_validation(self, qnode, weight_shapes):\n sig = inspect.signature(qnode.func).parameters\n\n if self.input_arg not in sig:\n raise TypeError(\n \"QNode must include an argument with name {} for inputting data\".format(\n self.input_arg\n )\n )\n\n if self.input_arg in set(weight_shapes.keys()):\n raise ValueError(\n \"{} argument should not have its dimension specified in \"\n \"weight_shapes\".format(self.input_arg)\n )\n\n param_kinds = [p.kind for p in sig.values()]\n\n if inspect.Parameter.VAR_POSITIONAL in param_kinds:\n raise TypeError(\"Cannot have a variable number of positional arguments\")\n\n if inspect.Parameter.VAR_KEYWORD not in param_kinds:\n if set(weight_shapes.keys()) | {self.input_arg} != set(sig.keys()):\n raise ValueError(\"Must specify a shape for every non-input parameter in the QNode\")\n\n def build(self, input_shape):\n \"\"\"Initializes the QNode weights.\n\n Args:\n input_shape (tuple or tf.TensorShape): shape of input data\n \"\"\"\n for weight, size in self.weight_shapes.items():\n spec = self.weight_specs.get(weight, {})\n self.qnode_weights[weight] = self.add_weight(name=weight, shape=size, **spec)\n\n super().build(input_shape)\n\n def call(self, inputs):\n \"\"\"Evaluates the QNode on input data using the initialized weights.\n\n Args:\n inputs (tensor): data to be processed\n\n Returns:\n tensor: output data\n \"\"\"\n if len(tf.shape(inputs)) > 1:\n # If the input size is not 1-dimensional, unstack the input along its first dimension,\n # recursively call the forward pass on each of the yielded tensors, and then stack the\n # outputs back into the correct shape\n reconstructor = []\n for x in tf.unstack(inputs):\n reconstructor.append(self.call(x))\n return tf.stack(reconstructor)\n\n return self._evaluate_qnode(inputs)\n\n def _evaluate_qnode(self, x):\n \"\"\"Evaluates a QNode for a single input datapoint.\n\n Args:\n x (tensor): the datapoint\n\n Returns:\n tensor: output datapoint\n \"\"\"\n kwargs = {**{self.input_arg: x}, **{k: 1.0 * w for k, w in self.qnode_weights.items()}}\n return self.qnode(**kwargs)\n\n def compute_output_shape(self, input_shape):\n \"\"\"Computes the output shape after passing data of shape ``input_shape`` through the\n QNode.\n\n Args:\n input_shape (tuple or tf.TensorShape): shape of input data\n\n Returns:\n 
tf.TensorShape: shape of output data\n \"\"\"\n return tf.TensorShape([input_shape[0]]).concatenate(self.output_dim)\n\n def __str__(self):\n detail = \"<Quantum Keras Layer: func={}>\"\n return detail.format(self.qnode.func.__name__)\n\n __repr__ = __str__\n\n _input_arg = \"inputs\"\n\n @property\n def input_arg(self):\n \"\"\"Name of the argument to be used as the input to the Keras\n `Layer <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer>`__. Set to\n ``\"inputs\"``.\"\"\"\n return self._input_arg\n", "# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nUnit tests and integration tests for the ``default.qubit.tf`` device.\r\n\"\"\"\r\nfrom itertools import product\r\n\r\nimport numpy as np\r\nimport pytest\r\n\r\ntf = pytest.importorskip(\"tensorflow\", minversion=\"2.0\")\r\n\r\nimport pennylane as qml\r\nfrom pennylane.wires import Wires\r\nfrom pennylane.devices.default_qubit_tf import DefaultQubitTF\r\nfrom gate_data import (\r\n I,\r\n X,\r\n Y,\r\n Z,\r\n H,\r\n S,\r\n T,\r\n CNOT,\r\n CZ,\r\n SWAP,\r\n CNOT,\r\n Toffoli,\r\n CSWAP,\r\n Rphi,\r\n Rotx,\r\n Roty,\r\n Rotz,\r\n Rot3,\r\n CRotx,\r\n CRoty,\r\n CRotz,\r\n CRot3,\r\n MultiRZ1,\r\n MultiRZ2,\r\n ControlledPhaseShift,\r\n)\r\n\r\nnp.random.seed(42)\r\n\r\n\r\n#####################################################\r\n# Test matrices\r\n#####################################################\r\n\r\nU = np.array(\r\n [\r\n [0.83645892 - 0.40533293j, -0.20215326 + 0.30850569j],\r\n [-0.23889780 - 0.28101519j, -0.88031770 - 0.29832709j],\r\n ]\r\n)\r\n\r\nU2 = np.array([[0, 1, 1, 1], [1, 0, 1, -1], [1, -1, 0, 1], [1, 1, -1, 0]]) / np.sqrt(3)\r\nA = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])\r\n\r\n\r\n#####################################################\r\n# Define standard qubit operations\r\n#####################################################\r\n\r\nsingle_qubit = [(qml.S, S), (qml.T, T), (qml.PauliX, X), (qml.PauliY, Y), (qml.PauliZ, Z), (qml.Hadamard, H)]\r\nsingle_qubit_param = [\r\n (qml.PhaseShift, Rphi),\r\n (qml.RX, Rotx),\r\n (qml.RY, Roty),\r\n (qml.RZ, Rotz),\r\n (qml.MultiRZ, MultiRZ1),\r\n]\r\ntwo_qubit = [(qml.CZ, CZ), (qml.CNOT, CNOT), (qml.SWAP, SWAP)]\r\ntwo_qubit_param = [\r\n (qml.CRX, CRotx),\r\n (qml.CRY, CRoty),\r\n (qml.CRZ, CRotz),\r\n (qml.MultiRZ, MultiRZ2),\r\n (qml.ControlledPhaseShift, ControlledPhaseShift),\r\n]\r\nthree_qubit = [(qml.Toffoli, Toffoli), (qml.CSWAP, CSWAP)]\r\n\r\n\r\n#####################################################\r\n# Fixtures\r\n#####################################################\r\n\r\n\r\[email protected]\r\ndef init_state(scope=\"session\"):\r\n \"\"\"Generates a random initial state\"\"\"\r\n\r\n def _init_state(n):\r\n \"\"\"random initial state\"\"\"\r\n state = np.random.random([2 ** n]) + np.random.random([2 ** n]) * 1j\r\n state /= np.linalg.norm(state)\r\n return state\r\n\r\n return 
_init_state\r\n\r\n\r\n#####################################################\r\n# Device-level integration tests\r\n#####################################################\r\n\r\n\r\nclass TestApply:\r\n \"\"\"Test application of PennyLane operations.\"\"\"\r\n\r\n def test_basis_state(self, tol):\r\n \"\"\"Test basis state initialization\"\"\"\r\n dev = DefaultQubitTF(wires=4)\r\n state = np.array([0, 0, 1, 0])\r\n\r\n dev.apply([qml.BasisState(state, wires=[0, 1, 2, 3])])\r\n\r\n res = dev.state\r\n expected = np.zeros([2 ** 4])\r\n expected[np.ravel_multi_index(state, [2] * 4)] = 1\r\n\r\n assert isinstance(res, tf.Tensor)\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_invalid_basis_state_length(self, tol):\r\n \"\"\"Test that an exception is raised if the basis state is the wrong size\"\"\"\r\n dev = DefaultQubitTF(wires=4)\r\n state = np.array([0, 0, 1, 0])\r\n\r\n with pytest.raises(\r\n ValueError, match=r\"BasisState parameter and wires must be of equal length\"\r\n ):\r\n dev.apply([qml.BasisState(state, wires=[0, 1, 2])])\r\n\r\n def test_invalid_basis_state(self, tol):\r\n \"\"\"Test that an exception is raised if the basis state is invalid\"\"\"\r\n dev = DefaultQubitTF(wires=4)\r\n state = np.array([0, 0, 1, 2])\r\n\r\n with pytest.raises(\r\n ValueError, match=r\"BasisState parameter must consist of 0 or 1 integers\"\r\n ):\r\n dev.apply([qml.BasisState(state, wires=[0, 1, 2, 3])])\r\n\r\n def test_qubit_state_vector(self, init_state, tol):\r\n \"\"\"Test qubit state vector application\"\"\"\r\n dev = DefaultQubitTF(wires=1)\r\n state = init_state(1)\r\n\r\n dev.apply([qml.QubitStateVector(state, wires=[0])])\r\n\r\n res = dev.state\r\n expected = state\r\n assert isinstance(res, tf.Tensor)\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_full_subsystem_statevector(self, mocker):\r\n \"\"\"Test applying a state vector to the full subsystem\"\"\"\r\n dev = DefaultQubitTF(wires=['a', 'b', 'c'])\r\n state = tf.constant([1, 0, 0, 0, 1, 0, 1, 1], dtype=tf.complex128) / 2.\r\n state_wires = qml.wires.Wires(['a', 'b', 'c'])\r\n\r\n spy = mocker.spy(dev, \"_scatter\")\r\n dev._apply_state_vector(state=state, device_wires=state_wires)\r\n\r\n assert np.all(tf.reshape(dev._state, [-1]) == state)\r\n spy.assert_not_called()\r\n\r\n def test_partial_subsystem_statevector(self, mocker):\r\n \"\"\"Test applying a state vector to a subset of wires of the full subsystem\"\"\"\r\n dev = DefaultQubitTF(wires=['a', 'b', 'c'])\r\n state = tf.constant([1, 0, 1, 0], dtype=tf.complex128) / np.sqrt(2.)\r\n state_wires = qml.wires.Wires(['a', 'c'])\r\n\r\n spy = mocker.spy(dev, \"_scatter\")\r\n dev._apply_state_vector(state=state, device_wires=state_wires)\r\n res = tf.reshape(tf.reduce_sum(dev._state, axis=(1,)), [-1])\r\n\r\n assert np.all(res == state)\r\n spy.assert_called()\r\n\r\n def test_invalid_qubit_state_vector_size(self):\r\n \"\"\"Test that an exception is raised if the state\r\n vector is the wrong size\"\"\"\r\n dev = DefaultQubitTF(wires=2)\r\n state = np.array([0, 1])\r\n\r\n with pytest.raises(ValueError, match=r\"State vector must be of length 2\\*\\*wires\"):\r\n dev.apply([qml.QubitStateVector(state, wires=[0, 1])])\r\n\r\n def test_invalid_qubit_state_vector_norm(self):\r\n \"\"\"Test that an exception is raised if the state\r\n vector is not normalized\"\"\"\r\n dev = DefaultQubitTF(wires=2)\r\n state = np.array([0, 12])\r\n\r\n with pytest.raises(ValueError, match=r\"Sum of amplitudes-squared does not equal one\"):\r\n 
dev.apply([qml.QubitStateVector(state, wires=[0])])\r\n\r\n def test_invalid_state_prep(self):\r\n \"\"\"Test that an exception is raised if a state preparation is not the\r\n first operation in the circuit.\"\"\"\r\n dev = DefaultQubitTF(wires=2)\r\n state = np.array([0, 12])\r\n\r\n with pytest.raises(\r\n qml.DeviceError,\r\n match=r\"cannot be used after other Operations have already been applied\",\r\n ):\r\n dev.apply([qml.PauliZ(0), qml.QubitStateVector(state, wires=[0])])\r\n\r\n @pytest.mark.parametrize(\"op,mat\", single_qubit)\r\n def test_single_qubit_no_parameters(self, init_state, op, mat, tol):\r\n \"\"\"Test non-parametrized single qubit operations\"\"\"\r\n dev = DefaultQubitTF(wires=1)\r\n state = init_state(1)\r\n\r\n queue = [qml.QubitStateVector(state, wires=[0])]\r\n queue += [op(wires=0)]\r\n dev.apply(queue)\r\n\r\n res = dev.state\r\n expected = mat @ state\r\n assert isinstance(res, tf.Tensor)\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize(\"theta\", [0.5432, -0.232])\r\n @pytest.mark.parametrize(\"op,func\", single_qubit_param)\r\n def test_single_qubit_parameters(self, init_state, op, func, theta, tol):\r\n \"\"\"Test parametrized single qubit operations\"\"\"\r\n dev = DefaultQubitTF(wires=1)\r\n state = init_state(1)\r\n\r\n queue = [qml.QubitStateVector(state, wires=[0])]\r\n queue += [op(theta, wires=0)]\r\n dev.apply(queue)\r\n\r\n res = dev.state\r\n expected = func(theta) @ state\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_rotation(self, init_state, tol):\r\n \"\"\"Test three axis rotation gate\"\"\"\r\n dev = DefaultQubitTF(wires=1)\r\n state = init_state(1)\r\n\r\n a = 0.542\r\n b = 1.3432\r\n c = -0.654\r\n\r\n queue = [qml.QubitStateVector(state, wires=[0])]\r\n queue += [qml.Rot(a, b, c, wires=0)]\r\n dev.apply(queue)\r\n\r\n res = dev.state\r\n expected = Rot3(a, b, c) @ state\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_controlled_rotation(self, init_state, tol):\r\n \"\"\"Test three axis controlled-rotation gate\"\"\"\r\n dev = DefaultQubitTF(wires=2)\r\n state = init_state(2)\r\n\r\n a = 0.542\r\n b = 1.3432\r\n c = -0.654\r\n\r\n queue = [qml.QubitStateVector(state, wires=[0, 1])]\r\n queue += [qml.CRot(a, b, c, wires=[0, 1])]\r\n dev.apply(queue)\r\n\r\n res = dev.state\r\n expected = CRot3(a, b, c) @ state\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_inverse_operation(self, init_state, tol):\r\n \"\"\"Test that the inverse of an operation is correctly applied\"\"\"\r\n \"\"\"Test three axis rotation gate\"\"\"\r\n dev = DefaultQubitTF(wires=1)\r\n state = init_state(1)\r\n\r\n a = 0.542\r\n b = 1.3432\r\n c = -0.654\r\n\r\n queue = [qml.QubitStateVector(state, wires=[0])]\r\n queue += [qml.Rot(a, b, c, wires=0).inv()]\r\n dev.apply(queue)\r\n\r\n res = dev.state\r\n expected = np.linalg.inv(Rot3(a, b, c)) @ state\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize(\"op,mat\", two_qubit)\r\n def test_two_qubit_no_parameters(self, init_state, op, mat, tol):\r\n \"\"\"Test non-parametrized two qubit operations\"\"\"\r\n dev = DefaultQubitTF(wires=2)\r\n state = init_state(2)\r\n\r\n queue = [qml.QubitStateVector(state, wires=[0, 1])]\r\n queue += [op(wires=[0, 1])]\r\n dev.apply(queue)\r\n\r\n res = dev.state\r\n expected = mat @ state\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize(\"mat\", [U, U2])\r\n def test_qubit_unitary(self, init_state, 
mat, tol):\r\n \"\"\"Test application of arbitrary qubit unitaries\"\"\"\r\n N = int(np.log2(len(mat)))\r\n dev = DefaultQubitTF(wires=N)\r\n state = init_state(N)\r\n\r\n queue = [qml.QubitStateVector(state, wires=range(N))]\r\n queue += [qml.QubitUnitary(mat, wires=range(N))]\r\n dev.apply(queue)\r\n\r\n res = dev.state\r\n expected = mat @ state\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize(\"op, mat\", three_qubit)\r\n def test_three_qubit_no_parameters(self, init_state, op, mat, tol):\r\n \"\"\"Test non-parametrized three qubit operations\"\"\"\r\n dev = DefaultQubitTF(wires=3)\r\n state = init_state(3)\r\n\r\n queue = [qml.QubitStateVector(state, wires=[0, 1, 2])]\r\n queue += [op(wires=[0, 1, 2])]\r\n dev.apply(queue)\r\n\r\n res = dev.state\r\n expected = mat @ state\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize(\"theta\", [0.5432, -0.232])\r\n @pytest.mark.parametrize(\"op,func\", two_qubit_param)\r\n def test_two_qubit_parameters(self, init_state, op, func, theta, tol):\r\n \"\"\"Test two qubit parametrized operations\"\"\"\r\n dev = DefaultQubitTF(wires=2)\r\n state = init_state(2)\r\n\r\n queue = [qml.QubitStateVector(state, wires=[0, 1])]\r\n queue += [op(theta, wires=[0, 1])]\r\n dev.apply(queue)\r\n\r\n res = dev.state\r\n expected = func(theta) @ state\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_apply_ops_not_supported(self, mocker, monkeypatch):\r\n \"\"\"Test that when a version of TensorFlow before 2.3.0 is used, the _apply_ops dictionary is\r\n empty and application of a CNOT gate is performed using _apply_unitary_einsum\"\"\"\r\n with monkeypatch.context() as m:\r\n m.setattr(\"pennylane.devices.default_qubit_tf.SUPPORTS_APPLY_OPS\", False)\r\n dev = DefaultQubitTF(wires=3)\r\n assert dev._apply_ops == {}\r\n\r\n spy = mocker.spy(DefaultQubitTF, \"_apply_unitary_einsum\")\r\n\r\n queue = [qml.CNOT(wires=[1, 2])]\r\n dev.apply(queue)\r\n\r\n spy.assert_called_once()\r\n\r\n def test_apply_ops_above_8_wires(self, mocker):\r\n \"\"\"Test that when 9 wires are used, the _apply_ops dictionary is empty and application of a\r\n CNOT gate is performed using _apply_unitary_einsum\"\"\"\r\n dev = DefaultQubitTF(wires=9)\r\n assert dev._apply_ops == {}\r\n\r\n spy = mocker.spy(DefaultQubitTF, \"_apply_unitary_einsum\")\r\n\r\n queue = [qml.CNOT(wires=[1, 2])]\r\n dev.apply(queue)\r\n\r\n spy.assert_called_once()\r\n\r\n @pytest.mark.xfail(\r\n raises=tf.errors.UnimplementedError,\r\n reason=\"Slicing is not supported for more than 8 wires\",\r\n strict=True,\r\n )\r\n def test_apply_ops_above_8_wires_using_special(self):\r\n \"\"\"Test that special apply methods that involve slicing function correctly when using 9\r\n wires\"\"\"\r\n dev = DefaultQubitTF(wires=9)\r\n dev._apply_ops = {\"CNOT\": dev._apply_cnot}\r\n\r\n queue = [qml.CNOT(wires=[1, 2])]\r\n dev.apply(queue)\r\n\r\n\r\nTHETA = np.linspace(0.11, 1, 3)\r\nPHI = np.linspace(0.32, 1, 3)\r\nVARPHI = np.linspace(0.02, 1, 3)\r\n\r\n\r\[email protected](\"theta, phi, varphi\", list(zip(THETA, PHI, VARPHI)))\r\nclass TestExpval:\r\n \"\"\"Test expectation values\"\"\"\r\n\r\n # test data; each tuple is of the form (GATE, OBSERVABLE, EXPECTED)\r\n single_wire_expval_test_data = [\r\n (qml.RX, qml.Identity, lambda t, p: np.array([1, 1])),\r\n (qml.RX, qml.PauliZ, lambda t, p: np.array([np.cos(t), np.cos(t) * np.cos(p)])),\r\n (qml.RY, qml.PauliX, lambda t, p: np.array([np.sin(t) * np.sin(p), np.sin(p)])),\r\n 
(qml.RX, qml.PauliY, lambda t, p: np.array([0, -np.cos(t) * np.sin(p)])),\r\n (\r\n qml.RY,\r\n qml.Hadamard,\r\n lambda t, p: np.array(\r\n [np.sin(t) * np.sin(p) + np.cos(t), np.cos(t) * np.cos(p) + np.sin(p)]\r\n )\r\n / np.sqrt(2),\r\n ),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"gate,obs,expected\", single_wire_expval_test_data)\r\n def test_single_wire_expectation(self, gate, obs, expected, theta, phi, varphi, tol):\r\n \"\"\"Test that identity expectation value (i.e. the trace) is 1\"\"\"\r\n dev = DefaultQubitTF(wires=2)\r\n\r\n with qml.tape.QuantumTape() as tape:\r\n queue = [gate(theta, wires=0), gate(phi, wires=1), qml.CNOT(wires=[0, 1])]\r\n observables = [qml.expval(obs(wires=[i])) for i in range(2)]\r\n\r\n res = dev.execute(tape)\r\n assert np.allclose(res, expected(theta, phi), atol=tol, rtol=0)\r\n\r\n def test_hermitian_expectation(self, theta, phi, varphi, tol):\r\n \"\"\"Test that arbitrary Hermitian expectation values are correct\"\"\"\r\n dev = DefaultQubitTF(wires=2)\r\n\r\n with qml.tape.QuantumTape() as tape:\r\n queue = [qml.RY(theta, wires=0), qml.RY(phi, wires=1), qml.CNOT(wires=[0, 1])]\r\n observables = [qml.expval(qml.Hermitian(A, wires=[i])) for i in range(2)]\r\n\r\n res = dev.execute(tape)\r\n\r\n a = A[0, 0]\r\n re_b = A[0, 1].real\r\n d = A[1, 1]\r\n ev1 = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2\r\n ev2 = ((a - d) * np.cos(theta) * np.cos(phi) + 2 * re_b * np.sin(phi) + a + d) / 2\r\n expected = np.array([ev1, ev2])\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_multi_mode_hermitian_expectation(self, theta, phi, varphi, tol):\r\n \"\"\"Test that arbitrary multi-mode Hermitian expectation values are correct\"\"\"\r\n A = np.array(\r\n [\r\n [-6, 2 + 1j, -3, -5 + 2j],\r\n [2 - 1j, 0, 2 - 1j, -5 + 4j],\r\n [-3, 2 + 1j, 0, -4 + 3j],\r\n [-5 - 2j, -5 - 4j, -4 - 3j, -6],\r\n ]\r\n )\r\n\r\n dev = DefaultQubitTF(wires=2)\r\n\r\n\r\n with qml.tape.QuantumTape() as tape:\r\n queue = [qml.RY(theta, wires=0), qml.RY(phi, wires=1), qml.CNOT(wires=[0, 1])]\r\n observables = [qml.expval(qml.Hermitian(A, wires=[0, 1]))]\r\n\r\n res = dev.execute(tape)\r\n\r\n # below is the analytic expectation value for this circuit with arbitrary\r\n # Hermitian observable A\r\n expected = 0.5 * (\r\n 6 * np.cos(theta) * np.sin(phi)\r\n - np.sin(theta) * (8 * np.sin(phi) + 7 * np.cos(phi) + 3)\r\n - 2 * np.sin(phi)\r\n - 6 * np.cos(phi)\r\n - 6\r\n )\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_paulix_pauliy(self, theta, phi, varphi, tol):\r\n \"\"\"Test that a tensor product involving PauliX and PauliY works correctly\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=3)\r\n dev.reset()\r\n\r\n obs = qml.PauliX(0) @ qml.PauliY(2)\r\n\r\n dev.apply(\r\n [\r\n qml.RX(theta, wires=[0]),\r\n qml.RX(phi, wires=[1]),\r\n qml.RX(varphi, wires=[2]),\r\n qml.CNOT(wires=[0, 1]),\r\n qml.CNOT(wires=[1, 2])\r\n ],\r\n obs.diagonalizing_gates()\r\n )\r\n\r\n res = dev.expval(obs)\r\n\r\n expected = np.sin(theta) * np.sin(phi) * np.sin(varphi)\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_pauliz_identity(self, theta, phi, varphi, tol):\r\n \"\"\"Test that a tensor product involving PauliZ and Identity works correctly\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=3)\r\n dev.reset()\r\n\r\n obs = qml.PauliZ(0) @ qml.Identity(1) @ qml.PauliZ(2)\r\n\r\n dev.apply(\r\n [\r\n qml.RX(theta, wires=[0]),\r\n qml.RX(phi, wires=[1]),\r\n qml.RX(varphi, 
wires=[2]),\r\n qml.CNOT(wires=[0, 1]),\r\n qml.CNOT(wires=[1, 2])\r\n ],\r\n obs.diagonalizing_gates()\r\n )\r\n\r\n res = dev.expval(obs)\r\n\r\n expected = np.cos(varphi)*np.cos(phi)\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_pauliz_hadamard(self, theta, phi, varphi, tol):\r\n \"\"\"Test that a tensor product involving PauliZ and PauliY and hadamard works correctly\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=3)\r\n obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2)\r\n\r\n dev.reset()\r\n dev.apply(\r\n [\r\n qml.RX(theta, wires=[0]),\r\n qml.RX(phi, wires=[1]),\r\n qml.RX(varphi, wires=[2]),\r\n qml.CNOT(wires=[0, 1]),\r\n qml.CNOT(wires=[1, 2])\r\n ],\r\n obs.diagonalizing_gates()\r\n )\r\n\r\n res = dev.expval(obs)\r\n\r\n expected = -(np.cos(varphi) * np.sin(phi) + np.sin(varphi) * np.cos(theta)) / np.sqrt(2)\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_hermitian(self, theta, phi, varphi, tol):\r\n \"\"\"Test that a tensor product involving qml.Hermitian works correctly\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=3)\r\n dev.reset()\r\n\r\n A = np.array(\r\n [\r\n [-6, 2 + 1j, -3, -5 + 2j],\r\n [2 - 1j, 0, 2 - 1j, -5 + 4j],\r\n [-3, 2 + 1j, 0, -4 + 3j],\r\n [-5 - 2j, -5 - 4j, -4 - 3j, -6],\r\n ]\r\n )\r\n\r\n obs = qml.PauliZ(0) @ qml.Hermitian(A, wires=[1, 2])\r\n\r\n dev.apply(\r\n [\r\n qml.RX(theta, wires=[0]),\r\n qml.RX(phi, wires=[1]),\r\n qml.RX(varphi, wires=[2]),\r\n qml.CNOT(wires=[0, 1]),\r\n qml.CNOT(wires=[1, 2])\r\n ],\r\n obs.diagonalizing_gates()\r\n )\r\n\r\n res = dev.expval(obs)\r\n\r\n expected = 0.5 * (\r\n -6 * np.cos(theta) * (np.cos(varphi) + 1)\r\n - 2 * np.sin(varphi) * (np.cos(theta) + np.sin(phi) - 2 * np.cos(phi))\r\n + 3 * np.cos(varphi) * np.sin(phi)\r\n + np.sin(phi)\r\n )\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_hermitian_hermitian(self, theta, phi, varphi, tol):\r\n \"\"\"Test that a tensor product involving two Hermitian matrices works correctly\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=3)\r\n\r\n A1 = np.array([[1, 2],\r\n [2, 4]])\r\n\r\n A2 = np.array(\r\n [\r\n [-6, 2 + 1j, -3, -5 + 2j],\r\n [2 - 1j, 0, 2 - 1j, -5 + 4j],\r\n [-3, 2 + 1j, 0, -4 + 3j],\r\n [-5 - 2j, -5 - 4j, -4 - 3j, -6],\r\n ]\r\n )\r\n\r\n obs = qml.Hermitian(A1, wires=[0]) @ qml.Hermitian(A2, wires=[1, 2])\r\n\r\n dev.apply(\r\n [\r\n qml.RX(theta, wires=[0]),\r\n qml.RX(phi, wires=[1]),\r\n qml.RX(varphi, wires=[2]),\r\n qml.CNOT(wires=[0, 1]),\r\n qml.CNOT(wires=[1, 2])\r\n ],\r\n obs.diagonalizing_gates()\r\n )\r\n\r\n res = dev.expval(obs)\r\n\r\n expected = 0.25 * (\r\n -30\r\n + 4 * np.cos(phi) * np.sin(theta)\r\n + 3 * np.cos(varphi) * (-10 + 4 * np.cos(phi) * np.sin(theta) - 3 * np.sin(phi))\r\n - 3 * np.sin(phi)\r\n - 2 * (5 + np.cos(phi) * (6 + 4 * np.sin(theta)) + (-3 + 8 * np.sin(theta)) * np.sin(phi))\r\n * np.sin(varphi)\r\n + np.cos(theta)\r\n * (\r\n 18\r\n + 5 * np.sin(phi)\r\n + 3 * np.cos(varphi) * (6 + 5 * np.sin(phi))\r\n + 2 * (3 + 10 * np.cos(phi) - 5 * np.sin(phi)) * np.sin(varphi)\r\n )\r\n )\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_hermitian_identity_expectation(self, theta, phi, varphi, tol):\r\n \"\"\"Test that a tensor product involving an Hermitian matrix and the identity works correctly\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=2)\r\n\r\n A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])\r\n\r\n obs = 
qml.Hermitian(A, wires=[0]) @ qml.Identity(wires=[1])\r\n\r\n dev.apply(\r\n [\r\n qml.RY(theta, wires=[0]),\r\n qml.RY(phi, wires=[1]),\r\n qml.CNOT(wires=[0, 1])\r\n ],\r\n obs.diagonalizing_gates()\r\n )\r\n\r\n res = dev.expval(obs)\r\n\r\n a = A[0, 0]\r\n re_b = A[0, 1].real\r\n d = A[1, 1]\r\n expected = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_hermitian_two_wires_identity_expectation(self, theta, phi, varphi, tol):\r\n \"\"\"Test that a tensor product involving an Hermitian matrix for two wires and the identity works correctly\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=3, shots=None)\r\n\r\n A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])\r\n Identity = np.array([[1, 0],[0, 1]])\r\n H = np.kron(np.kron(Identity,Identity), A)\r\n obs = qml.Hermitian(H, wires=[2, 1, 0])\r\n\r\n dev.apply(\r\n [\r\n qml.RY(theta, wires=[0]),\r\n qml.RY(phi, wires=[1]),\r\n qml.CNOT(wires=[0, 1])\r\n ],\r\n obs.diagonalizing_gates()\r\n )\r\n res = dev.expval(obs)\r\n\r\n a = A[0, 0]\r\n re_b = A[0, 1].real\r\n d = A[1, 1]\r\n\r\n expected = ((a - d) * np.cos(theta) + 2 * re_b * np.sin(theta) * np.sin(phi) + a + d) / 2\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n\r\[email protected](\"theta, phi, varphi\", list(zip(THETA, PHI, VARPHI)))\r\nclass TestVar:\r\n \"\"\"Tests for the variance\"\"\"\r\n\r\n def test_var(self, theta, phi, varphi, tol):\r\n \"\"\"Tests for variance calculation\"\"\"\r\n dev = DefaultQubitTF(wires=1)\r\n # test correct variance for <Z> of a rotated state\r\n\r\n with qml.tape.QuantumTape() as tape:\r\n queue = [qml.RX(phi, wires=0), qml.RY(theta, wires=0)]\r\n observables = [qml.var(qml.PauliZ(wires=[0]))]\r\n\r\n res = dev.execute(tape)\r\n expected = 0.25 * (3 - np.cos(2 * theta) - 2 * np.cos(theta) ** 2 * np.cos(2 * phi))\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_var_hermitian(self, theta, phi, varphi, tol):\r\n \"\"\"Tests for variance calculation using an arbitrary Hermitian observable\"\"\"\r\n dev = DefaultQubitTF(wires=2)\r\n\r\n # test correct variance for <H> of a rotated state\r\n H = np.array([[4, -1 + 6j], [-1 - 6j, 2]])\r\n\r\n with qml.tape.QuantumTape() as tape:\r\n queue = [qml.RX(phi, wires=0), qml.RY(theta, wires=0)]\r\n observables = [qml.var(qml.Hermitian(H, wires=[0]))]\r\n\r\n res = dev.execute(tape)\r\n expected = 0.5 * (\r\n 2 * np.sin(2 * theta) * np.cos(phi) ** 2\r\n + 24 * np.sin(phi) * np.cos(phi) * (np.sin(theta) - np.cos(theta))\r\n + 35 * np.cos(2 * phi)\r\n + 39\r\n )\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_paulix_pauliy(self, theta, phi, varphi, tol):\r\n \"\"\"Test that a tensor product involving PauliX and PauliY works correctly\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=3)\r\n\r\n obs = qml.PauliX(0) @ qml.PauliY(2)\r\n\r\n dev.apply(\r\n [\r\n qml.RX(theta, wires=[0]),\r\n qml.RX(phi, wires=[1]),\r\n qml.RX(varphi, wires=[2]),\r\n qml.CNOT(wires=[0, 1]),\r\n qml.CNOT(wires=[1, 2])\r\n ],\r\n obs.diagonalizing_gates()\r\n )\r\n\r\n res = dev.var(obs)\r\n\r\n expected = (\r\n 8 * np.sin(theta) ** 2 * np.cos(2 * varphi) * np.sin(phi) ** 2\r\n - np.cos(2 * (theta - phi))\r\n - np.cos(2 * (theta + phi))\r\n + 2 * np.cos(2 * theta)\r\n + 2 * np.cos(2 * phi)\r\n + 14\r\n ) / 16\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_pauliz_hadamard(self, 
theta, phi, varphi, tol):\r\n \"\"\"Test that a tensor product involving PauliZ and PauliY and hadamard works correctly\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=3)\r\n obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2)\r\n\r\n dev.reset()\r\n dev.apply(\r\n [\r\n qml.RX(theta, wires=[0]),\r\n qml.RX(phi, wires=[1]),\r\n qml.RX(varphi, wires=[2]),\r\n qml.CNOT(wires=[0, 1]),\r\n qml.CNOT(wires=[1, 2])\r\n ],\r\n obs.diagonalizing_gates()\r\n )\r\n\r\n res = dev.var(obs)\r\n\r\n expected = (\r\n 3\r\n + np.cos(2 * phi) * np.cos(varphi) ** 2\r\n - np.cos(2 * theta) * np.sin(varphi) ** 2\r\n - 2 * np.cos(theta) * np.sin(phi) * np.sin(2 * varphi)\r\n ) / 4\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_hermitian(self, theta, phi, varphi, tol):\r\n \"\"\"Test that a tensor product involving qml.Hermitian works correctly\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=3)\r\n\r\n A = np.array(\r\n [\r\n [-6, 2 + 1j, -3, -5 + 2j],\r\n [2 - 1j, 0, 2 - 1j, -5 + 4j],\r\n [-3, 2 + 1j, 0, -4 + 3j],\r\n [-5 - 2j, -5 - 4j, -4 - 3j, -6],\r\n ]\r\n )\r\n\r\n obs = qml.PauliZ(0) @ qml.Hermitian(A, wires=[1, 2])\r\n\r\n dev.apply(\r\n [\r\n qml.RX(theta, wires=[0]),\r\n qml.RX(phi, wires=[1]),\r\n qml.RX(varphi, wires=[2]),\r\n qml.CNOT(wires=[0, 1]),\r\n qml.CNOT(wires=[1, 2])\r\n ],\r\n obs.diagonalizing_gates()\r\n )\r\n\r\n res = dev.var(obs)\r\n\r\n expected = (\r\n 1057\r\n - np.cos(2 * phi)\r\n + 12 * (27 + np.cos(2 * phi)) * np.cos(varphi)\r\n - 2 * np.cos(2 * varphi) * np.sin(phi) * (16 * np.cos(phi) + 21 * np.sin(phi))\r\n + 16 * np.sin(2 * phi)\r\n - 8 * (-17 + np.cos(2 * phi) + 2 * np.sin(2 * phi)) * np.sin(varphi)\r\n - 8 * np.cos(2 * theta) * (3 + 3 * np.cos(varphi) + np.sin(varphi)) ** 2\r\n - 24 * np.cos(phi) * (np.cos(phi) + 2 * np.sin(phi)) * np.sin(2 * varphi)\r\n - 8\r\n * np.cos(theta)\r\n * (\r\n 4\r\n * np.cos(phi)\r\n * (\r\n 4\r\n + 8 * np.cos(varphi)\r\n + np.cos(2 * varphi)\r\n - (1 + 6 * np.cos(varphi)) * np.sin(varphi)\r\n )\r\n + np.sin(phi)\r\n * (\r\n 15\r\n + 8 * np.cos(varphi)\r\n - 11 * np.cos(2 * varphi)\r\n + 42 * np.sin(varphi)\r\n + 3 * np.sin(2 * varphi)\r\n )\r\n )\r\n ) / 16\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n\r\n#####################################################\r\n# QNode-level integration tests\r\n#####################################################\r\n\r\n\r\nclass TestQNodeIntegration:\r\n \"\"\"Integration tests for default.qubit.tf. 
This test ensures it integrates\r\n properly with the PennyLane UI, in particular the new QNode."""\r\n\r\n def test_defines_correct_capabilities(self):\r\n """Test that the device defines the right capabilities"""\r\n\r\n dev = qml.device("default.qubit.tf", wires=1)\r\n cap = dev.capabilities()\r\n capabilities = {\r\n "model": "qubit",\r\n "supports_finite_shots": True,\r\n "supports_tensor_observables": True,\r\n "returns_probs": True,\r\n "returns_state": True,\r\n "supports_reversible_diff": False,\r\n "supports_inverse_operations": True,\r\n "supports_analytic_computation": True,\r\n "passthru_interface": 'tf',\r\n "passthru_devices": {\r\n "tf": "default.qubit.tf",\r\n "autograd": "default.qubit.autograd",\r\n "jax": "default.qubit.jax",\r\n },\r\n }\r\n assert cap == capabilities\r\n\r\n def test_load_tf_device(self):\r\n """Test that the default.qubit.tf device loads correctly"""\r\n dev = qml.device("default.qubit.tf", wires=2)\r\n assert dev.num_wires == 2\r\n assert dev.shots is None\r\n assert dev.short_name == "default.qubit.tf"\r\n assert dev.capabilities()["passthru_interface"] == "tf"\r\n\r\n def test_qubit_circuit(self, tol):\r\n """Test that the device provides the correct\r\n result for a simple circuit."""\r\n p = tf.Variable(0.543)\r\n\r\n dev = qml.device("default.qubit.tf", wires=1)\r\n\r\n @qml.qnode(dev, interface="tf")\r\n def circuit(x):\r\n qml.RX(x, wires=0)\r\n return qml.expval(qml.PauliY(0))\r\n\r\n expected = -tf.math.sin(p)\r\n\r\n assert circuit.diff_options["method"] == "backprop"\r\n assert np.isclose(circuit(p), expected, atol=tol, rtol=0)\r\n\r\n def test_correct_state(self, tol):\r\n """Test that the device state is correct after applying a\r\n quantum function on the device"""\r\n\r\n dev = qml.device("default.qubit.tf", wires=2)\r\n\r\n state = dev.state\r\n expected = np.array([1, 0, 0, 0])\r\n assert np.allclose(state, expected, atol=tol, rtol=0)\r\n\r\n @qml.qnode(dev, interface="tf", diff_method="backprop")\r\n def circuit():\r\n qml.Hadamard(wires=0)\r\n qml.RZ(np.pi / 4, wires=0)\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n circuit()\r\n state = dev.state\r\n\r\n amplitude = np.exp(-1j * np.pi / 8) / np.sqrt(2)\r\n\r\n expected = np.array([amplitude, 0, np.conj(amplitude), 0])\r\n assert np.allclose(state, expected, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize("theta", [0.5432, -0.232])\r\n @pytest.mark.parametrize("op,func", single_qubit_param)\r\n def test_one_qubit_param_gates(self, theta, op, func, init_state, tol):\r\n """Test the integration of the one-qubit single parameter rotations by passing\r\n a TF data structure as a parameter"""\r\n dev = qml.device("default.qubit.tf", wires=1)\r\n state = init_state(1)\r\n\r\n @qml.qnode(dev, interface='tf')\r\n def circuit(params):\r\n qml.QubitStateVector(state, wires=[0])\r\n op(params[0], wires=[0])\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n # Pass a TF Variable to the qfunc\r\n params = tf.Variable(np.array([theta]))\r\n circuit(params)\r\n res = dev.state\r\n expected = func(theta) @ state\r\n assert np.allclose(res.numpy(), expected, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize("theta", [0.5432, 4.213])\r\n @pytest.mark.parametrize("op,func", two_qubit_param)\r\n def test_two_qubit_param_gates(self, theta, op, func, init_state, tol):\r\n """Test the integration of the two-qubit single parameter rotations by passing\r\n a TF data 
structure as a parameter\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=2)\r\n state = init_state(2)\r\n\r\n @qml.qnode(dev, interface='tf')\r\n def circuit(params):\r\n qml.QubitStateVector(state, wires=[0,1])\r\n op(params[0], wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n # Pass a TF Variable to the qfunc\r\n params = tf.Variable(np.array([theta]))\r\n circuit(params)\r\n res = dev.state\r\n expected = func(theta) @ state\r\n assert np.allclose(res.numpy(), expected, atol=tol, rtol=0)\r\n\r\n def test_controlled_rotation_integration(self, init_state, tol):\r\n \"\"\"Test the integration of the two-qubit controlled rotation by passing\r\n a TF data structure as a parameter\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=2)\r\n a = 1.7\r\n b = 1.3432\r\n c = -0.654\r\n state = init_state(2)\r\n\r\n @qml.qnode(dev, interface='tf')\r\n def circuit(params):\r\n qml.QubitStateVector(state, wires=[0,1])\r\n qml.CRot(params[0], params[1], params[2], wires=[0,1])\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n # Pass a TF Variable to the qfunc\r\n params = tf.Variable(np.array([a,b,c]))\r\n circuit(params)\r\n res = dev.state\r\n expected = CRot3(a, b, c) @ state\r\n assert np.allclose(res.numpy(), expected, atol=tol, rtol=0)\r\n\r\nclass TestPassthruIntegration:\r\n \"\"\"Tests for integration with the PassthruQNode\"\"\"\r\n\r\n def test_jacobian_variable_multiply(self, tol):\r\n \"\"\"Test that jacobian of a QNode with an attached default.qubit.tf device\r\n gives the correct result in the case of parameters multiplied by scalars\"\"\"\r\n x = tf.Variable(0.43316321)\r\n y = tf.Variable(0.2162158)\r\n z = tf.Variable(0.75110998)\r\n\r\n dev = qml.device(\"default.qubit.tf\", wires=1)\r\n\r\n @qml.qnode(dev, interface=\"tf\", diff_method=\"backprop\")\r\n def circuit(p):\r\n qml.RX(3 * p[0], wires=0)\r\n qml.RY(p[1], wires=0)\r\n qml.RX(p[2] / 2, wires=0)\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n with tf.GradientTape() as tape:\r\n res = circuit([x, y, z])\r\n\r\n expected = tf.math.cos(3 * x) * tf.math.cos(y) * tf.math.cos(z / 2) - tf.math.sin(\r\n 3 * x\r\n ) * tf.math.sin(z / 2)\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n res = tf.concat(tape.jacobian(res, [x, y, z]), axis=0)\r\n\r\n expected = np.array(\r\n [\r\n -3\r\n * (\r\n tf.math.sin(3 * x) * tf.math.cos(y) * tf.math.cos(z / 2)\r\n + tf.math.cos(3 * x) * tf.math.sin(z / 2)\r\n ),\r\n -tf.math.cos(3 * x) * tf.math.sin(y) * tf.math.cos(z / 2),\r\n -0.5\r\n * (\r\n tf.math.sin(3 * x) * tf.math.cos(z / 2)\r\n + tf.math.cos(3 * x) * tf.math.cos(y) * tf.math.sin(z / 2)\r\n ),\r\n ]\r\n )\r\n\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_jacobian_repeated(self, tol):\r\n \"\"\"Test that jacobian of a QNode with an attached default.qubit.tf device\r\n gives the correct result in the case of repeated parameters\"\"\"\r\n x = 0.43316321\r\n y = 0.2162158\r\n z = 0.75110998\r\n p = tf.Variable([x, y, z])\r\n dev = qml.device(\"default.qubit.tf\", wires=1)\r\n\r\n @qml.qnode(dev, interface=\"tf\", diff_method=\"backprop\")\r\n def circuit(x):\r\n qml.RX(x[1], wires=0)\r\n qml.Rot(x[0], x[1], x[2], wires=0)\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n with tf.GradientTape() as tape:\r\n res = circuit(p)\r\n\r\n expected = np.cos(y) ** 2 - np.sin(x) * np.sin(y) ** 2\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n res = tape.jacobian(res, p)\r\n\r\n expected = np.array(\r\n [-np.cos(x) * np.sin(y) ** 2, -2 * (np.sin(x) + 1) * np.sin(y) * np.cos(y), 0]\r\n 
)\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_jacobian_agrees_backprop_parameter_shift(self, tol):\r\n \"\"\"Test that jacobian of a QNode with an attached default.qubit.tf device\r\n gives the correct result with respect to the parameter-shift method\"\"\"\r\n p = np.array([0.43316321, 0.2162158, 0.75110998, 0.94714242])\r\n\r\n def circuit(x):\r\n for i in range(0, len(p), 2):\r\n qml.RX(x[i], wires=0)\r\n qml.RY(x[i + 1], wires=1)\r\n for i in range(2):\r\n qml.CNOT(wires=[i, i + 1])\r\n return qml.expval(qml.PauliZ(0)), qml.var(qml.PauliZ(1))\r\n\r\n dev1 = qml.device(\"default.qubit.tf\", wires=3)\r\n dev2 = qml.device(\"default.qubit.tf\", wires=3)\r\n\r\n circuit1 = qml.QNode(circuit, dev1, diff_method=\"backprop\", interface=\"tf\")\r\n circuit2 = qml.QNode(circuit, dev2, diff_method=\"parameter-shift\")\r\n\r\n p_tf = tf.Variable(p)\r\n with tf.GradientTape() as tape:\r\n res = circuit1(p_tf)\r\n\r\n assert np.allclose(res, circuit2(p), atol=tol, rtol=0)\r\n\r\n res = tape.jacobian(res, p_tf)\r\n assert np.allclose(res, qml.jacobian(circuit2)(p), atol=tol, rtol=0)\r\n\r\n def test_state_differentiability(self, tol):\r\n \"\"\"Test that the device state can be differentiated\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=1)\r\n\r\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"tf\")\r\n def circuit(a):\r\n qml.RY(a, wires=0)\r\n return qml.expval(qml.PauliZ(0))\r\n\r\n a = tf.Variable(0.54)\r\n\r\n with tf.GradientTape() as tape:\r\n circuit(a)\r\n res = tf.abs(dev.state) ** 2\r\n res = res[1] - res[0]\r\n\r\n grad = tape.gradient(res, a)\r\n expected = tf.sin(a)\r\n assert np.allclose(grad, expected, atol=tol, rtol=0)\r\n\r\n def test_prob_differentiability(self, tol):\r\n \"\"\"Test that the device probability can be differentiated\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=2)\r\n\r\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"tf\")\r\n def circuit(a, b):\r\n qml.RX(a, wires=0)\r\n qml.RY(b, wires=1)\r\n qml.CNOT(wires=[0, 1])\r\n return qml.probs(wires=[1])\r\n\r\n a = tf.Variable(0.54)\r\n b = tf.Variable(0.12)\r\n\r\n with tf.GradientTape() as tape:\r\n # get the probability of wire 1\r\n prob_wire_1 = circuit(a, b)\r\n # compute Prob(|1>_1) - Prob(|0>_1)\r\n res = prob_wire_1[1] - prob_wire_1[0]\r\n\r\n expected = -tf.cos(a) * tf.cos(b)\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n grad = tape.gradient(res, [a, b])\r\n expected = [tf.sin(a) * tf.cos(b), tf.cos(a) * tf.sin(b)]\r\n assert np.allclose(grad, expected, atol=tol, rtol=0)\r\n\r\n def test_backprop_gradient(self, tol):\r\n \"\"\"Tests that the gradient of the qnode is correct\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=2)\r\n\r\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"tf\")\r\n def circuit(a, b):\r\n qml.RX(a, wires=0)\r\n qml.CRX(b, wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\r\n\r\n a = -0.234\r\n b = 0.654\r\n\r\n a_tf = tf.Variable(a, dtype=tf.float64)\r\n b_tf = tf.Variable(b, dtype=tf.float64)\r\n\r\n with tf.GradientTape() as tape:\r\n tape.watch([a_tf, b_tf])\r\n res = circuit(a_tf, b_tf)\r\n\r\n # the analytic result of evaluating circuit(a, b)\r\n expected_cost = 0.5 * (np.cos(a) * np.cos(b) + np.cos(a) - np.cos(b) + 1)\r\n\r\n # the analytic result of evaluating grad(circuit(a, b))\r\n expected_grad = np.array(\r\n [-0.5 * np.sin(a) * (np.cos(b) + 1), 0.5 * np.sin(b) * (1 - np.cos(a))]\r\n )\r\n\r\n assert np.allclose(res.numpy(), expected_cost, atol=tol, rtol=0)\r\n\r\n 
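# the tape backpropagates through the simulation itself, so the result below\r\n # should match expected_grad, the analytic derivative of expected_cost\r\n # with respect to (a, b)\r\n 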
res = tape.gradient(res, [a_tf, b_tf])\r\n assert np.allclose(res, expected_grad, atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize(\"operation\", [qml.U3, qml.U3.decomposition])\r\n @pytest.mark.parametrize(\"diff_method\", [\"backprop\", \"parameter-shift\", \"finite-diff\"])\r\n def test_tf_interface_gradient(self, operation, diff_method, tol):\r\n \"\"\"Tests that the gradient of an arbitrary U3 gate is correct\r\n using the TensorFlow interface, using a variety of differentiation methods.\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=1)\r\n\r\n @qml.qnode(dev, diff_method=diff_method, interface=\"tf\")\r\n def circuit(x, weights, w):\r\n \"\"\"In this example, a mixture of scalar\r\n arguments, array arguments, and keyword arguments are used.\"\"\"\r\n qml.QubitStateVector(1j * np.array([1, -1]) / np.sqrt(2), wires=w)\r\n operation(x, weights[0], weights[1], wires=w)\r\n return qml.expval(qml.PauliX(w))\r\n\r\n # Check that the correct QNode type is being used.\r\n if diff_method == \"backprop\":\r\n assert circuit.diff_options[\"method\"] == \"backprop\"\r\n elif diff_method == \"parameter-shift\":\r\n assert circuit.diff_options[\"method\"] == \"analytic\"\r\n elif diff_method == \"finite-diff\":\r\n assert circuit.diff_options[\"method\"] == \"numeric\"\r\n\r\n def cost(params):\r\n \"\"\"Perform some classical processing\"\"\"\r\n return circuit(params[0], params[1:], w=0) ** 2\r\n\r\n theta = 0.543\r\n phi = -0.234\r\n lam = 0.654\r\n\r\n params = tf.Variable([theta, phi, lam], dtype=tf.float64)\r\n\r\n with tf.GradientTape() as tape:\r\n tape.watch(params)\r\n res = cost(params)\r\n\r\n # check that the result is correct\r\n expected_cost = (np.sin(lam) * np.sin(phi) - np.cos(theta) * np.cos(lam) * np.cos(phi)) ** 2\r\n assert np.allclose(res.numpy(), expected_cost, atol=tol, rtol=0)\r\n\r\n res = tape.gradient(res, params)\r\n\r\n # check that the gradient is correct\r\n expected_grad = (\r\n np.array(\r\n [\r\n np.sin(theta) * np.cos(lam) * np.cos(phi),\r\n np.cos(theta) * np.cos(lam) * np.sin(phi) + np.sin(lam) * np.cos(phi),\r\n np.cos(theta) * np.sin(lam) * np.cos(phi) + np.cos(lam) * np.sin(phi),\r\n ]\r\n )\r\n * 2\r\n * (np.sin(lam) * np.sin(phi) - np.cos(theta) * np.cos(lam) * np.cos(phi))\r\n )\r\n assert np.allclose(res.numpy(), expected_grad, atol=tol, rtol=0)\r\n\r\n def test_inverse_operation_jacobian_backprop(self, tol):\r\n \"\"\"Test that inverse operations work in backprop\r\n mode\"\"\"\r\n dev = qml.device('default.qubit.tf', wires=1)\r\n\r\n @qml.qnode(dev, diff_method=\"backprop\", interface=\"tf\")\r\n def circuit(param):\r\n qml.RY(param, wires=0).inv()\r\n return qml.expval(qml.PauliX(0))\r\n\r\n x = tf.Variable(0.3)\r\n\r\n with tf.GradientTape() as tape:\r\n res = circuit(x)\r\n\r\n assert np.allclose(res, -tf.sin(x), atol=tol, rtol=0)\r\n\r\n grad = tape.gradient(res, x)\r\n assert np.allclose(grad, -tf.cos(x), atol=tol, rtol=0)\r\n\r\n @pytest.mark.parametrize(\"interface\", [\"autograd\", \"torch\"])\r\n def test_error_backprop_wrong_interface(self, interface, tol):\r\n \"\"\"Tests that an error is raised if diff_method='backprop' but not using\r\n the TF interface\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=1)\r\n\r\n def circuit(x, w=None):\r\n qml.RZ(x, wires=w)\r\n return qml.expval(qml.PauliX(w))\r\n\r\n with pytest.raises(\r\n qml.QuantumFunctionError,\r\n match=\"default.qubit.tf only supports diff_method='backprop' when using the tf interface\",\r\n ):\r\n qml.qnode(dev, diff_method=\"backprop\", 
interface=interface)(circuit)\r\n\r\n\r\nclass TestSamples:\r\n """Tests for sampling outputs"""\r\n\r\n def test_sample_observables(self):\r\n """Test that the device allows for sampling from observables."""\r\n shots = 100\r\n dev = qml.device("default.qubit.tf", wires=2, shots=shots)\r\n\r\n @qml.qnode(dev, diff_method="backprop", interface="tf")\r\n def circuit(a):\r\n qml.RX(a, wires=0)\r\n return qml.sample(qml.PauliZ(0))\r\n\r\n a = tf.Variable(0.54)\r\n res = circuit(a)\r\n\r\n assert isinstance(res, tf.Tensor)\r\n assert res.shape == (shots,)\r\n assert set(res.numpy()) == {-1, 1}\r\n\r\n def test_sample_observables_non_differentiable(self):\r\n """Test that sampled observables cannot be differentiated."""\r\n shots = 100\r\n dev = qml.device("default.qubit.tf", wires=2, shots=shots)\r\n\r\n @qml.qnode(dev, diff_method="backprop", interface="tf")\r\n def circuit(a):\r\n qml.RX(a, wires=0)\r\n return qml.sample(qml.PauliZ(0))\r\n\r\n a = tf.Variable(0.54)\r\n\r\n with tf.GradientTape() as tape:\r\n res = circuit(a)\r\n\r\n assert tape.gradient(res, a) is None\r\n\r\n def test_estimating_marginal_probability(self, tol):\r\n """Test that the probability of a subset of wires is accurately estimated."""\r\n dev = qml.device("default.qubit.tf", wires=2, shots=1000)\r\n\r\n @qml.qnode(dev, diff_method="backprop", interface="tf")\r\n def circuit():\r\n qml.PauliX(0)\r\n return qml.probs(wires=[0])\r\n\r\n res = circuit()\r\n\r\n assert isinstance(res, tf.Tensor)\r\n\r\n expected = np.array([0, 1])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_estimating_full_probability(self, tol):\r\n """Test that the joint probability of all wires is accurately estimated."""\r\n dev = qml.device("default.qubit.tf", wires=2, shots=1000)\r\n\r\n @qml.qnode(dev, diff_method="backprop", interface="tf")\r\n def circuit():\r\n qml.PauliX(0)\r\n qml.PauliX(1)\r\n return qml.probs(wires=[0, 1])\r\n\r\n res = circuit()\r\n\r\n assert isinstance(res, tf.Tensor)\r\n\r\n expected = np.array([0, 0, 0, 1])\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_estimating_expectation_values(self, tol):\r\n """Test that estimating expectation values using a finite number\r\n of shots produces a numeric tensor"""\r\n dev = qml.device("default.qubit.tf", wires=3, shots=1000)\r\n\r\n @qml.qnode(dev, diff_method="backprop", interface="tf")\r\n def circuit(a, b):\r\n qml.RX(a, wires=[0])\r\n qml.RX(b, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\r\n\r\n a = tf.Variable(0.543)\r\n b = tf.Variable(0.43)\r\n\r\n res = circuit(a, b)\r\n assert isinstance(res, tf.Tensor)\r\n\r\n # We don't check the expected value due to stochasticity, but\r\n # leave it here for completeness.\r\n # expected = [tf.cos(a), tf.cos(a) * tf.cos(b)]\r\n # assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_estimating_expectation_values_not_differentiable(self, tol):\r\n """Test that finite shots result in non-differentiable QNodes"""\r\n\r\n dev = qml.device("default.qubit.tf", wires=3, shots=1000)\r\n\r\n @qml.qnode(dev, diff_method="backprop", interface="tf")\r\n def circuit(a, b):\r\n qml.RX(a, wires=[0])\r\n qml.RX(b, wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))\r\n\r\n a = tf.Variable(0.543)\r\n b = tf.Variable(0.43)\r\n\r\n with tf.GradientTape() as tape:\r\n res = circuit(a, b)\r\n\r\n 
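# with shots=1000 the expectation values are estimated by sampling, which\r\n # happens outside of TensorFlow's computational graph; the tape therefore\r\n # records no differentiable path and the gradients below must be None\r\n 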
assert isinstance(res, tf.Tensor)\r\n grad = tape.gradient(res, [a, b])\r\n assert grad == [None, None]\r\n\r\n\r\nclass TestHighLevelIntegration:\r\n \"\"\"Tests for integration with higher level components of PennyLane.\"\"\"\r\n\r\n def test_qnode_collection_integration(self):\r\n \"\"\"Test that a PassthruQNode default.qubit.tf works with QNodeCollections.\"\"\"\r\n dev = qml.device(\"default.qubit.tf\", wires=2)\r\n\r\n obs_list = [qml.PauliX(0) @ qml.PauliY(1), qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliZ(1)]\r\n qnodes = qml.map(qml.templates.StronglyEntanglingLayers, obs_list, dev, interface=\"tf\")\r\n\r\n assert qnodes.interface == \"tf\"\r\n\r\n weights = tf.Variable(qml.init.strong_ent_layers_normal(n_wires=2, n_layers=2))\r\n\r\n @tf.function\r\n def cost(weights):\r\n return tf.reduce_sum(qnodes(weights))\r\n\r\n with tf.GradientTape() as tape:\r\n res = qnodes(weights)\r\n\r\n grad = tape.gradient(res, weights)\r\n\r\n assert isinstance(grad, tf.Tensor)\r\n assert grad.shape == weights.shape\r\n", "# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"Unit tests for the TensorBox functional API in pennylane.math.fn\r\n\"\"\"\r\nimport itertools\r\nimport numpy as onp\r\nimport pytest\r\n\r\nimport pennylane as qml\r\nfrom pennylane import numpy as np\r\nfrom pennylane.math import fn\r\n\r\n\r\ntf = pytest.importorskip(\"tensorflow\", minversion=\"2.1\")\r\ntorch = pytest.importorskip(\"torch\")\r\njax = pytest.importorskip(\"jax\")\r\njnp = pytest.importorskip(\"jax.numpy\")\r\n\r\n\r\nclass TestGetMultiTensorbox:\r\n \"\"\"Tests for the _get_multi_tensorbox utility function\"\"\"\r\n\r\n def test_exception_tensorflow_and_torch(self):\r\n \"\"\"Test that an exception is raised if the sequence of tensors contains\r\n tensors from incompatible dispatch libraries\"\"\"\r\n x = tf.Variable([1.0, 2.0, 3.0])\r\n y = onp.array([0.5, 0.1])\r\n z = torch.tensor([0.6])\r\n\r\n with pytest.raises(ValueError, match=\"Tensors contain mixed types\"):\r\n fn._get_multi_tensorbox([x, y, z])\r\n\r\n def test_warning_tensorflow_and_autograd(self):\r\n \"\"\"Test that a warning is raised if the sequence of tensors contains\r\n both tensorflow and autograd tensors.\"\"\"\r\n x = tf.Variable([1.0, 2.0, 3.0])\r\n y = np.array([0.5, 0.1])\r\n\r\n with pytest.warns(UserWarning, match=\"Consider replacing Autograd with vanilla NumPy\"):\r\n fn._get_multi_tensorbox([x, y])\r\n\r\n def test_warning_torch_and_autograd(self):\r\n \"\"\"Test that a warning is raised if the sequence of tensors contains\r\n both torch and autograd tensors.\"\"\"\r\n x = torch.tensor([1.0, 2.0, 3.0])\r\n y = np.array([0.5, 0.1])\r\n\r\n with pytest.warns(UserWarning, match=\"Consider replacing Autograd with vanilla NumPy\"):\r\n fn._get_multi_tensorbox([x, y])\r\n\r\n def test_return_tensorflow_box(self):\r\n \"\"\"Test that TensorFlow is correctly identified as the dispatching library.\"\"\"\r\n x = tf.Variable([1.0, 2.0, 3.0])\r\n y = 
onp.array([0.5, 0.1])\r\n\r\n res = fn._get_multi_tensorbox([y, x])\r\n assert res.interface == "tf"\r\n\r\n def test_return_torch_box(self):\r\n """Test that Torch is correctly identified as the dispatching library."""\r\n x = torch.tensor([1.0, 2.0, 3.0])\r\n y = onp.array([0.5, 0.1])\r\n\r\n res = fn._get_multi_tensorbox([y, x])\r\n assert res.interface == "torch"\r\n\r\n def test_return_autograd_box(self):\r\n """Test that autograd is correctly identified as the dispatching library."""\r\n x = np.array([1.0, 2.0, 3.0])\r\n y = [0.5, 0.1]\r\n\r\n res = fn._get_multi_tensorbox([y, x])\r\n assert res.interface == "autograd"\r\n\r\n def test_return_numpy_box(self):\r\n """Test that NumPy is correctly identified as the dispatching library."""\r\n x = onp.array([1.0, 2.0, 3.0])\r\n y = [0.5, 0.1]\r\n\r\n res = fn._get_multi_tensorbox([y, x])\r\n assert res.interface == "numpy"\r\n\r\n\r\ntest_abs_data = [\r\n (1, -2, 3 + 4j),\r\n [1, -2, 3 + 4j],\r\n onp.array([1, -2, 3 + 4j]),\r\n np.array([1, -2, 3 + 4j]),\r\n torch.tensor([1, -2, 3 + 4j], dtype=torch.complex128),\r\n tf.Variable([1, -2, 3 + 4j], dtype=tf.complex128),\r\n tf.constant([1, -2, 3 + 4j], dtype=tf.complex128),\r\n]\r\n\r\n\r\[email protected]("t", test_abs_data)\r\ndef test_abs(t):\r\n """Test that the absolute function works for a variety\r\n of input"""\r\n res = fn.abs_(t)\r\n assert fn.allequal(res, [1, 2, 5])\r\n\r\n\r\ntest_data = [\r\n (1, 2, 3),\r\n [1, 2, 3],\r\n onp.array([1, 2, 3]),\r\n np.array([1, 2, 3]),\r\n torch.tensor([1, 2, 3]),\r\n tf.Variable([1, 2, 3]),\r\n tf.constant([1, 2, 3]),\r\n]\r\n\r\n\r\[email protected]("t1,t2", list(itertools.combinations(test_data, r=2)))\r\ndef test_allequal(t1, t2):\r\n """Test that the allequal function works for a variety of inputs."""\r\n res = fn.allequal(t1, t2)\r\n\r\n if isinstance(t1, tf.Variable):\r\n t1 = tf.convert_to_tensor(t1)\r\n\r\n if isinstance(t2, tf.Variable):\r\n t2 = tf.convert_to_tensor(t2)\r\n\r\n expected = all(float(x) == float(y) for x, y in zip(t1, t2))\r\n assert res == expected\r\n\r\n\r\[email protected]("t1,t2", list(itertools.combinations(test_data, r=2)))\r\ndef test_allclose(t1, t2):\r\n """Test that the allclose function works for a variety of inputs."""\r\n res = fn.allclose(t1, t2)\r\n\r\n if isinstance(t1, tf.Variable):\r\n t1 = tf.convert_to_tensor(t1)\r\n\r\n if isinstance(t2, tf.Variable):\r\n t2 = tf.convert_to_tensor(t2)\r\n\r\n expected = all(float(x) == float(y) for x, y in zip(t1, t2))\r\n assert res == expected\r\n\r\n\r\ntest_angle_data = [\r\n (1.0, 1.0j, 1 + 1j),\r\n [1.0, 1.0j, 1 + 1j],\r\n onp.array([1.0, 1.0j, 1 + 1j]),\r\n np.array([1.0, 1.0j, 1 + 1j]),\r\n torch.tensor([1.0, 1.0j, 1 + 1j], dtype=torch.complex128),\r\n tf.Variable([1.0, 1.0j, 1 + 1j], dtype=tf.complex128),\r\n tf.constant([1.0, 1.0j, 1 + 1j], dtype=tf.complex128),\r\n]\r\n\r\n\r\[email protected]("t", test_angle_data)\r\ndef test_angle(t):\r\n """Test that the angle function works for a variety\r\n of input"""\r\n res = fn.angle(t)\r\n assert fn.allequal(res, [0, np.pi / 2, np.pi / 4])\r\n\r\n\r\ntest_arcsin_data = [\r\n (1, 0.2, -0.5),\r\n [1, 0.2, -0.5],\r\n onp.array([1, 0.2, -0.5]),\r\n np.array([1, 0.2, -0.5]),\r\n torch.tensor([1, 0.2, -0.5], dtype=torch.float64),\r\n tf.Variable([1, 0.2, -0.5], dtype=tf.float64),\r\n tf.constant([1, 0.2, -0.5], dtype=tf.float64),\r\n]\r\n\r\n\r\[email protected]("t", test_arcsin_data)\r\ndef test_arcsin(t):\r\n """Test that the arcsin function works for a 
variety\r\n of input\"\"\"\r\n res = fn.arcsin(t)\r\n assert fn.allequal(res, np.arcsin([1, 0.2, -0.5]))\r\n\r\ntest_conj_data = [\r\n [1.0, 1.0j, 1+1j],\r\n onp.array([1.0, 1.0j, 1+1j]),\r\n np.array([1.0, 1.0j, 1+1j]),\r\n jnp.array([1.0, 1.0j, 1+1j]),\r\n torch.tensor([1.0, 1.0j, 1+1j], dtype=torch.complex128),\r\n tf.Variable([1.0, 1.0j, 1+1j], dtype=tf.complex128),\r\n tf.constant([1.0, 1.0j, 1+1j], dtype=tf.complex128),\r\n]\r\n\r\[email protected](\"t\", test_conj_data)\r\ndef test_conj(t):\r\n res = fn.conj(t)\r\n assert fn.allequal(res, np.conj(t))\r\n\r\n\r\nclass TestCast:\r\n \"\"\"Tests for the cast function\"\"\"\r\n\r\n @pytest.mark.parametrize(\"t\", test_data)\r\n def test_cast_numpy(self, t):\r\n \"\"\"Test that specifying a NumPy dtype results in proper casting\r\n behaviour\"\"\"\r\n res = fn.cast(t, onp.float64)\r\n assert fn.get_interface(res) == fn.get_interface(t)\r\n\r\n if hasattr(res, \"numpy\"):\r\n # if tensorflow or pytorch, extract view of underlying data\r\n res = res.numpy()\r\n t = t.numpy()\r\n\r\n assert onp.issubdtype(onp.asarray(t).dtype, onp.integer)\r\n assert res.dtype.type is onp.float64\r\n\r\n @pytest.mark.parametrize(\"t\", test_data)\r\n def test_cast_numpy_dtype(self, t):\r\n \"\"\"Test that specifying a NumPy dtype object results in proper casting\r\n behaviour\"\"\"\r\n res = fn.cast(t, onp.dtype(\"float64\"))\r\n assert fn.get_interface(res) == fn.get_interface(t)\r\n\r\n if hasattr(res, \"numpy\"):\r\n # if tensorflow or pytorch, extract view of underlying data\r\n res = res.numpy()\r\n t = t.numpy()\r\n\r\n assert onp.issubdtype(onp.asarray(t).dtype, onp.integer)\r\n assert res.dtype.type is onp.float64\r\n\r\n @pytest.mark.parametrize(\"t\", test_data)\r\n def test_cast_numpy_string(self, t):\r\n \"\"\"Test that specifying a NumPy dtype via a string results in proper casting\r\n behaviour\"\"\"\r\n res = fn.cast(t, \"float64\")\r\n assert fn.get_interface(res) == fn.get_interface(t)\r\n\r\n if hasattr(res, \"numpy\"):\r\n # if tensorflow or pytorch, extract view of underlying data\r\n res = res.numpy()\r\n t = t.numpy()\r\n\r\n assert onp.issubdtype(onp.asarray(t).dtype, onp.integer)\r\n assert res.dtype.type is onp.float64\r\n\r\n def test_cast_tensorflow_dtype(self):\r\n \"\"\"If the tensor is a TensorFlow tensor, casting using a TensorFlow dtype\r\n will also work\"\"\"\r\n t = tf.Variable([1, 2, 3])\r\n res = fn.cast(t, tf.complex128)\r\n assert isinstance(res, tf.Tensor)\r\n assert res.dtype is tf.complex128\r\n\r\n def test_cast_torch_dtype(self):\r\n \"\"\"If the tensor is a Torch tensor, casting using a Torch dtype\r\n will also work\"\"\"\r\n t = torch.tensor([1, 2, 3], dtype=torch.int64)\r\n res = fn.cast(t, torch.float64)\r\n assert isinstance(res, torch.Tensor)\r\n assert res.dtype is torch.float64\r\n\r\n\r\ncast_like_test_data = [\r\n (1, 2, 3),\r\n [1, 2, 3],\r\n onp.array([1, 2, 3], dtype=onp.int64),\r\n np.array([1, 2, 3], dtype=np.int64),\r\n torch.tensor([1, 2, 3], dtype=torch.int64),\r\n tf.Variable([1, 2, 3], dtype=tf.int64),\r\n tf.constant([1, 2, 3], dtype=tf.int64),\r\n (1.0, 2.0, 3.0),\r\n [1.0, 2.0, 3.0],\r\n onp.array([1, 2, 3], dtype=onp.float64),\r\n np.array([1, 2, 3], dtype=np.float64),\r\n torch.tensor([1, 2, 3], dtype=torch.float64),\r\n tf.Variable([1, 2, 3], dtype=tf.float64),\r\n tf.constant([1, 2, 3], dtype=tf.float64),\r\n]\r\n\r\n\r\[email protected](\"t1,t2\", list(itertools.combinations(cast_like_test_data, r=2)))\r\ndef test_cast_like(t1, t2):\r\n \"\"\"Test that casting t1 like t2 results in t1 
being cast to the same datatype as t2"""\r\n res = fn.cast_like(t1, t2)\r\n\r\n # if tensorflow or pytorch, extract view of underlying data\r\n if hasattr(res, "numpy"):\r\n res = res.numpy()\r\n\r\n if hasattr(t2, "numpy"):\r\n t2 = t2.numpy()\r\n\r\n assert fn.allequal(res, t1)\r\n assert onp.asarray(res).dtype.type is onp.asarray(t2).dtype.type\r\n\r\n\r\nclass TestConcatenate:\r\n """Tests for the concatenate function"""\r\n\r\n def test_concatenate_array(self):\r\n """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension"""\r\n t1 = [0.6, 0.1, 0.6]\r\n t2 = np.array([0.1, 0.2, 0.3])\r\n t3 = onp.array([5.0, 8.0, 101.0])\r\n\r\n res = fn.concatenate([t1, t2, t3])\r\n assert isinstance(res, np.ndarray)\r\n assert np.all(res == np.concatenate([t1, t2, t3]))\r\n\r\n def test_concatenate_jax(self):\r\n """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension"""\r\n t1 = jnp.array([5.0, 8.0, 101.0])\r\n t2 = jnp.array([0.6, 0.1, 0.6])\r\n t3 = jnp.array([0.1, 0.2, 0.3])\r\n\r\n res = fn.concatenate([t1, t2, t3])\r\n assert jnp.all(res == jnp.concatenate([t1, t2, t3]))\r\n\r\n def test_concatenate_tensorflow(self):\r\n """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension"""\r\n t1 = tf.constant([0.6, 0.1, 0.6])\r\n t2 = tf.Variable([0.1, 0.2, 0.3])\r\n t3 = onp.array([5.0, 8.0, 101.0])\r\n\r\n res = fn.concatenate([t1, t2, t3])\r\n assert isinstance(res, tf.Tensor)\r\n assert np.all(res.numpy() == np.concatenate([t1.numpy(), t2.numpy(), t3]))\r\n\r\n def test_concatenate_torch(self):\r\n """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension"""\r\n t1 = onp.array([5.0, 8.0, 101.0], dtype=np.float64)\r\n t2 = torch.tensor([0.6, 0.1, 0.6], dtype=torch.float64)\r\n t3 = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64)\r\n\r\n res = fn.concatenate([t1, t2, t3])\r\n assert isinstance(res, torch.Tensor)\r\n assert np.all(res.numpy() == np.concatenate([t1, t2.numpy(), t3.numpy()]))\r\n\r\n @pytest.mark.parametrize("t1", [onp.array([[1], [2]]), torch.tensor([[1], [2]]), tf.constant([[1], [2]])])\r\n def test_concatenate_axis(self, t1):\r\n """Test that passing the axis argument allows for concatenating along\r\n a different axis"""\r\n t2 = onp.array([[3], [4]])\r\n res = fn.concatenate([t1, t2], axis=1)\r\n\r\n # if tensorflow or pytorch, extract view of underlying data\r\n if hasattr(res, "numpy"):\r\n res = res.numpy()\r\n\r\n assert fn.allclose(res, np.array([[1, 3], [2, 4]]))\r\n assert list(res.shape) == [2, 2]\r\n\r\n @pytest.mark.parametrize("t1", [onp.array([[1], [2]]), torch.tensor([[1], [2]]), tf.constant([[1], [2]])])\r\n def test_concatenate_flattened_arrays(self, t1):\r\n """Concatenating arrays with axis=None will result in all arrays being pre-flattened"""\r\n t2 = onp.array([5])\r\n res = fn.concatenate([t1, t2], axis=None)\r\n\r\n # if tensorflow or pytorch, extract view of underlying data\r\n if hasattr(res, "numpy"):\r\n res = res.numpy()\r\n\r\n assert fn.allclose(res, np.array([1, 2, 5]))\r\n assert list(res.shape) == [3]\r\n\r\n\r\nclass TestConvertLike:\r\n """Tests for the convert_like function"""\r\n\r\n @pytest.mark.parametrize("t1,t2", list(itertools.combinations(test_data, r=2)))\r\n def test_convert_tensor_like(self, t1, t2):\r\n """Test that converting t1 like t2 results in t1 being cast to the same tensor type as t2"""\r\n res = 
fn.convert_like(t1, t2)\r\n\r\n # if tensorflow or pytorch, extract view of underlying data\r\n if hasattr(res, "numpy"):\r\n res = res.numpy()\r\n\r\n if hasattr(t2, "numpy"):\r\n t2 = t2.numpy()\r\n\r\n assert fn.allequal(res, t1)\r\n assert isinstance(res, np.ndarray if isinstance(t2, (list, tuple)) else t2.__class__)\r\n\r\n @pytest.mark.parametrize("t_like", [np.array([1]), tf.constant([1]), torch.tensor([1])])\r\n def test_convert_scalar(self, t_like):\r\n """Test that a python scalar is converted to a scalar tensor"""\r\n res = fn.convert_like(5, t_like)\r\n assert isinstance(res, t_like.__class__)\r\n assert res.ndim == 0\r\n assert fn.allequal(res, [5])\r\n\r\n\r\nclass TestDot:\r\n """Tests for the dot product function"""\r\n scalar_product_data = [\r\n [2, 6],\r\n [np.array(2), np.array(6)],\r\n [torch.tensor(2), onp.array(6)],\r\n [torch.tensor(2), torch.tensor(6)],\r\n [tf.Variable(2), onp.array(6)],\r\n [tf.constant(2), onp.array(6)],\r\n [tf.Variable(2), tf.Variable(6)],\r\n [jnp.array(2), jnp.array(6)],\r\n ]\r\n\r\n @pytest.mark.parametrize("t1, t2", scalar_product_data)\r\n def test_scalar_product(self, t1, t2):\r\n """Test that the dot product of two scalars results in a scalar"""\r\n res = fn.dot(t1, t2)\r\n assert fn.allequal(res, 12)\r\n\r\n vector_product_data = [\r\n [[1, 2, 3], [1, 2, 3]],\r\n [np.array([1, 2, 3]), np.array([1, 2, 3])],\r\n [torch.tensor([1, 2, 3]), onp.array([1, 2, 3])],\r\n [torch.tensor([1, 2, 3]), torch.tensor([1, 2, 3])],\r\n [tf.Variable([1, 2, 3]), onp.array([1, 2, 3])],\r\n [tf.constant([1, 2, 3]), onp.array([1, 2, 3])],\r\n [tf.Variable([1, 2, 3]), tf.Variable([1, 2, 3])],\r\n [jnp.array([1, 2, 3]), jnp.array([1, 2, 3])],\r\n ]\r\n\r\n @pytest.mark.parametrize("t1, t2", vector_product_data)\r\n def test_vector_product(self, t1, t2):\r\n """Test that the dot product of two vectors results in a scalar"""\r\n res = fn.dot(t1, t2)\r\n assert fn.allequal(res, 14)\r\n\r\n matrix_vector_product_data = [\r\n [[[1, 2], [3, 4]], [6, 7]],\r\n [np.array([[1, 2], [3, 4]]), np.array([6, 7])],\r\n [torch.tensor([[1, 2], [3, 4]]), onp.array([6, 7])],\r\n [torch.tensor([[1, 2], [3, 4]]), torch.tensor([6, 7])],\r\n [tf.Variable([[1, 2], [3, 4]]), onp.array([6, 7])],\r\n [tf.constant([[1, 2], [3, 4]]), onp.array([6, 7])],\r\n [tf.Variable([[1, 2], [3, 4]]), tf.Variable([6, 7])],\r\n [jnp.array([[1, 2], [3, 4]]), jnp.array([6, 7])],\r\n [np.array([[1, 2], [3, 4]]), jnp.array([6, 7])],\r\n ]\r\n\r\n @pytest.mark.parametrize("t1, t2", matrix_vector_product_data)\r\n def test_matrix_vector_product(self, t1, t2):\r\n """Test that the dot product of a matrix and a vector results in a vector"""\r\n res = fn.dot(t1, t2)\r\n assert fn.allequal(res, [20, 46])\r\n\r\n @pytest.mark.parametrize("t1, t2", matrix_vector_product_data)\r\n def test_vector_matrix_product(self, t1, t2):\r\n """Test that the dot product of a vector and a matrix results in a vector"""\r\n res = fn.dot(t2, t1)\r\n assert fn.allequal(res, [27, 40])\r\n\r\n @pytest.mark.parametrize("t1, t2", matrix_vector_product_data)\r\n def test_matrix_matrix_product(self, t1, t2):\r\n """Test that the dot product of two matrices results in a matrix"""\r\n res = fn.dot(t1, t1)\r\n assert fn.allequal(res, np.array([[7, 10], [15, 22]]))\r\n\r\n multidim_product_data = [\r\n [np.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]), np.array([[[1, 1], [3, 3]], [[3, 1], [3, 2]]])],\r\n [torch.tensor([[[1, 2], [3, 4], [-1, 1]], 
[[5, 6], [0, -1], [2, 1]]]), onp.array([[[1, 1], [3, 3]], [[3, 1], [3, 2]]])],\r\n [torch.tensor([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]), torch.tensor([[[1, 1], [3, 3]], [[3, 1], [3, 2]]])],\r\n [onp.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]), tf.Variable([[[1, 1], [3, 3]], [[3, 1], [3, 2]]])],\r\n [tf.constant([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]), onp.array([[[1, 1], [3, 3]], [[3, 1], [3, 2]]])],\r\n [tf.Variable([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]), tf.constant([[[1, 1], [3, 3]], [[3, 1], [3, 2]]])],\r\n [jnp.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]), jnp.array([[[1, 1], [3, 3]], [[3, 1], [3, 2]]])],\r\n ]\r\n\r\n @pytest.mark.parametrize("t1, t2", multidim_product_data)\r\n def test_multidimensional_product(self, t1, t2):\r\n """Test that the multi-dimensional dot product reduces across the last dimension of the first\r\n tensor, and the second-to-last dimension of the second tensor."""\r\n res = fn.dot(t1, t2)\r\n expected = np.array([[[[ 7, 7],\r\n [ 9, 5]],\r\n\r\n [[15, 15],\r\n [21, 11]],\r\n\r\n [[ 2, 2],\r\n [ 0, 1]]],\r\n\r\n\r\n [[[23, 23],\r\n [33, 17]],\r\n\r\n [[-3, -3],\r\n [-3, -2]],\r\n\r\n [[ 5, 5],\r\n [ 9, 4]]]]\r\n )\r\n assert fn.allequal(res, expected)\r\n\r\n\r\n# the following test data is of the form\r\n# [original shape, axis to expand, new shape]\r\nexpand_dims_test_data = [\r\n [tuple(), 0, (1,)],\r\n [(3,), 0, (1, 3)],\r\n [(3,), 1, (3, 1)],\r\n [(2, 2), 0, (1, 2, 2)],\r\n [(2, 2), 1, (2, 1, 2)],\r\n [(2, 2), 2, (2, 2, 1)],\r\n]\r\n\r\n\r\[email protected]("shape,axis,new_shape", expand_dims_test_data)\r\nclass TestExpandDims:\r\n """Tests for the expand_dims function"""\r\n\r\n def test_expand_dims_sequence(self, shape, axis, new_shape):\r\n """Test that expand_dims works correctly\r\n when given a sequence"""\r\n if not shape:\r\n pytest.skip("Cannot expand the dimensions of a Python scalar!")\r\n\r\n t1 = np.empty(shape).tolist()\r\n t2 = fn.expand_dims(t1, axis=axis)\r\n assert t2.shape == new_shape\r\n\r\n def test_expand_dims_array(self, shape, axis, new_shape):\r\n """Test that expand_dims works correctly\r\n when given an array"""\r\n t1 = np.empty(shape)\r\n t2 = fn.expand_dims(t1, axis=axis)\r\n assert t2.shape == new_shape\r\n assert isinstance(t2, np.ndarray)\r\n\r\n def test_expand_dims_torch(self, shape, axis, new_shape):\r\n """Test that expand_dims works correctly\r\n when given a torch tensor"""\r\n t1 = torch.empty(shape)\r\n t2 = fn.expand_dims(t1, axis=axis)\r\n assert t2.shape == new_shape\r\n assert isinstance(t2, torch.Tensor)\r\n\r\n def test_expand_dims_tf(self, shape, axis, new_shape):\r\n """Test that expand_dims works correctly\r\n when given a TF tensor"""\r\n t1 = tf.ones(shape)\r\n t2 = fn.expand_dims(t1, axis=axis)\r\n assert t2.shape == new_shape\r\n assert isinstance(t2, tf.Tensor)\r\n\r\n\r\ninterface_test_data = [\r\n [(1, 2, 3), "numpy"],\r\n [[1, 2, 3], "numpy"],\r\n [onp.array([1, 2, 3]), "numpy"],\r\n [np.array([1, 2, 3]), "autograd"],\r\n [torch.tensor([1, 2, 3]), "torch"],\r\n [tf.Variable([1, 2, 3]), "tf"],\r\n [tf.constant([1, 2, 3]), "tf"],\r\n [jnp.array([1, 2, 3]), "jax"],\r\n]\r\n\r\n\r\[email protected]("t,interface", interface_test_data)\r\ndef test_get_interface(t, interface):\r\n """Test that the interface of a tensor-like object\r\n is correctly returned."""\r\n res = fn.get_interface(t)\r\n assert res == 
interface\r\n\r\n\r\[email protected]("t", test_data)\r\ndef test_toarray(t):\r\n """Test that the toarray method correctly converts the input\r\n tensor into a NumPy array."""\r\n res = fn.toarray(t)\r\n assert fn.allequal(res, t)\r\n assert isinstance(res, onp.ndarray)\r\n\r\n\r\nclass TestOnesLike:\r\n """Tests for the ones_like function"""\r\n\r\n @pytest.mark.parametrize("t", cast_like_test_data)\r\n def test_ones_like_inferred_dtype(self, t):\r\n """Test that the ones_like function creates the correct\r\n shape and type tensor."""\r\n res = fn.ones_like(t)\r\n\r\n if isinstance(t, (list, tuple)):\r\n t = onp.asarray(t)\r\n\r\n assert res.shape == t.shape\r\n assert fn.get_interface(res) == fn.get_interface(t)\r\n assert fn.allclose(res, np.ones(t.shape))\r\n\r\n # if tensorflow or pytorch, extract view of underlying data\r\n if hasattr(res, "numpy"):\r\n res = res.numpy()\r\n t = t.numpy()\r\n\r\n assert onp.asarray(res).dtype.type is onp.asarray(t).dtype.type\r\n\r\n @pytest.mark.parametrize("t", cast_like_test_data)\r\n def test_ones_like_explicit_dtype(self, t):\r\n """Test that the ones_like function creates the correct\r\n shape and type tensor."""\r\n res = fn.ones_like(t, dtype=np.float16)\r\n\r\n if isinstance(t, (list, tuple)):\r\n t = onp.asarray(t)\r\n\r\n assert res.shape == t.shape\r\n assert fn.get_interface(res) == fn.get_interface(t)\r\n assert fn.allclose(res, np.ones(t.shape))\r\n\r\n # if tensorflow or pytorch, extract view of underlying data\r\n if hasattr(res, "numpy"):\r\n res = res.numpy()\r\n t = t.numpy()\r\n\r\n assert onp.asarray(res).dtype.type is np.float16\r\n\r\n\r\nclass TestRequiresGrad:\r\n """Tests for the requires_grad function"""\r\n\r\n @pytest.mark.parametrize("t", [(1, 2, 3), [1, 2, 3], onp.array([1, 2, 3])])\r\n def test_numpy(self, t):\r\n """Vanilla NumPy arrays, sequences, and lists will always return False"""\r\n assert not fn.requires_grad(t)\r\n\r\n @pytest.mark.parametrize("t", [jnp.array([1, 2, 3])])\r\n def test_jax(self, t):\r\n """jax.DeviceArrays will always return True"""\r\n assert fn.requires_grad(t)\r\n\r\n def test_autograd(self):\r\n """Autograd arrays will simply return their requires_grad attribute"""\r\n t = np.array([1.0, 2.0], requires_grad=True)\r\n assert fn.requires_grad(t)\r\n\r\n t = np.array([1.0, 2.0], requires_grad=False)\r\n assert not fn.requires_grad(t)\r\n\r\n def test_torch(self):\r\n """Torch tensors will simply return their requires_grad attribute"""\r\n t = torch.tensor([1.0, 2.0], requires_grad=True)\r\n assert fn.requires_grad(t)\r\n\r\n t = torch.tensor([1.0, 2.0], requires_grad=False)\r\n assert not fn.requires_grad(t)\r\n\r\n def test_tf(self):\r\n """TensorFlow tensors will return True *if* they are being watched by a gradient tape"""\r\n t1 = tf.Variable([1.0, 2.0])\r\n t2 = tf.constant([1.0, 2.0])\r\n assert not fn.requires_grad(t1)\r\n assert not fn.requires_grad(t2)\r\n\r\n with tf.GradientTape():\r\n # variables are automatically watched within a context,\r\n # but constants are not\r\n assert fn.requires_grad(t1)\r\n assert not fn.requires_grad(t2)\r\n\r\n with tf.GradientTape() as tape:\r\n # watching makes all tensors trainable\r\n tape.watch([t1, t2])\r\n assert fn.requires_grad(t1)\r\n assert fn.requires_grad(t2)\r\n\r\n\r\nshape_test_data = [\r\n tuple(),\r\n (3,),\r\n (2, 2),\r\n (3, 2, 2),\r\n (2, 1, 1, 2),\r\n]\r\n\r\n\r\[email protected](\r\n "interface,create_array",\r\n [\r\n ("sequence", lambda shape: 
np.empty(shape).tolist()),\r\n (\"autograd\", np.empty),\r\n (\"torch\", torch.empty),\r\n (\"jax\", jnp.ones),\r\n (\"tf\", tf.ones),\r\n ],\r\n)\r\[email protected](\"shape\", shape_test_data)\r\ndef test_shape(shape, interface, create_array):\r\n \"\"\"Test that the shape of tensors is correctly returned\"\"\"\r\n if interface == \"sequence\" and not shape:\r\n pytest.skip(\"Cannot expand the dimensions of a Python scalar!\")\r\n\r\n t = create_array(shape)\r\n assert fn.shape(t) == shape\r\n\r\n\r\[email protected](\"t\", test_data)\r\ndef test_sqrt(t):\r\n \"\"\"Test that the square root function works for a variety\r\n of input\"\"\"\r\n res = fn.sqrt(t)\r\n assert fn.allclose(res, [1, np.sqrt(2), np.sqrt(3)])\r\n\r\n\r\nclass TestStack:\r\n \"\"\"Tests for the stack function\"\"\"\r\n\r\n def test_stack_array(self):\r\n \"\"\"Test that stack, called without the axis arguments, stacks vertically\"\"\"\r\n t1 = [0.6, 0.1, 0.6]\r\n t2 = np.array([0.1, 0.2, 0.3])\r\n t3 = onp.array([5.0, 8.0, 101.0])\r\n\r\n res = fn.stack([t1, t2, t3])\r\n assert isinstance(res, np.ndarray)\r\n assert np.all(res == np.stack([t1, t2, t3]))\r\n\r\n\r\n def test_stack_array_jax(self):\r\n \"\"\"Test that stack, called without the axis arguments, stacks vertically\"\"\"\r\n t1 = onp.array([0.6, 0.1, 0.6])\r\n t2 = jnp.array([0.1, 0.2, 0.3])\r\n t3 = jnp.array([5.0, 8.0, 101.0])\r\n\r\n res = fn.stack([t1, t2, t3])\r\n assert np.all(res == np.stack([t1, t2, t3]))\r\n\r\n\r\n def test_stack_tensorflow(self):\r\n \"\"\"Test that stack, called without the axis arguments, stacks vertically\"\"\"\r\n t1 = tf.constant([0.6, 0.1, 0.6])\r\n t2 = tf.Variable([0.1, 0.2, 0.3])\r\n t3 = onp.array([5.0, 8.0, 101.0])\r\n\r\n res = fn.stack([t1, t2, t3])\r\n assert isinstance(res, tf.Tensor)\r\n assert np.all(res.numpy() == np.stack([t1.numpy(), t2.numpy(), t3]))\r\n\r\n def test_stack_torch(self):\r\n \"\"\"Test that stack, called without the axis arguments, stacks vertically\"\"\"\r\n t1 = onp.array([5.0, 8.0, 101.0], dtype=np.float64)\r\n t2 = torch.tensor([0.6, 0.1, 0.6], dtype=torch.float64)\r\n t3 = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64)\r\n\r\n res = fn.stack([t1, t2, t3])\r\n assert isinstance(res, torch.Tensor)\r\n assert np.all(res.numpy() == np.stack([t1, t2.numpy(), t3.numpy()]))\r\n\r\n @pytest.mark.parametrize(\"t1\", [onp.array([1, 2]), torch.tensor([1, 2]), tf.constant([1, 2])])\r\n def test_stack_axis(self, t1):\r\n \"\"\"Test that passing the axis argument allows for stacking along\r\n a different axis\"\"\"\r\n t2 = onp.array([3, 4])\r\n res = fn.stack([t1, t2], axis=1)\r\n\r\n # if tensorflow or pytorch, extract view of underlying data\r\n if hasattr(res, \"numpy\"):\r\n res = res.numpy()\r\n\r\n assert fn.allclose(res, np.array([[1, 3], [2, 4]]))\r\n assert list(res.shape) == [2, 2]\r\n\r\n\r\nclass TestSum:\r\n \"\"\"Tests for the summation function\"\"\"\r\n\r\n def test_array(self):\r\n \"\"\"Test that sum, called without the axis arguments, returns a scalar\"\"\"\r\n t = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])\r\n res = fn.sum_(t)\r\n assert isinstance(res, np.ndarray)\r\n assert fn.allclose(res, 2.1)\r\n\r\n def test_tensorflow(self):\r\n \"\"\"Test that sum, called without the axis arguments, returns a scalar\"\"\"\r\n t = tf.Variable([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])\r\n res = fn.sum_(t)\r\n assert isinstance(res, tf.Tensor)\r\n assert fn.allclose(res, 2.1)\r\n\r\n def test_torch(self):\r\n \"\"\"Test that sum, called without the axis arguments, returns a scalar\"\"\"\r\n t = 
torch.tensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])\r\n res = fn.sum_(t)\r\n assert isinstance(res, torch.Tensor)\r\n assert fn.allclose(res, 2.1)\r\n\r\n def test_jax(self):\r\n \"\"\"Test that sum, called without the axis arguments, returns a scalar\"\"\"\r\n t = jnp.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])\r\n res = fn.sum_(t)\r\n assert fn.allclose(res, 2.1)\r\n\r\n\r\n @pytest.mark.parametrize(\"t1\", [\r\n np.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n torch.tensor([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n tf.constant([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n jnp.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n ])\r\n def test_sum_axis(self, t1):\r\n \"\"\"Test that passing the axis argument allows for summing along\r\n a specific axis\"\"\"\r\n res = fn.sum_(t1, axis=(0, 2))\r\n\r\n # if tensorflow or pytorch, extract view of underlying data\r\n if hasattr(res, \"numpy\"):\r\n res = res.numpy()\r\n\r\n assert fn.allclose(res, np.array([14, 6, 3]))\r\n assert res.shape == (3,)\r\n\r\n @pytest.mark.parametrize(\"t1\", [\r\n np.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n torch.tensor([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n tf.constant([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n jnp.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]])\r\n ])\r\n def test_sum_axis_keepdims(self, t1):\r\n \"\"\"Test that passing the axis argument allows for summing along\r\n a specific axis, while keepdims avoids the summed dimensions from being removed\"\"\"\r\n res = fn.sum_(t1, axis=(0, 2), keepdims=True)\r\n\r\n # if tensorflow or pytorch, extract view of underlying data\r\n if hasattr(res, \"numpy\"):\r\n res = res.numpy()\r\n\r\n assert fn.allclose(res, np.array([[[14], [6], [3]]]))\r\n assert res.shape == (1, 3, 1)\r\n\r\n\r\[email protected](\"t\", test_data)\r\ndef test_T(t):\r\n \"\"\"Test the simple transpose (T) function\"\"\"\r\n res = fn.T(t)\r\n\r\n if isinstance(t, (list, tuple)):\r\n t = onp.asarray(t)\r\n\r\n assert fn.get_interface(res) == fn.get_interface(t)\r\n\r\n # if tensorflow or pytorch, extract view of underlying data\r\n if hasattr(res, \"numpy\"):\r\n res = res.numpy()\r\n t = t.numpy()\r\n\r\n assert np.all(res.T == t.T)\r\n\r\n\r\nclass TestTake:\r\n \"\"\"Tests for the qml.take function\"\"\"\r\n\r\n take_data = [\r\n np.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n torch.tensor([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n onp.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n tf.constant([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n tf.Variable([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n jnp.asarray([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n ]\r\n\r\n @pytest.mark.parametrize(\"t\", take_data)\r\n def test_flattened_indexing(self, t):\r\n \"\"\"Test that indexing without the axis argument\r\n will flatten the tensor first\"\"\"\r\n indices = 5\r\n res = fn.take(t, indices)\r\n assert fn.allclose(res, 1)\r\n\r\n @pytest.mark.parametrize(\"t\", take_data)\r\n def test_array_indexing(self, t):\r\n \"\"\"Test that indexing with a sequence properly extracts\r\n the elements from the flattened tensor\"\"\"\r\n indices = [0, 2, 3, 6, -2]\r\n res = fn.take(t, indices)\r\n assert fn.allclose(res, [1, 3, 4, 5, 2])\r\n\r\n @pytest.mark.parametrize(\"t\", take_data)\r\n def test_multidimensional_indexing(self, t):\r\n \"\"\"Test that 
indexing with a multi-dimensional sequence properly extracts\r\n the elements from the flattened tensor\"\"\"\r\n indices = [[0, 1], [3, 2]]\r\n res = fn.take(t, indices)\r\n assert fn.allclose(res, [[1, 2], [4, 3]])\r\n\r\n @pytest.mark.parametrize(\"t\", take_data)\r\n def test_array_indexing_along_axis(self, t):\r\n \"\"\"Test that indexing with a sequence properly extracts\r\n the elements from the specified tensor axis\"\"\"\r\n indices = [0, 1, -2]\r\n res = fn.take(t, indices, axis=2)\r\n expected = np.array([\r\n [[ 1, 2, 1],\r\n [ 3, 4, 3],\r\n [-1, 1, -1]],\r\n [[ 5, 6, 5],\r\n [ 0, -1, 0],\r\n [ 2, 1, 2]]\r\n ])\r\n assert fn.allclose(res, expected)\r\n\r\n @pytest.mark.parametrize(\"t\", take_data)\r\n def test_multidimensional_indexing_along_axis(self, t):\r\n \"\"\"Test that indexing with a sequence properly extracts\r\n the elements from the specified tensor axis\"\"\"\r\n indices = np.array([[0, 0], [1, 0]])\r\n res = fn.take(t, indices, axis=1)\r\n expected = np.array(\r\n [\r\n [\r\n [[ 1, 2],\r\n [ 1, 2]],\r\n [[ 3, 4],\r\n [ 1, 2]]\r\n ],\r\n [\r\n [[ 5, 6],\r\n [ 5, 6]],\r\n [[ 0, -1],\r\n [ 5, 6]]\r\n ]\r\n ]\r\n )\r\n assert fn.allclose(res, expected)\r\n\r\n\r\nwhere_data = [\r\n np.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n torch.tensor([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n onp.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n tf.constant([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n tf.Variable([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n jnp.array([[[1, 2], [3, 4], [-1, 1]], [[5, 6], [0, -1], [2, 1]]]),\r\n]\r\n\r\n\r\[email protected](\"t\", where_data)\r\ndef test_where(t):\r\n \"\"\"Test that the where function works as expected\"\"\"\r\n res = fn.where(t < 0, 100 * fn.ones_like(t), t)\r\n expected = np.array([[[1, 2], [3, 4], [100, 1]], [[5, 6], [0, 100], [2, 1]]])\r\n assert fn.allclose(res, expected)\r\n\r\nsqueeze_data = [\r\n np.ones((1, 2, 3, 1, 5, 1)),\r\n torch.ones((1, 2, 3, 1, 5, 1)),\r\n tf.ones((1, 2, 3, 1, 5, 1)),\r\n jnp.ones((1, 2, 3, 1, 5, 1)),\r\n onp.ones((1, 2, 3, 1, 5, 1))\r\n]\r\n\r\[email protected](\"t\", squeeze_data)\r\ndef test_squeeze(t):\r\n \"\"\"Test that the squeeze function works as expected\"\"\"\r\n res = fn.squeeze(t)\r\n assert res.shape == (2, 3, 5)\r\n\r\n\r\nclass TestScatterElementAdd:\r\n \"\"\"Tests for the scatter_element_add function\"\"\"\r\n\r\n def test_array(self):\r\n \"\"\"Test that a NumPy array is differentiable when using scatter addition\"\"\"\r\n x = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], requires_grad=True)\r\n y = np.array(0.56, requires_grad=True)\r\n\r\n def cost(weights):\r\n return fn.scatter_element_add(weights[0], [1, 2], weights[1] ** 2)\r\n\r\n res = cost([x, y])\r\n assert isinstance(res, np.ndarray)\r\n assert fn.allclose(res, onp.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.3136]]))\r\n\r\n grad = qml.grad(lambda weights: cost(weights)[1, 2])([x, y])\r\n assert fn.allclose(grad[0], onp.array([[0, 0, 0], [0, 0, 1.]]))\r\n assert fn.allclose(grad[1], 2 * y)\r\n\r\n def test_tensorflow(self):\r\n \"\"\"Test that a TF tensor is differentiable when using scatter addition\"\"\"\r\n x = tf.Variable([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])\r\n y = tf.Variable(0.56)\r\n\r\n with tf.GradientTape() as tape:\r\n res = fn.scatter_element_add(x, [1, 2], y ** 2)\r\n loss = res[1, 2]\r\n\r\n assert isinstance(res, tf.Tensor)\r\n assert fn.allclose(res, onp.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.3136]]))\r\n\r\n grad = 
tape.gradient(loss, [x, y])\r\n assert fn.allclose(grad[0], onp.array([[0, 0, 0], [0, 0, 1.]]))\r\n assert fn.allclose(grad[1], 2 * y)\r\n\r\n def test_torch(self):\r\n \"\"\"Test that a torch tensor is differentiable when using scatter addition\"\"\"\r\n x = torch.tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], requires_grad=True)\r\n y = torch.tensor(0.56, requires_grad=True)\r\n\r\n res = fn.scatter_element_add(x, [1, 2], y ** 2)\r\n loss = res[1, 2]\r\n\r\n assert isinstance(res, torch.Tensor)\r\n assert fn.allclose(res.detach(), onp.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.3136]]))\r\n\r\n loss.backward()\r\n assert fn.allclose(x.grad, onp.array([[0, 0, 0], [0, 0, 1.]]))\r\n assert fn.allclose(y.grad, 2 * y)\r\n\r\n def test_jax(self):\r\n \"\"\"Test that a JAX array is differentiable when using scatter addition\"\"\"\r\n x = jnp.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])\r\n y = jnp.array(0.56)\r\n\r\n def cost(weights):\r\n return fn.scatter_element_add(weights[0], [1, 2], weights[1] ** 2)\r\n\r\n res = cost([x, y])\r\n assert isinstance(res, jax.interpreters.xla.DeviceArray)\r\n assert fn.allclose(res, onp.array([[1.0, 1.0, 1.0], [1.0, 1.0, 1.3136]]))\r\n\r\n grad = jax.grad(lambda weights: cost(weights)[1, 2])([x, y])\r\n assert fn.allclose(grad[0], onp.array([[0, 0, 0], [0, 0, 1.]]))\r\n assert fn.allclose(grad[1], 2 * y)\r\n\r\n\r\nclass TestDiag:\r\n \"\"\"Tests for the diag function\"\"\"\r\n\r\n @pytest.mark.parametrize(\"a, interface\", [[np.array(0.5), \"autograd\"], [tf.Variable(0.5), \"tf\"], [torch.tensor(0.5), \"torch\"]])\r\n def test_sequence(self, a, interface):\r\n \"\"\"Test that a sequence is automatically converted into\r\n a diagonal tensor\"\"\"\r\n t = [0.1, 0.2, a]\r\n res = fn.diag(t)\r\n assert fn.get_interface(res) == interface\r\n assert fn.allclose(res, onp.diag([0.1, 0.2, 0.5]))\r\n\r\n def test_array(self):\r\n \"\"\"Test that a NumPy array is automatically converted into\r\n a diagonal tensor\"\"\"\r\n t = np.array([0.1, 0.2, 0.3])\r\n res = fn.diag(t)\r\n assert isinstance(res, np.ndarray)\r\n assert fn.allclose(res, onp.diag([0.1, 0.2, 0.3]))\r\n\r\n res = fn.diag(t, k=1)\r\n assert fn.allclose(res, onp.diag([0.1, 0.2, 0.3], k=1))\r\n\r\n def test_tensorflow(self):\r\n \"\"\"Test that a tensorflow tensor is automatically converted into\r\n a diagonal tensor\"\"\"\r\n t = tf.Variable([0.1, 0.2, 0.3])\r\n res = fn.diag(t)\r\n assert isinstance(res, tf.Tensor)\r\n assert fn.allclose(res, onp.diag([0.1, 0.2, 0.3]))\r\n\r\n res = fn.diag(t, k=1)\r\n assert fn.allclose(res, onp.diag([0.1, 0.2, 0.3], k=1))\r\n\r\n def test_torch(self):\r\n \"\"\"Test that a torch tensor is automatically converted into\r\n a diagonal tensor\"\"\"\r\n t = torch.tensor([0.1, 0.2, 0.3])\r\n res = fn.diag(t)\r\n assert isinstance(res, torch.Tensor)\r\n assert fn.allclose(res, onp.diag([0.1, 0.2, 0.3]))\r\n\r\n res = fn.diag(t, k=1)\r\n assert fn.allclose(res, onp.diag([0.1, 0.2, 0.3], k=1))\r\n\r\n def test_jax(self):\r\n \"\"\"Test that a jax array is automatically converted into\r\n a diagonal tensor\"\"\"\r\n t = jnp.array([0.1, 0.2, 0.3])\r\n res = fn.diag(t)\r\n assert fn.allclose(res, onp.diag([0.1, 0.2, 0.3]))\r\n\r\n res = fn.diag(t, k=1)\r\n assert fn.allclose(res, onp.diag([0.1, 0.2, 0.3], k=1))\r\n\r\n\r\nclass TestCovMatrix:\r\n \"\"\"Tests for the cov matrix function\"\"\"\r\n obs_list = [qml.PauliZ(0) @ qml.PauliZ(1), qml.PauliY(2)]\r\n\r\n @staticmethod\r\n def ansatz(weights, wires):\r\n \"\"\"Circuit ansatz for testing\"\"\"\r\n qml.RY(weights[0], wires=wires[0])\r\n 
qml.RX(weights[1], wires=wires[1])\r\n qml.RX(weights[2], wires=wires[2])\r\n qml.CNOT(wires=[wires[0], wires[1]])\r\n qml.CNOT(wires=[wires[1], wires[2]])\r\n\r\n @staticmethod\r\n def expected_cov(weights):\r\n \"\"\"Analytic covariance matrix for ansatz and obs_list\"\"\"\r\n a, b, c = weights\r\n return np.array([\r\n [np.sin(b) ** 2, -np.cos(a) * np.sin(b) ** 2 * np.sin(c)],\r\n [-np.cos(a) * np.sin(b) ** 2 * np.sin(c), 1 - np.cos(a) ** 2 * np.cos(b) ** 2 * np.sin(c) ** 2]\r\n ])\r\n\r\n @staticmethod\r\n def expected_grad(weights):\r\n \"\"\"Analytic covariance matrix gradient for ansatz and obs_list\"\"\"\r\n a, b, c = weights\r\n return np.array([\r\n np.sin(a) * np.sin(b) ** 2 * np.sin(c),\r\n -2 * np.cos(a) * np.cos(b) * np.sin(b) * np.sin(c),\r\n -np.cos(a) * np.cos(c) * np.sin(b) ** 2\r\n ])\r\n\r\n def test_weird_wires(self, tol):\r\n \"\"\"Test that the covariance matrix computes the correct\r\n result when weird wires are used\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=[\"a\", -1, \"q\"])\r\n obs_list = [qml.PauliZ(\"a\") @ qml.PauliZ(-1), qml.PauliY(\"q\")]\r\n\r\n @qml.qnode(dev, interface=\"autograd\")\r\n def circuit(weights):\r\n \"\"\"Returns the shared probability distribution of ansatz\r\n in the joint basis for obs_list\"\"\"\r\n self.ansatz(weights, wires=dev.wires)\r\n\r\n for o in obs_list:\r\n o.diagonalizing_gates()\r\n\r\n return qml.probs(wires=dev.wires)\r\n\r\n def cov(weights):\r\n probs = circuit(weights)\r\n return fn.cov_matrix(probs, obs_list, wires=dev.wires)\r\n\r\n weights = np.array([0.1, 0.2, 0.3])\r\n res = cov(weights)\r\n expected = self.expected_cov(weights)\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n grad_fn = qml.grad(lambda weights: cov(weights)[0, 1])\r\n res = grad_fn(weights)\r\n expected = self.expected_grad(weights)\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_autograd(self, tol):\r\n \"\"\"Test that the covariance matrix computes the correct\r\n result, and is differentiable, using the Autograd interface\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n @qml.qnode(dev, interface=\"autograd\")\r\n def circuit(weights):\r\n \"\"\"Returns the shared probability distribution of ansatz\r\n in the joint basis for obs_list\"\"\"\r\n self.ansatz(weights, wires=dev.wires)\r\n\r\n for o in self.obs_list:\r\n o.diagonalizing_gates()\r\n\r\n return qml.probs(wires=[0, 1, 2])\r\n\r\n def cov(weights):\r\n probs = circuit(weights)\r\n return fn.cov_matrix(probs, self.obs_list)\r\n\r\n weights = np.array([0.1, 0.2, 0.3])\r\n res = cov(weights)\r\n expected = self.expected_cov(weights)\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n grad_fn = qml.grad(lambda weights: cov(weights)[0, 1])\r\n res = grad_fn(weights)\r\n expected = self.expected_grad(weights)\r\n assert np.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n def test_torch(self, tol):\r\n \"\"\"Test that the covariance matrix computes the correct\r\n result, and is differentiable, using the Torch interface\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n @qml.qnode(dev, interface=\"torch\")\r\n def circuit(weights):\r\n \"\"\"Returns the shared probability distribution of ansatz\r\n in the joint basis for obs_list\"\"\"\r\n self.ansatz(weights, wires=dev.wires)\r\n\r\n for o in self.obs_list:\r\n o.diagonalizing_gates()\r\n\r\n return qml.probs(wires=[0, 1, 2])\r\n\r\n weights = np.array([0.1, 0.2, 0.3])\r\n weights_t = torch.tensor(weights, requires_grad=True)\r\n probs = 
circuit(weights_t)\r\n res = fn.cov_matrix(probs, self.obs_list)\r\n expected = self.expected_cov(weights)\r\n assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0)\r\n\r\n loss = res[0, 1]\r\n loss.backward()\r\n res = weights_t.grad\r\n expected = self.expected_grad(weights)\r\n assert np.allclose(res.detach().numpy(), expected, atol=tol, rtol=0)\r\n\r\n def test_tf(self, tol):\r\n \"\"\"Test that the covariance matrix computes the correct\r\n result, and is differentiable, using the TF interface\"\"\"\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n @qml.qnode(dev, interface=\"tf\")\r\n def circuit(weights):\r\n \"\"\"Returns the shared probability distribution of ansatz\r\n in the joint basis for obs_list\"\"\"\r\n self.ansatz(weights, wires=dev.wires)\r\n\r\n for o in self.obs_list:\r\n o.diagonalizing_gates()\r\n\r\n return qml.probs(wires=[0, 1, 2])\r\n\r\n weights = np.array([0.1, 0.2, 0.3])\r\n weights_t = tf.Variable(weights)\r\n\r\n with tf.GradientTape() as tape:\r\n probs = circuit(weights_t)\r\n cov = fn.cov_matrix(probs, self.obs_list)\r\n loss = cov[0, 1]\r\n\r\n expected = self.expected_cov(weights)\r\n assert np.allclose(cov, expected, atol=tol, rtol=0)\r\n\r\n grad = tape.gradient(loss, weights_t)\r\n expected = self.expected_grad(weights)\r\n assert np.allclose(grad, expected, atol=tol, rtol=0)\r\n\r\n def test_jax(self, tol):\r\n \"\"\"Test that the covariance matrix computes the correct\r\n result, and is differentiable, using the JAX interface\"\"\"\r\n dev = qml.device(\"default.qubit.jax\", wires=3)\r\n\r\n @qml.qnode(dev, interface=\"jax\", diff_method=\"backprop\")\r\n def circuit(weights):\r\n \"\"\"Returns the shared probability distribution of ansatz\r\n in the joint basis for obs_list\"\"\"\r\n self.ansatz(weights, wires=dev.wires)\r\n\r\n for o in self.obs_list:\r\n o.diagonalizing_gates()\r\n\r\n return qml.probs(wires=[0, 1, 2])\r\n\r\n def cov(weights):\r\n probs = circuit(weights)\r\n return fn.cov_matrix(probs, self.obs_list)\r\n\r\n weights = jnp.array([0.1, 0.2, 0.3])\r\n res = cov(weights)\r\n expected = self.expected_cov(weights)\r\n assert jnp.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n grad_fn = jax.grad(lambda weights: cov(weights)[0, 1])\r\n res = grad_fn(weights)\r\n expected = self.expected_grad(weights)\r\n assert jnp.allclose(res, expected, atol=tol, rtol=0)\r\n\r\n\r\nblock_diag_data = [\r\n [\r\n onp.array([[1, 2], [3, 4]]),\r\n torch.tensor([[1, 2], [-1, -6]]),\r\n torch.tensor([[5]])\r\n ],\r\n [\r\n onp.array([[1, 2], [3, 4]]),\r\n tf.Variable([[1, 2], [-1, -6]]),\r\n tf.constant([[5]])\r\n ],\r\n [\r\n np.array([[1, 2], [3, 4]]),\r\n np.array([[1, 2], [-1, -6]]),\r\n np.array([[5]])\r\n ],\r\n [\r\n jnp.array([[1, 2], [3, 4]]),\r\n jnp.array([[1, 2], [-1, -6]]),\r\n jnp.array([[5]])\r\n ]\r\n]\r\n\r\n\r\[email protected](\"tensors\", block_diag_data)\r\ndef test_block_diag(tensors):\r\n \"\"\"Tests for the block diagonal function\"\"\"\r\n res = fn.block_diag(tensors)\r\n expected = np.array([\r\n [ 1, 2, 0, 0, 0],\r\n [ 3, 4, 0, 0, 0],\r\n [ 0, 0, 1, 2, 0],\r\n [ 0, 0, -1, -6, 0],\r\n [ 0, 0, 0, 0, 5]\r\n ])\r\n assert fn.allclose(res, expected)\r\n\r\n\r\ngather_data = [\r\n torch.tensor([[1, 2, 3], [-1, -6, -3]]),\r\n tf.Variable([[1, 2, 3], [-1, -6, -3]]),\r\n jnp.array([[1, 2, 3], [-1, -6, -3]]),\r\n np.array([[1, 2, 3], [-1, -6, -3]])\r\n]\r\n\r\n\r\[email protected](\"tensor\", gather_data)\r\ndef test_gather(tensor):\r\n \"\"\"Tests for the gather function\"\"\"\r\n indices = [1, 0]\r\n res = 
fn.gather(tensor, indices)\r\n expected = np.array([\r\n [-1, -6, -3],\r\n [ 1, 2, 3]\r\n ])\r\n assert fn.allclose(res, expected)\r\n" ]
[ [ "tensorflow.keras.backend.floatx", "tensorflow.__version__.split", "tensorflow.TensorShape", "tensorflow.unstack", "tensorflow.shape", "tensorflow.stack" ], [ "numpy.random.random", "numpy.sqrt", "numpy.random.seed", "numpy.linspace", "numpy.allclose", "numpy.conj", "numpy.kron", "numpy.linalg.norm", "numpy.cos", "numpy.sin", "numpy.all", "numpy.ravel_multi_index", "numpy.exp", "numpy.array", "numpy.zeros" ], [ "numpy.diag", "numpy.asarray", "numpy.dtype", "numpy.ones", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vishrawji/Task-DyVA
[ "6cf68210a85e1afb4fd0dc82e912e577d0ce1ec6", "6cf68210a85e1afb4fd0dc82e912e577d0ce1ec6", "6cf68210a85e1afb4fd0dc82e912e577d0ce1ec6" ]
[ "task_dyva/visualization.py", "manuscript/preprocessing.py", "manuscript/figureS4.py" ]
[ "import matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\n\nfrom .taskdataset import EbbFlowStats\n\n\nclass PlotRTs(EbbFlowStats):\n \"\"\"Plot RT distributions.\n\n Args\n ----\n stats_obj (EbbFlowStats instance): Data from the model/participant.\n palette (str, optional): Color palette used for plotting.\n \"\"\"\n\n def __init__(self, stats_obj, palette='viridis'):\n self.__dict__ = stats_obj.__dict__\n self.palette = palette\n\n def plot_rt_dists(self, ax, plot_type):\n if plot_type == 'all':\n plot_df = self._format_all()\n elif plot_type == 'switch':\n plot_df = self._format_by_switch()\n elif plot_type == 'congruency':\n plot_df = self._format_by_congruency()\n\n sns.violinplot(x='trial_type', y='rts', hue='model_or_user', \n data=plot_df, split=True, inner=None, ax=ax,\n palette=self.palette, cut=0, linewidth=0.5)\n\n if plot_type == 'all':\n ax.set_xticks([])\n else:\n ax.set_xticklabels(\n ax.get_xticklabels(), rotation=45, ha='right', \n rotation_mode='anchor')\n ax.set_xlabel('')\n ax.set_ylabel('')\n return ax\n\n def _format_as_df(self, plot_dists, model_or_user, trial_types):\n all_rts = pd.concat(plot_dists)\n m_u_array = []\n ttype_array = []\n for rts, mu, ttype in zip(plot_dists, model_or_user, trial_types):\n m_u_array.extend(len(rts) * [mu])\n ttype_array.extend(len(rts) * [ttype])\n plot_df = pd.DataFrame({'rts': all_rts, 'model_or_user': m_u_array,\n 'trial_type': ttype_array})\n return plot_df\n\n def _format_all(self):\n plot_dists = [self.df['urt_ms'], self.df['mrt_ms']]\n m_or_u = ['user', 'model']\n trial_types = ['N/A', 'N/A']\n return self._format_as_df(plot_dists, m_or_u, trial_types)\n\n def _format_by_switch(self):\n stay_inds = self.select(**{'is_switch': 0})\n switch_inds = self.select(**{'is_switch': 1})\n u_stay_rts = self.df['urt_ms'][stay_inds]\n m_stay_rts = self.df['mrt_ms'][stay_inds]\n u_switch_rts = self.df['urt_ms'][switch_inds]\n m_switch_rts = self.df['mrt_ms'][switch_inds]\n plot_dists = [u_stay_rts, u_switch_rts, m_stay_rts, m_switch_rts]\n trial_types = ['Stay', 'Switch', 'Stay', 'Switch']\n m_or_u = ['user', 'user', 'model', 'model']\n return self._format_as_df(plot_dists, m_or_u, trial_types)\n\n def _format_by_congruency(self):\n con_inds = self.select(**{'is_congruent': 1})\n incon_inds = self.select(**{'is_congruent': 0})\n u_con_rts = self.df['urt_ms'][con_inds]\n m_con_rts = self.df['mrt_ms'][con_inds]\n u_incon_rts = self.df['urt_ms'][incon_inds]\n m_incon_rts = self.df['mrt_ms'][incon_inds]\n plot_dists = [u_con_rts, u_incon_rts, m_con_rts, m_incon_rts]\n trial_types = ['Congruent', 'Incongruent', 'Congruent',\n 'Incongruent']\n m_or_u = ['user', 'user', 'model', 'model']\n return self._format_as_df(plot_dists, m_or_u, trial_types)\n\n\nclass BarPlot():\n \"\"\"Plot seaborn style barplots, but allow plotting of\n s.e.m. error bars. See figure2.py and figure3.py for usage.\n\n Args\n ----\n df (pandas DataFrame): Data to plot. 
\n palette (str, optional): Color palette used for plotting.\n \"\"\"\n\n supported_error = {'sem', 'sd'}\n\n def __init__(self, df, palette='viridis'):\n self.df = df\n self.palette = palette\n\n def plot_grouped_bar(self, x, y, hue, error_type, ax, **kwargs):\n # Note: Currently this only supports plotting two groups\n # (designated by the hue argument)\n assert error_type in self.supported_error, \\\n 'error_type must be one of the following: ' \\\n f'{self.supported_error}'\n colors = [(0.2363, 0.3986, 0.5104, 1.0),\n (0.2719, 0.6549, 0.4705, 1.0)]\n width = kwargs.get('width', 0.35)\n x_offset = -width / 2\n hue_types = self.df[hue].unique()\n elinewidth = kwargs.get('elinewidth', 0.5)\n error_kw = {'elinewidth': elinewidth}\n for i, h in enumerate(hue_types):\n group_df = self.df.query(f'{hue} == @h')\n group_means, group_errors = self._get_group_data(\n group_df, x, y, error_type)\n plot_x = np.arange(len(group_means))\n ax.bar(plot_x + x_offset, group_means, yerr=group_errors,\n width=width, label=h, error_kw=error_kw, \n **{'fc': colors[i]})\n x_offset += width\n ax = self._adjust_bar(plot_x, ax, **kwargs)\n return ax\n\n def plot_bar(self, keys, error_type, ax, **kwargs):\n assert error_type in self.supported_error, \\\n 'error_type must be one of the following: ' \\\n f'{self.supported_error}'\n colors = sns.color_palette(palette=self.palette, n_colors=len(keys))\n width = kwargs.get('width', 0.75)\n plot_data = [self.df[key] for key in keys]\n for di, d in enumerate(plot_data):\n d_mean = np.mean(d)\n d_sem = np.std(d) / np.sqrt(len(d))\n ax.bar(di, d_mean, yerr=d_sem, width=width, error_kw={'elinewidth': 1},\n **{'fc': colors[di]})\n ax = self._adjust_bar(np.arange(len(plot_data)), ax, **kwargs)\n return ax\n\n def _get_group_data(self, group_df, x, y, error_type):\n means = group_df.groupby(x)[y].mean().to_numpy()\n if error_type == 'sem':\n errors = group_df.groupby(x)[y].sem().to_numpy()\n elif error_type == 'sd':\n errors = group_df.groupby(x)[y].std().to_numpy()\n return means, errors\n\n def _adjust_bar(self, plot_x, ax, **kwargs):\n ax.set_xlabel(kwargs.get('xlabel', None))\n ax.set_ylabel(kwargs.get('ylabel', None))\n ax.set_xticks(plot_x)\n ax.set_xticklabels(kwargs.get('xticklabels', None),\n rotation=45, ha='right', rotation_mode='anchor')\n if 'yticks' in kwargs.keys():\n ax.set_yticks(kwargs['yticks'])\n ax.set_xlim(kwargs.get('xlim', None))\n ax.set_ylim(kwargs.get('ylim', None))\n if kwargs.get('plot_legend', False):\n ax.legend()\n ax.get_legend().get_frame().set_linewidth(0.0) \n return ax\n\n\nclass PlotModelLatents():\n \"\"\"Plot the model latents in 3D. See e.g. figure3.py and \n figure4.py for usage.\n\n Args\n ----\n data (EbbFlowStats instance): Data to plot.\n post_on_dur (int, optional): Duration after stimulus onset to plot (ms).\n pcs_to_plot (list, optional): Which PCs to plot. \n fixed_points (pandas DataFrame, optional): Fixed points to plot. 
\n \"\"\"\n \n default_colors = 2 * ['royalblue', 'forestgreen', 'crimson', 'orange']\n\n def __init__(self, data, post_on_dur=1200, pcs_to_plot=[0, 1, 2],\n fixed_points=None, plot_pre_onset=True):\n self.data = data\n self.pcs_to_plot = pcs_to_plot\n self.latents = data.windowed['pca_latents'][:, :, pcs_to_plot]\n self.m_rts = data.df['mrt_ms'].to_numpy()\n self.step = data.step\n self.n_pre = data.n_pre\n self.t_off_ind = self.n_pre + 1 \\\n + np.round(post_on_dur / self.step).astype('int')\n if plot_pre_onset:\n self.t_on_ind = 0\n else:\n self.t_on_ind = self.n_pre\n self.fixed_points = fixed_points\n\n def plot_stay_switch(self, ax, params, elev=30, azim=60):\n # Plot average stay and switch trajectories for one task cue\n # and one response direction (used e.g. in Fig. 4A).\n labels = ['Stay', 'Switch']\n styles = ['-', '-']\n series = self._get_stay_switch_series(params)\n cmap = sns.color_palette('viridis', as_cmap=True)\n color_indices = [0.2, 0.8]\n colors = [cmap(i) for i in color_indices]\n plot_kwargs = {'colors': colors, 'line_styles': styles,\n 'line_width': 0.5, 'plot_series_onset': True,\n 'plot_series_rt': True, 'plot_times': np.array([100])}\n ax = self.plot_3d(series, labels, ax, elev=elev, azim=azim, \n **plot_kwargs)\n return ax\n\n def _get_stay_switch_series(self, params):\n pt, mv, cue = (params['pt'], params['mv'], params['cue'])\n stay_filter = {'point_dir': pt, 'mv_dir': mv, 'task_cue': cue,\n 'prev_task_cue': cue}\n switch_filter = {'point_dir': pt, 'mv_dir': mv, 'task_cue': cue,\n 'prev_task_cue': 1 - cue}\n stay_inds = self.data.select(**stay_filter)\n switch_inds = self.data.select(**switch_filter)\n all_selections = [stay_inds, switch_inds]\n return all_selections\n\n def plot_main_conditions(self, ax, elev=30, azim=60, \n plot_task_centroid=False, **kwargs):\n # Plot the 8 task cue x relevant stimulus direction combinations;\n # also plot the fixed points. Used e.g. in Figs. 3A and S6. \n stim_cue_vals = [(0, 0),\n (0, 1),\n (0, 2),\n (0, 3),\n (1, 0),\n (1, 1),\n (1, 2),\n (1, 3)]\n labels = ['Moving L', \n 'Moving R', \n 'Moving U',\n 'Moving D',\n 'Pointing L', \n 'Pointing R',\n 'Pointing U',\n 'Pointing D']\n styles = ['-', '-', '-', '-',\n '--', '--', '--', '--']\n series = self._get_main_series(stim_cue_vals)\n plot_kwargs = {'plot_series_onset': True, 'plot_series_rt': True, \n 'plot_task_centroid': plot_task_centroid,\n 'line_width': 0.5, 'line_styles': styles}\n plot_kwargs.update(kwargs)\n ax = self.plot_3d(series, labels, ax, elev=elev, azim=azim, \n **plot_kwargs)\n return ax\n\n def _get_main_series(self, stim_cue_vals):\n all_selections = []\n for this_stim_cue in stim_cue_vals:\n this_cue = this_stim_cue[0]\n this_stim = this_stim_cue[1]\n if this_cue == 0: # moving task\n this_filters = {'mv_dir': this_stim, \n 'task_cue': this_cue}\n else: # pointing task\n this_filters = {'point_dir': this_stim, \n 'task_cue': this_cue} \n this_inds = self.data.select(**this_filters)\n all_selections.append(this_inds)\n return all_selections\n\n def plot_3d(self, series, labels, ax, elev=30, azim=60, **kwargs):\n # series should be a list of numpy arrays: the model latents\n # are averaged over each array of indices, then plotted. 
\n colors = kwargs.get('colors', self.default_colors)\n line_styles = kwargs.get('line_styles', len(series) * ['-']) \n width = kwargs.get('line_width', 0.5)\n rt_marker = kwargs.get('rt_marker', 'o')\n if kwargs.get('plot_task_centroid', False):\n ax = self._plot_task_centroid(ax)\n plot_series_onset = kwargs.get('plot_series_onset', False)\n plot_series_rt = kwargs.get('plot_series_rt', False)\n plot_times = kwargs.get('plot_times', None)\n\n for i, s in enumerate(series):\n label = labels[i]\n color = colors[i]\n style = line_styles[i]\n ax = self._plot_3d_line(ax, s, color, style, label, width)\n if plot_series_onset:\n ax = self._mark_3d_plot(ax, s, self.n_pre, 'k', 10, '.')\n if plot_series_rt:\n t_ind = self._get_series_rt_samples(s) + self.n_pre\n ax = self._mark_3d_plot(ax, s, t_ind, color, 6, 'o')\n if plot_times is not None:\n ax = self._plot_timepoints(ax, s, plot_times, color)\n\n if self.fixed_points is not None:\n ax = self._plot_fixed_points(ax)\n ax = self._adjust_plot(ax, elev, azim, **kwargs)\n return ax\n\n def _plot_timepoints(self, ax, series, times, color):\n for t in times:\n t_plot = int(self.n_pre + t / self.step)\n ax = self._mark_3d_plot(ax, series, t_plot, color, 7, 'x')\n return ax\n\n def _get_series_rt_samples(self, series):\n rt = np.round(np.mean(self.m_rts[series]) / self.step).astype('int')\n return rt\n\n def _plot_task_centroid(self, ax):\n color, size, marker = 'k', 20, '*'\n t_ind = self.n_pre\n mv_filter = {'task_cue': 0, 'prev_task_cue': 0}\n mv_inds = self.data.select(**mv_filter)\n pt_filter = {'task_cue': 1, 'prev_task_cue': 1}\n pt_inds = self.data.select(**pt_filter)\n for inds in [mv_inds, pt_inds]:\n ax = self._mark_3d_plot(ax, inds, t_ind, color, size, marker)\n return ax\n\n def _mark_3d_plot(self, ax, series_inds, t_ind, color, size, marker):\n x = np.mean(self.latents[:, series_inds, 0], 1)\n y = np.mean(self.latents[:, series_inds, 1], 1)\n z = np.mean(self.latents[:, series_inds, 2], 1)\n ax.scatter(x[t_ind], y[t_ind], z[t_ind], marker=marker,\n facecolors=color, s=size,\n linewidth=0.5, label=None)\n return ax\n\n def _plot_3d_line(self, ax, series_inds, color, style, label, width):\n x = np.mean(self.latents[self.t_on_ind:self.t_off_ind, series_inds, 0], 1)\n y = np.mean(self.latents[self.t_on_ind:self.t_off_ind, series_inds, 1], 1)\n z = np.mean(self.latents[self.t_on_ind:self.t_off_ind, series_inds, 2], 1)\n ax.plot(x, y, z, color=color, label=label, linestyle=style, \n linewidth=width)\n return ax\n\n def _plot_fixed_points(self, ax):\n mv_fps = self.fixed_points.query('cue == 0')\n pt_fps = self.fixed_points.query('cue == 1')\n plot_fps = [mv_fps, pt_fps]\n fp_markers = ['x', 'x']\n colors = ['crimson', 'royalblue']\n size = 10\n for plot_fp, mark, c in zip(plot_fps, fp_markers, colors):\n for fpz in plot_fp['zloc_pca']:\n ax.scatter(fpz[0], fpz[1], fpz[2], s=size, color=c, \n marker=mark, zorder=2, linewidth=0.5)\n return ax\n\n def _adjust_plot(self, ax, elev, azim, **kwargs):\n ax.view_init(elev=elev, azim=azim)\n ax.xaxis._axinfo['grid']['linewidth'] = 0.25\n ax.yaxis._axinfo['grid']['linewidth'] = 0.25\n ax.zaxis._axinfo['grid']['linewidth'] = 0.25\n ax.set_xlim(kwargs.get('xlim', None))\n ax.set_ylim(kwargs.get('ylim', None))\n ax.set_zlim(kwargs.get('zlim', None))\n if kwargs.get('remove_tick_labels', True):\n ax.set_xticklabels('')\n ax.set_yticklabels('')\n ax.set_zticklabels('')\n if kwargs.get('annotate', True):\n ax = self._annotate_plot(ax, **kwargs)\n return ax\n\n def _annotate_plot(self, ax, **kwargs):\n 
ax.set_xlabel(f'PC {self.pcs_to_plot[0] + 1}', labelpad=-15)\n ax.set_ylabel(f'PC {self.pcs_to_plot[1] + 1}', labelpad=-15)\n ax.set_zlabel(f'PC {self.pcs_to_plot[2] + 1}', labelpad=-15)\n ax.set_title(kwargs.get('title', None))\n ax.legend(loc='upper center', ncol=2, frameon=False)\n return ax\n", "import os\nimport copy\nimport pickle\n\nimport numpy as np\nimport pandas as pd\n\nfrom task_dyva import Experiment\nfrom task_dyva.model_analysis import FixedPointFinder, LatentsLDA\n\n\nclass Preprocess():\n \"\"\"Run preprocessing for the manuscript:\n 1) Get the model outputs on the test set at different noise levels.\n 2) Find stable fixed points for each model.\n 3) Run the LDA analyses (Fig. 3).\n \"\"\"\n\n analysis_dir = 'model_analysis'\n device = 'cpu'\n raw_fn = 'data_pre_split.pickle'\n params_fn = 'model_params.pth'\n rand_seed = 12345 # Enforce reproducibility\n\n # Noise conditions\n acc_noise_sds = np.arange(0.1, 0.65, 0.05)\n acc_noise_keys = ['01', '015', '02', '025', '03', '035', '04', '045',\n '05', '055', '06']\n sc_noise_sds = np.array([0.7, 0.8, 0.9, 1.0])\n sc_noise_keys = ['07', '08', '09', '1']\n primary_noise_key = '01'\n primary_noise_sd = 0.1\n primary_outputs_fn = f'holdout_outputs_{primary_noise_key}SD.pkl'\n\n # Behavior summary\n behavior_summary_fn = 'behavior_summary.pkl'\n behavior_metrics = ['accuracy', 'acc_switch_cost', 'acc_con_effect',\n 'mean_rt', 'switch_cost', 'con_effect']\n\n # Fixed point params\n fp_fn = 'fixed_points.pkl'\n fp_summary_fn = 'fixed_point_summary.pkl'\n fp_N = 10\n fp_T = 50000\n\n # LDA params\n lda_fn = 'lda_summary.pkl'\n lda_time_range = [-100, 1600]\n lda_n_shuffle = 100\n\n def __init__(self, model_dir, metadata, reload_primary_outputs=True,\n reload_behavior_summary=True, reload_fixed_points=True,\n reload_lda_summary=True):\n self.model_dir = model_dir\n self.expts = metadata['name']\n self.sc_status = metadata['switch_cost_type']\n self.reload_primary = reload_primary_outputs\n self.reload_behavior = reload_behavior_summary\n self.reload_fixed_points = reload_fixed_points\n self.reload_lda_summary = reload_lda_summary\n\n def run_preprocessing(self):\n for expt_str, model_type in zip(self.expts, \n self.sc_status):\n print(f'Preprocessing experiment {expt_str}')\n this_model_dir = os.path.join(self.model_dir, expt_str)\n\n # Get model outputs used for the bulk of analyses (0.1SD noise)\n self._primary_outputs_wrapper(this_model_dir, expt_str)\n\n # Get model / participant behavior at all noise levels\n self._behavior_wrapper(this_model_dir, expt_str, model_type)\n\n # Find stable fixed points\n self._fp_wrapper(this_model_dir, expt_str, model_type)\n\n # LDA analyses\n self._lda_wrapper(this_model_dir, expt_str, model_type)\n\n def _get_model_outputs(self, model_dir, expt_str, \n noise_key, noise_sd, try_reload=False):\n save_str = f'holdout_outputs_{noise_key}SD.pkl'\n noise_params = {'noise_type': 'indep',\n 'noise_sd': noise_sd}\n expt_kwargs = {'do_logging': False, \n 'test': noise_params,\n 'mode': 'testing', \n 'params_to_load': self.params_fn}\n\n if noise_key == self.primary_noise_key:\n analyze_latents = True\n else:\n analyze_latents = False\n\n # Get model outputs and stats\n expt = Experiment(model_dir, model_dir, self.raw_fn, \n expt_str, processed_dir=model_dir,\n device=self.device, **expt_kwargs)\n\n expt_stats = expt.get_behavior_metrics(expt.test_dataset, \n save_fn=save_str,\n save_local=True,\n load_local=try_reload,\n analyze_latents=analyze_latents,\n stats_dir=self.analysis_dir)\n return expt, 
expt_stats\n\n def _get_behavior_summary(self, model_dir, expt_str, model_type):\n noise_keys = self.acc_noise_keys.copy()\n if model_type in ['sc+', 'sc-']:\n noise_cons = np.concatenate((self.acc_noise_sds, \n self.sc_noise_sds))\n noise_keys.extend(self.sc_noise_keys)\n else:\n noise_cons = self.acc_noise_sds\n\n summary = {key: {} for key in noise_keys}\n for noise_sd, noise_key in zip(noise_cons, noise_keys):\n expt, expt_stats = self._get_model_outputs(model_dir, \n expt_str,\n noise_key, \n noise_sd,\n try_reload=True)\n for metric in self.behavior_metrics:\n u_key = f'u_{metric}'\n m_key = f'm_{metric}'\n summary[noise_key][u_key] = expt_stats.summary_stats[u_key]\n summary[noise_key][m_key] = expt_stats.summary_stats[m_key]\n\n if model_type in ['sc+', 'sc-']:\n # Get conditional error rates\n error_info = self._get_error_info(expt_stats.df)\n summary[noise_key].update(error_info)\n\n # Save and remove intermediate files\n save_path = os.path.join(model_dir, \n self.analysis_dir, \n self.behavior_summary_fn)\n with open(save_path, 'wb') as path:\n pickle.dump(summary, path, protocol=4)\n self._clean_up_behavior_summary(model_dir, noise_keys)\n\n def _get_error_info(self, df):\n errors = {}\n\n n_con = len(df.query('is_congruent == 1'))\n n_con_errors = len(df.query('is_congruent == 1 and mcorrect == 0'))\n errors['con_error_rate'] = n_con_errors / n_con\n\n n_incon = len(df.query('is_congruent == 0'))\n n_incon_errors = len(df.query('is_congruent == 0 and mcorrect == 0'))\n errors['incon_error_rate'] = n_incon_errors / n_incon\n\n n_stay = len(df.query('is_switch == 0'))\n n_stay_errors = len(df.query('is_switch == 0 and mcorrect == 0'))\n errors['stay_error_rate'] = n_stay_errors / n_stay\n\n n_switch = len(df.query('is_switch == 1'))\n n_switch_errors = len(df.query('is_switch == 1 and mcorrect == 0'))\n errors['switch_error_rate'] = n_switch_errors / n_switch\n\n return errors\n\n def _clean_up_behavior_summary(self, model_dir, noise_keys):\n for key in noise_keys:\n if key == self.primary_noise_key:\n continue\n else:\n fn = os.path.join(model_dir, self.analysis_dir,\n f'holdout_outputs_{key}SD.pkl')\n os.remove(fn)\n\n def _primary_outputs_wrapper(self, model_dir, expt_str):\n primary_stats_path = os.path.join(model_dir,\n self.analysis_dir,\n self.primary_outputs_fn)\n if self.reload_primary and os.path.exists(primary_stats_path):\n pass\n else:\n _, _ = self._get_model_outputs(model_dir, \n expt_str,\n self.primary_noise_key, \n self.primary_noise_sd)\n\n def _behavior_wrapper(self, model_dir, expt_str, model_type): \n behavior_summary_path = os.path.join(model_dir,\n self.analysis_dir,\n self.behavior_summary_fn)\n if self.reload_behavior and os.path.exists(behavior_summary_path):\n pass\n else:\n self._get_behavior_summary(model_dir, expt_str, model_type)\n\n def _fp_wrapper(self, model_dir, expt_str, model_type):\n fp_path = os.path.join(model_dir,\n self.analysis_dir,\n self.fp_fn)\n fp_summary_path = os.path.join(model_dir,\n self.analysis_dir,\n self.fp_summary_fn)\n if (self.reload_fixed_points and os.path.exists(fp_path)\n and os.path.exists(fp_summary_path)):\n pass\n elif model_type == 'sc-':\n pass\n else:\n expt, expt_stats = self._get_model_outputs(model_dir, \n expt_str,\n self.primary_noise_key, \n self.primary_noise_sd,\n try_reload=True)\n fpf = FixedPointFinder(expt, expt_stats, fp_path,\n fp_summary_path,\n load_saved=False,\n rand_seed=self.rand_seed)\n this_fps = fpf.find_fixed_points(self.fp_N, self.fp_T)\n fp_summary = 
fpf.get_fixed_point_summary(this_fps)\n\n def _lda_wrapper(self, model_dir, expt_str, model_type):\n lda_path = os.path.join(model_dir,\n self.analysis_dir,\n self.lda_fn)\n if self.reload_lda_summary and os.path.exists(lda_path):\n pass\n elif model_type == 'sc-':\n pass\n else:\n _, expt_stats = self._get_model_outputs(model_dir, \n expt_str,\n self.primary_noise_key, \n self.primary_noise_sd,\n try_reload=True)\n lda = LatentsLDA(expt_stats, lda_path, load_saved=False,\n time_range=self.lda_time_range,\n n_shuffle=self.lda_n_shuffle,\n rand_seed=self.rand_seed)\n lda_summary = lda.run_lda_analysis()\n", "import os\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom task_dyva.visualization import BarPlot\nfrom task_dyva.utils import save_figure, plot_scatter, expt_stats_to_df\n\n\nclass FigureS4():\n \"\"\"Analysis methods and plotting routines to reproduce\n Figure S4 from the manuscript (RT variability).\n \"\"\"\n\n analysis_dir = 'model_analysis'\n stats_fn = 'holdout_outputs_01SD.pkl'\n figsize = (4.5, 2)\n figdpi = 300\n line_ext = 0.1\n age_bin_labels = ['20-29', '30-39', '40-49', '50-59', \n '60-69', '70-79', '80-89']\n\n def __init__(self, model_dir, save_dir, metadata):\n self.model_dir = model_dir\n self.save_dir = save_dir\n self.expts = metadata['name']\n self.age_bins = metadata['age_range']\n self.sc_status = metadata['switch_cost_type']\n\n # Containers for summary stats\n self.group_stats = {'u_rt_sd': [], 'm_rt_sd': []}\n self.analysis_expt_stats = []\n self.analysis_age_bins = []\n self.analysis_expt_strs = []\n\n def make_figure(self):\n print('Making Figure S4...')\n self._run_preprocessing()\n print('Stats for Figure S4')\n print('------------------')\n fig = self._plot_figure_get_stats()\n save_figure(fig, self.save_dir, 'FigS4')\n print('')\n\n def _run_preprocessing(self):\n for expt_str, ab, sc in zip(self.expts, \n self.age_bins, \n self.sc_status):\n # Skip sc- models\n if sc == 'sc-':\n continue\n \n # Load stats from the holdout data\n stats_path = os.path.join(self.model_dir, expt_str, \n self.analysis_dir, self.stats_fn)\n with open(stats_path, 'rb') as path:\n expt_stats = pickle.load(path)\n self.analysis_age_bins.append(ab)\n self.analysis_expt_stats.append(expt_stats)\n self.analysis_expt_strs.append(expt_str)\n for key in self.group_stats.keys():\n self.group_stats[key].append(expt_stats.summary_stats[key])\n\n def _plot_figure_get_stats(self):\n fig, axes = plt.subplots(1, 2, figsize=self.figsize,\n dpi=self.figdpi)\n\n # Panel A: Model vs. participant scatter for RT SD\n A_params = {'ax_lims': [20, 350],\n 'metric': 'rt_sd',\n 'label': 'RT SD (ms)'}\n plot_scatter(self.group_stats, A_params, axes[0], self.line_ext)\n\n # Panel B: Model vs. participant RT SD binned by age\n error_type = 'sem'\n stats_df = expt_stats_to_df(['rt_sd'],\n self.analysis_expt_strs,\n self.analysis_age_bins,\n self.analysis_expt_stats)\n B_params = {'ylabel': 'RT SD (ms)',\n 'xticklabels': self.age_bin_labels,\n 'plot_legend': True}\n B_bar = BarPlot(stats_df)\n B_bar.plot_grouped_bar('age_bin', 'value', 'model_or_user',\n error_type, axes[1], **B_params)\n axes[1].set_xlabel('Age bin (years)')\n\n plt.tight_layout()\n\n return fig\n" ]
[ [ "pandas.concat", "pandas.DataFrame", "numpy.round", "numpy.std", "numpy.mean", "numpy.array" ], [ "numpy.concatenate", "numpy.arange", "numpy.array" ], [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
WolfsSky/poliastro
[ "fc5e0825b110a0d6095b4b174e47624147ae1a29" ]
[ "src/poliastro/plotting/static.py" ]
[ "from typing import List\n\nimport matplotlib as mpl\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import CartesianRepresentation\nfrom matplotlib import pyplot as plt\n\nfrom poliastro.plotting.util import BODY_COLORS, generate_label\nfrom poliastro.util import norm\n\nfrom ._base import Trajectory\n\n\nclass StaticOrbitPlotter:\n \"\"\"StaticOrbitPlotter class.\n\n This class holds the perifocal plane of the first\n :py:class:`~poliastro.twobody.orbit.Orbit` plotted in it using\n :py:meth:`plot`, so all following\n plots will be projected on that plane. Alternatively, you can call\n :py:meth:`set_frame` to set the frame before plotting.\n\n \"\"\"\n\n def __init__(self, ax=None, num_points=150, dark=False):\n \"\"\"Constructor.\n\n Parameters\n ----------\n ax : ~matplotlib.axes.Axes\n Axes in which to plot. If not given, new ones will be created.\n num_points : int, optional\n Number of points to use in plots, default to 150.\n dark : bool, optional\n If set as True, plots the orbit in Dark mode.\n \"\"\"\n self.ax = ax\n if not self.ax:\n if dark:\n with plt.style.context(\"dark_background\"):\n _, self.ax = plt.subplots(figsize=(6, 6))\n else:\n _, self.ax = plt.subplots(figsize=(6, 6))\n self.num_points = num_points\n self._frame = None\n self._attractor = None\n self._attractor_radius = np.inf * u.km\n self._trajectories = [] # type: List[Trajectory]\n\n @property\n def trajectories(self):\n return self._trajectories\n\n def set_frame(self, p_vec, q_vec, w_vec):\n \"\"\"Sets perifocal frame.\n\n Raises\n ------\n ValueError\n If the vectors are not a set of mutually orthogonal unit vectors.\n \"\"\"\n if not np.allclose([norm(v) for v in (p_vec, q_vec, w_vec)], 1):\n raise ValueError(\"Vectors must be unit.\")\n elif not np.allclose([p_vec.dot(q_vec), q_vec.dot(w_vec), w_vec.dot(p_vec)], 0):\n raise ValueError(\"Vectors must be mutually orthogonal.\")\n else:\n self._frame = p_vec, q_vec, w_vec\n\n if self._trajectories:\n self._redraw()\n\n def _redraw(self):\n for artist in self.ax.lines + self.ax.collections:\n artist.remove()\n\n for trajectory, state, label, color in self._trajectories:\n self._plot(trajectory, state, label, color)\n\n self.ax.relim()\n self.ax.autoscale()\n\n def _plot_trajectory(self, trajectory, color=None):\n rr = trajectory.represent_as(CartesianRepresentation).xyz.transpose()\n x, y = self._project(rr)\n lines = self.ax.plot(x.to(u.km).value, y.to(u.km).value, \"--\", color=color)\n\n return lines\n\n def plot_trajectory(self, trajectory, *, label=None, color=None):\n \"\"\"Plots a precomputed trajectory.\n\n Parameters\n ----------\n trajectory : ~astropy.coordinates.BaseRepresentation, ~astropy.coordinates.BaseCoordinateFrame\n Trajectory to plot.\n label : str, optional\n Label.\n color : str, optional\n Color string.\n\n \"\"\"\n if self._attractor is None or self._frame is None:\n raise ValueError(\n \"An attractor and a frame must be set up first, please use \"\n \"set_attractor(Major_Body) and set_frame(*orbit.pqw()) \"\n \"or plot(orbit).\"\n )\n\n self._redraw_attractor(\n trajectory.represent_as(CartesianRepresentation).norm().min() * 0.15\n ) # Arbitrary threshold\n lines = self._plot_trajectory(trajectory, color)\n\n if label:\n lines[0].set_label(label)\n self.ax.legend(\n loc=\"upper left\", bbox_to_anchor=(1.05, 1.015), title=\"Names and epochs\"\n )\n\n self._trajectories.append(\n Trajectory(trajectory, None, label, lines[0].get_color())\n )\n\n return lines\n\n def set_attractor(self, attractor):\n 
\"\"\"Sets plotting attractor.\n\n Parameters\n ----------\n attractor : ~poliastro.bodies.Body\n Central body.\n\n \"\"\"\n if self._attractor is None:\n self._attractor = attractor\n\n elif attractor is not self._attractor:\n raise NotImplementedError(\n \"Attractor has already been set to {}.\".format(self._attractor.name)\n )\n\n def _project(self, rr):\n rr_proj = rr - rr.dot(self._frame[2])[:, None] * self._frame[2]\n x = rr_proj.dot(self._frame[0])\n y = rr_proj.dot(self._frame[1])\n return x, y\n\n def _redraw_attractor(self, min_radius=0 * u.km):\n radius = max(self._attractor.R.to(u.km), min_radius.to(u.km))\n color = BODY_COLORS.get(self._attractor.name, \"#999999\")\n\n for attractor in self.ax.findobj(match=mpl.patches.Circle):\n attractor.remove()\n\n if radius < self._attractor_radius:\n self._attractor_radius = radius\n\n self.ax.add_patch(\n mpl.patches.Circle((0, 0), self._attractor_radius.value, lw=0, color=color)\n )\n\n def _plot(self, trajectory, state=None, label=None, color=None):\n lines = self._plot_trajectory(trajectory, color)\n\n if state is not None:\n x0, y0 = self._project(state[None])\n\n # Plot current position\n l, = self.ax.plot(\n x0.to(u.km).value,\n y0.to(u.km).value,\n \"o\",\n mew=0,\n color=lines[0].get_color(),\n )\n lines.append(l)\n\n if label:\n if not self.ax.get_legend():\n size = self.ax.figure.get_size_inches() + [8, 0]\n self.ax.figure.set_size_inches(size)\n\n # This will apply the label to either the point or the osculating\n # orbit depending on the last plotted line\n # NOTE: What about generating both labels,\n # indicating that one is the osculating orbit?\n lines[-1].set_label(label)\n self.ax.legend(\n loc=\"upper left\", bbox_to_anchor=(1.05, 1.015), title=\"Names and epochs\"\n )\n\n self.ax.set_xlabel(\"$x$ (km)\")\n self.ax.set_ylabel(\"$y$ (km)\")\n self.ax.set_aspect(1)\n\n return lines\n\n def plot(self, orbit, label=None, color=None):\n \"\"\"Plots state and osculating orbit in their plane.\n \"\"\"\n if not self._frame:\n self.set_frame(*orbit.pqw())\n\n self.set_attractor(orbit.attractor)\n self._redraw_attractor(orbit.r_p * 0.15) # Arbitrary threshold\n positions = orbit.sample(self.num_points)\n if label:\n label = generate_label(orbit, label)\n\n lines = self._plot(positions, orbit.r, label, color)\n\n self._trajectories.append(\n Trajectory(positions, orbit.r, label, lines[0].get_color())\n )\n return lines\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.style.context", "matplotlib.patches.Circle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Yui010206/mmf
[ "01e7ccd664a4492f65ba10aeb3eeeafef62c3b87" ]
[ "mmf/trainers/mmf_trainer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\nimport logging\nimport warnings\n\nimport omegaconf\nimport torch\nfrom mmf.common.dataset_loader import DatasetLoader\nfrom mmf.common.registry import registry\nfrom mmf.modules.metrics import Metrics\nfrom mmf.trainers.base_trainer import BaseTrainer\nfrom mmf.trainers.callbacks.checkpoint import CheckpointCallback\nfrom mmf.trainers.callbacks.early_stopping import EarlyStoppingCallback\nfrom mmf.trainers.callbacks.logistics import LogisticsCallback\nfrom mmf.trainers.callbacks.lr_scheduler import LRSchedulerCallback\nfrom mmf.trainers.core.callback_hook import TrainerCallbackHookMixin\nfrom mmf.trainers.core.device import TrainerDeviceMixin\nfrom mmf.trainers.core.evaluation_loop import TrainerEvaluationLoopMixin\nfrom mmf.trainers.core.profiling import TrainerProfilingMixin\nfrom mmf.trainers.core.reporting import TrainerReportingMixin\nfrom mmf.trainers.core.training_loop import TrainerTrainingLoopMixin\nfrom mmf.utils.build import build_model, build_optimizer\nfrom mmf.utils.general import print_model_parameters\nfrom omegaconf import DictConfig, OmegaConf\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]_trainer(\"mmf\")\nclass MMFTrainer(\n TrainerCallbackHookMixin,\n TrainerTrainingLoopMixin,\n TrainerDeviceMixin,\n TrainerEvaluationLoopMixin,\n TrainerReportingMixin,\n TrainerProfilingMixin,\n BaseTrainer,\n):\n def __init__(self, config: DictConfig):\n super().__init__(config)\n\n def load(self):\n super().load()\n self.load_fp16_scaler()\n\n # Callbacks\n self.on_init_start()\n\n # Parallize model\n self.parallelize_model()\n\n # Callbacks\n self.on_init_end()\n\n def configure_callbacks(self):\n self.checkpoint_callback = CheckpointCallback(self.config, self)\n self.early_stop_callback = EarlyStoppingCallback(self.config, self)\n self.logistics_callback = LogisticsCallback(self.config, self)\n self.lr_scheduler_callback = LRSchedulerCallback(self.config, self)\n\n # Add callbacks for execution during events\n self.callbacks.append(self.lr_scheduler_callback)\n # checkpoint_callback needs to be called after lr_scheduler_callback so that\n # lr_scheduler_callback._scheduler.step() happens before saving checkpoints\n # (otherwise the saved last_epoch in scheduler would be wrong)\n self.callbacks.append(self.checkpoint_callback)\n self.callbacks.append(self.logistics_callback)\n\n def load_datasets(self):\n logger.info(\"Loading datasets\")\n self.dataset_loader = DatasetLoader(self.config)\n self.dataset_loader.load_datasets()\n\n self.train_dataset = self.dataset_loader.train_dataset\n self.val_dataset = self.dataset_loader.val_dataset\n self.test_dataset = self.dataset_loader.test_dataset\n\n self.train_loader = self.dataset_loader.train_loader\n self.val_loader = self.dataset_loader.val_loader\n self.test_loader = self.dataset_loader.test_loader\n\n def load_model(self):\n logger.info(\"Loading model\")\n if self.config.model in self.config.model_config:\n attributes = self.config.model_config[self.config.model]\n else:\n warnings.warn(\n f\"Model {self.config.model}'s config not present. 
\"\n + \"Continuing with empty config\"\n )\n attributes = OmegaConf.create()\n # Easy way to point to config for other model\n if isinstance(attributes, str):\n attributes = self.config.model_config[attributes]\n\n with omegaconf.open_dict(attributes):\n attributes.model = self.config.model\n\n self.model = build_model(attributes)\n self.model = self.model.to(self.device)\n\n def load_optimizer(self):\n logger.info(\"Loading optimizer\")\n self.optimizer = build_optimizer(self.model, self.config)\n\n def load_metrics(self) -> None:\n logger.info(\"Loading metrics\")\n metrics = self.config.evaluation.get(\"metrics\", [])\n self.metrics = Metrics(metrics)\n self.metrics_params = self.metrics.required_params\n\n def load_fp16_scaler(self):\n if self.training_config.fp16:\n assert (\n torch.__version__ >= \"1.6\"\n ), \"Using fp16 requires torch version >- 1.6\"\n assert self.device != torch.device(\"cpu\"), \"fp16 cannot be used on cpu\"\n\n set_torch_grad_scaler = True\n if self.training_config.fp16 and self.distributed:\n try:\n from fairscale.optim.oss import OSS\n from fairscale.optim.grad_scaler import ShardedGradScaler\n\n if isinstance(self.optimizer, OSS):\n self.scaler = ShardedGradScaler()\n set_torch_grad_scaler = False\n logger.info(\"Using FairScale ShardedGradScaler\")\n except ImportError:\n logger.info(\"Using Pytorch AMP GradScaler\")\n\n if set_torch_grad_scaler:\n self.scaler = torch.cuda.amp.GradScaler(enabled=self.training_config.fp16)\n\n def train(self):\n logger.info(\"===== Model =====\")\n logger.info(self.model)\n print_model_parameters(self.model)\n\n if \"train\" not in self.run_type:\n self.inference()\n return\n\n self.on_train_start()\n self.training_loop()\n self.on_train_end()\n\n self.inference()\n\n def inference(self):\n dataset_type = []\n if \"val\" in self.run_type:\n dataset_type.append(\"val\")\n if any(rt in self.run_type for rt in [\"inference\", \"test\", \"predict\"]):\n dataset_type.append(\"test\")\n\n for dataset in dataset_type:\n if self.config.evaluation.predict:\n self.on_prediction_start()\n self.prediction_loop(dataset)\n self.on_prediction_end()\n else:\n self.on_test_start()\n logger.info(f\"Starting inference on {dataset} set\")\n report, meter = self.evaluation_loop(dataset, use_tqdm=True)\n self.on_test_end(report=report, meter=meter)\n" ]
[ [ "torch.device", "torch.cuda.amp.GradScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
michaelfedell/instacart
[ "dc7a49d1247e0a894cc1b1efa5fa876df6bd5683" ]
[ "src/db.py" ]
[ "import argparse\nimport logging\nimport os\nimport sys\n\nimport pandas as pd\nimport sqlalchemy as sql\nfrom sqlalchemy import Column, Integer, Float, String\nfrom sqlalchemy.ext.declarative import declarative_base\nsys.path.append(os.path.dirname(sys.path[0])) # so that config can be imported from project root\nimport config\nimport yaml\n\nBase = declarative_base()\n\n# set up looging config\nlogging.basicConfig(format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nwith open(os.path.join('config', 'features_config.yml'), 'r') as f:\n col_types = yaml.load(f).get('col_types')\n\n\nclass OrderType(Base):\n \"\"\"Create a data model for order types derived from cluster centroids.\n\n Each of these rows will describe one of the order types derived from\n clustering during the feature generation process. An order type is\n described by its centroid for the most part. Temporal features and order_size\n are defined by the mode of the cluster since most common hour of day is more\n interesting than the average of all times (same logic for other mode values).\n \"\"\"\n __tablename__ = 'ordertypes'\n # We use the column_types (mean or mode) to determine if column should be stored as int or float\n col_types = {col: (Integer if t == 'mode' else Float)\n for col, t in col_types.items()}\n\n index = Column(Integer, primary_key=True)\n label = Column(Integer, unique=False, nullable=False)\n # Described by means\n reordered = Column(col_types.get('reordered', Float), unique=False, nullable=False)\n organic = Column(col_types.get('organic', Float), unique=False, nullable=False)\n popular = Column(col_types.get('popular', Float), unique=False, nullable=False)\n prepared = Column(col_types.get('prepared', Float), unique=False, nullable=False)\n dairy = Column(col_types.get('dairy', Float), unique=False, nullable=False)\n gluten = Column(col_types.get('gluten', Float), unique=False, nullable=False)\n snack = Column(col_types.get('snack', Float), unique=False, nullable=False)\n meat = Column(col_types.get('meat', Float), unique=False, nullable=False)\n fish = Column(col_types.get('fish', Float), unique=False, nullable=False)\n beverage = Column(col_types.get('beverage', Float), unique=False, nullable=False)\n veg = Column(col_types.get('veg', Float), unique=False, nullable=False)\n\n # Described by modes\n order_dow = Column(col_types.get('order_dow', Float), unique=False, nullable=False)\n order_hour_of_day = Column(col_types.get('order_hour_of_day', Float), unique=False, nullable=False)\n days_since_prior_order = Column(col_types.get('days_since_prior_order', Float), unique=False, nullable=False)\n order_size = Column(col_types.get('order_size', Float), unique=False, nullable=False)\n\n # Descriptions will be populated by hand upon cluster examination\n desc = Column(String(240), nullable=True)\n\n def __repr__(self):\n return '<OrderType %s>' % self.label\n\n\ndef run_ingest(engine_string, order_types_path):\n \"\"\"\n Create db if needed and populate with data\n\n Args:\n engine_string (str): Connection string to use\n order_types_path (str): Path to order_types csv describing centroids\n\n Returns:\n\n \"\"\"\n order_types = pd.read_csv(order_types_path)\n\n logger.info('Connecting to: %s', engine_string)\n engine = sql.create_engine(engine_string)\n\n logger.info('Writing %d order types to database', len(order_types))\n order_types.index = order_types.index.astype(int)\n order_types.to_sql('ordertypes', engine, 
if_exists='append')\n\n logger.info('Done!')\n\n\ndef run_build(args):\n \"\"\"Create the database with ordertypes table\"\"\"\n if args.mode == 'local':\n engine_string = config.SQLITE_DB_STRING\n elif args.mode == 'rds':\n engine_string = config.RDS_DB_STRING\n # Default to local if any required env vars are missing\n if (config.user is None or config.password is None or\n config.host is None or config.port is None):\n logger.error('MYSQL environment vars not specified. Be sure to '\n '`export MYSQL_XXX=YYY` for XXX {USER, PASSWORD, HOST, PORT}')\n logger.info('Defaulting to local sqlite file')\n engine_string = config.SQLITE_DB_STRING\n else:\n logger.warning('%s is not a valid mode, defaulting to local', args.mode)\n engine_string = config.SQLITE_DB_STRING\n\n logger.info('Connecting to: %s', engine_string)\n engine = sql.create_engine(engine_string)\n Base.metadata.create_all(engine)\n logger.info('Tables Created for %s', list(Base.metadata.tables.keys()))\n\n if args.populate:\n logger.debug('Running Ingestion Process')\n run_ingest(engine_string, args.ordertypes)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=\"Create a database with the appropriate tables\")\n parser.add_argument('--mode', default='local', choices=['local', 'rds'],\n help='Can be either \"local\" or \"rds\" (will create sqlite or mysql)')\n parser.add_argument('--populate', action='store_true',\n help='Will fill database with features if included')\n parser.add_argument('--ordertypes', default='data/features/order_types.csv',\n help='Path to order_types.csv file')\n\n args = parser.parse_args()\n\n run_build(args)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
darpa-sail-on/sail-on-client
[ "1fd7c0ec359469040fd7af0c8e56fe53277d4a27", "1fd7c0ec359469040fd7af0c8e56fe53277d4a27", "1fd7c0ec359469040fd7af0c8e56fe53277d4a27" ]
[ "sail_on_client/feedback/document_transcription_feedback.py", "sail_on_client/agent/mock_condda_agents.py", "sail_on_client/feedback/feedback.py" ]
[ "\"\"\"Document Transcription Feedback.\"\"\"\n\nimport pandas as pd\nfrom sail_on_client.harness.local_harness import LocalHarness\nfrom sail_on_client.harness.par_harness import ParHarness\nfrom sail_on_client.feedback.feedback import Feedback\n\nfrom typing import Union, Dict\n\nSUPPORTED_FEEDBACK = [\"classification\", \"score\", \"transcription\"]\n\n\nclass DocumentTranscriptionFeedback(Feedback):\n \"\"\"Feedback for document transcription.\"\"\"\n\n def __init__(\n self,\n first_budget: int,\n income_per_batch: int,\n maximum_budget: int,\n interface: Union[LocalHarness, ParHarness],\n session_id: str,\n test_id: str,\n feedback_type: str,\n ) -> None:\n \"\"\"\n Initialize document transcription feedback object.\n\n Args:\n first_budget: Initial budget\n income_per_batch: Additional labels added after every batch\n maximum_budget: Max labels that can be requested\n interface: An instance of evaluation interface\n session_id: Session identifier\n test_id: Test identifier\n feedback_type: Type of feedback that can be requested\n\n Returns:\n None\n \"\"\"\n if feedback_type not in SUPPORTED_FEEDBACK:\n raise ValueError(f\"Unsupported feedback_type {feedback_type}\")\n super(DocumentTranscriptionFeedback, self).__init__(\n first_budget,\n income_per_batch,\n maximum_budget,\n interface,\n session_id,\n test_id,\n feedback_type,\n )\n\n def get_levenshtein_feedback(\n self, round_id: int, images_id_list: list, image_names: list\n ) -> Union[Dict, None]:\n \"\"\"\n Get levenshtein feedback for the round.\n\n Args:\n round_id: Round identifier\n image_id_list: List if indices for images\n image_names: List of image names for the round\n\n Return:\n A dictionary containing levenshtein score or None if\n feedback is requested for an older round\n \"\"\"\n if round_id > self.current_round:\n self.deposit_income()\n self.current_round = round_id\n if len(images_id_list) <= self.budget:\n self.budget = self.budget - len(images_id_list)\n image_ids = [image_names[int(idx)] for idx in images_id_list]\n feedback_file = self.interface.get_feedback_request(\n image_ids,\n self.feedback_type,\n self.test_id,\n round_id,\n self.session_id,\n )\n df = pd.read_csv(feedback_file, delimiter=\",\", header=None)\n return df\n else:\n raise ValueError(\"the function should be added\")\n else:\n return None\n\n def get_feedback(\n self, round_id: int, images_id_list: list, image_names: list\n ) -> Union[pd.DataFrame, Dict, None]:\n \"\"\"\n Get feedback for the round.\n\n Args:\n round_id: Round identifier\n image_id_list: List if indices for images\n image_names: List of image names for the round\n\n Return:\n Either a dataframe or dictionary with score if the request is valid\n for the current round.\n \"\"\"\n if self.feedback_type == \"classification\":\n feedback_fn = self.get_labeled_feedback\n elif self.feedback_type == \"score\":\n feedback_fn = self.get_score_feedback\n elif self.feedback_type == \"transcription\":\n feedback_fn = self.get_levenshtein_feedback\n else:\n raise ValueError(\"Unsupported feedback type {self.feedback_type} specified\")\n return feedback_fn(round_id, images_id_list, image_names)\n", "\"\"\"Mocks mainly used for testing CONDDA.\"\"\"\n\nfrom sail_on_client.checkpointer import Checkpointer\nfrom sail_on_client.agent.condda_agent import CONDDAAgent\nfrom typing import Dict, Any, Tuple, Callable\n\nimport logging\nimport os\nimport shutil\nimport torch\n\nlog = logging.getLogger(__name__)\n\n\nclass MockCONDDAAgent(CONDDAAgent):\n \"\"\"Mock Detector for CONDDA 
Protocol.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Construct Mock CONDDA Detector.\"\"\"\n super().__init__()\n\n self.step_dict: Dict[str, Callable] = {\n \"Initialize\": self.initialize,\n \"FeatureExtraction\": self.feature_extraction,\n \"WorldDetection\": self.world_detection,\n \"NoveltyCharacterization\": self.novelty_characterization,\n }\n\n def initialize(self, toolset: Dict) -> None:\n \"\"\"\n Algorithm Initialization.\n\n Args:\n toolset (dict): Dictionary containing parameters for different steps\n\n Return:\n None\n \"\"\"\n pass\n\n def get_config(self) -> Dict:\n \"\"\"\n Get config for the plugin.\n\n Returns:\n Parameters for the agent\n \"\"\"\n return {}\n\n def feature_extraction(\n self, toolset: Dict\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n \"\"\"\n Feature extraction step for the algorithm.\n\n Args:\n toolset (dict): Dictionary containing parameters for different steps\n\n Return:\n Tuple of dictionary\n \"\"\"\n self.dataset = toolset[\"dataset\"]\n return {}, {}\n\n def world_detection(self, toolset: Dict) -> str:\n \"\"\"\n Detect change in world ( Novelty has been introduced ).\n\n Args:\n toolset (dict): Dictionary containing parameters for different steps\n\n Return:\n path to csv file containing the results for change in world\n \"\"\"\n dataset_dir = os.path.dirname(self.dataset)\n dst_file = os.path.join(dataset_dir, \"wc.csv\")\n shutil.copyfile(self.dataset, dst_file)\n return dst_file\n\n def novelty_characterization(self, toolset: Dict) -> str:\n \"\"\"\n Characterize novelty by clustering different novel samples.\n\n Args:\n toolset (dict): Dictionary containing parameters for different steps\n\n Return:\n path to csv file containing the results for novelty characterization step\n \"\"\"\n dataset_dir = os.path.dirname(self.dataset)\n dst_file = os.path.join(dataset_dir, \"nc.csv\")\n shutil.copyfile(self.dataset, dst_file)\n return dst_file\n\n def execute(self, toolset: Dict, step_descriptor: str) -> Any:\n \"\"\"\n Execute method used by the protocol to run different steps.\n\n Args:\n toolset (dict): Dictionary containing parameters for different steps\n step_descriptor (str): Name of the step\n \"\"\"\n log.info(f\"Executing {step_descriptor}\")\n return self.step_dict[step_descriptor](toolset)\n\n\nclass MockCONDDAAgentWithAttributes(MockCONDDAAgent):\n \"\"\"Mock Detector for testing checkpointing.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"\n Detector constructor.\n\n Args:\n toolset (dict): Dictionary containing parameters for the constructor\n \"\"\"\n MockCONDDAAgent.__init__(self)\n\n def feature_extraction(\n self, toolset: Dict\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n \"\"\"\n Feature extraction step for the algorithm.\n\n Args:\n toolset (dict): Dictionary containing parameters for different steps\n\n Return:\n Tuple of dictionary\n \"\"\"\n self.dummy_dict = toolset[\"dummy_dict\"]\n self.dummy_list = toolset[\"dummy_list\"]\n self.dummy_tuple = toolset[\"dummy_tuple\"]\n self.dummy_tensor = toolset[\"dummy_tensor\"]\n self.dummy_val = toolset[\"dummy_val\"]\n return {}, {}\n\n\nclass MockCONDDAAdapterWithCheckpoint(MockCONDDAAgent, Checkpointer):\n \"\"\"Mock Adapter for testing checkpointing.\"\"\"\n\n def __init__(self, toolset: Dict) -> None:\n \"\"\"\n Detector constructor.\n\n Args:\n toolset (dict): Dictionary containing parameters for the constructor\n \"\"\"\n MockCONDDAAgent.__init__(self)\n Checkpointer.__init__(self, toolset)\n self.detector = MockCONDDAAgentWithAttributes()\n\n def get_config(self) 
-> Dict:\n \"\"\"\n Get config for the plugin.\n\n Returns:\n Parameters for the agent\n \"\"\"\n config = super().get_config()\n config.update(self.toolset)\n return config\n\n def execute(self, toolset: Dict, step_descriptor: str) -> Any:\n \"\"\"\n Execute method used by the protocol to run different steps.\n\n Args:\n toolset (dict): Dictionary containing parameters for different steps\n step_descriptor (str): Name of the step\n \"\"\"\n log.info(f\"Executing {step_descriptor}\")\n return self.detector.step_dict[step_descriptor](toolset)\n\n def __eq__(self, other: object) -> bool:\n \"\"\"\n Overridden method to compare two mock adapters.\n\n Args:\n other (MockCONDDAAdapterWithCheckpoint): Another instance of mock adapter\n\n Return:\n True if both instances have same attributes\n \"\"\"\n if not isinstance(other, MockCONDDAAdapterWithCheckpoint):\n return NotImplemented\n\n return (\n self.detector.dummy_dict == other.detector.dummy_dict\n and self.detector.dummy_list == other.detector.dummy_list\n and self.detector.dummy_tuple == other.detector.dummy_tuple\n and bool(\n torch.all(\n torch.eq(self.detector.dummy_tensor, other.detector.dummy_tensor)\n )\n )\n and self.detector.dummy_val == other.detector.dummy_val\n )\n", "\"\"\"Abstract class for feedback for sail-on.\"\"\"\n\nimport pandas as pd\nfrom sail_on_client.harness.par_harness import ParHarness\nfrom sail_on_client.harness.local_harness import LocalHarness\n\nfrom typing import Union, Dict\n\n\nclass Feedback:\n \"\"\"Base class for Feedback.\"\"\"\n\n def __init__(\n self,\n first_budget: int,\n income_per_batch: int,\n maximum_budget: int,\n interface: Union[LocalHarness, ParHarness],\n session_id: str,\n test_id: str,\n feedback_type: str,\n ) -> None:\n \"\"\"\n Initialize.\n\n Args:\n first_budget: Initial budget\n income_per_batch: Additional labels added after every batch\n maximum_budget: Max labels that can be requested\n interface: An instance of evaluation interface\n session_id: Session identifier\n test_id: Test identifier\n feedback_type: Type of feedback that can be requested\n\n Returns:\n None\n \"\"\"\n self.budget = first_budget\n self.income_per_batch = income_per_batch\n self.maximum_budget = maximum_budget\n self.current_round = -1\n self.interface = interface\n self.session_id = session_id\n self.test_id = test_id\n self.feedback_type = feedback_type\n\n def get_labeled_feedback(\n self, round_id: int, images_id_list: list, image_names: list\n ) -> Union[pd.DataFrame, None]:\n \"\"\"\n Get labeled feedback for the round.\n\n Args:\n round_id: Round identifier\n images_id_list: List of indices for images\n image_names: List of image names for the round\n\n Return:\n A dataframe with the labels or None if\n feedback is requested for an older round\n \"\"\"\n if round_id > self.current_round:\n self.deposit_income()\n self.current_round = round_id\n if len(images_id_list) <= self.budget:\n self.budget = self.budget - len(images_id_list)\n image_ids = [image_names[int(idx)] for idx in images_id_list]\n feedback_file = self.interface.get_feedback_request(\n image_ids,\n self.feedback_type,\n self.test_id,\n round_id,\n self.session_id,\n )\n\n df = pd.read_csv(\n feedback_file, delimiter=\",\", header=None, names=[\"id\", \"labels\"]\n )\n else:\n raise ValueError(\"Requested more labels than the available budget\")\n else:\n df = None\n return df\n\n def get_score_feedback(\n self, round_id: int, images_id_list: list, image_names: list\n ) -> Union[pd.DataFrame, None]:\n \"\"\"\n Get accuracy value for the round. 
Note: this is not budgeted.\n\n Args:\n round_id: Round identifier\n images_id_list: List of indices for images\n image_names: List of image names for the round\n\n Return:\n A dataframe with the accuracy value or None if\n feedback is requested for an older round\n \"\"\"\n if round_id > self.current_round:\n self.deposit_income()\n self.current_round = round_id\n image_ids = [image_names[int(idx)] for idx in images_id_list]\n feedback_file = self.interface.get_feedback_request(\n image_ids, self.feedback_type, self.test_id, round_id, self.session_id,\n )\n df = pd.read_csv(feedback_file, delimiter=\",\", header=None)\n return df\n else:\n return None\n\n def get_feedback(\n self, round_id: int, images_id_list: list, image_names: list\n ) -> Union[pd.DataFrame, Dict, None]:\n \"\"\"\n Get feedback for the round.\n\n Args:\n round_id: Round identifier\n images_id_list: List of indices for images\n image_names: List of image names for the round\n\n Return:\n Either a dataframe or dictionary with score if the request is valid\n for the current round.\n \"\"\"\n if self.feedback_type == \"classification\":\n feedback_fn = self.get_labeled_feedback\n elif self.feedback_type == \"score\":\n feedback_fn = self.get_score_feedback\n else:\n raise ValueError(f\"Unsupported feedback type {self.feedback_type} specified\")\n return feedback_fn(round_id, images_id_list, image_names)\n\n def deposit_income(self) -> None:\n \"\"\"Get income for a round.\"\"\"\n self.budget = min(self.maximum_budget, (self.budget + self.income_per_batch))\n\n def get_budget(self) -> int:\n \"\"\"Get current budget.\"\"\"\n return self.budget\n" ]
[ [ "pandas.read_csv" ], [ "torch.eq" ], [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
kvlsky/interactive-bitcoin-price-prediction
[ "39413fc595473f24594cd7cff85b84baed1a90a5" ]
[ "api/server.py" ]
[ "from flask import Flask, render_template, request, jsonify\nfrom flask_bootstrap import Bootstrap\nimport matplotlib\n\nfrom datetime import datetime\n\nfrom api.plots import Plots\nfrom api.data_loader import load_currency\nfrom models.predictor import Predictor\n\nmatplotlib.use('Agg')\n\napp = Flask(__name__)\nBootstrap(app)\n\nCURRS = ['BTC-USD']\n\nplots = Plots()\npredictor = Predictor()\n\n\[email protected]('/')\ndef index():\n \"\"\"Index endpoint\n\n @author: Andrii Koval\n \"\"\"\n\n start = '2015-08-07'\n end = datetime.today().strftime('%Y-%m-%d')\n\n for curr in CURRS:\n load_currency(currency=curr,\n start_date=start,\n end_date=end,\n path=f'data/{curr}.csv')\n\n return render_template('index.html')\n\n\[email protected]('/prophet', methods=['POST', 'GET'])\ndef prophet():\n \"\"\"Prophet output endpoint.\n Sends data to bokeh js handler.\n\n @author: Andrii Koval\n \"\"\"\n if request.method == 'POST':\n return jsonify(message='Post message')\n elif request.method == 'GET':\n data = plots.prophet_df\n\n data['ds'] = data['ds'].astype(str)\n data = {'ds': data['ds'].tolist(),\n 'yhat': data['yhat'].tolist(),\n 'yhat_lower': data['yhat_lower'].tolist(),\n 'yhat_upper': data['yhat_upper'].tolist(),\n 'y_actual': data['y_actual'].tolist()\n }\n\n return jsonify(isError=False,\n message=\"Success\",\n statusCode=200,\n data=data), 200\n\n\[email protected]('/arima', methods=['POST', 'GET'])\ndef arima():\n \"\"\"Arima output endpoint.\n Sends data to bokeh js handler.\n\n @author: Yulia Khlyaka\n \"\"\"\n if request.method == 'POST':\n return jsonify(message='Post message')\n elif request.method == 'GET':\n data = plots.arima_df\n\n data['ds'] = data['ds'].astype(str)\n data = {'ds': data['ds'].tolist(),\n 'yhat': data['yhat'].tolist(),\n 'y_actual': data['y_actual'].tolist(),\n 'yhat_lower': data['yhat_lower'].tolist(),\n 'yhat_upper': data['yhat_upper'].tolist()\n }\n\n return jsonify(isError=False,\n message=\"Success\",\n statusCode=200,\n data=data), 200\n\n\[email protected]('/lstm', methods=['POST', 'GET'])\ndef lstm():\n \"\"\"LSTM output endpoint.\n Sends data to bokeh js handler.\n\n @author: Pavlo Mospan\n \"\"\"\n if request.method == 'POST':\n return jsonify(message='Post message')\n elif request.method == 'GET':\n data = plots.lstm_df\n data['ds'] = data['ds'].astype(str)\n data = {'ds': data['ds'].tolist(),\n 'yhat': data['yhat'].tolist(),\n 'y_actual': data['y_actual'].tolist(),\n 'yhat_lower': data['yhat_lower'].tolist(),\n 'yhat_upper': data['yhat_upper'].tolist()\n }\n\n return jsonify(isError=False,\n message=\"Success\",\n statusCode=200,\n data=data), 200\n\n\[email protected]('/predict_model', methods=['GET', 'POST'])\ndef predict_model():\n \"\"\"Predict endpoint.\n Sets model name to predict.\n\n @author: Andrii Koval\n \"\"\"\n data = request.json\n\n if data:\n predictor.pred_dict[\"model\"] = data[\"model\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'\n\n\[email protected]('/predict_date', methods=['GET', 'POST'])\ndef predict_start():\n \"\"\"Predict date endpoint.\n Sets start date of training data.\n\n @author: Andrii Koval\n \"\"\"\n data = request.json\n\n if data:\n predictor.pred_dict[\"start_date\"] = data[\"start_date\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'\n\n\[email protected]('/predict_date_end', methods=['GET', 'POST'])\ndef predict_end():\n \"\"\"Predict date end endpoint.\n Sets end date for prediction.\n\n @author: Andrii Koval\n \"\"\"\n data = 
request.json\n\n if data:\n predictor.pred_dict[\"end_date\"] = data[\"end_date\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'\n\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef predict():\n \"\"\"Predict endpoint.\n Chooses a model for prediction and predicts\n bitcoin price for the given time period.\n\n @author: Andrii Koval, Yulia Khlyaka, Pavlo Mospan\n \"\"\"\n data = request.json\n\n if data:\n predict = bool(data[\"predict\"])\n\n if predict:\n if predictor.pred_dict[\"model\"] == 0:\n # ARIMA\n arima_forecast = predictor.get_prediction_arima()\n plots.arima_df = arima_forecast\n elif predictor.pred_dict[\"model\"] == 1:\n # Prophet\n prophet_forecast = predictor.get_prediction_prophet()\n plots.prophet_df = prophet_forecast\n elif predictor.pred_dict[\"model\"] == 2:\n # LSTM\n lstm_forecast = predictor.get_prediction_bidirectlstm()\n plots.lstm_df = lstm_forecast\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'\n\n\n@app.route('/dashboard/')\ndef show_dashboard():\n \"\"\"Dashboard endpoint.\n Draws bokeh plots.\n\n @author: Andrii Koval\n \"\"\"\n script, div = plots.make_plot()\n script_tab, div_tab = plots.make_tabs()\n script_trend, div_trend = plots.make_trend()\n\n return render_template('layout.html',\n script=script,\n div=div,\n script_trend=script_trend,\n div_trend=div_trend,\n script_tab=script_tab,\n div_tab=div_tab)\n" ]
[ [ "matplotlib.use" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
busyboxs/faster_rcnn_voc
[ "58a158b6568a9b4545f4a8e97d731ac7322da84f" ]
[ "lib/rpn/generate.py" ]
[ "# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nfrom fast_rcnn.config import cfg\nfrom utils.blob import im_list_to_blob\nfrom utils.timer import Timer\nimport numpy as np\nimport cv2\n\n\ndef _vis_proposals(im, dets, thresh=0.5):\n \"\"\"Draw detected bounding boxes.\"\"\"\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n\n class_name = 'obj'\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n ax.set_title(('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name,\n thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n\n\ndef _get_image_blob(im):\n \"\"\"Converts an image into a network input.\n\n Arguments:\n im (ndarray): a color image in BGR order\n\n Returns:\n blob (ndarray): a data blob holding an image pyramid\n im_scale_factors (list): list of image scales (relative to im) used\n in the image pyramid\n \"\"\"\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n\n im_shape = im_orig.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n processed_ims = []\n\n assert len(cfg.TEST.SCALES) == 1\n target_size = cfg.TEST.SCALES[0]\n\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:\n im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)\n im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_info = np.hstack((im.shape[:2], im_scale))[np.newaxis, :]\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n\n return blob, im_info\n\n\ndef im_proposals(net, im):\n \"\"\"Generate RPN proposals on a single image.\"\"\"\n blobs = {}\n blobs['data'], blobs['im_info'] = _get_image_blob(im)\n net.blobs['data'].reshape(*(blobs['data'].shape))\n net.blobs['im_info'].reshape(*(blobs['im_info'].shape))\n blobs_out = net.forward(\n data=blobs['data'].astype(np.float32, copy=False),\n im_info=blobs['im_info'].astype(np.float32, copy=False))\n\n scale = blobs['im_info'][0, 2]\n boxes = blobs_out['rois'][:, 1:].copy() / scale\n scores = blobs_out['scores'].copy()\n return boxes, scores\n\n\ndef imdb_proposals(net, imdb):\n \"\"\"Generate RPN proposals on all images in an imdb.\"\"\"\n\n _t = Timer()\n imdb_boxes = [[] for _ in range(imdb.num_images)]\n for i in range(imdb.num_images):\n im = cv2.imread(imdb.image_path_at(i))\n _t.tic()\n imdb_boxes[i], scores = im_proposals(net, im)\n _t.toc()\n print('im_proposals: {:d}/{:d} {:.3f}s' \\\n .format(i + 1, imdb.num_images, _t.average_time))\n if 0:\n dets = np.hstack((imdb_boxes[i], scores))\n # from IPython import embed; embed()\n _vis_proposals(im, dets[:3, :], thresh=0.9)\n plt.show()\n\n return imdb_boxes\n" ]
[ [ "numpy.hstack", "numpy.min", "numpy.round", "numpy.max", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VictorAlulema/Analysis-of-Mission-Planner-log-files
[ "1b55768db6545c5c989c180975deb3b5622edaab" ]
[ "FileProcessing.py" ]
[ "from LogFile import Parameters\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import rcParams\r\nfrom matplotlib import cm\r\nimport numpy as np\r\n\r\n\r\nclass LogFileProcessing:\r\n \"\"\"\r\n Provide the filename of the log to be analyzed\r\n example: '000039.log'\r\n To instantiate the class, type something like:\r\n Analysis = LogFileProcessing('000039.log')\r\n \"\"\"\r\n\r\n\r\n def __init__(self, filename):\r\n self.file = filename\r\n self.Params = Parameters.keys()\r\n self.Data = LogFileProcessing.GetData(self)\r\n\r\n\r\n def GetData(self):\r\n \"\"\"\r\n This function does the hard work: Read the log file and\r\n store the data in a Python dictionary, very convenient to access the\r\n data. \r\n \"\"\"\r\n LogFile = open(self.file, 'r')\r\n i = 0\r\n for line in LogFile:\r\n content = line.rstrip().split(',')\r\n if content[0] in self.Params:\r\n for key in Parameters[content[0]].keys():\r\n index = Parameters[content[0]][key][0]\r\n if content[index] == 'NaN':\r\n pass\r\n else:\r\n Parameters[content[0]][key].append(float(content[index]))\r\n else:\r\n pass\r\n i = i + 1\r\n LogFile.close()\r\n print('Read {} lines '.format(i))\r\n return Parameters\r\n\r\n\r\n def Subparams(self, Param):\r\n \"\"\"\r\n This function tells you the subparams of a param:\r\n Example:\r\n Subparams('BAT')\r\n Output will be:\r\n ['Volt','VoltR','Curr','CurrTot','Temp','Res']\r\n Now, you know what subparams you could plot against the variable Time\r\n \"\"\"\r\n return self.Data[Param].keys()\r\n\r\n\r\n def Plot(self,Param=None,Subparam=None,\r\n logx=False,logy=False,\r\n matrix=False,FigureSize=None):\r\n \"\"\"\r\n This functions plots the results\r\n Just type something like:\r\n Plot('GPS','Alt')\r\n That's it!\r\n \"\"\"\r\n # Data\r\n y = self.Data[Param][Subparam][1:]\r\n x = np.linspace(0, 30, len(y))\r\n if FigureSize:\r\n pass\r\n else:\r\n FigureSize = [5,2.5]\r\n fig, ax = LogFileProcessing.Plot_config(self,FigureSize)\r\n if matrix:\r\n # TODO\r\n pass\r\n else:\r\n if logx:\r\n ax.semilogx(x, y, 'b-', linewidth=0.75)\r\n if logy:\r\n ax.semilogy(x, y, 'b-', linewidth=0.75)\r\n else:\r\n ax.plot(x, y, 'b-', linewidth=0.75)\r\n ax.set_xlabel('Time')\r\n ax.set_ylabel(Param + ' ' + Subparam)\r\n fig.tight_layout()\r\n plt.show()\r\n \r\n \r\n def Plot_config(self,FigureSize):\r\n plt.rc('font', family='serif')\r\n plt.rc('font', size=9)\r\n plt.rc('axes', labelsize=9)\r\n fig = plt.figure(figsize=(FigureSize[0],FigureSize[1]))\r\n ax = fig.add_subplot(111)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['top'].set_visible(False)\r\n return fig, ax\r\n\r\n \r\n def PlotDemo(self,X,Y,FigureSize=[5,2.5]):\r\n fig, ax = fig, ax = LogFileProcessing.Plot_config(self,FigureSize)\r\n ax.plot(X,Y, 'b-', linewidth=0.75)\r\n ax.set_xlabel('Time [min]')\r\n ax.set_ylabel('Power [W]')\r\n fig.tight_layout()\r\n plt.show()\r\n\r\n def PieChart(self,labels,sizes):\r\n fig, ax = plt.subplots()\r\n ax.pie(sizes, labels=labels, autopct='%1.1f%%',\r\n shadow=True, startangle=90)\r\n ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle\r\n plt.show()\r\n\r\n\r\n# Let's analyze the flight data\r\n\r\nif __name__ == '__main__': \r\n analysis = LogFileProcessing('00000104.log')\r\n analysis.Plot(Param='GPS',Subparam='Alt')\r\n analysis.Plot(Param='GPS',Subparam='Spd')\r\n analysis.Plot(Param='BARO',Subparam='Alt')\r\n analysis.Plot(Param='BAT',Subparam='Volt')\r\n analysis.Plot(Param='POWR',Subparam='VServo')\r\n analysis.Plot(Param='BAT',Subparam='CurrTot')\r\n 
\r\n # Get data for further analysis\r\n Voltage = analysis.Data['BAT']['Volt'][1:] # This is the variable of interest\r\n Current = analysis.Data['BAT']['Curr'][1:]\r\n Power = np.array([Voltage[i] * Current[i] for i in range(len(Voltage))])\r\n # PowerCruise = Power[np.where(Power > 50)]\r\n time = np.linspace(0, 30, len(Power)) # This is the variable Time\r\n analysis.PlotDemo(time,Power)\r\n \r\n # 3D Plot of flight data\r\n latitude = analysis.Data['GPS']['Lat'][1:]\r\n longitude = analysis.Data['GPS']['Lng'][1:]\r\n altitude = analysis.Data['GPS']['Alt'][1:]\r\n fig = plt.figure(figsize=(8,6))\r\n ax = plt.subplot(111, projection='3d')\r\n ax.xaxis.pane.fill = False\r\n ax.xaxis.pane.set_edgecolor('white')\r\n ax.yaxis.pane.fill = False\r\n ax.yaxis.pane.set_edgecolor('white')\r\n ax.zaxis.pane.fill = False\r\n ax.zaxis.pane.set_edgecolor('white')\r\n ax.grid(False)\r\n ax.plot(latitude, longitude, altitude)\r\n New_Power = []\r\n m = 0\r\n for i in range(0,int(len(Power)/2)-1):\r\n New_Power.append(Power[m])\r\n m = m + 2\r\n size = [100 * power / max(New_Power) for power in New_Power]\r\n s = ax.scatter(latitude, longitude, altitude ,\r\n s = size, marker = 'o' , c = New_Power,\r\n cmap = cm.jet, linewidths = 0.025,\r\n edgecolors = 'k') \r\n c_bar = fig.colorbar(s, ax = ax)\r\n plt.show()\r\n\r\n # Just to check that the new Power has the same behaviour as the original data after the resampling\r\n t1 = np.linspace(0,30,len(Power))\r\n plt.plot(t1, Power)\r\n t2 = np.linspace(0,30,len(New_Power))\r\n plt.plot(t2,New_Power)\r\n plt.show()\r\n\r\n # Power breakdown with respect to flight time\r\n CurrTot = analysis.Data['BAT']['CurrTot'][1:]\r\n # mAh consumed during take-off\r\n mAh_TakeOff = CurrTot[(np.where(Power == max(Power)))[0][0]]\r\n t_TakeOff = time[np.where(Power == max(Power))][0]\r\n\r\n # mAh consumed during cruise (i.e. during the mission)\r\n # t = 27 ===> time when the UAV starts descending\r\n mAh_cruise = CurrTot[(np.where(time >= 27))[0][0]] - mAh_TakeOff\r\n t_cruise = time[(np.where(time >= 27))[0][0]] - t_TakeOff\r\n\r\n # mAh consumed during landing\r\n mAh_landing = CurrTot[-1] - mAh_cruise - mAh_TakeOff\r\n t_landing = time[-1] - t_cruise - t_TakeOff\r\n\r\n Cumulative_Current = max(CurrTot)\r\n # Let's see the results\r\n\r\n # First, let's see the fractions of mAhs consumed during each flight phase with respect to\r\n # the total of mAhs consumed\r\n \r\n f_TakeOff = mAh_TakeOff / Cumulative_Current\r\n f_Cruise = mAh_cruise / Cumulative_Current\r\n f_Landing = mAh_landing / Cumulative_Current\r\n \r\n labels = ['Takeoff', 'Cruise', 'Landing']\r\n sizes = [f_TakeOff, f_Cruise, f_Landing]\r\n analysis.PieChart(labels, sizes)\r\n\r\n # Now, let's see the fractions with respect to the battery capacity\r\n Bat_Capacity = 8000\r\n f_TakeOff = mAh_TakeOff / Bat_Capacity\r\n f_Cruise = mAh_cruise / Bat_Capacity\r\n f_Landing = mAh_landing / Bat_Capacity\r\n f_Remaining = (Bat_Capacity - Cumulative_Current) / Bat_Capacity\r\n\r\n labels = ['Takeoff', 'Cruise', 'Landing', 'Remaining']\r\n sizes = [f_TakeOff, f_Cruise, f_Landing, f_Remaining]\r\n analysis.PieChart(labels, sizes)\r\n \r\n \r\n" ]
[ [ "matplotlib.pyplot.rc", "matplotlib.pyplot.subplots", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.where", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
coolteemf/coolteemf-deformetrica
[ "f965d6ecc0d04f243e487468a9dafe9fe864eed2", "dbcb69962dd02f14dde5d63a9abc1de69112f273", "dbcb69962dd02f14dde5d63a9abc1de69112f273", "dbcb69962dd02f14dde5d63a9abc1de69112f273" ]
[ "deformetrica/support/kernels/keops_kernel.py", "tests/functional_tests/data/atlas/brain_structures/run.py", "deformetrica/core/model_tools/manifolds/logistic_exponential.py", "deformetrica/core/models/abstract_statistical_model.py" ]
[ "import torch\n\nfrom ...support.kernels import AbstractKernel\nfrom ...core import default, GpuMode\nfrom pykeops.torch import Genred\n\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass KeopsKernel(AbstractKernel):\n def __init__(self, gpu_mode=default.gpu_mode, kernel_width=None, cuda_type=None, freeze_DOFs=None, **kwargs):\n super().__init__('keops', gpu_mode, kernel_width)\n\n if cuda_type is None:\n cuda_type = default.dtype\n\n self.cuda_type = cuda_type\n\n self.gamma = 1. / default.tensor_scalar_type([self.kernel_width ** 2])\n\n self.gaussian_convolve = []\n self.point_cloud_convolve = []\n self.varifold_convolve = []\n self.gaussian_convolve_gradient_x = []\n self.freeze_DOFs = freeze_DOFs\n\n for dimension in [2, 3]:\n self.gaussian_convolve.append(Genred(\n \"Exp(-G*SqDist(X,Y)) * P\",\n [\"G = Pm(1)\",\n \"X = Vi(\" + str(dimension) + \")\",\n \"Y = Vj(\" + str(dimension) + \")\",\n \"P = Vj(\" + str(dimension) + \")\"],\n reduction_op='Sum', axis=1, cuda_type=cuda_type))\n\n self.point_cloud_convolve.append(Genred(\n \"Exp(-G*SqDist(X,Y)) * P\",\n [\"G = Pm(1)\",\n \"X = Vi(\" + str(dimension) + \")\",\n \"Y = Vj(\" + str(dimension) + \")\",\n \"P = Vj(1)\"],\n reduction_op='Sum', axis=1, cuda_type=cuda_type))\n\n self.varifold_convolve.append(Genred(\n \"Exp(-(WeightedSqDist(G, X, Y))) * Square((Nx|Ny)) * P\",\n [\"G = Pm(1)\",\n \"X = Vi(\" + str(dimension) + \")\",\n \"Y = Vj(\" + str(dimension) + \")\",\n \"Nx = Vi(\" + str(dimension) + \")\",\n \"Ny = Vj(\" + str(dimension) + \")\",\n \"P = Vj(1)\"],\n reduction_op='Sum', axis=1, cuda_type=cuda_type))\n\n self.gaussian_convolve_gradient_x.append(Genred(\n \"(Px|Py) * Exp(-G*SqDist(X,Y)) * (X-Y)\",\n [\"G = Pm(1)\",\n \"X = Vi(\" + str(dimension) + \")\",\n \"Y = Vj(\" + str(dimension) + \")\",\n \"Px = Vi(\" + str(dimension) + \")\",\n \"Py = Vj(\" + str(dimension) + \")\"],\n reduction_op='Sum', axis=1, cuda_type=cuda_type))\n\n def __eq__(self, other):\n return AbstractKernel.__eq__(self, other) and self.cuda_type == other.cuda_type\n\n def convolve(self, x, y, p, mode='gaussian'):\n if mode == 'gaussian':\n assert isinstance(x, torch.Tensor), 'x variable must be a torch Tensor'\n assert isinstance(y, torch.Tensor), 'y variable must be a torch Tensor'\n assert isinstance(p, torch.Tensor), 'p variable must be a torch Tensor'\n\n # move tensors with respect to gpu_mode\n x, y, p = (self._move_to_device(t, gpu_mode=self.gpu_mode) for t in [x, y, p])\n assert x.device == y.device == p.device, 'tensors must be on the same device. 
x.device=' + str(x.device) \\\n + ', y.device=' + str(y.device) + ', p.device=' + str(p.device)\n if self.freeze_DOFs is not None:\n mask = self._move_to_device(torch.ones(p.shape, dtype=p.dtype), gpu_mode=self.gpu_mode)\n mask[:,[self.freeze_DOFs]] = 0\n p = p * mask\n\n d = x.size(1)\n gamma = self.gamma.to(x.device, dtype=x.dtype)\n\n device_id = x.device.index if x.device.index is not None else -1\n res = self.gaussian_convolve[d - 2](gamma, x.contiguous(), y.contiguous(), p.contiguous(), device_id=device_id)\n return res.cpu() if self.gpu_mode is GpuMode.KERNEL else res\n\n elif mode == 'pointcloud':\n assert isinstance(x, torch.Tensor), 'x variable must be a torch Tensor'\n assert isinstance(y, torch.Tensor), 'y variable must be a torch Tensor'\n assert isinstance(p, torch.Tensor), 'p variable must be a torch Tensor'\n\n # move tensors with respect to gpu_mode\n x, y, p = (self._move_to_device(t, gpu_mode=self.gpu_mode) for t in [x, y, p])\n assert x.device == y.device == p.device, 'tensors must be on the same device. x.device=' + str(x.device) \\\n + ', y.device=' + str(y.device) + ', p.device=' + str(p.device)\n\n d = x.size(1)\n gamma = self.gamma.to(x.device, dtype=x.dtype)\n\n device_id = x.device.index if x.device.index is not None else -1\n res = self.point_cloud_convolve[d - 2](gamma, x.contiguous(), y.contiguous(), p.contiguous(), device_id=device_id)\n return res.cpu() if self.gpu_mode is GpuMode.KERNEL else res\n\n elif mode == 'varifold':\n assert isinstance(x, tuple), 'x must be a tuple'\n assert len(x) == 2, 'tuple length must be 2'\n assert isinstance(y, tuple), 'y must be a tuple'\n assert len(y) == 2, 'tuple length must be 2'\n\n # tuples are immutable, mutability is needed to move to device\n x = list(x)\n y = list(y)\n\n # move tensors with respect to gpu_mode\n x[0], x[1], y[0], y[1], p = (self._move_to_device(t, gpu_mode=self.gpu_mode) for t in [x[0], x[1], y[0], y[1], p])\n assert x[0].device == y[0].device == p.device, 'x, y and p must be on the same device'\n assert x[1].device == y[1].device == p.device, 'x, y and p must be on the same device'\n\n x, nx = x\n y, ny = y\n d = x.size(1)\n gamma = self.gamma.to(x.device, dtype=x.dtype)\n\n device_id = x.device.index if x.device.index is not None else -1\n res = self.varifold_convolve[d - 2](gamma, x.contiguous(), y.contiguous(), nx.contiguous(), ny.contiguous(), p.contiguous(), device_id=device_id)\n return res.cpu() if self.gpu_mode is GpuMode.KERNEL else res\n\n else:\n raise RuntimeError('Unknown kernel mode.')\n\n def convolve_gradient(self, px, x, y=None, py=None, mode='gaussian'):\n if y is None:\n y = x\n if py is None:\n py = px\n\n assert isinstance(px, torch.Tensor), 'px variable must be a torch Tensor'\n assert isinstance(x, torch.Tensor), 'x variable must be a torch Tensor'\n assert isinstance(y, torch.Tensor), 'y variable must be a torch Tensor'\n assert isinstance(py, torch.Tensor), 'py variable must be a torch Tensor'\n\n # move tensors with respect to gpu_mode\n x, px, y, py = (self._move_to_device(t, gpu_mode=self.gpu_mode) for t in [x, px, y, py])\n assert px.device == x.device == y.device == py.device, 'tensors must be on the same device'\n\n d = x.size(1)\n gamma = self.gamma.to(x.device, dtype=x.dtype)\n\n device_id = x.device.index if x.device.index is not None else -1\n res = (-2 * gamma * self.gaussian_convolve_gradient_x[d - 2](gamma, x, y, px, py, device_id=device_id))\n return res.cpu() if self.gpu_mode is GpuMode.KERNEL else res\n", "import os\nimport torch\nimport unittest\n\nfrom 
tests.functional_tests.functional_test import FunctionalTest\n\n\nclass AtlasBrainStructures(FunctionalTest):\n \"\"\"\n Methods with names starting with \"test\" will be run.\n \"\"\"\n\n def test_configuration_1(self):\n self.run_configuration(os.path.abspath(__file__), 'output__1', 'output_saved__1',\n 'model__1.xml', 'data_set.xml', 'optimization_parameters__1.xml')\n\n @unittest.skipIf(not torch.cuda.is_available(), 'cuda is not available')\n def test_configuration_2(self):\n self.run_configuration(os.path.abspath(__file__), 'output__2', 'output_saved__2',\n 'model__2.xml', 'data_set.xml', 'optimization_parameters__2.xml')\n\n @unittest.skipIf(not torch.cuda.is_available(), 'cuda is not available')\n def test_configuration_3(self):\n self.run_configuration(os.path.abspath(__file__), 'output__3', 'output_saved__3',\n 'model__3.xml', 'data_set.xml', 'optimization_parameters__3.xml')\n\n def test_configuration_4(self):\n self.run_configuration(os.path.abspath(__file__), 'output__4', 'output_saved__4',\n 'model__4.xml', 'data_set.xml', 'optimization_parameters__4.xml')\n\n def test_configuration_5(self):\n self.run_configuration(os.path.abspath(__file__), 'output__5', 'output_saved__5',\n 'model__5.xml', 'data_set.xml', 'optimization_parameters__5.xml')\n\n def test_configuration_6(self):\n self.run_configuration(os.path.abspath(__file__), 'output__6', 'output_saved__6',\n 'model__6.xml', 'data_set.xml', 'optimization_parameters__6.xml')\n", "import torch\n\nfrom ....core.model_tools.manifolds.exponential_interface import ExponentialInterface\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n\"\"\"\nExponential on \\R for the 1/(q*(1-q))**2 metric, i.e. logistic curves.\n\"\"\"\n\nclass LogisticExponential(ExponentialInterface):\n\n def __init__(self):\n # Mother class constructor\n ExponentialInterface.__init__(self)\n self.has_closed_form = True\n self.has_closed_form_parallel_transport = False\n\n def inverse_metric(self, q):\n return torch.diag((q*(1-q))**2)\n\n def closed_form(self, q, v, t):\n return 1./(1 + (1/q - 1) * torch.exp(-1.*v/(q * (1-q)) * t))\n\n def closed_form_velocity(self, q, v, t):\n aux = torch.exp(-1. * v * t / (q * (1 - q)))\n return v/q**2 * aux/(1 + (1/q - 1) * aux)**2\n\n", "import logging\nimport os\nimport time\nimport torch\nfrom abc import abstractmethod\n\nimport torch.multiprocessing as mp\n\nfrom ...core import default\n\nlogger = logging.getLogger(__name__)\n\n# used as a global variable when processes are initially started.\nprocess_initial_data = None\n\n\ndef _initializer(*args):\n \"\"\"\n Process initializer function that is called when mp.Pool is started.\n :param args: arguments that are to be copied to the target process. 
This can be a tuple for convenience.\n \"\"\"\n global process_initial_data\n process_id, process_initial_data = args\n\n assert 'OMP_NUM_THREADS' in os.environ\n torch.set_num_threads(int(os.environ['OMP_NUM_THREADS']))\n\n # manually set process name\n with process_id.get_lock():\n mp.current_process().name = 'PoolWorker-' + str(process_id.value)\n logger.info('pid=' + str(os.getpid()) + ' : ' + mp.current_process().name)\n\n process_id.value += 1\n\n\nclass AbstractStatisticalModel:\n \"\"\"\n AbstractStatisticalModel object class.\n A statistical model is a generative function, which tries to explain an observed stochastic process.\n \"\"\"\n\n ####################################################################################################################\n ### Constructor:\n ####################################################################################################################\n\n def __init__(self, name='undefined', number_of_processes=default.number_of_processes, gpu_mode=default.gpu_mode):\n self.name = name\n self.fixed_effects = {}\n self.priors = {}\n self.population_random_effects = {}\n self.individual_random_effects = {}\n self.has_maximization_procedure = None\n\n self.number_of_processes = number_of_processes\n self.gpu_mode = gpu_mode\n self.pool = None\n\n @abstractmethod\n def get_fixed_effects(self):\n raise NotImplementedError\n\n @abstractmethod\n def setup_multiprocess_pool(self, dataset):\n raise NotImplementedError\n\n def _setup_multiprocess_pool(self, initargs=()):\n if self.number_of_processes > 1:\n logger.info('Starting multiprocess using ' + str(self.number_of_processes) + ' processes')\n assert len(mp.active_children()) == 0, 'This should not happen. Has the cleanup() method been called ?'\n start = time.perf_counter()\n process_id = mp.Value('i', 0, lock=True) # shared between processes\n initargs = (process_id, initargs)\n\n self.pool = mp.Pool(processes=self.number_of_processes, maxtasksperchild=None,\n initializer=_initializer, initargs=initargs)\n logger.info('Multiprocess pool started using sharing strategy \"' + mp.get_sharing_strategy() + '\"' +\n ' in: ' + str(time.perf_counter()-start) + ' seconds')\n\n if torch.cuda.is_available() and self.number_of_processes > torch.cuda.device_count():\n logger.warning(\"You are trying to run more processes than there are available GPUs, \"\n \"it is advised to run `nvidia-cuda-mps-control` to leverage concurrent cuda executions. \"\n \"If run in background mode, don't forget to stop the daemon when done.\")\n\n def _cleanup_multiprocess_pool(self):\n if self.pool is not None:\n self.pool.terminate()\n\n ####################################################################################################################\n ### Common methods, not necessarily useful for every model.\n ####################################################################################################################\n\n def cleanup(self):\n self._cleanup_multiprocess_pool()\n\n def clear_memory(self):\n pass\n\n" ]
[ [ "torch.ones" ], [ "torch.cuda.is_available" ], [ "torch.exp", "torch.diag" ], [ "torch.multiprocessing.current_process", "torch.multiprocessing.Value", "torch.multiprocessing.active_children", "torch.multiprocessing.Pool", "torch.cuda.is_available", "torch.cuda.device_count", "torch.multiprocessing.get_sharing_strategy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
1044197988/TF.Keras-Commonly-used-models
[ "b37276bcee454b2c39b8fcc60e87b72ec8a6a5d4", "b37276bcee454b2c39b8fcc60e87b72ec8a6a5d4", "b37276bcee454b2c39b8fcc60e87b72ec8a6a5d4" ]
[ "常用分割损失函数和指标/C_Focal_loss.py", "常用分割模型/ICNet.py", "常用分割损失函数和指标/WCCE.py" ]
[ "# focal loss with multi label\r\ndef focal_loss(classes_num, gamma=2., alpha=.25, e=0.1):\r\n # classes_num contains sample number of each classes\r\n def focal_loss_fixed(target_tensor, prediction_tensor):\r\n '''\r\n prediction_tensor is the output tensor with shape [None, 100], where 100 is the number of classes\r\n target_tensor is the label tensor, same shape as predcition_tensor\r\n '''\r\n import tensorflow as tf\r\n from tensorflow.python.ops import array_ops\r\n from keras import backend as K\r\n\r\n #1# get focal loss with no balanced weight which presented in paper function (4)\r\n zeros = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)\r\n one_minus_p = array_ops.where(tf.greater(target_tensor,zeros), target_tensor - prediction_tensor, zeros)\r\n FT = -1 * (one_minus_p ** gamma) * tf.log(tf.clip_by_value(prediction_tensor, 1e-8, 1.0))\r\n\r\n #2# get balanced weight alpha\r\n classes_weight = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)\r\n\r\n total_num = float(sum(classes_num))\r\n classes_w_t1 = [ total_num / ff for ff in classes_num ]\r\n sum_ = sum(classes_w_t1)\r\n classes_w_t2 = [ ff/sum_ for ff in classes_w_t1 ] #scale\r\n classes_w_tensor = tf.convert_to_tensor(classes_w_t2, dtype=prediction_tensor.dtype)\r\n classes_weight += classes_w_tensor\r\n\r\n alpha = array_ops.where(tf.greater(target_tensor, zeros), classes_weight, zeros)\r\n\r\n #3# get balanced focal loss\r\n balanced_fl = alpha * FT\r\n balanced_fl = tf.reduce_mean(balanced_fl)\r\n\r\n #4# add other op to prevent overfit\r\n # reference : https://spaces.ac.cn/archives/4493\r\n nb_classes = len(classes_num)\r\n fianal_loss = (1-e) * balanced_fl + e * K.categorical_crossentropy(K.ones_like(prediction_tensor)/nb_classes, prediction_tensor)\r\n\r\n return fianal_loss\r\n return focal_loss_fixed\r\n", "from tensorflow.keras.layers import Activation\r\nfrom tensorflow.keras.layers import Lambda\r\nfrom tensorflow.keras.layers import Conv2D\r\nfrom tensorflow.keras.layers import Add\r\nfrom tensorflow.keras.layers import MaxPooling2D\r\nfrom tensorflow.keras.layers import AveragePooling2D\r\nfrom tensorflow.keras.layers import ZeroPadding2D\r\nfrom tensorflow.keras.layers import Input\r\nfrom tensorflow.keras.layers import BatchNormalization\r\nfrom tensorflow.keras.models import Model\r\nimport tensorflow.keras.backend as K\r\nimport tensorflow as tf\r\n\r\ndef build(width, height, n_classes, weights_path=None, train=False):\r\n inp = Input(shape=(height, width, 3))\r\n x = Lambda(lambda x: (x - 127.5)/255.0)(inp)\r\n\r\n # (1/2)\r\n y = Lambda(lambda x: tf.image.resize(x, size=(int(x.shape[1])//2, int(x.shape[2])//2)), name='data_sub2')(x)\r\n y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv1_1_3x3_s2')(y)\r\n y = Conv2D(32, 3, padding='same', activation='relu', name='conv1_2_3x3')(y)\r\n y = Conv2D(64, 3, padding='same', activation='relu', name='conv1_3_3x3')(y)\r\n y_ = MaxPooling2D(pool_size=3, strides=2, name='pool1_3x3_s2')(y)\r\n y = Conv2D(128, 1, name='conv2_1_1x1_proj')(y_)\r\n\r\n y_ = Conv2D(32, 1, activation='relu', name='conv2_1_1x1_reduce')(y_)\r\n y_ = ZeroPadding2D(name='padding1')(y_)\r\n y_ = Conv2D(32, 3, activation='relu', name='conv2_1_3x3')(y_)\r\n y_ = Conv2D(128, 1, name='conv2_1_1x1_increase')(y_)\r\n y = Add(name='conv2_1')([y,y_])\r\n y_ = Activation('relu', name='conv2_1/relu')(y)\r\n\r\n y = Conv2D(32, 1, activation='relu', name='conv2_2_1x1_reduce')(y_)\r\n y = ZeroPadding2D(name='padding2')(y)\r\n y = 
Conv2D(32, 3, activation='relu', name='conv2_2_3x3')(y)\r\n y = Conv2D(128, 1, name='conv2_2_1x1_increase')(y)\r\n y = Add(name='conv2_2')([y,y_])\r\n y_ = Activation('relu', name='conv2_2/relu')(y)\r\n\r\n y = Conv2D(32, 1, activation='relu', name='conv2_3_1x1_reduce')(y_)\r\n y = ZeroPadding2D(name='padding3')(y)\r\n y = Conv2D(32, 3, activation='relu', name='conv2_3_3x3')(y)\r\n y = Conv2D(128, 1, name='conv2_3_1x1_increase')(y)\r\n y = Add(name='conv2_3')([y,y_])\r\n y_ = Activation('relu', name='conv2_3/relu')(y)\r\n\r\n y = Conv2D(256, 1, strides=2, name='conv3_1_1x1_proj')(y_)\r\n y_ = Conv2D(64, 1, strides=2, activation='relu', name='conv3_1_1x1_reduce')(y_) \r\n y_ = ZeroPadding2D(name='padding4')(y_)\r\n y_ = Conv2D(64, 3, activation='relu', name='conv3_1_3x3')(y_)\r\n y_ = Conv2D(256, 1, name='conv3_1_1x1_increase')(y_)\r\n y = Add(name='conv3_1')([y,y_])\r\n z = Activation('relu', name='conv3_1/relu')(y)\r\n\r\n # (1/4)\r\n y_ = Lambda(lambda x: tf.image.resize(x, size=(int(x.shape[1])//2, int(x.shape[2])//2)), name='conv3_1_sub4')(z)\r\n y = Conv2D(64, 1, activation='relu', name='conv3_2_1x1_reduce')(y_)\r\n y = ZeroPadding2D(name='padding5')(y)\r\n y = Conv2D(64, 3, activation='relu', name='conv3_2_3x3')(y)\r\n y = Conv2D(256, 1, name='conv3_2_1x1_increase')(y)\r\n y = Add(name='conv3_2')([y,y_])\r\n y_ = Activation('relu', name='conv3_2/relu')(y)\r\n\r\n y = Conv2D(64, 1, activation='relu', name='conv3_3_1x1_reduce')(y_)\r\n y = ZeroPadding2D(name='padding6')(y)\r\n y = Conv2D(64, 3, activation='relu', name='conv3_3_3x3')(y)\r\n y = Conv2D(256, 1, name='conv3_3_1x1_increase')(y)\r\n y = Add(name='conv3_3')([y,y_])\r\n y_ = Activation('relu', name='conv3_3/relu')(y)\r\n\r\n y = Conv2D(64, 1, activation='relu', name='conv3_4_1x1_reduce')(y_)\r\n y = ZeroPadding2D(name='padding7')(y)\r\n y = Conv2D(64, 3, activation='relu', name='conv3_4_3x3')(y)\r\n y = Conv2D(256, 1, name='conv3_4_1x1_increase')(y)\r\n y = Add(name='conv3_4')([y,y_])\r\n y_ = Activation('relu', name='conv3_4/relu')(y)\r\n\r\n y = Conv2D(512, 1, name='conv4_1_1x1_proj')(y_)\r\n y_ = Conv2D(128, 1, activation='relu', name='conv4_1_1x1_reduce')(y_)\r\n y_ = ZeroPadding2D(padding=2, name='padding8')(y_)\r\n y_ = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_1_3x3')(y_)\r\n y_ = Conv2D(512, 1, name='conv4_1_1x1_increase')(y_)\r\n y = Add(name='conv4_1')([y,y_])\r\n y_ = Activation('relu', name='conv4_1/relu')(y)\r\n\r\n y = Conv2D(128, 1, activation='relu', name='conv4_2_1x1_reduce')(y_)\r\n y = ZeroPadding2D(padding=2, name='padding9')(y)\r\n y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_2_3x3')(y)\r\n y = Conv2D(512, 1, name='conv4_2_1x1_increase')(y)\r\n y = Add(name='conv4_2')([y,y_])\r\n y_ = Activation('relu', name='conv4_2/relu')(y)\r\n\r\n y = Conv2D(128, 1, activation='relu', name='conv4_3_1x1_reduce')(y_)\r\n y = ZeroPadding2D(padding=2, name='padding10')(y)\r\n y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_3_3x3')(y)\r\n y = Conv2D(512, 1, name='conv4_3_1x1_increase')(y)\r\n y = Add(name='conv4_3')([y,y_])\r\n y_ = Activation('relu', name='conv4_3/relu')(y)\r\n\r\n y = Conv2D(128, 1, activation='relu', name='conv4_4_1x1_reduce')(y_)\r\n y = ZeroPadding2D(padding=2, name='padding11')(y)\r\n y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_4_3x3')(y)\r\n y = Conv2D(512, 1, name='conv4_4_1x1_increase')(y)\r\n y = Add(name='conv4_4')([y,y_])\r\n y_ = Activation('relu', name='conv4_4/relu')(y)\r\n\r\n y = Conv2D(128, 1, 
activation='relu', name='conv4_5_1x1_reduce')(y_)\r\n y = ZeroPadding2D(padding=2, name='padding12')(y)\r\n y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_5_3x3')(y)\r\n y = Conv2D(512, 1, name='conv4_5_1x1_increase')(y)\r\n y = Add(name='conv4_5')([y,y_])\r\n y_ = Activation('relu', name='conv4_5/relu')(y)\r\n\r\n y = Conv2D(128, 1, activation='relu', name='conv4_6_1x1_reduce')(y_)\r\n y = ZeroPadding2D(padding=2, name='padding13')(y)\r\n y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_6_3x3')(y)\r\n y = Conv2D(512, 1, name='conv4_6_1x1_increase')(y)\r\n y = Add(name='conv4_6')([y,y_])\r\n y = Activation('relu', name='conv4_6/relu')(y)\r\n\r\n y_ = Conv2D(1024, 1, name='conv5_1_1x1_proj')(y)\r\n y = Conv2D(256, 1, activation='relu', name='conv5_1_1x1_reduce')(y)\r\n y = ZeroPadding2D(padding=4, name='padding14')(y)\r\n y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_1_3x3')(y)\r\n y = Conv2D(1024, 1, name='conv5_1_1x1_increase')(y)\r\n y = Add(name='conv5_1')([y,y_])\r\n y_ = Activation('relu', name='conv5_1/relu')(y)\r\n\r\n y = Conv2D(256, 1, activation='relu', name='conv5_2_1x1_reduce')(y_)\r\n y = ZeroPadding2D(padding=4, name='padding15')(y)\r\n y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_2_3x3')(y)\r\n y = Conv2D(1024, 1, name='conv5_2_1x1_increase')(y)\r\n y = Add(name='conv5_2')([y,y_])\r\n y_ = Activation('relu', name='conv5_2/relu')(y)\r\n\r\n y = Conv2D(256, 1, activation='relu', name='conv5_3_1x1_reduce')(y_)\r\n y = ZeroPadding2D(padding=4, name='padding16')(y)\r\n y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_3_3x3')(y)\r\n y = Conv2D(1024, 1, name='conv5_3_1x1_increase')(y)\r\n y = Add(name='conv5_3')([y,y_])\r\n y = Activation('relu', name='conv5_3/relu')(y)\r\n\r\n h, w = y.shape[1:3].as_list()\r\n pool1 = AveragePooling2D(pool_size=(h,w), strides=(h,w), name='conv5_3_pool1')(y)\r\n pool1 = Lambda(lambda x: tf.image.resize(x, size=(h,w)), name='conv5_3_pool1_interp')(pool1)\r\n pool2 = AveragePooling2D(pool_size=(h/2,w/2), strides=(h//2,w//2), name='conv5_3_pool2')(y)\r\n pool2 = Lambda(lambda x: tf.image.resize(x, size=(h,w)), name='conv5_3_pool2_interp')(pool2)\r\n pool3 = AveragePooling2D(pool_size=(h/3,w/3), strides=(h//3,w//3), name='conv5_3_pool3')(y)\r\n pool3 = Lambda(lambda x: tf.image.resize(x, size=(h,w)), name='conv5_3_pool3_interp')(pool3)\r\n pool6 = AveragePooling2D(pool_size=(h/4,w/4), strides=(h//4,w//4), name='conv5_3_pool6')(y)\r\n pool6 = Lambda(lambda x: tf.image.resize(x, size=(h,w)), name='conv5_3_pool6_interp')(pool6)\r\n\r\n y = Add(name='conv5_3_sum')([y, pool1, pool2, pool3, pool6])\r\n y = Conv2D(256, 1, activation='relu', name='conv5_4_k1')(y)\r\n aux_1 = Lambda(lambda x: tf.image.resize(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='conv5_4_interp')(y)\r\n y = ZeroPadding2D(padding=2, name='padding17')(aux_1)\r\n y = Conv2D(128, 3, dilation_rate=2, name='conv_sub4')(y)\r\n y_ = Conv2D(128, 1, name='conv3_1_sub2_proj')(z)\r\n y = Add(name='sub24_sum')([y,y_])\r\n y = Activation('relu', name='sub24_sum/relu')(y)\r\n\r\n aux_2 = Lambda(lambda x: tf.image.resize(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='sub24_sum_interp')(y)\r\n y = ZeroPadding2D(padding=2, name='padding18')(aux_2)\r\n y_ = Conv2D(128, 3, dilation_rate=2, name='conv_sub2')(y)\r\n\r\n # (1)\r\n y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv1_sub1')(x)\r\n y = Conv2D(32, 3, strides=2, padding='same', activation='relu', 
name='conv2_sub1')(y)\r\n y = Conv2D(64, 3, strides=2, padding='same', activation='relu', name='conv3_sub1')(y)\r\n y = Conv2D(128, 1, name='conv3_sub1_proj')(y)\r\n\r\n y = Add(name='sub12_sum')([y,y_])\r\n y = Activation('relu', name='sub12_sum/relu')(y)\r\n y = Lambda(lambda x: tf.image.resize(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='sub12_sum_interp')(y)\r\n\r\n out = Conv2D(n_classes, 1, activation='softmax', name='conv6_cls')(y)\r\n\r\n if train:\r\n aux_1 = Conv2D(n_classes, 1, activation='softmax', name='sub4_out')(aux_1)\r\n aux_2 = Conv2D(n_classes, 1, activation='softmax', name='sub24_out')(aux_2)\r\n\r\n model = Model(inputs=inp, outputs=[out, aux_2, aux_1])\r\n else:\r\n model = Model(inputs=inp, outputs=out)\r\n \r\n if weights_path is not None:\r\n model.load_weights(weights_path, by_name=True)\r\n return model\r\n\r\ndef build_bn(width, height, n_classes, weights_path=None, train=False):\r\n inp = Input(shape=(height, width, 3))\r\n x = Lambda(lambda x: (x - 127.5)/255.0)(inp)\r\n\r\n # (1/2)\r\n y = Lambda(lambda x: tf.image.resize(x, size=(int(x.shape[1])//2, int(x.shape[2])//2)), name='data_sub2')(x)\r\n y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv1_1_3x3_s2')(y)\r\n y = BatchNormalization(name='conv1_1_3x3_s2_bn')(y)\r\n y = Conv2D(32, 3, padding='same', activation='relu', name='conv1_2_3x3')(y)\r\n y = BatchNormalization(name='conv1_2_3x3_s2_bn')(y)\r\n y = Conv2D(64, 3, padding='same', activation='relu', name='conv1_3_3x3')(y)\r\n y = BatchNormalization(name='conv1_3_3x3_bn')(y)\r\n y_ = MaxPooling2D(pool_size=3, strides=2, name='pool1_3x3_s2')(y)\r\n \r\n y = Conv2D(128, 1, name='conv2_1_1x1_proj')(y_)\r\n y = BatchNormalization(name='conv2_1_1x1_proj_bn')(y)\r\n y_ = Conv2D(32, 1, activation='relu', name='conv2_1_1x1_reduce')(y_)\r\n y_ = BatchNormalization(name='conv2_1_1x1_reduce_bn')(y_)\r\n y_ = ZeroPadding2D(name='padding1')(y_)\r\n y_ = Conv2D(32, 3, activation='relu', name='conv2_1_3x3')(y_)\r\n y_ = BatchNormalization(name='conv2_1_3x3_bn')(y_)\r\n y_ = Conv2D(128, 1, name='conv2_1_1x1_increase')(y_)\r\n y_ = BatchNormalization(name='conv2_1_1x1_increase_bn')(y_)\r\n y = Add(name='conv2_1')([y,y_])\r\n y_ = Activation('relu', name='conv2_1/relu')(y)\r\n\r\n y = Conv2D(32, 1, activation='relu', name='conv2_2_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv2_2_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(name='padding2')(y)\r\n y = Conv2D(32, 3, activation='relu', name='conv2_2_3x3')(y)\r\n y = BatchNormalization(name='conv2_2_3x3_bn')(y)\r\n y = Conv2D(128, 1, name='conv2_2_1x1_increase')(y)\r\n y = BatchNormalization(name='conv2_2_1x1_increase_bn')(y)\r\n y = Add(name='conv2_2')([y,y_])\r\n y_ = Activation('relu', name='conv2_2/relu')(y)\r\n\r\n y = Conv2D(32, 1, activation='relu', name='conv2_3_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv2_3_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(name='padding3')(y)\r\n y = Conv2D(32, 3, activation='relu', name='conv2_3_3x3')(y)\r\n y = BatchNormalization(name='conv2_3_3x3_bn')(y)\r\n y = Conv2D(128, 1, name='conv2_3_1x1_increase')(y)\r\n y = BatchNormalization(name='conv2_3_1x1_increase_bn')(y)\r\n y = Add(name='conv2_3')([y,y_])\r\n y_ = Activation('relu', name='conv2_3/relu')(y)\r\n\r\n y = Conv2D(256, 1, strides=2, name='conv3_1_1x1_proj')(y_)\r\n y = BatchNormalization(name='conv3_1_1x1_proj_bn')(y)\r\n y_ = Conv2D(64, 1, strides=2, activation='relu', name='conv3_1_1x1_reduce')(y_)\r\n y_ = BatchNormalization(name='conv3_1_1x1_reduce_bn')(y_) \r\n y_ = 
ZeroPadding2D(name='padding4')(y_)\r\n y_ = Conv2D(64, 3, activation='relu', name='conv3_1_3x3')(y_)\r\n y_ = BatchNormalization(name='conv3_1_3x3_bn')(y_)\r\n y_ = Conv2D(256, 1, name='conv3_1_1x1_increase')(y_)\r\n y_ = BatchNormalization(name='conv3_1_1x1_increase_bn')(y_)\r\n y = Add(name='conv3_1')([y,y_])\r\n z = Activation('relu', name='conv3_1/relu')(y)\r\n\r\n # (1/4)\r\n y_ = Lambda(lambda x: tf.image.resize(x, size=(int(x.shape[1])//2, int(x.shape[2])//2)), name='conv3_1_sub4')(z)\r\n y = Conv2D(64, 1, activation='relu', name='conv3_2_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv3_2_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(name='padding5')(y)\r\n y = Conv2D(64, 3, activation='relu', name='conv3_2_3x3')(y)\r\n y = BatchNormalization(name='conv3_2_3x3_bn')(y)\r\n y = Conv2D(256, 1, name='conv3_2_1x1_increase')(y)\r\n y = BatchNormalization(name='conv3_2_1x1_increase_bn')(y)\r\n y = Add(name='conv3_2')([y,y_])\r\n y_ = Activation('relu', name='conv3_2/relu')(y)\r\n\r\n y = Conv2D(64, 1, activation='relu', name='conv3_3_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv3_3_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(name='padding6')(y)\r\n y = Conv2D(64, 3, activation='relu', name='conv3_3_3x3')(y)\r\n y = BatchNormalization(name='conv3_3_3x3_bn')(y)\r\n y = Conv2D(256, 1, name='conv3_3_1x1_increase')(y)\r\n y = BatchNormalization(name='conv3_3_1x1_increase_bn')(y)\r\n y = Add(name='conv3_3')([y,y_])\r\n y_ = Activation('relu', name='conv3_3/relu')(y)\r\n\r\n y = Conv2D(64, 1, activation='relu', name='conv3_4_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv3_4_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(name='padding7')(y)\r\n y = Conv2D(64, 3, activation='relu', name='conv3_4_3x3')(y)\r\n y = BatchNormalization(name='conv3_4_3x3_bn')(y)\r\n y = Conv2D(256, 1, name='conv3_4_1x1_increase')(y)\r\n y = BatchNormalization(name='conv3_4_1x1_increase_bn')(y)\r\n y = Add(name='conv3_4')([y,y_])\r\n y_ = Activation('relu', name='conv3_4/relu')(y)\r\n\r\n y = Conv2D(512, 1, name='conv4_1_1x1_proj')(y_)\r\n y = BatchNormalization(name='conv4_1_1x1_proj_bn')(y)\r\n y_ = Conv2D(128, 1, activation='relu', name='conv4_1_1x1_reduce')(y_)\r\n y_ = BatchNormalization(name='conv4_1_1x1_reduce_bn')(y_)\r\n y_ = ZeroPadding2D(padding=2, name='padding8')(y_)\r\n y_ = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_1_3x3')(y_)\r\n y_ = BatchNormalization(name='conv4_1_3x3_bn')(y_)\r\n y_ = Conv2D(512, 1, name='conv4_1_1x1_increase')(y_)\r\n y_ = BatchNormalization(name='conv4_1_1x1_increase_bn')(y_)\r\n y = Add(name='conv4_1')([y,y_])\r\n y_ = Activation('relu', name='conv4_1/relu')(y)\r\n\r\n y = Conv2D(128, 1, activation='relu', name='conv4_2_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv4_2_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(padding=2, name='padding9')(y)\r\n y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_2_3x3')(y)\r\n y = BatchNormalization(name='conv4_2_3x3_bn')(y)\r\n y = Conv2D(512, 1, name='conv4_2_1x1_increase')(y)\r\n y = BatchNormalization(name='conv4_2_1x1_increase_bn')(y)\r\n y = Add(name='conv4_2')([y,y_])\r\n y_ = Activation('relu', name='conv4_2/relu')(y)\r\n\r\n y = Conv2D(128, 1, activation='relu', name='conv4_3_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv4_3_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(padding=2, name='padding10')(y)\r\n y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_3_3x3')(y)\r\n y = BatchNormalization(name='conv4_3_3x3_bn')(y)\r\n y = Conv2D(512, 1, 
name='conv4_3_1x1_increase')(y)\r\n y = BatchNormalization(name='conv4_3_1x1_increase_bn')(y)\r\n y = Add(name='conv4_3')([y,y_])\r\n y_ = Activation('relu', name='conv4_3/relu')(y)\r\n\r\n y = Conv2D(128, 1, activation='relu', name='conv4_4_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv4_4_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(padding=2, name='padding11')(y)\r\n y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_4_3x3')(y)\r\n y = BatchNormalization(name='conv4_4_3x3_bn')(y)\r\n y = Conv2D(512, 1, name='conv4_4_1x1_increase')(y)\r\n y = BatchNormalization(name='conv4_4_1x1_increase_bn')(y)\r\n y = Add(name='conv4_4')([y,y_])\r\n y_ = Activation('relu', name='conv4_4/relu')(y)\r\n\r\n y = Conv2D(128, 1, activation='relu', name='conv4_5_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv4_5_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(padding=2, name='padding12')(y)\r\n y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_5_3x3')(y)\r\n y = BatchNormalization(name='conv4_5_3x3_bn')(y)\r\n y = Conv2D(512, 1, name='conv4_5_1x1_increase')(y)\r\n y = BatchNormalization(name='conv4_5_1x1_increase_bn')(y)\r\n y = Add(name='conv4_5')([y,y_])\r\n y_ = Activation('relu', name='conv4_5/relu')(y)\r\n\r\n y = Conv2D(128, 1, activation='relu', name='conv4_6_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv4_6_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(padding=2, name='padding13')(y)\r\n y = Conv2D(128, 3, dilation_rate=2, activation='relu', name='conv4_6_3x3')(y)\r\n y = BatchNormalization(name='conv4_6_3x3_bn')(y)\r\n y = Conv2D(512, 1, name='conv4_6_1x1_increase')(y)\r\n y = BatchNormalization(name='conv4_6_1x1_increase_bn')(y)\r\n y = Add(name='conv4_6')([y,y_])\r\n y = Activation('relu', name='conv4_6/relu')(y)\r\n\r\n y_ = Conv2D(1024, 1, name='conv5_1_1x1_proj')(y)\r\n y_ = BatchNormalization(name='conv5_1_1x1_proj_bn')(y_)\r\n y = Conv2D(256, 1, activation='relu', name='conv5_1_1x1_reduce')(y)\r\n y = BatchNormalization(name='conv5_1_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(padding=4, name='padding14')(y)\r\n y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_1_3x3')(y)\r\n y = BatchNormalization(name='conv5_1_3x3_bn')(y)\r\n y = Conv2D(1024, 1, name='conv5_1_1x1_increase')(y)\r\n y = BatchNormalization(name='conv5_1_1x1_increase_bn')(y)\r\n y = Add(name='conv5_1')([y,y_])\r\n y_ = Activation('relu', name='conv5_1/relu')(y)\r\n\r\n y = Conv2D(256, 1, activation='relu', name='conv5_2_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv5_2_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(padding=4, name='padding15')(y)\r\n y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_2_3x3')(y)\r\n y = BatchNormalization(name='conv5_2_3x3_bn')(y)\r\n y = Conv2D(1024, 1, name='conv5_2_1x1_increase')(y)\r\n y = BatchNormalization(name='conv5_2_1x1_increase_bn')(y)\r\n y = Add(name='conv5_2')([y,y_])\r\n y_ = Activation('relu', name='conv5_2/relu')(y)\r\n\r\n y = Conv2D(256, 1, activation='relu', name='conv5_3_1x1_reduce')(y_)\r\n y = BatchNormalization(name='conv5_3_1x1_reduce_bn')(y)\r\n y = ZeroPadding2D(padding=4, name='padding16')(y)\r\n y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_3_3x3')(y)\r\n y = BatchNormalization(name='conv5_3_3x3_bn')(y)\r\n y = Conv2D(1024, 1, name='conv5_3_1x1_increase')(y)\r\n y = BatchNormalization(name='conv5_3_1x1_increase_bn')(y)\r\n y = Add(name='conv5_3')([y,y_])\r\n y = Activation('relu', name='conv5_3/relu')(y)\r\n\r\n h, w = y.shape[1:3].as_list()\r\n pool1 = 
AveragePooling2D(pool_size=(h,w), strides=(h,w), name='conv5_3_pool1')(y)\r\n pool1 = Lambda(lambda x: tf.image.resize(x, size=(h,w)), name='conv5_3_pool1_interp')(pool1)\r\n pool2 = AveragePooling2D(pool_size=(h/2,w/2), strides=(h//2,w//2), name='conv5_3_pool2')(y)\r\n pool2 = Lambda(lambda x: tf.image.resize(x, size=(h,w)), name='conv5_3_pool2_interp')(pool2)\r\n pool3 = AveragePooling2D(pool_size=(h/3,w/3), strides=(h//3,w//3), name='conv5_3_pool3')(y)\r\n pool3 = Lambda(lambda x: tf.image.resize(x, size=(h,w)), name='conv5_3_pool3_interp')(pool3)\r\n pool6 = AveragePooling2D(pool_size=(h/4,w/4), strides=(h//4,w//4), name='conv5_3_pool6')(y)\r\n pool6 = Lambda(lambda x: tf.image.resize(x, size=(h,w)), name='conv5_3_pool6_interp')(pool6)\r\n\r\n y = Add(name='conv5_3_sum')([y, pool1, pool2, pool3, pool6])\r\n y = Conv2D(256, 1, activation='relu', name='conv5_4_k1')(y)\r\n y = BatchNormalization(name='conv5_4_k1_bn')(y)\r\n aux_1 = Lambda(lambda x: tf.image.resize(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='conv5_4_interp')(y)\r\n y = ZeroPadding2D(padding=2, name='padding17')(aux_1)\r\n y = Conv2D(128, 3, dilation_rate=2, name='conv_sub4')(y)\r\n y = BatchNormalization(name='conv_sub4_bn')(y)\r\n y_ = Conv2D(128, 1, name='conv3_1_sub2_proj')(z)\r\n y_ = BatchNormalization(name='conv3_1_sub2_proj_bn')(y_)\r\n y = Add(name='sub24_sum')([y,y_])\r\n y = Activation('relu', name='sub24_sum/relu')(y)\r\n\r\n aux_2 = Lambda(lambda x: tf.image.resize(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='sub24_sum_interp')(y)\r\n y = ZeroPadding2D(padding=2, name='padding18')(aux_2)\r\n y_ = Conv2D(128, 3, dilation_rate=2, name='conv_sub2')(y)\r\n y_ = BatchNormalization(name='conv_sub2_bn')(y_)\r\n\r\n # (1)\r\n y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv1_sub1')(x)\r\n y = BatchNormalization(name='conv1_sub1_bn')(y)\r\n y = Conv2D(32, 3, strides=2, padding='same', activation='relu', name='conv2_sub1')(y)\r\n y = BatchNormalization(name='conv2_sub1_bn')(y)\r\n y = Conv2D(64, 3, strides=2, padding='same', activation='relu', name='conv3_sub1')(y)\r\n y = BatchNormalization(name='conv3_sub1_bn')(y)\r\n y = Conv2D(128, 1, name='conv3_sub1_proj')(y)\r\n y = BatchNormalization(name='conv3_sub1_proj_bn')(y)\r\n\r\n y = Add(name='sub12_sum')([y,y_])\r\n y = Activation('relu', name='sub12_sum/relu')(y)\r\n y = Lambda(lambda x: tf.image.resize(x, size=(int(x.shape[1])*2, int(x.shape[2])*2)), name='sub12_sum_interp')(y)\r\n \r\n out = Conv2D(n_classes, 1, activation='softmax', name='conv6_cls')(y)\r\n\r\n if train:\r\n aux_1 = Conv2D(n_classes, 1, activation='softmax', name='sub4_out')(aux_1)\r\n aux_2 = Conv2D(n_classes, 1, activation='softmax', name='sub24_out')(aux_2)\r\n model = Model(inputs=inp, outputs=[out, aux_2, aux_1])\r\n else:\r\n model = Model(inputs=inp, outputs=out)\r\n \r\n if weights_path is not None:\r\n model.load_weights(weights_path, by_name=True)\r\n return model\r\n\r\nA=build_bn(512, 512, 10, weights_path=None, train=True)\r\nA.summary()\r\n", "from tensorflow.python import keras\r\nfrom itertools import product\r\nimport numpy as np\r\nfrom tensorflow.python.keras.utils import losses_utils\r\n#weights->数组\r\nclass WeightedCategoricalCrossentropy(keras.losses.CategoricalCrossentropy):\r\n\r\n def __init__(\r\n self,\r\n weights,\r\n from_logits=False,\r\n label_smoothing=0,\r\n reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE,\r\n name='categorical_crossentropy',\r\n ):\r\n super().__init__(\r\n from_logits, label_smoothing, reduction, 
name=f\"weighted_{name}\"\r\n )\r\n self.weights = weights\r\n\r\n def call(self, y_true, y_pred):\r\n weights = self.weights\r\n nb_cl = len(weights)\r\n final_mask = keras.backend.zeros_like(y_pred[:, 0])\r\n y_pred_max = keras.backend.max(y_pred, axis=1)\r\n y_pred_max = keras.backend.reshape(\r\n y_pred_max, (keras.backend.shape(y_pred)[0], 1))\r\n y_pred_max_mat = keras.backend.cast(\r\n keras.backend.equal(y_pred, y_pred_max), keras.backend.floatx())\r\n for c_p, c_t in product(range(nb_cl), range(nb_cl)):\r\n final_mask += (\r\n weights[c_t, c_p] * y_pred_max_mat[:, c_p] * y_true[:, c_t])\r\n return super().call(y_true, y_pred) * final_mask\r\n\r\n\r\nfrom tensorflow.keras import backend as K\r\ndef weighted_categorical_crossentropy(weights):\r\n \"\"\"\r\n A weighted version of keras.objectives.categorical_crossentropy\r\n \r\n Variables:\r\n weights: numpy array of shape (C,) where C is the number of classes\r\n \r\n Usage:\r\n weights = np.array([0.5,2,10]) # Class one at 0.5, class 2 twice the normal weights, class 3 10x.\r\n loss = weighted_categorical_crossentropy(weights)\r\n model.compile(loss=loss,optimizer='adam')\r\n \"\"\"\r\n \r\n weights = K.variable(weights)\r\n \r\n def loss(y_true, y_pred):\r\n # scale predictions so that the class probas of each sample sum to 1\r\n y_pred /= K.sum(y_pred, axis=-1, keepdims=True)\r\n # clip to prevent NaN's and Inf's\r\n y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\r\n # calc\r\n loss = y_true * K.log(y_pred) * weights\r\n loss = -K.sum(loss, -1)\r\n return loss\r\n \r\n return loss\r\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.clip_by_value", "tensorflow.greater", "tensorflow.reduce_mean", "tensorflow.python.ops.array_ops.zeros_like" ], [ "tensorflow.keras.layers.AveragePooling2D", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Lambda", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.BatchNormalization", "tensorflow.image.resize", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Input" ], [ "tensorflow.python.keras.backend.max", "tensorflow.keras.backend.sum", "tensorflow.python.keras.backend.shape", "tensorflow.python.keras.backend.zeros_like", "tensorflow.python.keras.backend.floatx", "tensorflow.keras.backend.log", "tensorflow.python.keras.backend.equal", "tensorflow.keras.backend.epsilon", "tensorflow.keras.backend.variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
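For orientation, the `conv5_3_pool*` block in the first file of the record above is the pyramid-pooling step of this ICNet-style network: the conv5-stage feature map is average-pooled at several grid sizes (hence the integer `//` pool sizes), resized back to the original resolution, and summed. A minimal self-contained sketch of just that step; the 32x32x256 feature-map shape is an illustrative assumption, not taken from the record:

import tensorflow as tf
from tensorflow.keras import layers

h, w = 32, 32  # assumed spatial size of the conv5-stage feature map
feat = tf.keras.Input(shape=(h, w, 256))  # channel count is also assumed
branches = [feat]
for k in (1, 2, 3, 4):
    # pool down to a k x k grid (for these sizes), then resize back up
    # so all branches are shape-compatible for the element-wise sum
    p = layers.AveragePooling2D(pool_size=(h // k, w // k),
                                strides=(h // k, w // k))(feat)
    p = layers.Lambda(lambda t: tf.image.resize(t, size=(h, w)))(p)
    branches.append(p)
fused = layers.Add()(branches)
print(tf.keras.Model(feat, fused).output_shape)  # (None, 32, 32, 256)

Because every branch is resized back to (h, w) and average pooling preserves the channel count, the Add needs no projection layers.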
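The second file's `WeightedCategoricalCrossentropy` is driven by a square matrix `weights[true_class, pred_class]`: each sample's cross-entropy is scaled by the entry for its true class and its arg-max predicted class. A hedged usage sketch — the 3-class penalty matrix is an assumption, and it presumes the class defined in the record is in scope under TF 2.x eager execution:

import numpy as np

# Assumed penalty matrix: weights[true, pred] scales the loss for samples
# of class `true` whose arg-max prediction is `pred`.
w = np.ones((3, 3), dtype='float32')
w[0, 2] = 5.0  # predicting class 2 for a true class-0 sample costs 5x
loss_fn = WeightedCategoricalCrossentropy(w)

y_true = np.array([[1., 0., 0.]], dtype='float32')
y_pred = np.array([[0.1, 0.2, 0.7]], dtype='float32')
# final_mask selects w[0, 2] here, so this is ~5x the plain CE of -log(0.1)
print(float(loss_fn(y_true, y_pred)))  # ~11.51

Note that this class keys off the arg-max prediction, unlike the simpler per-class weight vector used by the `weighted_categorical_crossentropy` factory later in the same file.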
Laknath1996/kdg
[ "11d6286c1eafefc09a86c5bb0a1a6b0d2e4b4a83" ]
[ "benchmarks/openml_cc18_wmle.py" ]
[ "#%%\nfrom kdg import kdf\nfrom kdg.utils import get_ece\nimport openml\nfrom joblib import Parallel, delayed\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier as rf\nfrom sklearn.metrics import cohen_kappa_score\nfrom kdg.utils import get_ece\nimport os\nfrom os import listdir, getcwd \n# %%\ndef experiment(task_id, folder, n_estimators=500, reps=30):\n task = openml.tasks.get_task(task_id)\n X, y = task.get_X_and_y()\n\n if np.isnan(np.sum(y)):\n return\n\n if np.isnan(np.sum(X)):\n return\n\n total_sample = X.shape[0]\n unique_classes, counts = np.unique(y, return_counts=True)\n\n test_sample = min(counts)//3\n\n indx = []\n for label in unique_classes:\n indx.append(\n np.where(\n y==label\n )[0]\n )\n\n max_sample = min(counts) - test_sample\n train_samples = np.logspace(\n np.log10(2),\n np.log10(max_sample),\n num=3,\n endpoint=True,\n dtype=int\n )\n \n err = []\n err_rf = []\n ece = []\n ece_rf = []\n kappa = []\n kappa_rf = []\n mc_rep = []\n samples = []\n\n for train_sample in train_samples:\n \n for rep in range(reps):\n indx_to_take_train = []\n indx_to_take_test = []\n\n for ii, _ in enumerate(unique_classes):\n np.random.shuffle(indx[ii])\n indx_to_take_train.extend(\n list(\n indx[ii][:train_sample]\n )\n )\n indx_to_take_test.extend(\n list(\n indx[ii][-test_sample:counts[ii]]\n )\n )\n model_kdf = kdf(kwargs={'n_estimators':n_estimators})\n model_kdf.fit(X[indx_to_take_train], y[indx_to_take_train])\n proba_kdf = model_kdf.predict_proba(X[indx_to_take_test])\n proba_rf = model_kdf.rf_model.predict_proba(X[indx_to_take_test])\n predicted_label_kdf = np.argmax(proba_kdf, axis = 1)\n predicted_label_rf = np.argmax(proba_rf, axis = 1)\n\n err.append(\n 1 - np.mean(\n predicted_label_kdf==y[indx_to_take_test]\n )\n )\n err_rf.append(\n 1 - np.mean(\n predicted_label_rf==y[indx_to_take_test]\n )\n )\n kappa.append(\n cohen_kappa_score(predicted_label_kdf, y[indx_to_take_test])\n )\n kappa_rf.append(\n cohen_kappa_score(predicted_label_rf, y[indx_to_take_test])\n )\n ece.append(\n get_ece(proba_kdf, predicted_label_kdf, y[indx_to_take_test])\n )\n ece_rf.append(\n get_ece(proba_rf, predicted_label_rf, y[indx_to_take_test])\n )\n samples.append(\n train_sample*len(unique_classes)\n )\n mc_rep.append(rep)\n\n df = pd.DataFrame() \n df['err_kdf'] = err\n df['err_rf'] = err_rf\n df['kappa_kdf'] = kappa\n df['kappa_rf'] = kappa_rf\n df['ece_kdf'] = ece\n df['ece_rf'] = ece_rf\n df['rep'] = mc_rep\n df['samples'] = samples\n\n df.to_csv(folder+'/'+'openML_cc18_'+str(task_id)+'.csv')\n\n#%%\nfolder = 'ledoit_wolf'\n#os.mkdir(folder)\nbenchmark_suite = openml.study.get_suite('OpenML-CC18')\ncurrent_dir = getcwd()\nfiles = listdir(current_dir+'/'+folder)\nParallel(n_jobs=10,verbose=1)(\n delayed(experiment)(\n task_id,\n folder\n ) for task_id in benchmark_suite.tasks\n )\n\n'''for task_id in benchmark_suite.tasks:\n filename = 'openML_cc18_' + str(task_id) + '.csv'\n\n if filename not in files:\n print(filename)\n try:\n experiment(task_id,folder)\n except:\n print(\"couldn't run!\")\n else:\n print(\"Ran successfully!\")'''\n# %%\n" ]
[ [ "numpy.unique", "sklearn.metrics.cohen_kappa_score", "pandas.DataFrame", "numpy.random.shuffle", "numpy.log10", "numpy.argmax", "numpy.mean", "numpy.where", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
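The benchmark in the record above scores each model with `kdg.utils.get_ece(proba, predicted_label, true_label)` alongside Cohen's kappa. As a generic illustration of what a binned expected calibration error measures — this is not kdg's implementation, whose binning and weighting may differ — a minimal NumPy version:

import numpy as np

def binned_ece(proba, y_true, n_bins=10):
    # Standard top-label ECE: bin samples by confidence, then average the
    # |confidence - accuracy| gap weighted by the fraction of samples per bin.
    conf = proba.max(axis=1)
    pred = proba.argmax(axis=1)
    acc = (pred == y_true).astype(float)
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    for lo, hi in zip(edges[:-1], edges[1:]):
        mask = (conf > lo) & (conf <= hi)
        if mask.any():
            ece += mask.mean() * abs(conf[mask].mean() - acc[mask].mean())
    return ece

proba = np.array([[0.9, 0.1], [0.6, 0.4], [0.2, 0.8]])
y_true = np.array([0, 1, 1])
print(binned_ece(proba, y_true))  # 0.3 for this toy batch

A perfectly calibrated classifier has per-bin confidence equal to per-bin accuracy, so lower is better.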
spott/matplotlib2tikz
[ "0a362a2078b067c527c568b0c564cf384e8cc213" ]
[ "matplotlib2tikz/text.py" ]
[ "# -*- coding: utf-8 -*-\n#\nfrom . import color\n\nimport matplotlib as mpl\n\n\ndef draw_text(data, obj):\n '''Paints text on the graph.\n '''\n content = []\n properties = []\n style = []\n if isinstance(obj, mpl.text.Annotation):\n ann_xy = obj.xy\n ann_xycoords = obj.xycoords\n ann_xytext = obj.xyann\n ann_textcoords = obj.anncoords\n if ann_xycoords != 'data' or ann_textcoords != 'data':\n print('Warning: Anything else except for explicit positioning '\n 'is not supported for annotations yet :(')\n return data, content\n else: # Create a basic tikz arrow\n arrow_style = []\n if obj.arrowprops is not None:\n if obj.arrowprops['arrowstyle'] is not None:\n if obj.arrowprops['arrowstyle'] in ['-', '->',\n '<-', '<->']:\n arrow_style.append(obj.arrowprops['arrowstyle'])\n data, col, _ = color.mpl_color2xcolor(\n data,\n obj.arrow_patch.get_ec()\n )\n arrow_style.append(col)\n\n arrow_proto = '\\\\draw[%s] (axis cs:%.15g,%.15g) ' \\\n '-- (axis cs:%.15g,%.15g);\\n'\n the_arrow = arrow_proto % (','.join(arrow_style),\n ann_xytext[0], ann_xytext[1],\n ann_xy[0], ann_xy[1]\n )\n content.append(the_arrow)\n\n # 1: coordinates\n # 2: properties (shapes, rotation, etc)\n # 3: text style\n # 4: the text\n # -------1--------2---3--4--\n pos = obj.get_position()\n text = obj.get_text()\n size = obj.get_size()\n bbox = obj.get_bbox_patch()\n converter = mpl.colors.ColorConverter()\n # without the factor 0.5, the fonts are too big most of the time.\n # TODO fix this\n scaling = 0.5 * size / data['font size']\n if scaling != 1.0:\n properties.append('scale=%.15g' % scaling)\n\n if bbox is not None:\n bbox_style = bbox.get_boxstyle()\n if bbox.get_fill():\n data, fc, _ = color.mpl_color2xcolor(data, bbox.get_facecolor())\n if fc:\n properties.append('fill=%s' % fc)\n data, ec, _ = color.mpl_color2xcolor(data, bbox.get_edgecolor())\n if ec:\n properties.append('draw=%s' % ec)\n # XXX: This is ugly, too\n properties.append('line width=%.15gpt' % (bbox.get_lw() * 0.4))\n properties.append('inner sep=%.15gpt'\n % (bbox_style.pad * data['font size'])\n )\n # Rounded boxes\n if isinstance(bbox_style, mpl.patches.BoxStyle.Round):\n properties.append('rounded corners')\n elif isinstance(bbox_style, mpl.patches.BoxStyle.RArrow):\n data['tikz libs'].add('shapes.arrows')\n properties.append('single arrow')\n elif isinstance(bbox_style, mpl.patches.BoxStyle.LArrow):\n data['tikz libs'].add('shapes.arrows')\n properties.append('single arrow')\n properties.append('shape border rotate=180')\n elif isinstance(bbox_style, mpl.patches.BoxStyle.DArrow):\n data['tikz libs'].add('shapes.arrows')\n properties.append('double arrow')\n elif isinstance(bbox_style, mpl.patches.BoxStyle.Circle):\n properties.append('circle')\n elif isinstance(bbox_style, mpl.patches.BoxStyle.Roundtooth):\n properties.append('decorate')\n properties.append(\n 'decoration={snake,amplitude=0.5,segment length=3}'\n )\n elif isinstance(bbox_style, mpl.patches.BoxStyle.Sawtooth):\n properties.append('decorate')\n properties.append(\n 'decoration={zigzag,amplitude=0.5,segment length=3}'\n )\n else:\n # TODO Round4\n assert isinstance(bbox_style, mpl.patches.BoxStyle.Square)\n\n # Line style\n if bbox.get_ls() == 'dotted':\n properties.append('dotted')\n elif bbox.get_ls() == 'dashed':\n properties.append('dashed')\n # TODO Check if there is there any way to extract the dashdot\n # pattern from matplotlib instead of hardcoding\n # an approximation?\n elif bbox.get_ls() == 'dashdot':\n properties.append(('dash pattern=on %.3gpt off %.3gpt on '\n 
'%.3gpt off %.3gpt'\n ) % (1.0 / scaling, 3.0 / scaling,\n 6.0 / scaling, 3.0 / scaling)\n )\n else:\n assert bbox.get_ls() == 'solid'\n\n ha = obj.get_ha()\n va = obj.get_va()\n anchor = _transform_positioning(ha, va)\n if anchor is not None:\n properties.append(anchor)\n data, col, _ = color.mpl_color2xcolor(\n data,\n converter.to_rgb(obj.get_color())\n )\n properties.append('text=%s' % col)\n properties.append('rotate=%.1f' % obj.get_rotation())\n\n if obj.get_style() == 'italic':\n style.append('\\\\itshape')\n else:\n assert obj.get_style() == 'normal'\n\n # From matplotlib/font_manager.py:\n # weight_dict = {\n # 'ultralight' : 100,\n # 'light' : 200,\n # 'normal' : 400,\n # 'regular' : 400,\n # 'book' : 400,\n # 'medium' : 500,\n # 'roman' : 500,\n # 'semibold' : 600,\n # 'demibold' : 600,\n # 'demi' : 600,\n # 'bold' : 700,\n # 'heavy' : 800,\n # 'extra bold' : 800,\n # 'black' : 900}\n if obj.get_weight() > 550:\n style.append('\\\\bfseries')\n\n if obj.axes:\n # If the coordinates are relative to an axis, use `axis cs`.\n tikz_pos = '(axis cs:%.15g,%.15g)' % pos\n else:\n # relative to the entire figure, it's a getting a littler harder. See\n # <http://tex.stackexchange.com/a/274902/13262> for a solution to the\n # problem:\n tikz_pos = (\n '({$(current bounding box.south west)!%.15g!'\n '(current bounding box.south east)$}'\n '|-'\n '{$(current bounding box.south west)!%0.15g!'\n '(current bounding box.north west)$})'\n ) % pos\n\n if '\\n' in text:\n # http://tex.stackexchange.com/a/124114/13262\n properties.append('align=%s' % ha)\n # Manipulating the text here is actually against mpl2tikz's policy not\n # to do that. On the other hand, newlines should translate into\n # newlines.\n # We might want to remove this here in the future.\n text = text.replace('\\n ', '\\\\\\\\')\n\n content.append(\n '\\\\node at %s[\\n %s\\n]{%s %s};\\n' %\n (tikz_pos, ',\\n '.join(properties), ' '.join(style), text)\n )\n return data, content\n\n\ndef _transform_positioning(ha, va):\n '''Converts matplotlib positioning to pgf node positioning.\n Not quite accurate but the results are equivalent more or less.'''\n if ha == 'center' and va == 'center':\n return None\n else:\n ha_mpl_to_tikz = {'right': 'east',\n 'left': 'west',\n 'center': ''\n }\n va_mpl_to_tikz = {'top': 'north',\n 'bottom': 'south',\n 'center': '',\n 'baseline': 'base'\n }\n return ('anchor=%s %s' % (va_mpl_to_tikz[va], ha_mpl_to_tikz[ha])\n ).strip()\n" ]
[ [ "matplotlib.colors.ColorConverter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
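`draw_text` in the record above needs a populated `data` dict (current font size, collected TikZ libraries) and a live matplotlib `Text` object, but `_transform_positioning` at the bottom of the same file is a pure function and can be sanity-checked in isolation. A minimal sketch, assuming the package is installed so the private helper is importable:

# Sanity-check the matplotlib -> pgf anchor mapping defined above.
from matplotlib2tikz.text import _transform_positioning

print(_transform_positioning('left', 'top'))       # anchor=north west
print(_transform_positioning('center', 'bottom'))  # anchor=south
print(_transform_positioning('center', 'center'))  # None

The None case corresponds to pgf's default node anchor (center), which is why `draw_text` only appends an `anchor=...` option when the alignment differs from centered.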
shengfuintel/tensorflow
[ "e67f3af48c94c9456c3ff376dc30c82a4bf982cd", "e67f3af48c94c9456c3ff376dc30c82a4bf982cd", "5828e285209ff8c3d1bef2e4bd7c55ca611080d5", "e67f3af48c94c9456c3ff376dc30c82a4bf982cd" ]
[ "tensorflow/python/ops/gradients_impl.py", "tensorflow/python/kernel_tests/depthtospace_op_test.py", "tensorflow/contrib/cudnn_rnn/python/kernel_tests/cudnn_rnn_ops_benchmark.py", "tensorflow/python/estimator/canned/head.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements the graph generation for computation of gradients.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport warnings\n\nimport numpy as np\nimport six\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import image_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import linalg_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import linalg_ops # pylint: disable=unused-import\nfrom tensorflow.python.ops import logging_ops # pylint: disable=unused-import\nfrom tensorflow.python.ops import math_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import spectral_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import tensor_array_ops\nfrom tensorflow.python.platform import tf_logging as logging\n\n\n# Warn the user if we convert a sparse representation to dense with at\n# least this number of elements.\n_LARGE_SPARSE_NUM_ELEMENTS = 100000000\n\n\ndef _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):\n \"\"\"Converts an IndexedSlices object `value` to a Tensor.\n\n NOTE(mrry): This function is potentially expensive.\n\n Args:\n value: An ops.IndexedSlices object.\n dtype: The dtype of the Tensor to be returned.\n name: Optional name to use for the returned Tensor.\n as_ref: True if a ref is requested.\n\n Returns:\n A dense Tensor representing the values in the given IndexedSlices.\n\n Raises:\n ValueError: If the IndexedSlices does not have the same dtype.\n \"\"\"\n _ = as_ref\n if dtype and not dtype.is_compatible_with(value.dtype):\n raise ValueError(\n \"Tensor conversion requested dtype %s for IndexedSlices with dtype %s\" %\n (dtype.name, value.dtype.name))\n if value.dense_shape is None:\n raise ValueError(\n \"Tensor conversion requested for IndexedSlices without dense_shape: %s\"\n % str(value))\n # TODO(mrry): Consider adding static shape information 
to\n # IndexedSlices, to avoid using numpy here.\n dense_shape_value = tensor_util.constant_value(value.dense_shape)\n if dense_shape_value is not None:\n num_elements = np.prod(dense_shape_value)\n if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:\n warnings.warn(\n \"Converting sparse IndexedSlices to a dense Tensor with %d elements. \"\n \"This may consume a large amount of memory.\" % num_elements)\n else:\n warnings.warn(\n \"Converting sparse IndexedSlices to a dense Tensor of unknown shape. \"\n \"This may consume a large amount of memory.\")\n return math_ops.unsorted_segment_sum(\n value.values, value.indices, value.dense_shape[0], name=name)\n\n\nops.register_tensor_conversion_function(ops.IndexedSlices,\n _IndexedSlicesToTensor)\n\n\ndef _MarkReachedOps(from_ops, reached_ops):\n \"\"\"Mark all ops reached from \"from_ops\".\n\n Args:\n from_ops: list of Operations.\n reached_ops: list of booleans, indexed by operation id.\n \"\"\"\n queue = collections.deque()\n queue.extend(from_ops)\n while queue:\n op = queue.popleft()\n if not reached_ops[op._id]:\n reached_ops[op._id] = True\n for output in op.outputs:\n queue.extend(output.consumers())\n\n\ndef _GatherInputs(to_ops, reached_ops):\n \"\"\"List all inputs of to_ops that are in reached_ops.\n\n Args:\n to_ops: list of Operations.\n reached_ops: list of booleans, indexed by operation id.\n\n Returns:\n The list of all inputs of to_ops that are in reached_ops.\n That list includes all elements of to_ops.\n \"\"\"\n inputs = []\n queue = collections.deque()\n queue.extend(to_ops)\n while queue:\n op = queue.popleft()\n # We are interested in this op.\n if reached_ops[op._id]:\n inputs.append(op)\n # Clear the boolean so we won't add the inputs again.\n reached_ops[op._id] = False\n for inp in op.inputs:\n queue.append(inp.op)\n return inputs\n\n\ndef _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):\n \"\"\"Initialize the pending count for ops between two lists of Operations.\n\n 'pending_count[op._id]' indicates the number of backprop inputs\n to this operation.\n\n Args:\n graph: a Graph.\n to_ops: list of Operations.\n from_ops: list of Operations.\n colocate_gradients_with_ops: Python bool. 
See docstring of gradients().\n\n Returns:\n A tuple containing: (1) a list of integers indexed by operation id,\n indicating the number of backprop inputs to this operation, and (2)\n a ControlFlowState object which is not None if the ops between from_ops\n and to_ops contain control flow loops.\n \"\"\"\n # Mark reachable ops from from_ops.\n reached_ops = [False] * (graph._last_id + 1)\n for op in to_ops:\n reached_ops[op._id] = True\n _MarkReachedOps(from_ops, reached_ops)\n\n # Mark between ops.\n between_ops = [False] * (graph._last_id + 1)\n between_op_list = []\n queue = collections.deque()\n queue.extend(to_ops)\n while queue:\n op = queue.popleft()\n # We are interested in this op.\n if reached_ops[op._id]:\n between_ops[op._id] = True\n between_op_list.append(op)\n # Clear the boolean so we won't add the inputs again.\n reached_ops[op._id] = False\n for inp in op.inputs:\n queue.append(inp.op)\n\n # 'loop_state' is None if there are no while loops.\n loop_state = control_flow_ops.MaybeCreateControlFlowState(\n between_op_list, between_ops, colocate_gradients_with_ops)\n\n # Initialize pending count for between ops.\n pending_count = [0] * (graph._last_id + 1)\n for op in between_op_list:\n for x in op.inputs:\n if between_ops[x.op._id]:\n pending_count[x.op._id] += 1\n\n return pending_count, loop_state\n\n\ndef _AsList(x):\n return x if isinstance(x, (list, tuple)) else [x]\n\n\ndef _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):\n \"\"\"Fill in default values for grad_ys.\n\n Args:\n grad_ys: List of gradients, can contain None.\n ys: List of tensors.\n colocate_gradients_with_ops: If True, try colocating gradients with\n the corresponding op.\n\n Returns:\n A list of gradients to use, without None.\n\n Raises:\n ValueError: If sizes of gradients and inputs don't match\n TypeError: If type of any gradient is not valid for its input.\n \"\"\"\n if len(grad_ys) != len(ys):\n raise ValueError(\"Passed %d grad_ys for %d ys\" % (len(grad_ys), len(ys)))\n grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name=\"grad_y\")\n new_grad_ys = []\n for i in xrange(len(grad_ys)):\n grad_y = grad_ys[i]\n y = ys[i]\n if grad_y is None:\n if y.dtype.is_complex:\n raise TypeError(\n \"Gradients of complex tensors must set grad_ys (y.dtype = %r)\" %\n y.dtype)\n with _maybe_colocate_with(y.op, colocate_gradients_with_ops):\n new_grad_ys.append(array_ops.fill(\n array_ops.shape(y), constant_op.constant(\n 1, dtype=y.dtype, name=\"grad_ys_%d\" % i)))\n continue\n if y.dtype.is_floating or y.dtype.is_integer:\n if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:\n raise TypeError(\"Gradient type %s generated for real or \"\n \"integer-valued tensor %s with type %s must be \"\n \"real or integer\" %\n (dtypes.as_dtype(grad_y.dtype).name, y,\n dtypes.as_dtype(y.dtype).name))\n elif y.dtype.is_complex:\n if not grad_y.dtype.is_complex:\n raise TypeError(\"Gradient type %s generated for complex-valued \"\n \"tensor %s with type %s must be real\" %\n (dtypes.as_dtype(grad_y.dtype).name, y,\n dtypes.as_dtype(y.dtype).name))\n else:\n raise TypeError(\"Tensor %s with type %s must be numeric \"\n \"to obtain a default gradient\" %\n (y, dtypes.as_dtype(y.dtype).name))\n # Create a grad_y tensor in the name scope of the gradient.\n # Required for TensorArrays to identify which gradient call a\n # grad_y value is coming from.\n if isinstance(grad_y, ops.IndexedSlices):\n new_grad_ys.append(\n ops.IndexedSlices(\n indices=(array_ops.identity(grad_y.indices,\n 
name=\"grad_ys_%d_indices\" % i)\n if isinstance(grad_y.indices, ops.Tensor)\n else grad_y.indices),\n values=(array_ops.identity(grad_y.values,\n name=\"grad_ys_%d_values\" % i)\n if isinstance(grad_y.values, ops.Tensor)\n else grad_y.values),\n dense_shape=(array_ops.identity(grad_y.dense_shape,\n name=\"grad_ys_%d_shape\" % i)\n if isinstance(grad_y.dense_shape, ops.Tensor)\n else grad_y.dense_shape)))\n else:\n new_grad_ys.append(array_ops.identity(grad_y, name=\"grad_ys_%d\" % i))\n\n return new_grad_ys\n\n\ndef _IsTrainable(tensor):\n dtype = dtypes.as_dtype(tensor.dtype)\n return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,\n dtypes.complex64, dtypes.complex128)\n\n\ndef _VerifyGeneratedGradients(grads, op):\n \"\"\"Verify that gradients are valid in number and type.\n\n Args:\n grads: List of generated gradients.\n op: Operation for which the gradients where generated.\n\n Raises:\n ValueError: if sizes of gradients and inputs don't match.\n TypeError: if type of any gradient is not valid for its input.\n \"\"\"\n if len(grads) != len(op.inputs):\n raise ValueError(\"Num gradients %d generated for op %s do not match num \"\n \"inputs %d\" % (len(grads), op.node_def, len(op.inputs)))\n\n\ndef _StopOps(from_ops, stop_gradient_ops, pending_count):\n \"\"\"The set of ops that terminate the gradient computation.\n\n This computes the frontier of the forward graph *before* which backprop\n should stop. Operations in the returned set will not be differentiated.\n This set is defined as the subset of `from_ops` containing ops that have\n no predecessor in `from_ops`. `pending_count` is the result of\n `_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`\n iff pending_count[op._id] > 0.\n\n In addition, none of `stop_gradient_ops` will be differentiated.\n\n Args:\n from_ops: list of Operations.\n stop_gradient_ops: list of Operations never to backprop through.\n pending_count: List of integers, indexed by operation id.\n\n Returns:\n The set of operations.\n \"\"\"\n stop_ops = set()\n for op in from_ops:\n is_stop_op = True\n for inp in op.inputs:\n if pending_count[inp.op._id] > 0:\n is_stop_op = False\n break\n if is_stop_op:\n stop_ops.add(op._id)\n stop_ops.update(op._id for op in stop_gradient_ops) # pylint: disable=protected-access\n return stop_ops\n\n\[email protected]\ndef _maybe_colocate_with(op, colocate_gradients_with_ops):\n \"\"\"Context to colocate with `op` if `colocate_gradients_with_ops`.\"\"\"\n if colocate_gradients_with_ops:\n with ops.colocate_with(op):\n yield\n else:\n yield\n\n\ndef _SymGrad(op, out_grads):\n \"\"\"Backprop through a function call node op given its outputs' gradients.\"\"\"\n f_in = [x for x in op.inputs] + out_grads\n f_types = [x.dtype for x in op.inputs]\n f = attr_value_pb2.NameAttrList()\n f.name = op.type\n for k in op.node_def.attr:\n f.attr[k].CopyFrom(op.node_def.attr[k])\n # pylint: disable=protected-access\n in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)\n # pylint: enable=protected-access\n return in_grads\n\n\ndef _MaybeCompile(scope, op, func, grad_fn):\n \"\"\"Compile the calculation in grad_fn if op was marked as compiled.\"\"\"\n scope = scope.rstrip(\"/\").replace(\"/\", \"_\")\n if func is not None:\n xla_compile = func.definition.attr[\"_XlaCompile\"].b\n xla_separate_compiled_gradients = func.definition.attr[\n \"_XlaSeparateCompiledGradients\"].b\n xla_scope = func.definition.attr[\"_XlaScope\"].s.decode()\n else:\n try:\n xla_compile = 
op.get_attr(\"_XlaCompile\")\n xla_separate_compiled_gradients = op.get_attr(\n \"_XlaSeparateCompiledGradients\")\n xla_scope = op.get_attr(\"_XlaScope\").decode()\n except ValueError:\n return grad_fn() # Exit early\n\n if not xla_compile:\n return grad_fn() # Exit early\n\n # If the gradients are supposed to be compiled separately, we give them a\n # _XlaScope name that is based on the name_scope of the gradients. Otherwise\n # they just inherit the existing _XlaScope name, which lets them be merged\n # together with the non-gradient computation.\n if xla_separate_compiled_gradients:\n xla_grad_scope = \"%s_grad_%s\" % (xla_scope, scope)\n else:\n xla_grad_scope = xla_scope\n\n attrs = {\n \"_XlaCompile\": attr_value_pb2.AttrValue(b=xla_compile),\n \"_XlaScope\": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())\n }\n with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access\n return grad_fn()\n\n\ndef gradients(ys,\n xs,\n grad_ys=None,\n name=\"gradients\",\n colocate_gradients_with_ops=False,\n gate_gradients=False,\n aggregation_method=None,\n stop_gradients=None):\n \"\"\"Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.\n\n `ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`\n is a list of `Tensor`, holding the gradients received by the\n `ys`. The list must be the same length as `ys`.\n\n `gradients()` adds ops to the graph to output the derivatives of `ys` with\n respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where\n each tensor is the `sum(dy/dx)` for y in `ys`.\n\n `grad_ys` is a list of tensors of the same length as `ys` that holds\n the initial gradients for each y in `ys`. When `grad_ys` is None,\n we fill in a tensor of '1's of the shape of y for each y in `ys`. A\n user can provide their own initial `grad_ys` to compute the\n derivatives using a different initial gradient for each y (e.g., if\n one wanted to weight the gradient differently for each value in\n each y).\n\n `stop_gradients` is a `Tensor` or a list of tensors to be considered constant\n with respect to all `xs`. These tensors will not be backpropagated through,\n as though they had been explicitly disconnected using `stop_gradient`. Among\n other things, this allows computation of partial derivatives as opposed to\n total derivatives. For example:\n\n a = tf.constant(0.)\n b = 2 * a\n g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])\n\n Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the\n total derivatives `tf.gradients(a + b, [a, b])`, which take into account the\n influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is\n equivalent to:\n\n a = tf.stop_gradient(tf.constant(0.))\n b = tf.stop_gradient(2 * a)\n g = tf.gradients(a + b, [a, b])\n\n `stop_gradients` provides a way of stopping gradient after the graph has\n already been constructed, as compared to `tf.stop_gradient` which is used\n during graph construction. When the two approaches are combined,\n backpropagation stops at both `tf.stop_gradient` nodes and nodes in\n `stop_gradients`, whichever is encountered first.\n\n Args:\n ys: A `Tensor` or list of tensors to be differentiated.\n xs: A `Tensor` or list of tensors to be used for differentiation.\n grad_ys: Optional. 
A `Tensor` or list of tensors the same size as\n `ys` and holding the gradients computed for each y in `ys`.\n name: Optional name to use for grouping all the gradient ops together.\n defaults to 'gradients'.\n colocate_gradients_with_ops: If True, try colocating gradients with\n the corresponding op.\n gate_gradients: If True, add a tuple around the gradients returned\n for an operations. This avoids some race conditions.\n aggregation_method: Specifies the method used to combine gradient terms.\n Accepted values are constants defined in the class `AggregationMethod`.\n stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate\n through.\n\n Returns:\n A list of `sum(dy/dx)` for each x in `xs`.\n\n Raises:\n LookupError: if one of the operations between `x` and `y` does not\n have a registered gradient function.\n ValueError: if the arguments are invalid.\n RuntimeError: if called in Eager mode.\n\n \"\"\"\n if context.in_eager_mode():\n raise RuntimeError(\"tf.gradients not supported in EAGER mode. Use \"\n \"functions in tf.contrib.eager.backprop instead.\")\n ys = _AsList(ys)\n xs = _AsList(xs)\n stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)\n if grad_ys is None:\n grad_ys = [None] * len(ys)\n else:\n grad_ys = _AsList(grad_ys)\n\n with ops.name_scope(\n name, \"gradients\",\n list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:\n ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name=\"y\")\n xs = [x.handle if isinstance(x, resource_variable_ops.ResourceVariable)\n else x\n for x in xs]\n xs = ops.internal_convert_n_to_tensor_or_indexed_slices(xs, name=\"x\",\n as_ref=True)\n grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)\n\n # The approach we take here is as follows: Create a list of all ops in the\n # subgraph between the ys and xs. Visit these ops in reverse order of ids\n # to ensure that when we visit an op the gradients w.r.t its outputs have\n # been collected. Then aggregate these gradients if needed, call the op's\n # gradient function, and add the generated gradients to the gradients for\n # its input.\n\n # Initialize the pending count for ops in the connected subgraph from ys\n # to the xs.\n if len(ys) > 1:\n ys = [array_ops.identity(y) if y.consumers() else y for y in ys]\n to_ops = [t.op for t in ys]\n from_ops = [t.op for t in xs]\n stop_gradient_ops = [t.op for t in stop_gradients]\n pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,\n from_ops,\n colocate_gradients_with_ops)\n\n # Iterate over the collected ops.\n #\n # grads: op => list of gradients received on each output endpoint of the\n # op. 
The gradients for each endpoint are initially collected as a list.\n # When it is time to call the op's gradient function, for each endpoint we\n # aggregate the list of received gradients into a Add() Operation if there\n # is more than one.\n grads = {}\n\n # Add the initial gradients for the ys.\n for y, grad_y in zip(ys, grad_ys):\n _SetGrad(grads, y, grad_y)\n\n # Initialize queue with to_ops.\n queue = collections.deque()\n # Add the ops in 'to_ops' into the queue.\n to_ops_set = set()\n for op in to_ops:\n # 'ready' handles the case where one output gradient relies on\n # another output's gradient.\n # pylint: disable=protected-access\n ready = (pending_count[op._id] == 0)\n if ready and op._id not in to_ops_set:\n to_ops_set.add(op._id)\n queue.append(op)\n # pylint: enable=protected-access\n\n if loop_state:\n loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)\n for y in loop_exits:\n if _IsTrainable(y):\n _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))\n queue.append(y.op)\n\n stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count)\n while queue:\n # generate gradient subgraph for op.\n op = queue.popleft()\n with _maybe_colocate_with(op, colocate_gradients_with_ops):\n if loop_state:\n loop_state.EnterGradWhileContext(op, before=True)\n out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)\n if loop_state:\n loop_state.ExitGradWhileContext(op, before=True)\n\n grad_fn = None\n # pylint: disable=protected-access\n func_call = None\n is_func_call = ops.get_default_graph()._is_function(op.type)\n has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)\n if has_out_grads and (op._id not in stop_ops):\n if is_func_call:\n func_call = ops.get_default_graph()._get_function(op.type)\n grad_fn = func_call.python_grad_func\n # pylint: enable=protected-access\n else:\n # A grad_fn must be defined, either as a function or as None\n # for ops that do not have gradients.\n try:\n grad_fn = ops.get_gradient_function(op)\n except LookupError:\n raise LookupError(\n \"No gradient defined for operation '%s' (op type: %s)\" %\n (op.name, op.type))\n if loop_state:\n loop_state.EnterGradWhileContext(op, before=False)\n if (grad_fn or is_func_call) and has_out_grads:\n # NOTE: If _AggregatedGrads didn't compute a value for the i'th\n # output, it means that the cost does not depend on output[i],\n # therefore dC/doutput[i] is 0.\n for i, out_grad in enumerate(out_grads):\n if (not isinstance(out_grad, ops.Tensor) and\n not out_grad) and _IsTrainable(op.outputs[i]):\n # Only floating-point outputs get a zero gradient. 
Gradient\n # functions should ignore the gradient for other outputs.\n # TODO(apassos) gradients of resource handles might be an\n # issue here because of zeros.\n if loop_state:\n out_grads[i] = loop_state.ZerosLike(op, i)\n else:\n out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)\n with ops.name_scope(op.name + \"_grad\"):\n # pylint: disable=protected-access\n with ops.get_default_graph()._original_op(op):\n # pylint: enable=protected-access\n if grad_fn:\n # If grad_fn was found, do not use SymbolicGradient even for\n # functions.\n in_grads = _MaybeCompile(\n grad_scope, op, func_call, lambda: grad_fn(op, *out_grads))\n else:\n # For function call ops, we add a 'SymbolicGradient'\n # node to the graph to compute gradients.\n in_grads = _MaybeCompile(\n grad_scope, op, func_call, lambda: _SymGrad(op, out_grads))\n in_grads = _AsList(in_grads)\n _VerifyGeneratedGradients(in_grads, op)\n if gate_gradients and len(\n [x for x in in_grads if x is not None]) > 1:\n in_grads = control_flow_ops.tuple(in_grads)\n _LogOpGradients(op, out_grads, in_grads)\n else:\n # If no grad_fn is defined or none of out_grads is available,\n # just propagate a list of None backwards.\n in_grads = [None] * len(op.inputs)\n for i, (t_in, in_grad) in enumerate(zip(op.inputs, in_grads)):\n if in_grad is not None:\n if (isinstance(in_grad, ops.Tensor) and\n t_in.dtype != dtypes.resource):\n try:\n in_grad.set_shape(t_in.get_shape())\n except ValueError:\n raise ValueError(\n \"Incompatible shapes between op input and calculated \"\n \"input gradient. Forward operation: %s. Input index: %d. \"\n \"Original input shape: %s. \"\n \"Calculated input gradient shape: %s\"\n % (op.name, i, t_in.shape, in_grad.shape))\n _SetGrad(grads, t_in, in_grad)\n if loop_state:\n loop_state.ExitGradWhileContext(op, before=False)\n\n # Update pending count for the inputs of op and enqueue ready ops.\n _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)\n\n if loop_state:\n loop_state.PostProcessing()\n return [_GetGrad(grads, x) for x in xs]\n\n\ndef _HasAnyNotNoneGrads(grads, op):\n \"\"\"Return true iff op has real gradient.\"\"\"\n out_grads = _GetGrads(grads, op)\n for out_grad in out_grads:\n if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):\n return True\n if out_grad and isinstance(out_grad, collections.Sequence):\n if any([g is not None for g in out_grad]):\n return True\n return False\n\n\ndef _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):\n \"\"\"Update pending count for the inputs of op and enqueue ready ops.\"\"\"\n for x in op.inputs:\n # pylint: disable=protected-access\n pending_count[x.op._id] -= 1\n ready = (pending_count[x.op._id] == 0)\n if loop_state and not ready:\n ready = (pending_count[x.op._id] > 0 and\n control_flow_ops.IsLoopSwitch(x.op))\n # pylint: enable=protected-access\n if ready:\n if control_flow_ops.IsLoopExit(x.op):\n # if x is an exit without real gradient, defer processing them.\n grad_state = loop_state.GetGradState(x.op, before=False)\n grad_state.deferred_exits.append(x)\n grad_state.pending_exits_count -= 1\n if grad_state.pending_exits_count == 0:\n # We now have all the exits so process them.\n has_real_grad = False\n for y in grad_state.deferred_exits:\n if _HasAnyNotNoneGrads(grads, y.op):\n has_real_grad = True\n queue.append(y.op)\n else:\n grad_state.unused_exits.append(y)\n if has_real_grad:\n # For an unused exit, if it has floating-point outputs, backprop\n # a zero gradient. 
Otherwise, just ignore it.\n for y in grad_state.unused_exits:\n if _IsTrainable(y):\n _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))\n queue.append(y.op)\n else:\n # All exits are \"unused\" so use None as gradient.\n for y in grad_state.unused_exits:\n queue.append(y.op)\n else:\n queue.append(x.op)\n\n\ndef _SetGrad(grads, t, grad):\n \"\"\"Sets gradient \"grad\" in \"grads\" for tensor \"t\".\"\"\"\n op = t.op\n op_grads = grads.get(op)\n if not op_grads:\n op_grads = [[] for _ in xrange(len(op.outputs))]\n grads[op] = op_grads\n t_grads = op_grads[t.value_index]\n if isinstance(t_grads, list):\n t_grads.append(grad)\n else:\n assert control_flow_ops.IsLoopSwitch(op)\n op_grads[t.value_index] = grad\n\n\ndef _GetGrad(grads, t):\n \"\"\"Gets gradient for tensor \"t\".\"\"\"\n op = t.op\n op_grads = grads.get(op)\n if not op_grads:\n return None\n t_grad = op_grads[t.value_index]\n assert not isinstance(t_grad, list), (\n \"gradients list should have been aggregated by now.\")\n return t_grad\n\n\ndef _GetGrads(grads, op):\n \"\"\"Gets all gradients for op.\"\"\"\n if op in grads:\n return grads[op]\n else:\n return [[] for _ in xrange(len(op.outputs))]\n\n\ndef _HandleNestedIndexedSlices(grad):\n assert isinstance(grad, ops.IndexedSlices)\n if isinstance(grad.values, ops.Tensor):\n return grad\n else:\n assert isinstance(grad.values, ops.IndexedSlices)\n g = _HandleNestedIndexedSlices(grad.values)\n return ops.IndexedSlices(g.values,\n array_ops.gather(grad.indices, g.indices),\n g.dense_shape)\n\n\ndef _AccumulatorShape(inputs):\n shape = tensor_shape.unknown_shape()\n for i in inputs:\n if isinstance(i, ops.Tensor):\n shape = shape.merge_with(i.get_shape())\n return shape\n\n\ndef _LogOpGradients(op, out_grads, in_grads):\n \"\"\"Log the in and out grads of an op.\"\"\"\n logging.vlog(1, \"Gradient for '\" + op.name + \"'\")\n\n def _FilterGrad(x):\n if x is None:\n return False\n if isinstance(x, (list, tuple)):\n return bool(x)\n else:\n return True\n\n logging.vlog(1, \" in --> %s\",\n \", \".join([x.name for x in out_grads if _FilterGrad(x)]))\n logging.vlog(1, \" out --> %s\",\n \", \".join([x.name for x in in_grads if _FilterGrad(x)]))\n\n\ndef _MultiDeviceAddN(tensor_list):\n \"\"\"Adds tensors from potentially multiple devices.\"\"\"\n # Basic function structure comes from control_flow_ops.group().\n # Sort tensors according to their devices.\n tensors_on_device = collections.defaultdict(lambda: [])\n for tensor in tensor_list:\n tensors_on_device[tensor.device].append(tensor)\n\n # For each device, add the tensors on that device first.\n # Then gather the partial sums from multiple devices.\n # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.\n # E.g., aggregate per GPU, then per task, and so on.\n summands = []\n\n def DeviceKey(dev):\n return \"\" if dev is None else dev\n\n for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):\n tensors = tensors_on_device[dev]\n with ops.colocate_with(tensors[0].op, ignore_existing=True):\n summands.append(math_ops.add_n(tensors))\n\n return math_ops.add_n(summands)\n\n\nclass AggregationMethod(object):\n \"\"\"A class listing aggregation methods used to combine gradients.\n\n Computing partial derivatives can require aggregating gradient\n contributions. This class lists the various methods that can\n be used to combine gradients in the graph:\n\n * `ADD_N`: All of the gradient terms are summed as part of one\n operation using the \"AddN\" op. 
It has the property that all\n gradients must be ready before any aggregation is performed.\n * `DEFAULT`: The system-chosen default aggregation method.\n \"\"\"\n ADD_N = 0\n DEFAULT = ADD_N\n # The following are experimental and may not be supported in future releases.\n EXPERIMENTAL_TREE = 1\n EXPERIMENTAL_ACCUMULATE_N = 2\n\n\ndef _AggregatedGrads(grads, op, loop_state, aggregation_method=None):\n \"\"\"Get the aggregated gradients for op.\n\n Args:\n grads: The map of memoized gradients.\n op: The op to get gradients for.\n loop_state: An object for maintaining the state of the while loops in the\n graph. It is of type ControlFlowState. None if the graph\n contains no while loops.\n aggregation_method: Specifies the method used to combine gradient terms.\n Accepted values are constants defined in the class `AggregationMethod`.\n\n Returns:\n A list of gradients, one per each output of `op`. If the gradients\n for a particular output is a list, this function aggregates it\n before returning.\n\n Raises:\n TypeError: if the incoming grads are not Tensors or IndexedSlices.\n ValueError: if the arguments are invalid.\n\n \"\"\"\n if aggregation_method is None:\n aggregation_method = AggregationMethod.DEFAULT\n if aggregation_method not in [\n AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,\n AggregationMethod.EXPERIMENTAL_ACCUMULATE_N\n ]:\n raise ValueError(\"Invalid aggregation_method specified %s.\" %\n aggregation_method)\n out_grads = _GetGrads(grads, op)\n for i, out_grad in enumerate(out_grads):\n if loop_state:\n if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):\n assert control_flow_ops.IsLoopSwitch(op)\n continue\n # Grads have to be Tensors or IndexedSlices\n if (isinstance(out_grad, collections.Sequence) and not all([\n isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad\n if g is not None\n ])):\n raise TypeError(\"gradients have to be either all Tensors \"\n \"or all IndexedSlices\")\n # Aggregate multiple gradients, and convert [] to None.\n if out_grad:\n if len(out_grad) < 2:\n used = \"nop\"\n out_grads[i] = out_grad[0]\n elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):\n tensor_shape = _AccumulatorShape(out_grad)\n if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N\n and len(out_grad) > 2 and tensor_shape.is_fully_defined()):\n # The benefit of using AccumulateN is that its inputs can be combined\n # in any order and this can allow the expression to be evaluated with\n # a smaller memory footprint. When used with gpu_allocator_retry,\n # it is possible to compute a sum of terms which are much larger than\n # total GPU memory.\n # AccumulateN can currently only be used if we know the shape for\n # an accumulator variable. 
If this is not known, or if we only have\n # 2 grads then we fall through to the \"tree\" case below.\n used = \"accumulate_n\"\n out_grads[i] = math_ops.accumulate_n(out_grad)\n elif aggregation_method in [\n AggregationMethod.EXPERIMENTAL_TREE,\n AggregationMethod.EXPERIMENTAL_ACCUMULATE_N\n ]:\n # Aggregate all gradients by doing pairwise sums: this may\n # reduce performance, but it can improve memory because the\n # gradients can be released earlier.\n #\n # TODO(vrv): Consider replacing this with a version of\n # tf.AddN() that eagerly frees its inputs as soon as they are\n # ready, so the order of this tree does not become a problem.\n used = \"tree\"\n with ops.name_scope(op.name + \"_gradient_sum\"):\n running_sum = out_grad[0]\n for grad in out_grad[1:]:\n running_sum = math_ops.add_n([running_sum, grad])\n out_grads[i] = running_sum\n else:\n used = \"add_n\"\n out_grads[i] = _MultiDeviceAddN(out_grad)\n logging.vlog(2, \" _AggregatedGrads %d x %s using %s\",\n len(out_grad), tensor_shape, used)\n else:\n out_grad = math_ops._as_indexed_slices_list(\n [g for g in out_grad if g is not None])\n out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]\n # Form IndexedSlices out of the concatenated values and\n # indices.\n out_grads[i] = ops.IndexedSlices(\n array_ops.concat([x.values for x in out_grad], 0),\n array_ops.concat([x.indices for x in out_grad], 0),\n out_grad[0].dense_shape)\n else: # not out_grad\n # out_grads[i] is [], thus its aggregation is simply None.\n out_grads[i] = None\n return out_grads\n\n\n# TODO(vrv): Make this available when we want to make it public.\ndef _hessian_vector_product(ys, xs, v):\n \"\"\"Multiply the Hessian of `ys` wrt `xs` by `v`.\n\n This is an efficient construction that uses a backprop-like approach\n to compute the product between the Hessian and another vector. The\n Hessian is usually too large to be explicitly computed or even\n represented, but this method allows us to at least multiply by it\n for the same big-O cost as backprop.\n\n Implicit Hessian-vector products are the main practical, scalable way\n of using second derivatives with neural networks. 
They allow us to\n do things like construct Krylov subspaces and approximate conjugate\n gradient descent.\n\n Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,\n x, v)` will return an expression that evaluates to the same values\n as (A + A.T) `v`.\n\n Args:\n ys: A scalar value, or a tensor or list of tensors to be summed to\n yield a scalar.\n xs: A list of tensors that we should construct the Hessian over.\n v: A list of tensors, with the same shapes as xs, that we want to\n multiply by the Hessian.\n\n Returns:\n A list of tensors (or if the list would be length 1, a single tensor)\n containing the product between the Hessian and `v`.\n\n Raises:\n ValueError: `xs` and `v` have different length.\n\n \"\"\"\n\n # Validate the input\n length = len(xs)\n if len(v) != length:\n raise ValueError(\"xs and v must have the same length.\")\n\n # First backprop\n grads = gradients(ys, xs)\n\n assert len(grads) == length\n elemwise_products = [\n math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))\n for grad_elem, v_elem in zip(grads, v) if grad_elem is not None\n ]\n\n # Second backprop\n return gradients(elemwise_products, xs)\n\n\ndef hessians(ys, xs, name=\"hessians\", colocate_gradients_with_ops=False,\n gate_gradients=False, aggregation_method=None):\n \"\"\"Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.\n\n `hessians()` adds ops to the graph to output the Hessian matrix of `ys`\n with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`\n where each tensor is the Hessian of `sum(ys)`. This function currently\n only supports evaluating the Hessian with respect to (a list of) one-\n dimensional tensors.\n\n The Hessian is a matrix of second-order partial derivatives of a scalar\n tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).\n\n Args:\n ys: A `Tensor` or list of tensors to be differentiated.\n xs: A `Tensor` or list of tensors to be used for differentiation.\n name: Optional name to use for grouping all the gradient ops together.\n defaults to 'hessians'.\n colocate_gradients_with_ops: See `gradients()` documentation for details.\n gate_gradients: See `gradients()` documentation for details.\n aggregation_method: See `gradients()` documentation for details.\n\n Returns:\n A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.\n\n Raises:\n LookupError: if one of the operations between `xs` and `ys` does not\n have a registered gradient function.\n \"\"\"\n xs = _AsList(xs)\n kwargs = {\n 'colocate_gradients_with_ops': colocate_gradients_with_ops,\n 'gate_gradients': gate_gradients,\n 'aggregation_method': aggregation_method\n }\n # Compute first-order derivatives and iterate for each x in xs.\n hessians = []\n _gradients = gradients(ys, xs, **kwargs)\n for i, _gradient, x in zip(range(len(xs)), _gradients, xs):\n # Ensure that x is a vector.\n check_rank = check_ops.assert_rank(\n x, 1, message='Cannot compute Hessian because element %d of `xs` does '\n 'not have rank one.' 
% i\n )\n with ops.control_dependencies([check_rank]):\n # Declare an iterator and tensor array loop variables for the gradients.\n n = array_ops.size(x)\n loop_vars = [\n array_ops.constant(0, dtypes.int32),\n tensor_array_ops.TensorArray(x.dtype, n)\n ]\n # Iterate over all elements of the gradient and compute second order\n # derivatives.\n _, hessian = control_flow_ops.while_loop(\n lambda j, _: j < n,\n lambda j, result: (j + 1,\n result.write(j, gradients(_gradient[j], x)[0])),\n loop_vars\n )\n\n hessians.append(hessian.stack())\n return hessians\n", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functional tests for DepthToSpace op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\n\n\nclass DepthToSpaceTest(test.TestCase):\n\n def _testOne(self, inputs, block_size, outputs):\n input_nhwc = math_ops.to_float(inputs)\n with self.test_session(use_gpu=False):\n # test NHWC (default) on CPU\n x_tf = array_ops.depth_to_space(input_nhwc, block_size)\n self.assertAllEqual(x_tf.eval(), outputs)\n if test.is_gpu_available():\n with self.test_session(use_gpu=True):\n # test NHWC (default) on GPU\n x_tf = array_ops.depth_to_space(input_nhwc, block_size)\n self.assertAllEqual(x_tf.eval(), outputs)\n # test NCHW on GPU\n input_nchw = test_util.NHWCToNCHW(input_nhwc)\n output_nchw = array_ops.depth_to_space(\n input_nchw, block_size, data_format=\"NCHW\")\n output_nhwc = test_util.NCHWToNHWC(output_nchw)\n self.assertAllEqual(output_nhwc.eval(), outputs)\n\n def testBasic(self):\n x_np = [[[[1, 2, 3, 4]]]]\n block_size = 2\n x_out = [[[[1], [2]], [[3], [4]]]]\n self._testOne(x_np, block_size, x_out)\n\n # Tests for larger input dimensions. 
To make sure elements are\n # correctly ordered spatially.\n def testBlockSize2(self):\n x_np = [[[[1, 2, 3, 4],\n [5, 6, 7, 8]],\n [[9, 10, 11, 12],\n [13, 14, 15, 16]]]]\n block_size = 2\n x_out = [[[[1], [2], [5], [6]],\n [[3], [4], [7], [8]],\n [[9], [10], [13], [14]],\n [[11], [12], [15], [16]]]]\n self._testOne(x_np, block_size, x_out)\n\n def testBlockSize2Batch10(self):\n block_size = 2\n def batch_input_elt(i):\n return [[[1 * i, 2 * i, 3 * i, 4 * i],\n [5 * i, 6 * i, 7 * i, 8 * i]],\n [[9 * i, 10 * i, 11 * i, 12 * i],\n [13 * i, 14 * i, 15 * i, 16 * i]]]\n def batch_output_elt(i):\n return [[[1 * i], [2 * i], [5 * i], [6 * i]],\n [[3 * i], [4 * i], [7 * i], [8 * i]],\n [[9 * i], [10 * i], [13 * i], [14 * i]],\n [[11 * i], [12 * i], [15 * i], [16 * i]]]\n batch_size = 10\n x_np = [batch_input_elt(i) for i in range(batch_size)]\n x_out = [batch_output_elt(i) for i in range(batch_size)]\n self._testOne(x_np, block_size, x_out)\n\n # Tests for different width and height.\n def testNonSquare(self):\n x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]],\n [[5, 50, 6, 60, 7, 70, 8, 80]],\n [[9, 90, 10, 100, 11, 110, 12, 120]]]]\n block_size = 2\n x_out = [[[[1, 10], [2, 20]],\n [[3, 30], [4, 40]],\n [[5, 50], [6, 60]],\n [[7, 70], [8, 80]],\n [[9, 90], [10, 100]],\n [[11, 110], [12, 120]]]]\n self._testOne(x_np, block_size, x_out)\n\n # Tests for larger input dimensions. To make sure elements are\n # correctly ordered spatially.\n def testBlockSize4FlatInput(self):\n x_np = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]\n block_size = 4\n x_out = [[[[1], [2], [5], [6]],\n [[3], [4], [7], [8]],\n [[9], [10], [13], [14]],\n [[11], [12], [15], [16]]]]\n self._testOne(x_np, block_size, x_out)\n\n # Tests for larger input depths.\n # To make sure elements are properly interleaved in depth.\n def testDepthInterleaved(self):\n x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]\n block_size = 2\n x_out = [[[[1, 10], [2, 20]],\n [[3, 30], [4, 40]]]]\n self._testOne(x_np, block_size, x_out)\n\n # Tests for larger input depths. Here an odd depth.\n # To make sure elements are properly interleaved in depth.\n def testDepthInterleavedDepth3(self):\n x_np = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]\n block_size = 2\n x_out = [[[[1, 2, 3], [4, 5, 6]],\n [[7, 8, 9], [10, 11, 12]]]]\n self._testOne(x_np, block_size, x_out)\n\n # Tests for larger input depths.\n # To make sure elements are properly interleaved in depth.\n def testDepthInterleavedLarger(self):\n x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40],\n [5, 50, 6, 60, 7, 70, 8, 80]],\n [[9, 90, 10, 100, 11, 110, 12, 120],\n [13, 130, 14, 140, 15, 150, 16, 160]]]]\n block_size = 2\n x_out = [[[[1, 10], [2, 20], [5, 50], [6, 60]],\n [[3, 30], [4, 40], [7, 70], [8, 80]],\n [[9, 90], [10, 100], [13, 130], [14, 140]],\n [[11, 110], [12, 120], [15, 150], [16, 160]]]]\n self._testOne(x_np, block_size, x_out)\n\n # Error handling:\n\n # Tests for a block larger for the depth. 
In this case should raise an\n # exception.\n def testBlockSizeTooLarge(self):\n x_np = [[[[1, 2, 3, 4],\n [5, 6, 7, 8]],\n [[9, 10, 11, 12],\n [13, 14, 15, 16]]]]\n block_size = 4\n # Raise an exception, since th depth is only 4 and needs to be\n # divisible by 16.\n with self.assertRaises(ValueError):\n out_tf = array_ops.depth_to_space(x_np, block_size)\n out_tf.eval()\n\n # Test when the block size is 0.\n def testBlockSize0(self):\n x_np = [[[[1], [2]],\n [[3], [4]]]]\n block_size = 0\n with self.assertRaises(ValueError):\n out_tf = array_ops.depth_to_space(x_np, block_size)\n out_tf.eval()\n\n # Test when the block size is 1. The block size should be > 1.\n def testBlockSizeOne(self):\n x_np = [[[[1, 1, 1, 1],\n [2, 2, 2, 2]],\n [[3, 3, 3, 3],\n [4, 4, 4, 4]]]]\n block_size = 1\n with self.assertRaises(ValueError):\n out_tf = array_ops.depth_to_space(x_np, block_size)\n out_tf.eval()\n\n def testBlockSizeLargerThanInput(self):\n # The block size is too large for this input.\n x_np = [[[[1], [2]],\n [[3], [4]]]]\n block_size = 10\n with self.assertRaises(ValueError):\n out_tf = array_ops.space_to_depth(x_np, block_size)\n out_tf.eval()\n\n def testBlockSizeNotDivisibleDepth(self):\n # The depth is not divisible by the square of the block size.\n x_np = [[[[1, 1, 1, 1],\n [2, 2, 2, 2]],\n [[3, 3, 3, 3],\n [4, 4, 4, 4]]]]\n block_size = 3\n with self.assertRaises(ValueError):\n _ = array_ops.space_to_depth(x_np, block_size)\n\n def testUnknownShape(self):\n t = array_ops.depth_to_space(\n array_ops.placeholder(dtypes.float32), block_size=4)\n self.assertEqual(4, t.get_shape().ndims)\n\n def depthToSpaceUsingTranspose(self, tensor, block_size, data_format):\n block_size_sq = block_size * block_size\n if data_format == \"NHWC\":\n b, ih, iw, ic = tensor.shape.as_list()\n assert ic % block_size_sq == 0, (ic, block_size_sq)\n ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq\n tensor = array_ops.reshape(tensor,\n [b, ih, iw, block_size, block_size, oc])\n tensor = array_ops.transpose(tensor, [0, 1, 3, 2, 4, 5])\n tensor = array_ops.reshape(tensor, [b, oh, ow, oc])\n elif data_format == \"NCHW\":\n b, ic, ih, iw = tensor.shape.as_list()\n assert ic % block_size_sq == 0, (ic, block_size_sq)\n ow, oh, oc = iw * block_size, ih * block_size, ic // block_size_sq\n tensor = array_ops.reshape(tensor,\n [b, block_size, block_size, oc, ih, iw])\n tensor = array_ops.transpose(tensor, [0, 3, 4, 1, 5, 2])\n tensor = array_ops.reshape(tensor, [b, oc, oh, ow])\n return tensor\n\n def compareToTranspose(self, batch_size, in_height, in_width, out_channels,\n block_size, data_format, use_gpu):\n in_channels = out_channels * block_size * block_size\n nhwc_input_shape = [batch_size, in_height, in_width, in_channels]\n nchw_input_shape = [batch_size, in_channels, in_height, in_width]\n total_size = np.prod(nhwc_input_shape)\n\n if data_format == \"NCHW_VECT_C\":\n # Initialize the input tensor with qint8 values that circle -127..127.\n x = [((f + 128) % 255) - 127 for f in range(total_size)]\n t = constant_op.constant(x, shape=nhwc_input_shape, dtype=dtypes.float32)\n expected = self.depthToSpaceUsingTranspose(t, block_size, \"NHWC\")\n t = test_util.NHWCToNCHW_VECT_C(t)\n t, _, _ = gen_array_ops.quantize_v2(t, -128.0, 127.0, dtypes.qint8)\n t = array_ops.depth_to_space(t, block_size, data_format=\"NCHW_VECT_C\")\n t = gen_array_ops.dequantize(t, -128, 127)\n actual = test_util.NCHW_VECT_CToNHWC(t)\n else:\n # Initialize the input tensor with ascending whole numbers as floats.\n x = [f * 1.0 
for f in range(total_size)]\n shape = nchw_input_shape if data_format == \"NCHW\" else nhwc_input_shape\n t = constant_op.constant(x, shape=shape, dtype=dtypes.float32)\n expected = self.depthToSpaceUsingTranspose(t, block_size, data_format)\n actual = array_ops.depth_to_space(t, block_size, data_format=data_format)\n\n with self.test_session(use_gpu=use_gpu) as sess:\n actual_vals, expected_vals = sess.run([actual, expected])\n self.assertTrue(np.array_equal(actual_vals, expected_vals))\n\n def testAgainstTranspose(self):\n self.compareToTranspose(3, 2, 3, 1, 2, \"NHWC\", False)\n self.compareToTranspose(3, 2, 3, 2, 2, \"NHWC\", False)\n self.compareToTranspose(1, 2, 3, 2, 3, \"NHWC\", False)\n\n if not test.is_gpu_available():\n tf_logging.info(\"skipping gpu tests since gpu not available\")\n return\n\n self.compareToTranspose(3, 2, 3, 1, 2, \"NHWC\", True)\n self.compareToTranspose(3, 2, 3, 2, 2, \"NHWC\", True)\n self.compareToTranspose(3, 2, 3, 1, 2, \"NCHW\", True)\n self.compareToTranspose(3, 2, 3, 2, 2, \"NCHW\", True)\n self.compareToTranspose(3, 2, 3, 1, 3, \"NCHW\", True)\n self.compareToTranspose(3, 2, 3, 2, 3, \"NCHW\", True)\n self.compareToTranspose(5, 7, 11, 3, 2, \"NCHW\", True)\n self.compareToTranspose(3, 200, 300, 32, 2, \"NCHW\", True)\n\n self.compareToTranspose(3, 2, 3, 8, 2, \"NCHW_VECT_C\", True)\n self.compareToTranspose(3, 2, 3, 4, 3, \"NCHW_VECT_C\", True)\n self.compareToTranspose(3, 2, 3, 8, 3, \"NCHW_VECT_C\", True)\n self.compareToTranspose(5, 7, 11, 12, 2, \"NCHW_VECT_C\", True)\n self.compareToTranspose(3, 200, 300, 32, 2, \"NCHW_VECT_C\", True)\n\n\nclass DepthToSpaceGradientTest(test.TestCase):\n\n # Check the gradients.\n def _checkGrad(self, x, block_size):\n assert 4 == x.ndim\n with self.test_session(use_gpu=True):\n tf_x = ops.convert_to_tensor(x)\n tf_y = array_ops.depth_to_space(tf_x, block_size)\n epsilon = 1e-2\n ((x_jacob_t, x_jacob_n)) = gradient_checker.compute_gradient(\n tf_x,\n x.shape,\n tf_y,\n tf_y.get_shape().as_list(),\n x_init_value=x,\n delta=epsilon)\n\n self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)\n\n # Tests a gradient for depth_to_space of x which is a four dimensional\n # tensor of shape [b, h, w, d * block_size * block_size].\n def _compare(self, b, h, w, d, block_size):\n block_size_sq = block_size * block_size\n x = np.random.normal(\n 0, 1, b * h * w * d * block_size_sq).astype(np.float32).reshape(\n [b, h, w, d * block_size_sq])\n\n self._checkGrad(x, block_size)\n\n # Don't use very large numbers as dimensions here, as the result is tensor\n # with cartesian product of the dimensions.\n def testSmall(self):\n block_size = 2\n self._compare(3, 2, 5, 3, block_size)\n\n def testSmall2(self):\n block_size = 3\n self._compare(1, 2, 3, 2, block_size)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmarks for Cudnn RNN models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nfrom tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops\nfrom tensorflow.contrib.rnn.python.ops import core_rnn\nfrom tensorflow.contrib.rnn.python.ops import lstm_ops\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import rnn_cell\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass CudnnRNNBenchmark(test.Benchmark):\n \"\"\"Benchmarks Cudnn LSTM and other related models.\n \"\"\"\n\n def _GetTestConfig(self):\n return {\n \"large\": {\n \"num_layers\": 4,\n \"num_units\": 1024,\n \"seq_length\": 40,\n \"batch_size\": 64,\n },\n \"medium\": {\n \"num_layers\": 4,\n \"num_units\": 512,\n \"seq_length\": 30,\n \"batch_size\": 64,\n },\n \"small\": {\n \"num_layers\": 4,\n \"num_units\": 128,\n \"seq_length\": 20,\n \"batch_size\": 64,\n },\n }\n\n def _GetConfigDesc(self, config):\n num_layers = config[\"num_layers\"]\n num_units = config[\"num_units\"]\n batch_size = config[\"batch_size\"]\n seq_length = config[\"seq_length\"]\n\n return \"y%d_u%d_b%d_q%d\" % (num_layers, num_units, batch_size, seq_length)\n\n def _BenchmarkOp(self, op, desc):\n burn_in_steps = 10\n benchmark_steps = 40\n with session.Session() as sess:\n sess.run(variables.global_variables_initializer())\n # Use `range` rather than the Python 2-only `xrange`; the step count\n # is small, so materializing the sequence is harmless.\n for i in range(burn_in_steps + benchmark_steps):\n if i == burn_in_steps:\n start_time = time.time()\n sess.run(op)\n total_time = time.time() - start_time\n step_time = total_time / benchmark_steps\n print(\"%s takes %.4f sec/step\" % (desc, step_time))\n self.report_benchmark(\n name=desc, iters=benchmark_steps, wall_time=total_time)\n\n def benchmarkCudnnLSTMTraining(self):\n test_configs = self._GetTestConfig()\n for config_name, config in test_configs.items():\n num_layers = config[\"num_layers\"]\n num_units = config[\"num_units\"]\n batch_size = config[\"batch_size\"]\n seq_length = config[\"seq_length\"]\n\n with ops.Graph().as_default(), ops.device(\"/device:GPU:0\"):\n model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units)\n params_size_t = model.params_size()\n input_data = variables.Variable(\n array_ops.ones([seq_length, batch_size, num_units]))\n input_h = variables.Variable(\n array_ops.ones([num_layers, batch_size, num_units]))\n input_c = variables.Variable(\n array_ops.ones([num_layers, batch_size, num_units]))\n params = variables.Variable(\n 
array_ops.ones([params_size_t]), validate_shape=False)\n output, output_h, output_c = model(\n is_training=True,\n input_data=input_data,\n input_h=input_h,\n input_c=input_c,\n params=params)\n all_grads = gradients_impl.gradients(\n [output, output_h, output_c],\n [params, input_data, input_h, input_c])\n training_op = control_flow_ops.group(*all_grads)\n self._BenchmarkOp(training_op, \"cudnn_lstm %s %s\" %\n (config_name, self._GetConfigDesc(config)))\n\n def benchmarkTfRNNLSTMTraining(self):\n test_configs = self._GetTestConfig()\n for config_name, config in test_configs.items():\n num_layers = config[\"num_layers\"]\n num_units = config[\"num_units\"]\n batch_size = config[\"batch_size\"]\n seq_length = config[\"seq_length\"]\n\n with ops.Graph().as_default(), ops.device(\"/device:GPU:0\"):\n inputs = seq_length * [\n array_ops.zeros([batch_size, num_units], dtypes.float32)\n ]\n initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)\n\n # A factory rather than a single instance, so that cell() below\n # builds a fresh cell for every layer of the MultiRNNCell.\n cell = lambda: rnn_cell.LSTMCell( # pylint: disable=cell-var-from-loop\n num_units=num_units, initializer=initializer, state_is_tuple=True)\n multi_cell = rnn_cell.MultiRNNCell(\n [cell() for _ in range(num_layers)])\n outputs, final_state = core_rnn.static_rnn(\n multi_cell, inputs, dtype=dtypes.float32)\n trainable_variables = ops.get_collection(\n ops.GraphKeys.TRAINABLE_VARIABLES)\n gradients = gradients_impl.gradients([outputs, final_state],\n trainable_variables)\n training_op = control_flow_ops.group(*gradients)\n self._BenchmarkOp(training_op, \"tf_rnn_lstm %s %s\" %\n (config_name, self._GetConfigDesc(config)))\n\n def benchmarkTfRNNLSTMBlockCellTraining(self):\n test_configs = self._GetTestConfig()\n for config_name, config in test_configs.items():\n num_layers = config[\"num_layers\"]\n num_units = config[\"num_units\"]\n batch_size = config[\"batch_size\"]\n seq_length = config[\"seq_length\"]\n\n with ops.Graph().as_default(), ops.device(\"/device:GPU:0\"):\n inputs = seq_length * [\n array_ops.zeros([batch_size, num_units], dtypes.float32)\n ]\n cell = lambda: lstm_ops.LSTMBlockCell(num_units=num_units) # pylint: disable=cell-var-from-loop\n\n multi_cell = rnn_cell.MultiRNNCell(\n [cell() for _ in range(num_layers)])\n outputs, final_state = core_rnn.static_rnn(\n multi_cell, inputs, dtype=dtypes.float32)\n trainable_variables = ops.get_collection(\n ops.GraphKeys.TRAINABLE_VARIABLES)\n gradients = gradients_impl.gradients([outputs, final_state],\n trainable_variables)\n training_op = control_flow_ops.group(*gradients)\n self._BenchmarkOp(training_op, \"tf_rnn_lstm_block_cell %s %s\" %\n (config_name, self._GetConfigDesc(config)))\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Abstractions for the head(s) of a model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\n\nimport six\n\nfrom tensorflow.python.estimator import model_fn\nfrom tensorflow.python.estimator.canned import metric_keys\nfrom tensorflow.python.estimator.canned import prediction_keys\nfrom tensorflow.python.estimator.export import export_output\nfrom tensorflow.python.feature_column import feature_column as feature_column_lib\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import metrics as metrics_lib\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import weights_broadcast_ops\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.summary import summary\n\n_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n\n# The above default is defined by TF Serving, but these next three are just\n# a local convention without any special meaning.\n_CLASSIFY_SERVING_KEY = 'classification'\n_REGRESS_SERVING_KEY = 'regression'\n_PREDICT_SERVING_KEY = 'predict'\n\n\nLossAndLabels = collections.namedtuple('LossAndLabels',\n ['unweighted_loss', 'processed_labels'])\n\n\ndef _summary_key(head_name, val):\n return '%s/%s' % (val, head_name) if head_name else val\n\n\nclass _Head(object):\n \"\"\"Interface for the head/top of a model.\n\n Given logits (or output of a hidden layer), a Head knows how to compute\n predictions, loss, train_op, metrics and export outputs. It is meant to:\n\n 1. Simplify writing model_fn and to make model_fn more configurable\n 2. Support wide range of machine learning models. 
Since most heads can work\n with logits, they can support DNN, RNN, Wide, Wide&Deep,\n Global objectives, Gradient boosted trees and many other types\n of machine learning models.\n\n Common usage:\n Here is a simplified model_fn to build a DNN regression model.\n ```python\n def _my_dnn_model_fn(features, labels, mode, params, config=None):\n # Optionally your callers can pass head to model_fn as a param.\n head = tf.contrib.learn.regression_head(...)\n input = tf.contrib.layers.input_from_feature_columns(features, ...)\n last_hidden_layer_out = tf.contrib.layers.stack(\n input, tf.contrib.layers.fully_connected, [1000, 500])\n logits = tf.contrib.layers.fully_connected(\n last_hidden_layer_out, head.logits_dimension, activation_fn=None)\n\n def _train_op_fn(loss):\n return optimizer.minimize(loss)\n\n return head.create_estimator_spec(\n features=features,\n labels=labels,\n mode=mode,\n logits=logits,\n train_op_fn=_train_op_fn)\n ```\n\n There are cases where computing and applying gradients cannot be meaningfully\n captured with the train_op_fn we support (for example, with a sync optimizer).\n In such cases, you can take responsibility yourself. Here is a common\n use case:\n ```python\n estimator_spec = head.create_estimator_spec(\n features=features,\n labels=labels,\n mode=mode,\n logits=logits,\n train_op_fn=tf.contrib.learn.no_op_train_fn)\n if mode == model_fn.ModeKeys.TRAIN:\n optimizer = ...\n sync = tf.train.SyncReplicasOptimizer(opt=optimizer, ...)\n update_op = tf.contrib.layers.optimize_loss(optimizer=sync,\n loss=estimator_spec.loss, ...)\n hooks = [sync.make_session_run_hook(is_chief)]\n ... update train_op and hooks in EstimatorSpec and return\n ```\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractproperty\n def name(self):\n \"\"\"The name of this head.\n\n Returns:\n A string.\n \"\"\"\n raise NotImplementedError('Calling an abstract method.')\n\n @abc.abstractproperty\n def logits_dimension(self):\n \"\"\"Size of the last dimension of the logits `Tensor`.\n\n Typically, logits is of shape `[batch_size, logits_dimension]`.\n\n Returns:\n The expected size of the `logits` tensor.\n \"\"\"\n raise NotImplementedError('Calling an abstract method.')\n\n @abc.abstractmethod\n def create_loss(self, features, mode, logits, labels):\n \"\"\"Returns a loss Tensor from the provided logits.\n\n This function is designed to be used by framework developers. Almost all\n users should use create_estimator_spec(), which calls this internally.\n `mode` and `features` are most likely not used, but some Head\n implementations may require them.\n\n Args:\n features: Input `dict` of `Tensor` objects.\n mode: Estimator's `ModeKeys`.\n logits: logits `Tensor` to be used for loss construction.\n labels: Labels `Tensor`, or `dict` of same.\n\n Returns:\n A LossAndLabels that contains the `Tensor` representing the loss and\n possibly processed labels (e.g. 
vocabulary lookup, shape manipulation,\n etc.), to be extendable in the future.\n \"\"\"\n raise NotImplementedError('Calling an abstract method.')\n\n @abc.abstractmethod\n def create_estimator_spec(\n self, features, mode, logits, labels=None, train_op_fn=None):\n \"\"\"Returns `EstimatorSpec` that a model_fn can return.\n\n Please note that,\n + All args must be passed via name.\n\n Args:\n features: Input `dict` of `Tensor` objects.\n mode: Estimator's `ModeKeys`.\n logits: logits `Tensor` to be used by the head.\n labels: Labels `Tensor`, or `dict` of same.\n train_op_fn: Function that takes a scalar loss `Tensor` and returns an op\n to optimize the model with the loss. This is used in TRAIN mode and\n must not be None. None is allowed in other modes. If you want to\n optimize loss yourself you can pass `no_op_train_fn` and then use\n EstimatorSpec.loss to compute and apply gradients.\n\n Returns:\n `EstimatorSpec`.\n \"\"\"\n raise NotImplementedError('Calling an abstract method.')\n\n\ndef _maybe_expand_dim(tensor):\n \"\"\"Expand the dim of `tensor` with static rank 1.\"\"\"\n with ops.name_scope(None, 'maybe_expand_dim', (tensor,)):\n static_shape = tensor.shape\n if static_shape is None:\n return tensor\n\n return (array_ops.expand_dims(tensor, -1) if static_shape.ndims == 1\n else tensor)\n\n\ndef _check_and_reshape_dense_labels(labels, expected_labels_dimension):\n \"\"\"Checks dense labels type and shape and reshapes to 2D Tensor.\"\"\"\n if labels is None:\n raise ValueError(\n 'You must provide a labels Tensor. Given: None. '\n 'Suggested troubleshooting steps: Check that your data contain '\n 'your label feature. Check that your input_fn properly parses and '\n 'returns labels.')\n with ops.name_scope(None, 'labels', (labels,)) as scope:\n labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)\n if isinstance(labels, sparse_tensor.SparseTensor):\n raise ValueError(\n 'SparseTensor labels are not supported. '\n 'labels must be a Tensor of shape [batch_size, %s]. '\n 'Suggested Fix (1): Check the label feature in your data. '\n 'Each example must contain %s value(s). If not, your choice of label '\n 'was probably incorrect. '\n 'Suggested Fix (2): In your input_fn, use '\n 'tf.sparse_tensor_to_dense() to turn labels into a Tensor.'\n '' % (expected_labels_dimension, expected_labels_dimension))\n labels = _maybe_expand_dim(labels)\n labels_shape = array_ops.shape(labels)\n err_msg = 'labels shape must be [batch_size, {}]'.format(\n expected_labels_dimension)\n assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)\n with ops.control_dependencies([assert_rank]):\n static_shape = labels.shape\n if static_shape is not None:\n dim1 = static_shape[1]\n if (dim1 is not None) and (dim1 != expected_labels_dimension):\n raise ValueError(\n 'Mismatched label shape. '\n 'Classifier configured with n_classes=%s. Received %s. '\n 'Suggested Fix: check your n_classes argument to the estimator '\n 'and/or the shape of your label.' 
%\n (expected_labels_dimension, dim1))\n assert_dimension = check_ops.assert_equal(\n expected_labels_dimension, labels_shape[1], message=err_msg)\n with ops.control_dependencies([assert_dimension]):\n return array_ops.identity(labels, name=scope)\n\n\ndef _check_logits(logits, expected_logits_dimension):\n \"\"\"Check logits type and shape.\"\"\"\n with ops.name_scope(None, 'logits', (logits,)) as scope:\n logits = math_ops.to_float(logits)\n logits_shape = array_ops.shape(logits)\n assert_rank = check_ops.assert_rank(\n logits, 2, data=[logits_shape],\n message='logits shape must be [batch_size, logits_dimension]')\n with ops.control_dependencies([assert_rank]):\n static_shape = logits.shape\n if static_shape is not None:\n dim1 = static_shape[1]\n if (dim1 is not None) and (dim1 != expected_logits_dimension):\n raise ValueError(\n 'logits shape must be [batch_size, logits_dimension], got %s.' %\n (static_shape,))\n assert_dimension = check_ops.assert_equal(\n expected_logits_dimension, logits_shape[1], data=[logits_shape],\n message='logits shape must be [batch_size, logits_dimension]')\n with ops.control_dependencies([assert_dimension]):\n return array_ops.identity(logits, name=scope)\n\n\ndef _indicator_labels_mean(labels, weights=None, name=None):\n with ops.name_scope(name, 'labels_mean', (labels, weights)) as scope:\n labels = math_ops.to_float(labels, name='labels')\n if weights is not None:\n weights = weights_broadcast_ops.broadcast_weights(weights, labels)\n return metrics_lib.mean(labels, weights=weights, name=scope)\n\n\ndef _classification_output(scores, n_classes, label_vocabulary=None):\n batch_size = array_ops.shape(scores)[0]\n if label_vocabulary:\n export_class_list = label_vocabulary\n else:\n export_class_list = string_ops.as_string(math_ops.range(n_classes))\n export_output_classes = array_ops.tile(\n input=array_ops.expand_dims(input=export_class_list, axis=0),\n multiples=[batch_size, 1])\n return export_output.ClassificationOutput(\n scores=scores,\n # `ClassificationOutput` requires string classes.\n classes=export_output_classes)\n\n\ndef _accuracy_baseline(labels_mean):\n \"\"\"Return accuracy baseline based on labels mean.\n\n This is the best the model could do by always predicting one class.\n\n Args:\n labels_mean: Tuple of value and update op.\n\n Returns:\n Tuple of value and update op.\n \"\"\"\n with ops.name_scope(None, 'accuracy_baseline', labels_mean):\n value, update_op = labels_mean\n return (\n math_ops.maximum(value, 1. 
- value, name='value'),\n math_ops.maximum(update_op, 1 - update_op, name='update_op'))\n\n\ndef _predictions_mean(predictions, weights=None, name=None):\n with ops.name_scope(\n name, 'predictions_mean', (predictions, weights)) as scope:\n predictions = math_ops.to_float(predictions, name='predictions')\n if weights is not None:\n weights = weights_broadcast_ops.broadcast_weights(weights, predictions)\n return metrics_lib.mean(predictions, weights=weights, name=scope)\n\n\ndef _auc(labels, predictions, weights=None, curve='ROC', name=None):\n with ops.name_scope(name, 'auc', (predictions, labels, weights)) as scope:\n predictions = math_ops.to_float(predictions, name='predictions')\n if labels.dtype.base_dtype != dtypes.bool:\n logging.warning('Casting %s labels to bool.', labels.dtype)\n labels = math_ops.cast(labels, dtypes.bool)\n if weights is not None:\n weights = weights_broadcast_ops.broadcast_weights(weights, predictions)\n return metrics_lib.auc(\n labels=labels, predictions=predictions, weights=weights, curve=curve,\n name=scope)\n\n\ndef _accuracy_at_threshold(labels, predictions, weights, threshold, name=None):\n with ops.name_scope(\n name, 'accuracy_at_%s' % threshold,\n (predictions, labels, weights, threshold)) as scope:\n threshold_predictions = math_ops.to_float(\n math_ops.greater_equal(predictions, threshold))\n return metrics_lib.accuracy(\n labels=labels, predictions=threshold_predictions, weights=weights,\n name=scope)\n\n\ndef _precision_at_threshold(labels, predictions, weights, threshold, name=None):\n with ops.name_scope(\n name, 'precision_at_%s' % threshold,\n (predictions, labels, weights, threshold)) as scope:\n precision_tensor, update_op = metrics_lib.precision_at_thresholds(\n labels=labels, predictions=predictions, thresholds=(threshold,),\n weights=weights, name=scope)\n return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)\n\n\ndef _recall_at_threshold(labels, predictions, weights, threshold, name=None):\n with ops.name_scope(\n name, 'recall_at_%s' % threshold,\n (predictions, labels, weights, threshold)) as scope:\n precision_tensor, update_op = metrics_lib.recall_at_thresholds(\n labels=labels, predictions=predictions, thresholds=(threshold,),\n weights=weights, name=scope)\n return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)\n\n\ndef _multi_class_head_with_softmax_cross_entropy_loss(n_classes,\n weight_column=None,\n label_vocabulary=None,\n name=None):\n \"\"\"Creates a '_Head' for multi class classification.\n\n This head expects to be fed integer labels specifying the class index.\n\n Args:\n n_classes: Number of classes, must be greater than 2 (for 2 classes, use\n `_BinaryLogisticHeadWithSigmoidCrossEntropyLoss`).\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n label_vocabulary: A list of strings represents possible label values. If it\n is not given, that means labels are already encoded as integer within\n [0, n_classes). If given, labels must be string type and have any value in\n `label_vocabulary`. Also there will be errors if vocabulary is not\n provided and labels are string.\n name: name of the head. If provided, summary and metrics keys will be\n suffixed by `\"/\" + name`. 
Also used as `name_scope` when creating ops.\n\n Returns:\n An instance of `_Head` for multi class classification.\n\n Raises:\n ValueError: if `n_classes`, `metric_class_ids` or `label_keys` is invalid.\n \"\"\"\n if label_vocabulary is not None and not isinstance(label_vocabulary,\n (list, tuple)):\n raise ValueError('label_vocabulary should be a list. Given type: {}'.format(\n type(label_vocabulary)))\n\n return _MultiClassHeadWithSoftmaxCrossEntropyLoss(n_classes, weight_column,\n label_vocabulary, name)\n\n\nclass _MultiClassHeadWithSoftmaxCrossEntropyLoss(_Head):\n \"\"\"See `_multi_class_head_with_softmax_cross_entropy_loss`.\"\"\"\n\n def __init__(self,\n n_classes,\n weight_column=None,\n label_vocabulary=None,\n name=None):\n if (n_classes is None) or (n_classes <= 2):\n raise ValueError('n_classes must be > 2: %s.' % n_classes)\n self._n_classes = n_classes\n self._weight_column = weight_column\n self._label_vocabulary = label_vocabulary\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n @property\n def logits_dimension(self):\n return self._n_classes\n\n def _eval_metric_ops(self, labels, class_ids, weights, unweighted_loss):\n \"\"\"Returns the Eval metric ops.\"\"\"\n with ops.name_scope(\n None, 'metrics',\n (labels, class_ids, weights, unweighted_loss)):\n keys = metric_keys.MetricKeys\n metric_ops = {\n # Estimator already adds a metric for loss.\n # TODO(xiejw): Any other metrics?\n _summary_key(self._name, keys.LOSS_MEAN):\n metrics_lib.mean(\n unweighted_loss, weights=weights, name=keys.LOSS_MEAN),\n _summary_key(self._name, keys.ACCURACY):\n metrics_lib.accuracy(\n labels=labels,\n predictions=class_ids,\n weights=weights,\n name=keys.ACCURACY),\n }\n return metric_ops\n\n def _label_ids(self, labels):\n \"\"\"Converts labels to integer id space.\"\"\"\n if self._label_vocabulary is None:\n if not labels.dtype.is_integer:\n raise ValueError('Labels dtype should be integer '\n 'Instead got %s.' % labels.dtype)\n label_ids = labels\n else:\n if labels.dtype != dtypes.string:\n raise ValueError('Labels dtype should be string if there is a '\n 'vocabulary. 
Instead got {}'.format(labels.dtype))\n label_ids = lookup_ops.index_table_from_tensor(\n vocabulary_list=tuple(self._label_vocabulary),\n name='class_id_lookup').lookup(labels)\n return _assert_range(label_ids, self._n_classes)\n\n def create_loss(self, features, mode, logits, labels):\n \"\"\"See `Head`.\"\"\"\n del mode, features # Unused for this head.\n label_ids = self._label_ids(_check_and_reshape_dense_labels(labels, 1))\n unweighted_loss = losses.sparse_softmax_cross_entropy(\n labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)\n # Restore the squeezed dim, so unweighted_loss matches the weights shape.\n return LossAndLabels(\n unweighted_loss=array_ops.expand_dims(unweighted_loss, axis=(1,)),\n processed_labels=label_ids)\n\n def create_estimator_spec(\n self, features, mode, logits, labels=None, train_op_fn=None):\n \"\"\"See `Head`.\"\"\"\n with ops.name_scope(self._name, 'head'):\n logits = _check_logits(logits, self.logits_dimension)\n\n # Predict.\n pred_keys = prediction_keys.PredictionKeys\n with ops.name_scope(None, 'predictions', (logits,)):\n # class_ids's shape is [batch_size]\n class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASS_IDS)\n class_ids = array_ops.expand_dims(class_ids, axis=(1,))\n if self._label_vocabulary:\n table = lookup_ops.index_to_string_table_from_tensor(\n vocabulary_list=self._label_vocabulary,\n name='class_string_lookup')\n classes = table.lookup(class_ids)\n else:\n classes = string_ops.as_string(class_ids, name='str_classes')\n\n probabilities = nn.softmax(logits, name=pred_keys.PROBABILITIES)\n predictions = {\n pred_keys.LOGITS: logits,\n pred_keys.PROBABILITIES: probabilities,\n # Expand to [batch_size, 1]\n pred_keys.CLASS_IDS: class_ids,\n pred_keys.CLASSES: classes,\n }\n if mode == model_fn.ModeKeys.PREDICT:\n classifier_output = _classification_output(\n scores=probabilities, n_classes=self._n_classes,\n label_vocabulary=self._label_vocabulary)\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=predictions,\n export_outputs={\n _DEFAULT_SERVING_KEY: classifier_output,\n _CLASSIFY_SERVING_KEY: classifier_output,\n _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)\n })\n\n # Eval.\n unweighted_loss, label_ids = self.create_loss(\n features=features, mode=mode, logits=logits, labels=labels)\n weights = _weights(features, self._weight_column)\n training_loss = losses.compute_weighted_loss(\n unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)\n if mode == model_fn.ModeKeys.EVAL:\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions=predictions,\n loss=training_loss,\n eval_metric_ops=self._eval_metric_ops(\n labels=label_ids,\n class_ids=class_ids,\n unweighted_loss=unweighted_loss,\n weights=weights))\n\n # Train.\n if train_op_fn is None:\n raise ValueError('train_op_fn can not be None.')\n with ops.name_scope(''):\n summary.scalar(\n _summary_key(self._name, metric_keys.MetricKeys.LOSS),\n training_loss)\n summary.scalar(\n _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),\n losses.compute_weighted_loss(\n unweighted_loss, weights=weights,\n reduction=losses.Reduction.MEAN))\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n predictions=predictions,\n loss=training_loss,\n train_op=train_op_fn(training_loss))\n\n\ndef _binary_logistic_head_with_sigmoid_cross_entropy_loss(\n weight_column=None, thresholds=None, label_vocabulary=None, name=None):\n \"\"\"Creates a `Head` for single label binary 
classification.\n\n This head uses `sigmoid_cross_entropy_with_logits` loss.\n\n This head expects to be fed float labels of shape `(batch_size, 1)`.\n\n Args:\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n thresholds: Iterable of floats in the range `(0, 1)`. For binary\n classification metrics such as precision and recall, an eval metric is\n generated for each threshold value. This threshold is applied to the\n logistic values to determine the binary classification (i.e., above the\n threshold is `true`, below is `false`.\n label_vocabulary: A list of strings represents possible label values. If it\n is not given, that means labels are already encoded within [0, 1]. If\n given, labels must be string type and have any value in\n `label_vocabulary`. Also there will be errors if vocabulary is not\n provided and labels are string.\n name: name of the head. If provided, summary and metrics keys will be\n suffixed by `\"/\" + name`. Also used as `name_scope` when creating ops.\n\n Returns:\n An instance of `Head` for binary classification.\n\n Raises:\n ValueError: if `thresholds` contains a value outside of `(0, 1)`.\n \"\"\"\n thresholds = tuple(thresholds) if thresholds else tuple()\n if label_vocabulary is not None and not isinstance(label_vocabulary,\n (list, tuple)):\n raise ValueError('label_vocabulary should be a list. Given type: {}'.format(\n type(label_vocabulary)))\n\n for threshold in thresholds:\n if (threshold <= 0.0) or (threshold >= 1.0):\n raise ValueError('thresholds not in (0, 1): %s.' % (thresholds,))\n return _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(\n weight_column=weight_column,\n thresholds=thresholds,\n label_vocabulary=label_vocabulary,\n name=name)\n\n\nclass _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(_Head):\n \"\"\"See `_binary_logistic_head_with_sigmoid_cross_entropy_loss`.\"\"\"\n\n def __init__(self,\n weight_column=None,\n thresholds=None,\n label_vocabulary=None,\n name=None):\n self._weight_column = weight_column\n self._thresholds = thresholds\n self._label_vocabulary = label_vocabulary\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n @property\n def logits_dimension(self):\n return 1\n\n def _eval_metric_ops(self,\n labels,\n logits,\n logistic,\n class_ids,\n unweighted_loss,\n weights=None):\n with ops.name_scope(\n None, 'metrics',\n (labels, logits, logistic, class_ids, unweighted_loss, weights)):\n keys = metric_keys.MetricKeys\n labels_mean = _indicator_labels_mean(\n labels=labels, weights=weights, name=keys.LABEL_MEAN)\n metric_ops = {\n # Estimator already adds a metric for loss.\n _summary_key(self._name, keys.LOSS_MEAN):\n metrics_lib.mean(\n unweighted_loss, weights=weights, name=keys.LOSS_MEAN),\n _summary_key(self._name, keys.ACCURACY):\n metrics_lib.accuracy(\n labels=labels,\n predictions=class_ids,\n weights=weights,\n name=keys.ACCURACY),\n _summary_key(self._name, keys.PREDICTION_MEAN):\n _predictions_mean(\n predictions=logistic,\n weights=weights,\n name=keys.PREDICTION_MEAN),\n _summary_key(self._name, keys.LABEL_MEAN):\n labels_mean,\n _summary_key(self._name, keys.ACCURACY_BASELINE):\n _accuracy_baseline(labels_mean),\n _summary_key(self._name, keys.AUC):\n _auc(\n labels=labels,\n predictions=logistic,\n weights=weights,\n name=keys.AUC),\n _summary_key(self._name, keys.AUC_PR):\n 
_auc(\n labels=labels,\n predictions=logistic,\n weights=weights,\n curve='PR',\n name=keys.AUC_PR)\n }\n for threshold in self._thresholds:\n accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold\n metric_ops[_summary_key(self._name,\n accuracy_key)] = _accuracy_at_threshold(\n labels=labels,\n predictions=logistic,\n weights=weights,\n threshold=threshold,\n name=accuracy_key)\n # Precision for positive examples.\n precision_key = keys.PRECISION_AT_THRESHOLD % threshold\n metric_ops[_summary_key(self._name,\n precision_key)] = _precision_at_threshold(\n labels=labels,\n predictions=logistic,\n weights=weights,\n threshold=threshold,\n name=precision_key)\n # Recall for positive examples.\n recall_key = keys.RECALL_AT_THRESHOLD % threshold\n metric_ops[_summary_key(self._name,\n recall_key)] = _recall_at_threshold(\n labels=labels,\n predictions=logistic,\n weights=weights,\n threshold=threshold,\n name=recall_key)\n return metric_ops\n\n def create_loss(self, features, mode, logits, labels):\n \"\"\"See `Head`.\"\"\"\n del mode, features # Unused for this head.\n labels = _check_and_reshape_dense_labels(labels, self.logits_dimension)\n if self._label_vocabulary is not None:\n labels = lookup_ops.index_table_from_tensor(\n vocabulary_list=tuple(self._label_vocabulary),\n name='class_id_lookup').lookup(labels)\n labels = math_ops.to_float(labels)\n labels = _assert_range(labels, 2)\n return LossAndLabels(\n unweighted_loss=nn.sigmoid_cross_entropy_with_logits(\n labels=labels, logits=logits),\n processed_labels=labels)\n\n def create_estimator_spec(\n self, features, mode, logits, labels=None, train_op_fn=None):\n \"\"\"See `Head`.\"\"\"\n # Predict.\n with ops.name_scope(self._name, 'head'):\n with ops.name_scope(None, 'predictions', (logits,)):\n pred_keys = prediction_keys.PredictionKeys\n logits = _check_logits(logits, self.logits_dimension)\n logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)\n two_class_logits = array_ops.concat(\n (array_ops.zeros_like(logits), logits), 1, name='two_class_logits')\n probabilities = nn.softmax(\n two_class_logits, name=pred_keys.PROBABILITIES)\n class_ids = array_ops.reshape(\n math_ops.argmax(two_class_logits, axis=1), (-1, 1), name='classes')\n if self._label_vocabulary:\n table = lookup_ops.index_to_string_table_from_tensor(\n vocabulary_list=self._label_vocabulary,\n name='class_string_lookup')\n classes = table.lookup(class_ids)\n else:\n classes = string_ops.as_string(class_ids, name='str_classes')\n predictions = {\n pred_keys.LOGITS: logits,\n pred_keys.LOGISTIC: logistic,\n pred_keys.PROBABILITIES: probabilities,\n pred_keys.CLASS_IDS: class_ids,\n pred_keys.CLASSES: classes,\n }\n if mode == model_fn.ModeKeys.PREDICT:\n classifier_output = _classification_output(\n scores=probabilities, n_classes=2,\n label_vocabulary=self._label_vocabulary)\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=predictions,\n export_outputs={\n _DEFAULT_SERVING_KEY: classifier_output,\n _CLASSIFY_SERVING_KEY: classifier_output,\n _REGRESS_SERVING_KEY: export_output.RegressionOutput(\n value=logistic),\n _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)\n })\n\n # Eval.\n unweighted_loss, processed_labels = self.create_loss(\n features=features, mode=mode, logits=logits, labels=labels)\n weights = _weights(features, self._weight_column)\n training_loss = losses.compute_weighted_loss(\n unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)\n if mode == model_fn.ModeKeys.EVAL:\n return 
model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions=predictions,\n loss=training_loss,\n eval_metric_ops=self._eval_metric_ops(\n labels=processed_labels,\n logits=logits,\n logistic=logistic,\n class_ids=class_ids,\n unweighted_loss=unweighted_loss,\n weights=weights))\n\n # Train.\n if train_op_fn is None:\n raise ValueError('train_op_fn can not be None.')\n with ops.name_scope(''):\n summary.scalar(\n _summary_key(self._name, metric_keys.MetricKeys.LOSS),\n training_loss)\n summary.scalar(\n _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),\n losses.compute_weighted_loss(\n unweighted_loss, weights=weights,\n reduction=losses.Reduction.MEAN))\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n predictions=predictions,\n loss=training_loss,\n train_op=train_op_fn(training_loss))\n\n\ndef _regression_head_with_mean_squared_error_loss(weight_column=None,\n label_dimension=1,\n name=None):\n \"\"\"Creates a `_Head` for regression using the mean squared loss.\n\n Args:\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n label_dimension: Number of regression labels per example. This is the size\n of the last dimension of the labels `Tensor` (typically, this has shape\n `[batch_size, label_dimension]`).\n name: name of the head. If provided, summary and metrics keys will be\n suffixed by `\"/\" + name`. Also used as `name_scope` when creating ops.\n\n Returns:\n An instance of `_Head` for linear regression.\n \"\"\"\n return _RegressionHeadWithMeanSquaredErrorLoss(\n weight_column=weight_column,\n label_dimension=label_dimension,\n name=name)\n\n\nclass _RegressionHeadWithMeanSquaredErrorLoss(_Head):\n \"\"\"`Head` for regression using the mean squared loss.\"\"\"\n\n def __init__(self, label_dimension, weight_column=None, name=None):\n \"\"\"`Head` for regression.\"\"\"\n if label_dimension < 1:\n raise ValueError('Invalid label_dimension %s.' 
% label_dimension)\n self._logits_dimension = label_dimension\n self._weight_column = weight_column\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n @property\n def logits_dimension(self):\n return self._logits_dimension\n\n def create_loss(self, features, mode, logits, labels):\n \"\"\"See `Head`.\"\"\"\n del mode, features # Unused for this head.\n labels = _check_and_reshape_dense_labels(labels, self._logits_dimension)\n labels = math_ops.to_float(labels)\n return LossAndLabels(\n unweighted_loss=losses.mean_squared_error(\n labels=labels, predictions=logits, reduction=losses.Reduction.NONE),\n processed_labels=labels)\n\n def create_estimator_spec(\n self, features, mode, logits, labels=None, train_op_fn=None):\n \"\"\"See `Head`.\"\"\"\n # Predict.\n with ops.name_scope(self._name, 'head'):\n logits = _check_logits(logits, self._logits_dimension)\n predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}\n if mode == model_fn.ModeKeys.PREDICT:\n regression_output = export_output.RegressionOutput(value=logits)\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.PREDICT,\n predictions=predictions,\n export_outputs={\n _DEFAULT_SERVING_KEY: regression_output,\n _REGRESS_SERVING_KEY: regression_output,\n _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)\n })\n\n # Eval.\n unweighted_loss, _ = self.create_loss(\n features=features, mode=mode, logits=logits, labels=labels)\n weights = _weights(features, self._weight_column)\n training_loss = losses.compute_weighted_loss(\n unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)\n if mode == model_fn.ModeKeys.EVAL:\n # Estimator already adds a metric for loss.\n eval_metric_ops = {\n metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(\n unweighted_loss, weights=weights)\n }\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.EVAL,\n predictions=predictions,\n loss=training_loss,\n eval_metric_ops=eval_metric_ops)\n\n # Train.\n if train_op_fn is None:\n raise ValueError('train_op_fn can not be None.')\n with ops.name_scope(''):\n summary.scalar(\n _summary_key(self._name, metric_keys.MetricKeys.LOSS),\n training_loss)\n summary.scalar(\n _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),\n losses.compute_weighted_loss(\n unweighted_loss, weights=weights,\n reduction=losses.Reduction.MEAN))\n return model_fn.EstimatorSpec(\n mode=model_fn.ModeKeys.TRAIN,\n predictions=predictions,\n loss=training_loss,\n train_op=train_op_fn(training_loss))\n\n\ndef _assert_range(labels, n_classes):\n with ops.name_scope(None, 'assert_range', (labels,)):\n assert_less = check_ops.assert_less(\n labels,\n ops.convert_to_tensor(n_classes, dtype=labels.dtype),\n message='Label IDs must < n_classes')\n assert_greater = check_ops.assert_non_negative(\n labels, message='Label IDs must >= 0')\n with ops.control_dependencies((assert_less, assert_greater)):\n return array_ops.identity(labels)\n\n\ndef _weights(features, weight_column):\n \"\"\"Fetches weights from features.\"\"\"\n with ops.name_scope(None, 'weights', values=features.values()):\n if weight_column is None:\n return 1.\n if isinstance(weight_column, six.string_types):\n weight_column = feature_column_lib.numeric_column(key=weight_column)\n if not isinstance(weight_column, feature_column_lib._NumericColumn): # pylint: disable=protected-access\n raise TypeError('Weight column must be either a string or _NumericColumn.'\n ' Given type: {}.'.format(type(weight_column)))\n weights = weight_column._get_dense_tensor( # pylint: 
disable=protected-access\n feature_column_lib._LazyBuilder(features)) # pylint: disable=protected-access\n if not (weights.dtype.is_floating or weights.dtype.is_integer):\n raise ValueError('Weight column should be castable to float. '\n 'Given dtype: {}'.format(weights.dtype))\n weights = _maybe_expand_dim(math_ops.to_float(weights, name='weights'))\n return weights\n" ]
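The multi-class head above builds its training loss in two stages: `create_loss` returns per-example `sparse_softmax_cross_entropy` values (`Reduction.NONE`), and `create_estimator_spec` then combines them with the `_weights` result via `compute_weighted_loss(..., reduction=Reduction.SUM)`. A minimal NumPy sketch of that arithmetic, for orientation only (the function and variable names here are illustrative, not part of the library):

```python
import numpy as np

def sparse_softmax_xent(logits, label_ids):
    # Numerically stable row-wise log-softmax.
    z = logits - logits.max(axis=1, keepdims=True)
    log_probs = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    # Per-example loss: negative log-probability of the true class.
    return -log_probs[np.arange(len(label_ids)), label_ids]

logits = np.array([[2.0, 0.5, -1.0], [0.1, 0.2, 0.3]])
label_ids = np.array([0, 2])
weights = np.array([1.0, 2.0])  # what _weights() would fetch per example

unweighted = sparse_softmax_xent(logits, label_ids)  # create_loss, Reduction.NONE
training_loss = (weights * unweighted).sum()         # compute_weighted_loss, Reduction.SUM
```

Because the SUM reduction makes the reported loss scale with batch size, the head also records a MEAN-reduced `LOSS_MEAN` summary alongside it, as in the `summary.scalar` calls above.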
[ [ "tensorflow.python.ops.array_ops.constant", "tensorflow.python.ops.control_flow_ops.IsLoopExit", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.eager.context.in_eager_mode", "tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices", "tensorflow.python.framework.ops.get_gradient_function", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.array_ops.stop_gradient", "tensorflow.python.ops.control_flow_ops.MaybeCreateControlFlowState", "tensorflow.python.framework.ops.internal_convert_n_to_tensor_or_indexed_slices", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.ops.array_ops.size", "tensorflow.python.ops.control_flow_ops.ZerosLikeOutsideLoop", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.platform.tf_logging.vlog", "tensorflow.python.framework.ops.register_tensor_conversion_function", "tensorflow.python.ops.check_ops.assert_rank", "tensorflow.python.ops.tensor_array_ops.TensorArray", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.ops.math_ops.unsorted_segment_sum", "tensorflow.core.framework.attr_value_pb2.AttrValue", "tensorflow.core.framework.attr_value_pb2.NameAttrList", "tensorflow.python.ops.functional_ops._symbolic_gradient", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.math_ops._as_indexed_slices_list", "tensorflow.python.ops.math_ops.add_n", "tensorflow.python.ops.control_flow_ops.IsLoopSwitch", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.math_ops.accumulate_n", "numpy.prod", "tensorflow.python.ops.control_flow_ops.tuple", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.test_util.NCHW_VECT_CToNHWC", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.gen_array_ops.quantize_v2", "tensorflow.python.ops.math_ops.to_float", "tensorflow.python.framework.test_util.NHWCToNCHW", "tensorflow.python.ops.array_ops.depth_to_space", "tensorflow.python.ops.array_ops.transpose", "tensorflow.python.ops.gen_array_ops.dequantize", "tensorflow.python.platform.test.main", "tensorflow.python.platform.test.is_gpu_available", "tensorflow.python.ops.array_ops.space_to_depth", "tensorflow.python.framework.test_util.NHWCToNCHW_VECT_C", "tensorflow.python.framework.ops.convert_to_tensor", "numpy.array_equal", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.ops.array_ops.reshape", "numpy.random.normal", "numpy.prod", "tensorflow.python.framework.test_util.NCHWToNHWC", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.framework.ops.device", "tensorflow.python.ops.rnn_cell.LSTMCell", "tensorflow.contrib.rnn.python.ops.lstm_ops.LSTMBlockCell", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.contrib.rnn.python.ops.core_rnn.static_rnn", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.init_ops.random_uniform_initializer", "tensorflow.python.client.session.Session", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.array_ops.ones", "tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops.CudnnLSTM", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.gradients_impl.gradients" ], [ "tensorflow.python.ops.lookup_ops.index_to_string_table_from_tensor", 
"tensorflow.python.ops.math_ops.greater_equal", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.nn.softmax", "tensorflow.python.estimator.export.export_output.RegressionOutput", "tensorflow.python.ops.array_ops.squeeze", "tensorflow.python.ops.metrics.recall_at_thresholds", "tensorflow.python.ops.math_ops.to_float", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.losses.losses.compute_weighted_loss", "tensorflow.python.ops.metrics.precision_at_thresholds", "tensorflow.python.ops.math_ops.argmax", "tensorflow.python.ops.metrics.mean", "tensorflow.python.ops.losses.losses.sparse_softmax_cross_entropy", "tensorflow.python.ops.weights_broadcast_ops.broadcast_weights", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.check_ops.assert_non_negative", "tensorflow.python.ops.metrics.auc", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.framework.sparse_tensor.convert_to_tensor_or_sparse_tensor", "tensorflow.python.ops.check_ops.assert_rank", "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.feature_column.feature_column.numeric_column", "tensorflow.python.ops.nn.sigmoid_cross_entropy_with_logits", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.check_ops.assert_equal", "tensorflow.python.estimator.export.export_output.ClassificationOutput", "tensorflow.python.feature_column.feature_column._LazyBuilder", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.losses.losses.mean_squared_error", "tensorflow.python.ops.metrics.accuracy", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.math_ops.sigmoid", "tensorflow.python.estimator.model_fn.EstimatorSpec", "tensorflow.python.ops.string_ops.as_string", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.estimator.export.export_output.PredictOutput", "tensorflow.python.ops.math_ops.maximum", "tensorflow.python.ops.array_ops.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "0.12", "1.0", "1.2", "1.4" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "1.4", "2.6", "1.13", "2.3", "2.4", "2.2", "2.9", "1.5", "1.7", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "1.2" ] } ]
SecantZhang/adversarial-robustness-toolbox
[ "065ac24e7f5ec124f6cfe39ce21f085f4c87a401", "065ac24e7f5ec124f6cfe39ce21f085f4c87a401", "80f1e7e0348a5e438c2712f8087003c713a4c6e3", "80f1e7e0348a5e438c2712f8087003c713a4c6e3", "80f1e7e0348a5e438c2712f8087003c713a4c6e3" ]
[ "tests/attacks/test_projected_gradient_descent.py", "tests/attacks/test_threshold_attack.py", "art/defences/detector/poison/ground_truth_evaluator.py", "tests/metrics/test_verification_decision_trees.py", "art/metrics/gradient_check.py" ]
[ "# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom art.attacks.evasion.projected_gradient_descent.projected_gradient_descent import ProjectedGradientDescent\nfrom art.attacks.evasion.projected_gradient_descent.projected_gradient_descent_numpy import (\n ProjectedGradientDescentNumpy,\n)\nfrom art.estimators.classification import KerasClassifier\nfrom art.estimators.estimator import BaseEstimator, LossGradientsMixin\nfrom art.utils import get_labels_np_array, random_targets\nfrom tests.attacks.utils import backend_test_classifier_type_check_fail\nfrom tests.utils import (\n TestBase,\n get_image_classifier_kr,\n get_image_classifier_pt,\n get_image_classifier_tf,\n get_tabular_classifier_kr,\n get_tabular_classifier_pt,\n get_tabular_classifier_tf,\n master_seed,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestPGD(TestBase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.n_train = 10\n cls.n_test = 10\n cls.x_train_mnist = cls.x_train_mnist[0 : cls.n_train]\n cls.y_train_mnist = cls.y_train_mnist[0 : cls.n_train]\n cls.x_test_mnist = cls.x_test_mnist[0 : cls.n_test]\n cls.y_test_mnist = cls.y_test_mnist[0 : cls.n_test]\n\n def test_9a_keras_mnist(self):\n classifier = get_image_classifier_kr()\n\n scores = classifier._model.evaluate(self.x_train_mnist, self.y_train_mnist)\n logger.info(\"[Keras, MNIST] Accuracy on training set: %.2f%%\", scores[1] * 100)\n scores = classifier._model.evaluate(self.x_test_mnist, self.y_test_mnist)\n logger.info(\"[Keras, MNIST] Accuracy on test set: %.2f%%\", scores[1] * 100)\n\n self._test_backend_mnist(\n classifier, self.x_train_mnist, self.y_train_mnist, self.x_test_mnist, self.y_test_mnist\n )\n\n def test_3_tensorflow_mnist(self):\n classifier, sess = get_image_classifier_tf()\n\n scores = get_labels_np_array(classifier.predict(self.x_train_mnist))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_train_mnist, axis=1)) / self.y_train_mnist.shape[0]\n logger.info(\"[TF, MNIST] Accuracy on training set: %.2f%%\", acc * 100)\n\n scores = get_labels_np_array(classifier.predict(self.x_test_mnist))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_test_mnist, axis=1)) / self.y_test_mnist.shape[0]\n logger.info(\"[TF, MNIST] Accuracy on test set: 
%.2f%%\", acc * 100)\n\n self._test_backend_mnist(\n classifier, self.x_train_mnist, self.y_train_mnist, self.x_test_mnist, self.y_test_mnist\n )\n\n def test_5_pytorch_mnist(self):\n x_train_mnist = np.swapaxes(self.x_train_mnist, 1, 3).astype(np.float32)\n x_test_mnist = np.swapaxes(self.x_test_mnist, 1, 3).astype(np.float32)\n classifier = get_image_classifier_pt()\n\n scores = get_labels_np_array(classifier.predict(x_train_mnist))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_train_mnist, axis=1)) / self.y_train_mnist.shape[0]\n logger.info(\"[PyTorch, MNIST] Accuracy on training set: %.2f%%\", acc * 100)\n\n scores = get_labels_np_array(classifier.predict(x_test_mnist))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(self.y_test_mnist, axis=1)) / self.y_test_mnist.shape[0]\n logger.info(\"[PyTorch, MNIST] Accuracy on test set: %.2f%%\", acc * 100)\n\n self._test_backend_mnist(classifier, x_train_mnist, self.y_train_mnist, x_test_mnist, self.y_test_mnist)\n\n # Test with clip values of array type\n classifier.set_params(clip_values=(np.zeros_like(x_test_mnist[0]), np.ones_like(x_test_mnist[0])))\n self._test_backend_mnist(classifier, x_train_mnist, self.y_train_mnist, x_test_mnist, self.y_test_mnist)\n\n classifier.set_params(clip_values=(np.zeros_like(x_test_mnist[0][0]), np.ones_like(x_test_mnist[0][0])))\n self._test_backend_mnist(classifier, x_train_mnist, self.y_train_mnist, x_test_mnist, self.y_test_mnist)\n\n classifier.set_params(clip_values=(np.zeros_like(x_test_mnist[0][0][0]), np.ones_like(x_test_mnist[0][0][0])))\n self._test_backend_mnist(classifier, x_train_mnist, self.y_train_mnist, x_test_mnist, self.y_test_mnist)\n\n def _test_backend_mnist(self, classifier, x_train, y_train, x_test, y_test):\n x_test_original = x_test.copy()\n\n # Test PGD with np.inf norm\n attack = ProjectedGradientDescent(classifier, eps=1.0, eps_step=0.1)\n x_train_adv = attack.generate(x_train)\n x_test_adv = attack.generate(x_test)\n\n self.assertFalse((x_train == x_train_adv).all())\n self.assertFalse((x_test == x_test_adv).all())\n\n train_y_pred = get_labels_np_array(classifier.predict(x_train_adv))\n test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))\n\n self.assertFalse((y_train == train_y_pred).all())\n self.assertFalse((y_test == test_y_pred).all())\n\n acc = np.sum(np.argmax(train_y_pred, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0]\n logger.info(\"Accuracy on adversarial train examples: %.2f%%\", acc * 100)\n\n acc = np.sum(np.argmax(test_y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]\n logger.info(\"Accuracy on adversarial test examples: %.2f%%\", acc * 100)\n\n # Test PGD with 3 random initialisations\n attack = ProjectedGradientDescent(classifier, num_random_init=3)\n x_train_adv = attack.generate(x_train)\n x_test_adv = attack.generate(x_test)\n\n self.assertFalse((x_train == x_train_adv).all())\n self.assertFalse((x_test == x_test_adv).all())\n\n train_y_pred = get_labels_np_array(classifier.predict(x_train_adv))\n test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))\n\n self.assertFalse((y_train == train_y_pred).all())\n self.assertFalse((y_test == test_y_pred).all())\n\n acc = np.sum(np.argmax(train_y_pred, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0]\n logger.info(\"Accuracy on adversarial train examples with 3 random initialisations: %.2f%%\", acc * 100)\n\n acc = np.sum(np.argmax(test_y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]\n logger.info(\"Accuracy on adversarial 
test examples with 3 random initialisations: %.2f%%\", acc * 100)\n\n # Check that x_test has not been modified by attack and classifier\n self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)\n\n # Test the masking\n attack = ProjectedGradientDescent(classifier, num_random_init=1)\n mask = np.random.binomial(n=1, p=0.5, size=np.prod(x_test.shape))\n mask = mask.reshape(x_test.shape).astype(np.float32)\n\n x_test_adv = attack.generate(x_test, mask=mask)\n mask_diff = (1 - mask) * (x_test_adv - x_test)\n self.assertAlmostEqual(float(np.max(np.abs(mask_diff))), 0.0, delta=0.00001)\n\n # Test eps of array type 1\n attack = ProjectedGradientDescent(classifier, eps=1.0, eps_step=0.1)\n\n eps = np.ones(shape=x_test.shape) * 1.0\n eps_step = np.ones_like(eps) * 0.1\n\n attack_params = {\"eps_step\": eps_step, \"eps\": eps}\n attack.set_params(**attack_params)\n\n x_test_adv = attack.generate(x_test)\n self.assertFalse((x_test == x_test_adv).all())\n\n test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))\n self.assertFalse((y_test == test_y_pred).all())\n\n # Test eps of array type 2\n eps = np.ones(shape=x_test.shape[1:]) * 1.0\n eps_step = np.ones_like(eps) * 0.1\n\n attack_params = {\"eps_step\": eps_step, \"eps\": eps}\n attack.set_params(**attack_params)\n\n x_test_adv = attack.generate(x_test)\n self.assertFalse((x_test == x_test_adv).all())\n\n test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))\n self.assertFalse((y_test == test_y_pred).all())\n\n # Test eps of array type 3\n eps = np.ones(shape=x_test.shape[2:]) * 1.0\n eps_step = np.ones_like(eps) * 0.1\n\n attack_params = {\"eps_step\": eps_step, \"eps\": eps}\n attack.set_params(**attack_params)\n\n x_test_adv = attack.generate(x_test)\n self.assertFalse((x_test == x_test_adv).all())\n\n test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))\n self.assertFalse((y_test == test_y_pred).all())\n\n # Test eps of array type 4\n eps = np.ones(shape=x_test.shape[3:]) * 1.0\n eps_step = np.ones_like(eps) * 0.1\n\n attack_params = {\"eps_step\": eps_step, \"eps\": eps}\n attack.set_params(**attack_params)\n\n x_test_adv = attack.generate(x_test)\n self.assertFalse((x_test == x_test_adv).all())\n\n test_y_pred = get_labels_np_array(classifier.predict(x_test_adv))\n self.assertFalse((y_test == test_y_pred).all())\n\n def test_1_classifier_type_check_fail(self):\n backend_test_classifier_type_check_fail(ProjectedGradientDescent, [BaseEstimator, LossGradientsMixin])\n\n def test_8_keras_iris_clipped(self):\n classifier = get_tabular_classifier_kr()\n\n # Test untargeted attack\n attack = ProjectedGradientDescent(classifier, eps=1.0, eps_step=0.1, max_iter=5)\n x_test_adv = attack.generate(self.x_test_iris)\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"Accuracy on Iris with PGD adversarial examples: %.2f%%\", (acc * 100))\n\n # Test targeted attack\n targets = random_targets(self.y_test_iris, nb_classes=3)\n attack = ProjectedGradientDescent(classifier, targeted=True, eps=1.0, eps_step=0.1, max_iter=5)\n x_test_adv = attack.generate(self.x_test_iris, **{\"y\": targets})\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n 
self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any())\n acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"Success rate of targeted PGD on Iris: %.2f%%\", (acc * 100))\n\n def test_keras_9_iris_unbounded(self):\n classifier = get_tabular_classifier_kr()\n\n # Recreate a classifier without clip values\n classifier = KerasClassifier(model=classifier._model, use_logits=False, channels_first=True)\n attack = ProjectedGradientDescent(classifier, eps=1.0, eps_step=0.2, max_iter=5)\n x_test_adv = attack.generate(self.x_test_iris)\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n self.assertTrue((x_test_adv > 1).any())\n self.assertTrue((x_test_adv < 0).any())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"Accuracy on Iris with PGD adversarial examples: %.2f%%\", (acc * 100))\n\n def test_2_tensorflow_iris(self):\n classifier, _ = get_tabular_classifier_tf()\n\n # Test untargeted attack\n attack = ProjectedGradientDescent(classifier, eps=1.0, eps_step=0.1, max_iter=5)\n x_test_adv = attack.generate(self.x_test_iris)\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"Accuracy on Iris with PGD adversarial examples: %.2f%%\", (acc * 100))\n\n # Test targeted attack\n targets = random_targets(self.y_test_iris, nb_classes=3)\n attack = ProjectedGradientDescent(classifier, targeted=True, eps=1.0, eps_step=0.1, max_iter=5)\n x_test_adv = attack.generate(self.x_test_iris, **{\"y\": targets})\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any())\n acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"Success rate of targeted PGD on Iris: %.2f%%\", (acc * 100))\n\n def test_4_pytorch_iris_pt(self):\n classifier = get_tabular_classifier_pt()\n\n # Test untargeted attack\n attack = ProjectedGradientDescent(classifier, eps=1.0, eps_step=0.1, max_iter=5)\n x_test_adv = attack.generate(self.x_test_iris)\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"Accuracy on Iris with PGD adversarial examples: %.2f%%\", (acc * 100))\n\n # Test targeted attack\n targets = random_targets(self.y_test_iris, nb_classes=3)\n attack = ProjectedGradientDescent(classifier, targeted=True, eps=1.0, eps_step=0.1, max_iter=5)\n x_test_adv = attack.generate(self.x_test_iris, 
**{\"y\": targets})\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any())\n acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\"Success rate of targeted PGD on Iris: %.2f%%\", (acc * 100))\n\n def test_7_scikitlearn(self):\n from sklearn.linear_model import LogisticRegression\n from sklearn.svm import SVC, LinearSVC\n\n from art.estimators.classification.scikitlearn import SklearnClassifier\n\n scikitlearn_test_cases = [\n LogisticRegression(solver=\"lbfgs\", multi_class=\"auto\"),\n SVC(gamma=\"auto\"),\n LinearSVC(),\n ]\n\n x_test_original = self.x_test_iris.copy()\n\n for model in scikitlearn_test_cases:\n classifier = SklearnClassifier(model=model, clip_values=(0, 1))\n classifier.fit(x=self.x_test_iris, y=self.y_test_iris)\n\n # Test untargeted attack\n attack = ProjectedGradientDescent(classifier, eps=1.0, eps_step=0.1, max_iter=5)\n x_test_adv = attack.generate(self.x_test_iris)\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\n \"Accuracy of \" + classifier.__class__.__name__ + \" on Iris with PGD adversarial examples: \" \"%.2f%%\",\n (acc * 100),\n )\n\n # Test targeted attack\n targets = random_targets(self.y_test_iris, nb_classes=3)\n attack = ProjectedGradientDescent(classifier, targeted=True, eps=1.0, eps_step=0.1, max_iter=5)\n x_test_adv = attack.generate(self.x_test_iris, **{\"y\": targets})\n self.assertFalse((self.x_test_iris == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertTrue((np.argmax(targets, axis=1) == preds_adv).any())\n acc = np.sum(preds_adv == np.argmax(targets, axis=1)) / self.y_test_iris.shape[0]\n logger.info(\n \"Success rate of \" + classifier.__class__.__name__ + \" on targeted PGD on Iris: %.2f%%\", (acc * 100)\n )\n\n # Check that x_test has not been modified by attack and classifier\n self.assertAlmostEqual(float(np.max(np.abs(x_test_original - self.x_test_iris))), 0.0, delta=0.00001)\n\n @unittest.skipIf(tf.__version__[0] != \"2\", \"\")\n def test_4_framework_tensorflow_v2_mnist(self):\n classifier, _ = get_image_classifier_tf()\n self._test_framework_vs_numpy(classifier)\n\n def test_6_framework_pytorch_mnist(self):\n self.x_train_mnist = np.swapaxes(self.x_train_mnist, 1, 3).astype(np.float32)\n self.x_test_mnist = np.swapaxes(self.x_test_mnist, 1, 3).astype(np.float32)\n\n classifier = get_image_classifier_pt()\n self._test_framework_vs_numpy(classifier)\n\n self.x_train_mnist = np.swapaxes(self.x_train_mnist, 1, 3).astype(np.float32)\n self.x_test_mnist = np.swapaxes(self.x_test_mnist, 1, 3).astype(np.float32)\n\n def _test_framework_vs_numpy(self, classifier):\n # Test PGD with np.inf norm\n attack_np = ProjectedGradientDescentNumpy(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=False,\n num_random_init=0,\n batch_size=3,\n random_eps=False,\n )\n x_train_adv_np = 
attack_np.generate(self.x_train_mnist)\n x_test_adv_np = attack_np.generate(self.x_test_mnist)\n\n attack_fw = ProjectedGradientDescent(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=False,\n num_random_init=0,\n batch_size=3,\n random_eps=False,\n )\n x_train_adv_fw = attack_fw.generate(self.x_train_mnist)\n x_test_adv_fw = attack_fw.generate(self.x_test_mnist)\n\n # Test\n self.assertAlmostEqual(\n np.mean(x_train_adv_np - self.x_train_mnist), np.mean(x_train_adv_fw - self.x_train_mnist), places=6\n )\n self.assertAlmostEqual(\n np.mean(x_test_adv_np - self.x_test_mnist), np.mean(x_test_adv_fw - self.x_test_mnist), places=6\n )\n\n # Test PGD with L1 norm\n attack_np = ProjectedGradientDescentNumpy(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=1,\n targeted=False,\n num_random_init=0,\n batch_size=3,\n random_eps=False,\n )\n x_train_adv_np = attack_np.generate(self.x_train_mnist)\n x_test_adv_np = attack_np.generate(self.x_test_mnist)\n\n attack_fw = ProjectedGradientDescent(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=1,\n targeted=False,\n num_random_init=0,\n batch_size=3,\n random_eps=False,\n )\n x_train_adv_fw = attack_fw.generate(self.x_train_mnist)\n x_test_adv_fw = attack_fw.generate(self.x_test_mnist)\n\n # Test\n self.assertAlmostEqual(\n np.mean(x_train_adv_np - self.x_train_mnist), np.mean(x_train_adv_fw - self.x_train_mnist), places=6\n )\n self.assertAlmostEqual(\n np.mean(x_test_adv_np - self.x_test_mnist), np.mean(x_test_adv_fw - self.x_test_mnist), places=6\n )\n\n # Test PGD with L2 norm\n attack_np = ProjectedGradientDescentNumpy(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=2,\n targeted=False,\n num_random_init=0,\n batch_size=3,\n random_eps=False,\n )\n x_train_adv_np = attack_np.generate(self.x_train_mnist)\n x_test_adv_np = attack_np.generate(self.x_test_mnist)\n\n attack_fw = ProjectedGradientDescent(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=2,\n targeted=False,\n num_random_init=0,\n batch_size=3,\n random_eps=False,\n )\n x_train_adv_fw = attack_fw.generate(self.x_train_mnist)\n x_test_adv_fw = attack_fw.generate(self.x_test_mnist)\n\n # Test\n self.assertAlmostEqual(\n np.mean(x_train_adv_np - self.x_train_mnist), np.mean(x_train_adv_fw - self.x_train_mnist), places=6\n )\n self.assertAlmostEqual(\n np.mean(x_test_adv_np - self.x_test_mnist), np.mean(x_test_adv_fw - self.x_test_mnist), places=6\n )\n\n # Test PGD with True targeted\n attack_np = ProjectedGradientDescentNumpy(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=True,\n num_random_init=0,\n batch_size=3,\n random_eps=False,\n )\n x_train_adv_np = attack_np.generate(self.x_train_mnist, self.y_train_mnist)\n x_test_adv_np = attack_np.generate(self.x_test_mnist, self.y_test_mnist)\n\n attack_fw = ProjectedGradientDescent(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=True,\n num_random_init=0,\n batch_size=3,\n random_eps=False,\n )\n x_train_adv_fw = attack_fw.generate(self.x_train_mnist, self.y_train_mnist)\n x_test_adv_fw = attack_fw.generate(self.x_test_mnist, self.y_test_mnist)\n\n # Test\n self.assertAlmostEqual(\n np.mean(x_train_adv_np - self.x_train_mnist), np.mean(x_train_adv_fw - self.x_train_mnist), places=6\n )\n self.assertAlmostEqual(\n np.mean(x_test_adv_np - self.x_test_mnist), np.mean(x_test_adv_fw - self.x_test_mnist), places=6\n )\n\n # Test PGD with num_random_init=2\n master_seed(1234)\n attack_np = 
ProjectedGradientDescentNumpy(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=False,\n num_random_init=2,\n batch_size=3,\n random_eps=False,\n )\n x_train_adv_np = attack_np.generate(self.x_train_mnist)\n x_test_adv_np = attack_np.generate(self.x_test_mnist)\n\n master_seed(1234)\n attack_fw = ProjectedGradientDescent(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=False,\n num_random_init=2,\n batch_size=3,\n random_eps=False,\n )\n x_train_adv_fw = attack_fw.generate(self.x_train_mnist)\n x_test_adv_fw = attack_fw.generate(self.x_test_mnist)\n\n # Test\n self.assertAlmostEqual(\n np.mean(x_train_adv_np - self.x_train_mnist), np.mean(x_train_adv_fw - self.x_train_mnist), places=6\n )\n self.assertAlmostEqual(\n np.mean(x_test_adv_np - self.x_test_mnist), np.mean(x_test_adv_fw - self.x_test_mnist), places=6\n )\n\n # Test PGD with random_eps=True\n master_seed(1234)\n attack_np = ProjectedGradientDescentNumpy(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=False,\n num_random_init=0,\n batch_size=3,\n random_eps=True,\n )\n x_train_adv_np = attack_np.generate(self.x_train_mnist)\n x_test_adv_np = attack_np.generate(self.x_test_mnist)\n\n master_seed(1234)\n attack_fw = ProjectedGradientDescent(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=False,\n num_random_init=0,\n batch_size=3,\n random_eps=True,\n )\n x_train_adv_fw = attack_fw.generate(self.x_train_mnist)\n x_test_adv_fw = attack_fw.generate(self.x_test_mnist)\n\n # Test\n self.assertAlmostEqual(\n np.mean(x_train_adv_np - self.x_train_mnist), np.mean(x_train_adv_fw - self.x_train_mnist), places=6\n )\n self.assertAlmostEqual(\n np.mean(x_test_adv_np - self.x_test_mnist), np.mean(x_test_adv_fw - self.x_test_mnist), places=6\n )\n\n # Test the masking 1\n master_seed(1234)\n attack_np = ProjectedGradientDescentNumpy(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=False,\n num_random_init=1,\n batch_size=3,\n random_eps=True,\n )\n\n mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_train_mnist.shape))\n mask = mask.reshape(self.x_train_mnist.shape).astype(np.float32)\n x_train_adv_np = attack_np.generate(self.x_train_mnist, mask=mask)\n\n mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape))\n mask = mask.reshape(self.x_test_mnist.shape).astype(np.float32)\n x_test_adv_np = attack_np.generate(self.x_test_mnist, mask=mask)\n\n master_seed(1234)\n attack_fw = ProjectedGradientDescent(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=False,\n num_random_init=1,\n batch_size=3,\n random_eps=True,\n )\n\n mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_train_mnist.shape))\n mask = mask.reshape(self.x_train_mnist.shape).astype(np.float32)\n x_train_adv_fw = attack_fw.generate(self.x_train_mnist, mask=mask)\n\n mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape))\n mask = mask.reshape(self.x_test_mnist.shape).astype(np.float32)\n x_test_adv_fw = attack_fw.generate(self.x_test_mnist, mask=mask)\n\n # Test\n self.assertAlmostEqual(\n np.mean(x_train_adv_np - self.x_train_mnist), np.mean(x_train_adv_fw - self.x_train_mnist), places=6\n )\n self.assertAlmostEqual(\n np.mean(x_test_adv_np - self.x_test_mnist), np.mean(x_test_adv_fw - self.x_test_mnist), places=6\n )\n\n # Test the masking 2\n master_seed(1234)\n attack_np = ProjectedGradientDescentNumpy(\n classifier,\n eps=1.0,\n 
eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=False,\n num_random_init=1,\n batch_size=3,\n random_eps=True,\n )\n\n mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_train_mnist.shape[1:]))\n mask = mask.reshape(self.x_train_mnist.shape[1:]).astype(np.float32)\n x_train_adv_np = attack_np.generate(self.x_train_mnist, mask=mask)\n\n mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape[1:]))\n mask = mask.reshape(self.x_test_mnist.shape[1:]).astype(np.float32)\n x_test_adv_np = attack_np.generate(self.x_test_mnist, mask=mask)\n\n master_seed(1234)\n attack_fw = ProjectedGradientDescent(\n classifier,\n eps=1.0,\n eps_step=0.1,\n max_iter=5,\n norm=np.inf,\n targeted=False,\n num_random_init=1,\n batch_size=3,\n random_eps=True,\n )\n\n mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_train_mnist.shape[1:]))\n mask = mask.reshape(self.x_train_mnist.shape[1:]).astype(np.float32)\n x_train_adv_fw = attack_fw.generate(self.x_train_mnist, mask=mask)\n\n mask = np.random.binomial(n=1, p=0.5, size=np.prod(self.x_test_mnist.shape[1:]))\n mask = mask.reshape(self.x_test_mnist.shape[1:]).astype(np.float32)\n x_test_adv_fw = attack_fw.generate(self.x_test_mnist, mask=mask)\n\n # Test\n self.assertAlmostEqual(\n np.mean(x_train_adv_np - self.x_train_mnist), np.mean(x_train_adv_fw - self.x_train_mnist), places=6\n )\n self.assertAlmostEqual(\n np.mean(x_test_adv_np - self.x_test_mnist), np.mean(x_test_adv_fw - self.x_test_mnist), places=6\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module tests the Threshold Attack.\n\n| Paper link:\n https://arxiv.org/abs/1906.06026\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport numpy as np\n\nfrom art.attacks.evasion.pixel_threshold import ThresholdAttack\nfrom art.estimators.estimator import BaseEstimator, NeuralNetworkMixin\nfrom art.estimators.classification.classifier import ClassifierMixin\nfrom art.utils import get_labels_np_array\n\nfrom tests.utils import TestBase\nfrom tests.utils import get_image_classifier_tf, get_image_classifier_kr, get_image_classifier_pt\nfrom tests.attacks.utils import backend_test_classifier_type_check_fail\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestThresholdAttack(TestBase):\n \"\"\"\n A unittest class for testing the Threshold Attack.\n\n This module tests the Threshold Attack.\n\n | Paper link:\n https://arxiv.org/abs/1906.06026\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.n_test = 2\n cls.x_test_mnist = cls.x_test_mnist[0 : cls.n_test]\n cls.y_test_mnist = cls.y_test_mnist[0 : cls.n_test]\n\n def test_6_keras_mnist(self):\n \"\"\"\n Test with the KerasClassifier. (Untargeted Attack)\n :return:\n \"\"\"\n classifier = get_image_classifier_kr()\n self._test_attack(classifier, self.x_test_mnist, self.y_test_mnist, False)\n\n def test_2_tensorflow_mnist(self):\n \"\"\"\n Test with the TensorFlowClassifier. (Untargeted Attack)\n :return:\n \"\"\"\n classifier, sess = get_image_classifier_tf()\n self._test_attack(classifier, self.x_test_mnist, self.y_test_mnist, False)\n\n def test_4_pytorch_mnist(self):\n \"\"\"\n Test with the PyTorchClassifier. (Untargeted Attack)\n :return:\n \"\"\"\n x_test = np.reshape(self.x_test_mnist, (self.x_test_mnist.shape[0], 1, 28, 28)).astype(np.float32)\n classifier = get_image_classifier_pt()\n self._test_attack(classifier, x_test, self.y_test_mnist, False)\n\n def test_7_keras_mnist_targeted(self):\n \"\"\"\n Test with the KerasClassifier. (Targeted Attack)\n :return:\n \"\"\"\n classifier = get_image_classifier_kr()\n self._test_attack(classifier, self.x_test_mnist, self.y_test_mnist, True)\n\n def test_3_tensorflow_mnist_targeted(self):\n \"\"\"\n Test with the TensorFlowClassifier. (Targeted Attack)\n :return:\n \"\"\"\n classifier, sess = get_image_classifier_tf()\n self._test_attack(classifier, self.x_test_mnist, self.y_test_mnist, True)\n\n def test_5_pytorch_mnist_targeted(self):\n \"\"\"\n Test with the PyTorchClassifier. 
(Targeted Attack)\n :return:\n \"\"\"\n x_test = np.reshape(self.x_test_mnist, (self.x_test_mnist.shape[0], 1, 28, 28)).astype(np.float32)\n classifier = get_image_classifier_pt()\n self._test_attack(classifier, x_test, self.y_test_mnist, True)\n\n def _test_attack(self, classifier, x_test, y_test, targeted):\n \"\"\"\n Test with the Threshold Attack\n :return:\n \"\"\"\n x_test_original = x_test.copy()\n\n if targeted:\n # Generate random target classes\n class_y_test = np.argmax(y_test, axis=1)\n nb_classes = np.unique(class_y_test).shape[0]\n targets = np.random.randint(nb_classes, size=self.n_test)\n for i in range(self.n_test):\n if class_y_test[i] == targets[i]:\n targets[i] -= 1\n else:\n targets = y_test\n\n for es in [1]: # Option 0 is not easy to reproduce reliably, we should consider it at a later time\n df = ThresholdAttack(classifier, th=128, es=es, targeted=targeted)\n x_test_adv = df.generate(x_test_original, targets, max_iter=10)\n\n np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, x_test, x_test_adv)\n self.assertFalse((0.0 == x_test_adv).all())\n\n y_pred = get_labels_np_array(classifier.predict(x_test_adv))\n\n accuracy = np.sum(np.argmax(y_pred, axis=1) == np.argmax(self.y_test_mnist, axis=1)) / self.n_test\n logger.info(\"Accuracy on adversarial examples: %.2f%%\", (accuracy * 100))\n\n # Check that x_test has not been modified by attack and classifier\n self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)\n\n def test_1_classifier_type_check_fail(self):\n backend_test_classifier_type_check_fail(ThresholdAttack, [BaseEstimator, NeuralNetworkMixin, ClassifierMixin])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements classes to evaluate the performance of poison detection methods.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport logging\nfrom typing import Tuple, Union, List\n\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\nclass GroundTruthEvaluator:\n \"\"\"\n Class to evaluate the performance of the poison detection method.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Evaluates ground truth constructor\n \"\"\"\n\n def analyze_correctness(\n self, assigned_clean_by_class: Union[np.ndarray, List[np.ndarray]], is_clean_by_class: list\n ) -> Tuple[np.ndarray, str]:\n \"\"\"\n For each training sample, determine whether the activation clustering method was correct.\n\n :param assigned_clean_by_class: Result of clustering.\n :param is_clean_by_class: is clean separated by class.\n :return: Two variables are returned:\n 1) all_errors_by_class[i]: an array indicating the correctness of each assignment\n in the ith class. Such that:\n all_errors_by_class[i] = 0 if marked poison, is poison\n all_errors_by_class[i] = 1 if marked clean, is clean\n all_errors_by_class[i] = 2 if marked poison, is clean\n all_errors_by_class[i] = 3 marked clean, is poison\n 2) Json object with confusion matrix per-class.\n \"\"\"\n all_errors_by_class = []\n poison = 0\n clean = 1\n dic_json = {}\n\n logger.debug(\"Error rates per class:\")\n for class_i, (assigned_clean, is_clean) in enumerate(zip(assigned_clean_by_class, is_clean_by_class)):\n errors = []\n for assignment, bl_var in zip(assigned_clean, is_clean):\n bl_var = int(bl_var)\n # marked poison, is poison = 0\n # true positive\n if assignment == poison and bl_var == poison:\n errors.append(0)\n\n # marked clean, is clean = 1\n # true negative\n elif assignment == clean and bl_var == clean:\n errors.append(1)\n\n # marked poison, is clean = 2\n # false positive\n elif assignment == poison and bl_var == clean:\n errors.append(2)\n\n # marked clean, is poison = 3\n # false negative\n elif assignment == clean and bl_var == poison:\n errors.append(3)\n else:\n raise Exception(\"Analyze_correctness entered wrong class\")\n\n errors = np.asarray(errors)\n logger.debug(\"-------------------%d---------------\", class_i)\n key_i = \"class_\" + str(class_i)\n matrix_i = self.get_confusion_matrix(errors)\n dic_json.update({key_i: matrix_i})\n all_errors_by_class.append(errors)\n\n all_errors_by_class = np.asarray(all_errors_by_class)\n conf_matrix_json = json.dumps(dic_json)\n\n return all_errors_by_class, conf_matrix_json\n\n def get_confusion_matrix(self, values: np.ndarray) -> dict:\n \"\"\"\n Computes and returns a json object that contains the confusion matrix for each class.\n\n :param values: Array indicating the correctness of each assignment in the ith class.\n :return: Json object with confusion matrix per-class.\n \"\"\"\n dic_class = {}\n true_positive = np.where(values == 0)[0].shape[0]\n true_negative = np.where(values == 1)[0].shape[0]\n false_positive = np.where(values == 2)[0].shape[0]\n false_negative = np.where(values == 3)[0].shape[0]\n\n tp_rate = self.calculate_and_print(true_positive, true_positive + false_negative, \"true-positive rate\")\n tn_rate = 
self.calculate_and_print(true_negative, false_positive + true_negative, \"true-negative rate\")\n fp_rate = self.calculate_and_print(false_positive, false_positive + true_negative, \"false-positive rate\")\n fn_rate = self.calculate_and_print(false_negative, true_positive + false_negative, \"false-negative rate\")\n\n dic_tp = dict(\n rate=round(tp_rate, 2),\n numerator=true_positive,\n denominator=(true_positive + false_negative),\n )\n if (true_positive + false_negative) == 0:\n dic_tp = dict(\n rate=\"N/A\",\n numerator=true_positive,\n denominator=(true_positive + false_negative),\n )\n\n dic_tn = dict(\n rate=round(tn_rate, 2),\n numerator=true_negative,\n denominator=(false_positive + true_negative),\n )\n if (false_positive + true_negative) == 0:\n dic_tn = dict(\n rate=\"N/A\",\n numerator=true_negative,\n denominator=(false_positive + true_negative),\n )\n\n dic_fp = dict(\n rate=round(fp_rate, 2),\n numerator=false_positive,\n denominator=(false_positive + true_negative),\n )\n if (false_positive + true_negative) == 0:\n dic_fp = dict(\n rate=\"N/A\",\n numerator=false_positive,\n denominator=(false_positive + true_negative),\n )\n\n dic_fn = dict(\n rate=round(fn_rate, 2),\n numerator=false_negative,\n denominator=(true_positive + false_negative),\n )\n if (true_positive + false_negative) == 0:\n dic_fn = dict(\n rate=\"N/A\",\n numerator=false_negative,\n denominator=(true_positive + false_negative),\n )\n\n dic_class.update(dict(TruePositive=dic_tp))\n dic_class.update(dict(TrueNegative=dic_tn))\n dic_class.update(dict(FalsePositive=dic_fp))\n dic_class.update(dict(FalseNegative=dic_fn))\n\n return dic_class\n\n @staticmethod\n def calculate_and_print(numerator: int, denominator: int, name: str) -> float:\n \"\"\"\n Computes and prints the rates based on the denominator provided.\n\n :param numerator: number used to compute the rate.\n :param denominator: number used to compute the rate.\n :param name: Rate name being computed e.g., false-positive rate.\n :return: Computed rate\n \"\"\"\n try:\n res = 100 * (numerator / float(denominator))\n logger.debug(\"%s: %d/%d=%.3g\", name, numerator, denominator, res)\n return res\n except ZeroDivisionError:\n logger.debug(\"%s: couldn't calculate %d/%d\", name, numerator, denominator)\n return 0.0\n", "# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2019\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nfrom xgboost import XGBClassifier\nimport lightgbm\nimport numpy as np\nfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, GradientBoostingClassifier\n\nfrom art.estimators.classification.xgboost import XGBoostClassifier\nfrom art.estimators.classification.lightgbm import LightGBMClassifier\nfrom art.estimators.classification.scikitlearn import SklearnClassifier\nfrom art.utils import load_dataset\nfrom art.metrics.verification_decisions_trees import RobustnessVerificationTreeModelsCliqueMethod\n\nfrom tests.utils import master_seed\n\nlogger = logging.getLogger(__name__)\n\nNB_TRAIN = 100\nNB_TEST = 100\n\n\nclass TestMetricsTrees(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n (x_train, y_train), (x_test, y_test), _, _ = load_dataset(\"mnist\")\n\n cls.n_classes = 10\n cls.n_features = 28 * 28\n n_train = x_train.shape[0]\n n_test = x_test.shape[0]\n x_train = x_train.reshape((n_train, cls.n_features))\n x_test = x_test.reshape((n_test, cls.n_features))\n\n cls.x_train = x_train[:NB_TRAIN]\n cls.y_train = y_train[:NB_TRAIN]\n cls.x_test = x_test[:NB_TEST]\n cls.y_test = y_test[:NB_TEST]\n\n @classmethod\n def setUp(cls):\n master_seed(seed=42)\n\n def test_XGBoost(self):\n model = XGBClassifier(n_estimators=4, max_depth=6, objective=\"multi:softprob\", eval_metric=\"merror\")\n model.fit(self.x_train, np.argmax(self.y_train, axis=1))\n\n classifier = XGBoostClassifier(model=model, nb_features=self.n_features, nb_classes=self.n_classes)\n\n rt = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)\n average_bound, verified_error = rt.verify(\n x=self.x_test, y=self.y_test, eps_init=0.3, nb_search_steps=10, max_clique=2, max_level=2\n )\n\n self.assertEqual(average_bound, 0.0011425781249999997)\n self.assertEqual(verified_error, 1.0)\n\n def test_LightGBM(self):\n train_data = lightgbm.Dataset(self.x_train, label=np.argmax(self.y_train, axis=1))\n test_data = lightgbm.Dataset(self.x_test, label=np.argmax(self.y_test, axis=1))\n\n parameters = {\n \"objective\": \"multiclass\",\n \"num_class\": self.n_classes,\n \"metric\": \"multi_logloss\",\n \"is_unbalance\": \"true\",\n \"boosting\": \"gbdt\",\n \"num_leaves\": 5,\n \"feature_fraction\": 0.5,\n \"bagging_fraction\": 0.5,\n \"bagging_freq\": 0,\n \"learning_rate\": 0.05,\n \"verbose\": 0,\n }\n\n model = lightgbm.train(\n parameters, train_data, valid_sets=test_data, num_boost_round=2, early_stopping_rounds=10\n )\n\n classifier = LightGBMClassifier(model=model)\n\n rt = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)\n average_bound, verified_error = rt.verify(\n x=self.x_test, y=self.y_test, eps_init=0.3, nb_search_steps=10, max_clique=2, max_level=2\n )\n\n self.assertEqual(average_bound, 0.047742187500000005)\n self.assertEqual(verified_error, 0.94)\n\n def test_GradientBoosting(self):\n model = GradientBoostingClassifier(n_estimators=4, max_depth=6)\n model.fit(self.x_train, np.argmax(self.y_train, axis=1))\n\n classifier = SklearnClassifier(model=model)\n\n rt = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)\n average_bound, verified_error = rt.verify(\n 
x=self.x_test, y=self.y_test, eps_init=0.3, nb_search_steps=10, max_clique=2, max_level=2\n )\n\n self.assertAlmostEqual(average_bound, 0.009, delta=0.0002)\n self.assertEqual(verified_error, 1.0)\n\n def test_RandomForest(self):\n model = RandomForestClassifier(n_estimators=4, max_depth=6)\n model.fit(self.x_train, np.argmax(self.y_train, axis=1))\n\n classifier = SklearnClassifier(model=model)\n\n rt = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)\n average_bound, verified_error = rt.verify(\n x=self.x_test, y=self.y_test, eps_init=0.3, nb_search_steps=10, max_clique=2, max_level=2\n )\n\n self.assertEqual(average_bound, 0.016482421874999993)\n self.assertEqual(verified_error, 1.0)\n\n def test_ExtraTrees(self):\n model = ExtraTreesClassifier(n_estimators=4, max_depth=6)\n model.fit(self.x_train, np.argmax(self.y_train, axis=1))\n\n classifier = SklearnClassifier(model=model)\n\n rt = RobustnessVerificationTreeModelsCliqueMethod(classifier=classifier)\n average_bound, verified_error = rt.verify(\n x=self.x_test, y=self.y_test, eps_init=0.3, nb_search_steps=10, max_clique=2, max_level=2\n )\n\n self.assertEqual(average_bound, 0.05406445312499999)\n self.assertEqual(verified_error, 0.96)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements gradient check functions for estimators\n\"\"\"\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nfrom tqdm.auto import trange\n\nif TYPE_CHECKING:\n from art.estimators.estimator import LossGradientsMixin\n\n\ndef loss_gradient_check(\n estimator: \"LossGradientsMixin\", x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs\n) -> np.ndarray:\n \"\"\"\n Compute the gradient of the loss function w.r.t. `x` and identify points where the gradient is zero, nan, or inf\n\n :param estimator: The classifier to be analyzed.\n :param x: Input with shape as expected by the classifier's model.\n :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape\n (nb_samples,).\n :param training_mode: `True` for model set to training mode and `'False` for model set to evaluation mode.\n :return: Array of booleans with the shape (len(x), 3). If true means the gradient of the loss w.r.t. 
the\n particular `x` was bad (zero, nan, inf).\n \"\"\"\n assert len(x) == len(y), \"x and y must be the same length\"\n\n is_bad = []\n for i in trange(len(x), desc=\"Gradient check\"):\n grad = estimator.loss_gradient(x=x[[i]], y=y[[i]], training_mode=training_mode, **kwargs)\n is_bad.append(\n [\n (np.min(grad) == 0 and np.max(grad) == 0),\n np.any(np.isnan(grad)),\n np.any(np.isinf(grad)),\n ]\n )\n\n return np.array(is_bad, dtype=bool)\n" ]
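For reference, a minimal NumPy-only sketch of the per-sample checks that `loss_gradient_check` above performs; the function name here is illustrative and not part of ART:

```python
import numpy as np

def gradient_flags(grad: np.ndarray) -> list:
    # Same three checks as loss_gradient_check: all-zero, NaN, inf.
    return [
        bool(np.min(grad) == 0 and np.max(grad) == 0),  # gradient vanished entirely
        bool(np.any(np.isnan(grad))),                   # gradient is numerically invalid
        bool(np.any(np.isinf(grad))),                   # gradient overflowed
    ]

print(gradient_flags(np.array([0.1, -0.2, 0.3])))  # [False, False, False] -> usable
print(gradient_flags(np.zeros(3)))                 # [True, False, False] -> no signal
print(gradient_flags(np.array([1.0, np.inf])))     # [False, False, True] -> overflow
```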
[ [ "numpy.swapaxes", "numpy.ones_like", "sklearn.linear_model.LogisticRegression", "numpy.abs", "numpy.ones", "numpy.argmax", "numpy.mean", "numpy.zeros_like", "sklearn.svm.LinearSVC", "sklearn.svm.SVC", "numpy.prod" ], [ "numpy.abs", "numpy.unique", "numpy.reshape", "numpy.argmax", "numpy.testing.assert_raises", "numpy.random.randint" ], [ "numpy.asarray", "numpy.where" ], [ "sklearn.ensemble.ExtraTreesClassifier", "numpy.argmax", "sklearn.ensemble.RandomForestClassifier", "sklearn.ensemble.GradientBoostingClassifier" ], [ "numpy.min", "numpy.isnan", "numpy.max", "numpy.array", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ashokei/FBGEMM
[ "bf9ed7dfce1fc2386349908950764bfda7089b4a" ]
[ "fbgemm_gpu/setup.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport glob\nimport os\nimport shutil\nimport sysconfig\n\nfrom codegen.embedding_backward_code_generator import emb_codegen\nfrom setuptools import setup\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\ncur_dir = os.path.dirname(os.path.realpath(__file__))\ncub_include_path = os.getenv(\"CUB_DIR\")\nbuild_codegen_path = \"build/codegen\"\npy_path = \"python\"\n\n# Get the long description from the relevant file\nwith open(os.path.join(cur_dir, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nextra_compile_args = sysconfig.get_config_var(\"CFLAGS\").split()\nextra_compile_args += [\"-mavx2\", \"-mf16c\", \"-mfma\", \"-mavx512f\", \"-mavx512bw\", \"-mavx512dq\", \"-mavx512vl\"]\n\nOPTIMIZERS = [\n \"adagrad\",\n \"adam\",\n \"approx_rowwise_adagrad\",\n \"approx_sgd\",\n \"lamb\",\n \"lars_sgd\",\n \"partial_rowwise_adam\",\n \"partial_rowwise_lamb\",\n \"rowwise_adagrad\",\n \"sgd\",\n]\n\ncpp_asmjit_files = glob.glob(\"../third_party/asmjit/src/asmjit/*/*.cpp\")\n\ncpp_fbgemm_files = [\n \"../src/EmbeddingSpMDMAvx2.cc\",\n \"../src/EmbeddingSpMDMAvx512.cc\",\n \"../src/EmbeddingSpMDM.cc\",\n \"../src/EmbeddingSpMDMNBit.cc\",\n \"../src/QuantUtils.cc\",\n \"../src/QuantUtilsAvx2.cc\",\n \"../src/RefImplementations.cc\",\n \"../src/RowWiseSparseAdagradFused.cc\",\n \"../src/SparseAdagrad.cc\",\n \"../src/Utils.cc\",\n]\n\ncpp_cpu_output_files = (\n [\n \"gen_embedding_forward_quantized_unweighted_codegen_cpu.cpp\",\n \"gen_embedding_forward_quantized_weighted_codegen_cpu.cpp\",\n \"gen_embedding_backward_dense_split_cpu.cpp\",\n ]\n + [\n \"gen_embedding_backward_split_{}_cpu.cpp\".format(optimizer)\n for optimizer in OPTIMIZERS\n ]\n + [\n \"gen_embedding_backward_{}_split_cpu.cpp\".format(optimizer)\n for optimizer in OPTIMIZERS\n ]\n)\n\ncpp_cuda_output_files = (\n [\n \"gen_embedding_forward_dense_weighted_codegen_cuda.cu\",\n \"gen_embedding_forward_dense_unweighted_codegen_cuda.cu\",\n \"gen_embedding_forward_quantized_split_unweighted_codegen_cuda.cu\",\n \"gen_embedding_forward_quantized_split_weighted_codegen_cuda.cu\",\n \"gen_embedding_forward_split_weighted_codegen_cuda.cu\",\n \"gen_embedding_forward_split_unweighted_codegen_cuda.cu\",\n \"gen_embedding_backward_split_indice_weights_codegen_cuda.cu\",\n \"gen_embedding_backward_dense_indice_weights_codegen_cuda.cu\",\n \"gen_embedding_backward_dense_split_unweighted_cuda.cu\",\n \"gen_embedding_backward_dense_split_weighted_cuda.cu\",\n ]\n + [\n \"gen_embedding_backward_{}_split_{}_cuda.cu\".format(optimizer, weighted)\n for optimizer in OPTIMIZERS\n for weighted in [\n \"weighted\",\n \"unweighted\",\n ]\n ]\n + [\n \"gen_embedding_backward_split_{}.cpp\".format(optimizer)\n for optimizer in OPTIMIZERS\n ]\n)\n\npy_output_files = [\"lookup_{}.py\".format(optimizer) for optimizer in OPTIMIZERS]\n\n\ndef generate_jinja_files():\n abs_build_path = os.path.join(cur_dir, build_codegen_path)\n if not os.path.exists(abs_build_path):\n os.makedirs(abs_build_path)\n emb_codegen(install_dir=abs_build_path, is_fbcode=False)\n\n dst_python_path = os.path.join(cur_dir, py_path)\n if not os.path.exists(dst_python_path):\n os.makedirs(dst_python_path)\n for filename in py_output_files:\n shutil.copy2(os.path.join(abs_build_path, filename), dst_python_path)\n 
shutil.copy2(os.path.join(cur_dir, \"codegen\", \"lookup_args.py\"), dst_python_path)\n\n\nclass FBGEMM_GPU_BuildExtension(BuildExtension.with_options(no_python_abi_suffix=True)):\n def build_extension(self, ext):\n generate_jinja_files()\n super().build_extension(ext)\n\n\nsetup(\n name=\"fbgemm_gpu\",\n install_requires=[\n \"torch\",\n \"Jinja2\",\n \"click\",\n \"hypothesis\",\n ],\n version=\"0.0.1\",\n long_description=long_description,\n ext_modules=[\n CUDAExtension(\n name=\"fbgemm_gpu_py\",\n sources=[\n os.path.join(cur_dir, build_codegen_path, \"{}\".format(f))\n for f in cpp_cuda_output_files + cpp_cpu_output_files\n ]\n + cpp_asmjit_files\n + cpp_fbgemm_files\n + [\n os.path.join(cur_dir, \"codegen/embedding_forward_split_cpu.cpp\"),\n os.path.join(cur_dir, \"codegen/embedding_forward_quantized_host_cpu.cpp\"),\n os.path.join(cur_dir, \"codegen/embedding_forward_quantized_host.cpp\"),\n os.path.join(cur_dir, \"codegen/embedding_backward_dense_host_cpu.cpp\"),\n os.path.join(cur_dir, \"codegen/embedding_backward_dense_host.cpp\"),\n os.path.join(cur_dir, \"src/split_embeddings_cache_cuda.cu\"),\n os.path.join(cur_dir, \"src/split_table_batched_embeddings.cpp\"),\n os.path.join(cur_dir, \"src/cumem_utils.cu\"),\n os.path.join(cur_dir, \"src/cumem_utils_host.cpp\"),\n os.path.join(cur_dir, \"src/quantize_ops_cpu.cpp\"),\n os.path.join(cur_dir, \"src/quantize_ops_gpu.cpp\"),\n os.path.join(cur_dir, \"src/sparse_ops_cpu.cpp\"),\n os.path.join(cur_dir, \"src/sparse_ops_gpu.cpp\"),\n os.path.join(cur_dir, \"src/sparse_ops.cu\"),\n ],\n include_dirs=[\n cur_dir,\n os.path.join(cur_dir, \"include\"),\n os.path.join(cur_dir, \"../include\"),\n os.path.join(cur_dir, \"../src\"),\n os.path.join(cur_dir, \"../third_party/asmjit/src\"),\n os.path.join(cur_dir, \"../third_party/asmjit/src/core\"),\n os.path.join(cur_dir, \"../third_party/asmjit/src/x86\"),\n os.path.join(cur_dir, \"../third_party/cpuinfo/include\"),\n cub_include_path,\n ],\n extra_compile_args={\"cxx\": extra_compile_args,\n \"nvcc\": [\"-U__CUDA_NO_HALF_CONVERSIONS__\"]},\n )\n ],\n cmdclass={\"build_ext\": FBGEMM_GPU_BuildExtension},\n)\n" ]
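The essentials of the build recipe above, reduced to a minimal sketch; module and source names here are placeholders, not FBGEMM's:

```python
# setup.py -- skeleton of the CUDAExtension pattern used above.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name="my_ext",
    ext_modules=[
        CUDAExtension(
            name="my_ext_py",
            sources=["bindings.cpp", "kernels.cu"],  # placeholder sources
            include_dirs=["include"],
            extra_compile_args={"cxx": ["-O3"], "nvcc": ["-O3"]},
        )
    ],
    # no_python_abi_suffix drops the ABI tag from the built .so, as above.
    cmdclass={"build_ext": BuildExtension.with_options(no_python_abi_suffix=True)},
)
```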
[ [ "torch.utils.cpp_extension.BuildExtension.with_options" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EH94/ensemble-server
[ "852948ee90834266d9fa09562443e916316dede5" ]
[ "app.py" ]
[ "import io\nimport time\nimport keras\nimport numpy as np\nfrom os.path import join\nfrom PIL import Image\nfrom base64 import encodebytes\nfrom keras import backend as K\nfrom keras.models import Model, Input\nfrom keras.layers.merge import average\nfrom flask import Flask, request, jsonify, send_from_directory\n\n# The below provided fucntions will be used from yolo_utils.py\nfrom yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes\n\n# The below functions from the yad2k library will be used\nfrom yad2k2 import main\nfrom yad2k.models.keras_yolo import yolo_head, yolo_eval\n\napp = Flask(__name__)\n\nmemberCounter = 0\nimageCounter = 1\n\[email protected](\"/\")\ndef home():\n return jsonify(\"Members in ensemble: \" + str(memberCounter))\n\[email protected](\"/GetImages\")\ndef get_images():\n global imageCounter\n\n # Get paths for the next X images (X fake and X real) to send to edge device\n fake_images = [join(\"images/fake/\", str(f) + \".jpg\") for f in range(imageCounter, imageCounter + 10)]\n real_images = [join(\"images/real/\", str(f) + \".jpg\") for f in range(imageCounter, imageCounter + 10)]\n\n images = fake_images + real_images\n imageCounter += 10\n\n encoded_imges = []\n for image_path in images:\n try:\n encoded_imges.append(get_response_image(image_path))\n except:\n continue\n\n return jsonify({'images': encoded_imges})\n\[email protected](\"/GetAggregatedModel\")\ndef get_aggregated_model():\n if(memberCounter > 1):\n return send_from_directory(\"model_data/models/\", \"ensemble.h5\", as_attachment=True)\n else:\n return jsonify(\"No ensemble model found\")\n\[email protected](\"/AggregateModel\", methods=[\"POST\"])\ndef aggregate_model():\n if 'weights' not in request.files:\n return jsonify(\"No weights file provided\")\n \n file = request.files['weights']\n file.save(\"./model_data/weights/weights.weights\")\n \n weights = open(\"./model_data/weights/weights.weights\", 'rb')\n \n # Get time when conversion starts\n conversionStartTime = time.clock()\n # Convert weights to model\n model = convert_weights_to_keras_model(weights) \n # Get time when conversion has finnished\n conversionEndTime = time.clock()\n\n # Get time when initiated adding model to ensemble\n baggingStartTime = time.clock()\n # Add model to ensemble\n bagging_ensemble_model(model)\n # Ge time when model has been added to ensemble\n baggingEndTime = time.clock()\n\n totalTimeConversion = conversionEndTime-conversionStartTime\n totalTimeAggregation = baggingEndTime-baggingStartTime\n\n print(\"Conversion of weights to keras model \", memberCounter, \": \", \" - Time to convert: \", totalTimeConversion)\n print(\"Aggregation of model \", memberCounter, \": \", \" - Time to aggregate: \", totalTimeAggregation)\n\n return jsonify(\"Model has been added to the ensemble\")\n\[email protected](\"/GetEnsemblePrediction\", methods=[\"POST\"])\ndef get_ensemble_prediction():\n if(memberCounter > 1):\n if 'image' not in request.files:\n return jsonify(\"No image provided\")\n \n file = request.files['image']\n file.save(\"predictions/\" + file.filename)\n\n ensemble = keras.models.load_model('model_data/models/ensemble.h5', compile=False)\n image_name, out_scores, result= make_prediction(\"predictions/\", file.filename, ensemble)\n\n response = \"Image: \" + str(image_name) + \"\\nPrediction: \" + str(result) + \"\\nConfidence: \" + str(out_scores)\n return jsonify(response)\n else:\n return jsonify(\"No ensemble model found\")\n\[email 
protected](\"/GetSinglePrediction\", methods=[\"POST\"])\ndef get_single_prediction():\n if(memberCounter != 0):\n if 'image' not in request.files:\n return jsonify(\"No image provided\")\n \n file = request.files['image']\n file.save(\"predictions/\" + file.filename)\n\n firstModel = keras.models.load_model('model_data/models/firstModel.h5', compile=False)\n image_name, out_scores, result= make_prediction(\"predictions/\", file.filename, firstModel)\n\n response = \"Image: \" + str(image_name) + \"\\nPrediction: \" + str(result) + \"\\nConfidence: \" + str(out_scores)\n return jsonify(response)\n else:\n return jsonify(\"No model found\")\n\ndef bagging_ensemble_model(model):\n global memberCounter\n\n if(memberCounter == 0):\n keras.models.save_model(model, \"model_data/models/firstModel.h5\")\n memberCounter += 1\n\n return\n\n inputs = Input(shape=(608, 608, 3))\n if(memberCounter == 1):\n firstModel = keras.models.load_model('model_data/models/firstModel.h5', compile=True)\n x3 = average([firstModel(inputs), model(inputs)])\n else:\n existingEnsembleModel = keras.models.load_model('model_data/models/ensemble.h5', compile=True)\n existingEnsembleModel.layers.pop()\n\n values = [layer(inputs) for layer in existingEnsembleModel.layers]\n values.append(model(inputs))\n\n x3 = average(values[1:])\n\n newEnsembleModel = Model(inputs=inputs, outputs=x3)\n newEnsembleModel.compile(optimizer=\"sgd\", loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n newEnsembleModel.summary()\n\n keras.models.save_model(newEnsembleModel, \"model_data/models/ensemble.h5\")\n\n memberCounter += 1\n\ndef make_prediction(image_path, input_image_name, yolo_model):\n #Obtaining the dimensions of the input image\n input_image = Image.open(image_path + input_image_name)\n width, height = input_image.size\n width = np.array(width, dtype=float)\n height = np.array(height, dtype=float)\n\n #Assign the shape of the input image to image_shapr variable\n image_shape = (height, width)\n\n #Loading the classes and the anchor boxes that are provided in the model_data folder\n class_names = read_classes(\"model_data/yolo_classes.txt\")\n anchors = read_anchors(\"model_data/yolo_anchors.txt\")\n\n #Print the summery of the model\n yolo_model.summary()\n\n #Convert final layer features to bounding box parameters\n yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))\n\n #Now yolo_eval function selects the best boxes using filtering and non-max suppression techniques.\n # If you want to dive in more to see how this works, refer keras_yolo.py file in yad2k/models\n boxes, scores, classes = yolo_eval(yolo_outputs, image_shape)\n\n # Initiate a session\n sess = K.get_session()\n\n #Preprocess the input image before feeding into the convolutional network\n image, image_data = preprocess_image(image_path + input_image_name, model_image_size = (608, 608))\n\n #Run the session\n out_scores, out_boxes, out_classes = sess.run(\n [scores, boxes, classes],\n feed_dict={\n yolo_model.input: image_data,\n K.learning_phase(): 0\n }\n )\n\n #Print the results\n print('Found {} boxes for {}'.format(len(out_boxes), input_image_name))\n \n #Produce the colors for the bounding boxs\n colors = generate_colors(class_names)\n \n #Draw the bounding boxes\n draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)\n \n #Apply the predicted bounding boxes to the image and save it\n image.save(\"predictions/\" + input_image_name, quality=90)\n \n if(len(out_classes) == 0):\n result = \"No box found\"\n elif 
(out_classes[0] == 0):\n result = \"real\"\n else:\n result = \"fake\"\n\n return input_image_name, out_scores, result\n\ndef convert_weights_to_keras_model(weights_file):\n weights_header = np.ndarray(\n shape=(4, ), dtype='int32', buffer=weights_file.read(20))\n print('Weights Header: ', weights_header)\n\n return main(\"./model_data/weights/yolov2.cfg\", weights_file, str(memberCounter))\n\ndef get_response_image(image_path):\n image = Image.open(image_path, mode='r')\n byte_arr = io.BytesIO()\n image.save(byte_arr, format='PNG')\n encoded_img = encodebytes(byte_arr.getvalue()).decode('ascii')\n\n return encoded_img" ]
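The core of `bagging_ensemble_model` above is output averaging over a shared input. A minimal sketch using the tf.keras equivalents of the imports used there; it assumes every member accepts the same input shape and produces same-shaped outputs:

```python
from tensorflow.keras.layers import Input, average
from tensorflow.keras.models import Model

def average_ensemble(members, input_shape=(608, 608, 3)):
    # Feed one shared input tensor through every member and average the outputs.
    inputs = Input(shape=input_shape)
    outputs = average([m(inputs) for m in members])  # needs >= 2 members
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer="sgd", loss="binary_crossentropy", metrics=["accuracy"])
    return model
```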
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
plant99/pygeoapi
[ "4dfbe42a7ddecc04e76cbcfe5e7c376318919d9e" ]
[ "pygeoapi/provider/xarray_.py" ]
[ "# =================================================================\n#\n# Authors: Gregory Petrochenkov <[email protected]>\n#\n# Copyright (c) 2020 Gregory Petrochenkov\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the \"Software\"), to deal in the Software without\n# restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n#\n# =================================================================\n\nimport os\nimport logging\nimport tempfile\nimport zipfile\n\nimport xarray\nimport numpy as np\n\nfrom pygeoapi.provider.base import (BaseProvider,\n ProviderConnectionError,\n ProviderNoDataError,\n ProviderQueryError)\nfrom pygeoapi.util import read_data\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass XarrayProvider(BaseProvider):\n \"\"\"Xarray Provider\"\"\"\n\n def __init__(self, provider_def):\n \"\"\"\n Initialize object\n :param provider_def: provider definition\n :returns: pygeoapi.provider.xarray_.XarrayProvider\n \"\"\"\n\n super().__init__(provider_def)\n\n try:\n if provider_def['format']['name'] == 'zarr':\n open_func = xarray.open_zarr\n else:\n open_func = xarray.open_dataset\n self._data = open_func(self.data)\n self._data = _convert_float32_to_float64(self._data)\n self._coverage_properties = self._get_coverage_properties()\n\n self.axes = [self._coverage_properties['x_axis_label'],\n self._coverage_properties['y_axis_label'],\n self._coverage_properties['time_axis_label']]\n\n self.fields = self._coverage_properties['fields']\n except Exception as err:\n LOGGER.warning(err)\n raise ProviderConnectionError(err)\n\n def get_coverage_domainset(self):\n \"\"\"\n Provide coverage domainset\n\n :returns: CIS JSON object of domainset metadata\n \"\"\"\n\n c_props = self._coverage_properties\n domainset = {\n 'type': 'DomainSetType',\n 'generalGrid': {\n 'type': 'GeneralGridCoverageType',\n 'srsName': c_props['bbox_crs'],\n 'axisLabels': [\n c_props['x_axis_label'],\n c_props['y_axis_label'],\n c_props['time_axis_label']\n ],\n 'axis': [{\n 'type': 'RegularAxisType',\n 'axisLabel': c_props['x_axis_label'],\n 'lowerBound': c_props['bbox'][0],\n 'upperBound': c_props['bbox'][2],\n 'uomLabel': c_props['bbox_units'],\n 'resolution': c_props['resx']\n }, {\n 'type': 'RegularAxisType',\n 'axisLabel': c_props['y_axis_label'],\n 'lowerBound': c_props['bbox'][1],\n 'upperBound': c_props['bbox'][3],\n 'uomLabel': c_props['bbox_units'],\n 'resolution': c_props['resy']\n },\n {\n 'type': 'RegularAxisType',\n 'axisLabel': c_props['time_axis_label'],\n 'lowerBound': c_props['time_range'][0],\n 'upperBound': c_props['time_range'][1],\n 'uomLabel': c_props['restime'],\n 
'resolution': c_props['restime']\n }\n ],\n 'gridLimits': {\n 'type': 'GridLimitsType',\n 'srsName': 'http://www.opengis.net/def/crs/OGC/0/Index2D',\n 'axisLabels': ['i', 'j'],\n 'axis': [{\n 'type': 'IndexAxisType',\n 'axisLabel': 'i',\n 'lowerBound': 0,\n 'upperBound': c_props['width']\n }, {\n 'type': 'IndexAxisType',\n 'axisLabel': 'j',\n 'lowerBound': 0,\n 'upperBound': c_props['height']\n }]\n }\n },\n '_meta': {\n 'tags': self._data.attrs\n }\n }\n\n return domainset\n\n def get_coverage_rangetype(self):\n \"\"\"\n Provide coverage rangetype\n\n :returns: CIS JSON object of rangetype metadata\n \"\"\"\n\n rangetype = {\n 'type': 'DataRecordType',\n 'field': []\n }\n\n for name, var in self._data.variables.items():\n LOGGER.debug('Determining rangetype for {}'.format(name))\n\n desc, units = None, None\n if len(var.shape) >= 3:\n parameter = self._get_parameter_metadata(\n name, var.attrs)\n desc = parameter['description']\n units = parameter['unit_label']\n\n rangetype['field'].append({\n 'id': name,\n 'type': 'QuantityType',\n 'name': var.attrs.get('long_name') or desc,\n 'definition': str(var.dtype),\n 'nodata': 'null',\n 'uom': {\n 'id': 'http://www.opengis.net/def/uom/UCUM/{}'.format(\n units),\n 'type': 'UnitReference',\n 'code': units\n },\n '_meta': {\n 'tags': var.attrs\n }\n })\n\n return rangetype\n\n def query(self, range_subset=[], subsets={}, bbox=[], datetime_=None,\n format_='json'):\n \"\"\"\n Extract data from collection collection\n\n :param range_subset: list of data variables to return (all if blank)\n :param subsets: dict of subset names with lists of ranges\n :param bbox: bounding box [minx,miny,maxx,maxy]\n :param datetime_: temporal (datestamp or extent)\n :param format_: data format of output\n\n :returns: coverage data as dict of CoverageJSON or native format\n \"\"\"\n\n if not range_subset and not subsets and format_ != 'json':\n LOGGER.debug('No parameters specified, returning native data')\n if format_ == 'zarr':\n return _get_zarr_data(self._data)\n else:\n return read_data(self.data)\n\n if len(range_subset) < 1:\n range_subset = self.fields\n\n data = self._data[[*range_subset]]\n\n if any([self._coverage_properties['x_axis_label'] in subsets,\n self._coverage_properties['y_axis_label'] in subsets,\n self._coverage_properties['time_axis_label'] in subsets,\n datetime_ is not None]):\n\n LOGGER.debug('Creating spatio-temporal subset')\n\n query_params = {}\n for key, val in subsets.items():\n LOGGER.debug('Processing subset: {}'.format(key))\n if data.coords[key].values[0] > data.coords[key].values[-1]:\n LOGGER.debug('Reversing slicing from high to low')\n query_params[key] = slice(val[1], val[0])\n else:\n query_params[key] = slice(val[0], val[1])\n\n if bbox:\n if all([self._coverage_properties['x_axis_label'] in subsets,\n self._coverage_properties['y_axis_label'] in subsets,\n len(bbox) > 0]):\n msg = 'bbox and subsetting by coordinates are exclusive'\n LOGGER.warning(msg)\n raise ProviderQueryError(msg)\n else:\n query_params['x_axis_label'] = slice(bbox[0], bbox[2])\n query_params['y_axis_label'] = slice(bbox[1], bbox[3])\n\n if datetime_ is not None:\n if self._coverage_properties['time_axis_label'] in subsets:\n msg = 'datetime and temporal subsetting are exclusive'\n LOGGER.error(msg)\n raise ProviderQueryError(msg)\n else:\n if '/' in datetime_:\n begin, end = datetime_.split('/')\n if begin < end:\n query_params[self.time_field] = slice(begin, end)\n else:\n LOGGER.debug('Reversing slicing from high to low')\n query_params[self.time_field] 
= slice(end, begin)\n                    else:\n                        query_params[self.time_field] = datetime_\n\n            LOGGER.debug('Query parameters: {}'.format(query_params))\n            try:\n                data = data.sel(query_params)\n            except Exception as err:\n                LOGGER.warning(err)\n                raise ProviderQueryError(err)\n\n        if (any([data.coords[self.x_field].size == 0,\n                 data.coords[self.y_field].size == 0,\n                 data.coords[self.time_field].size == 0])):\n            msg = 'No data found'\n            LOGGER.warning(msg)\n            raise ProviderNoDataError(msg)\n\n        out_meta = {\n            'bbox': [\n                data.coords[self.x_field].values[0],\n                data.coords[self.y_field].values[0],\n                data.coords[self.x_field].values[-1],\n                data.coords[self.y_field].values[-1]\n            ],\n            \"time\": [\n                _to_datetime_string(data.coords[self.time_field].values[0]),\n                _to_datetime_string(data.coords[self.time_field].values[-1])\n            ],\n            \"driver\": \"xarray\",\n            \"height\": data.dims[self.y_field],\n            \"width\": data.dims[self.x_field],\n            \"time_steps\": data.dims[self.time_field],\n            \"variables\": {var_name: var.attrs\n                          for var_name, var in data.variables.items()}\n        }\n\n        LOGGER.debug('Serializing data in memory')\n        if format_ == 'json':\n            LOGGER.debug('Creating output in CoverageJSON')\n            return self.gen_covjson(out_meta, data, range_subset)\n        elif format_ == 'zarr':\n            LOGGER.debug('Returning data in native zarr format')\n            return _get_zarr_data(data)\n        else:  # return data in native format\n            with tempfile.TemporaryFile() as fp:\n                LOGGER.debug('Returning data in native NetCDF format')\n                fp.write(data.to_netcdf())\n                fp.seek(0)\n                return fp.read()\n\n    def gen_covjson(self, metadata, data, range_type):\n        \"\"\"\n        Generate coverage as CoverageJSON representation\n\n        :param metadata: coverage metadata\n        :param data: xarray Dataset object\n        :param range_type: range type list\n\n        :returns: dict of CoverageJSON representation\n        \"\"\"\n\n        LOGGER.debug('Creating CoverageJSON domain')\n        minx, miny, maxx, maxy = metadata['bbox']\n        mint, maxt = metadata['time']\n\n        if data.coords[self.y_field].values[0] > data.coords[self.y_field].values[-1]:  # noqa\n            LOGGER.debug('Reversing direction of {}'.format(self.y_field))\n            miny = data.coords[self.y_field].values[-1]\n            maxy = data.coords[self.y_field].values[0]\n\n        cj = {\n            'type': 'Coverage',\n            'domain': {\n                'type': 'Domain',\n                'domainType': 'Grid',\n                'axes': {\n                    'x': {\n                        'start': minx,\n                        'stop': maxx,\n                        'num': metadata['width']\n                    },\n                    'y': {\n                        'start': maxy,\n                        'stop': miny,\n                        'num': metadata['height']\n                    },\n                    self.time_field: {\n                        'start': mint,\n                        'stop': maxt,\n                        'num': metadata['time_steps']\n                    }\n                },\n                'referencing': [{\n                    'coordinates': ['x', 'y'],\n                    'system': {\n                        'type': self._coverage_properties['crs_type'],\n                        'id': self._coverage_properties['bbox_crs']\n                    }\n                }]\n            },\n            'parameters': {},\n            'ranges': {}\n        }\n\n        for variable in range_type:\n            pm = self._get_parameter_metadata(\n                variable, self._data[variable].attrs)\n\n            parameter = {\n                'type': 'Parameter',\n                'description': pm['description'],\n                'unit': {\n                    'symbol': pm['unit_label']\n                },\n                'observedProperty': {\n                    'id': pm['observed_property_id'],\n                    'label': {\n                        'en': pm['observed_property_name']\n                    }\n                }\n            }\n\n            cj['parameters'][pm['id']] = parameter\n\n        try:\n            for key in cj['parameters'].keys():\n                cj['ranges'][key] = {\n                    'type': 'NdArray',\n                    'dataType': str(self._data[key].dtype),\n                    'axisNames': [\n                        'y', 'x', self._coverage_properties['time_axis_label']\n                    ],\n                    'shape': [metadata['height'],\n                              metadata['width'],\n                              metadata['time_steps']]\n                }\n\n                data = data.fillna(None)\n                cj['ranges'][key]['values'] = data[key].values.flatten().tolist()  # noqa\n        
except IndexError as err:\n            LOGGER.warning(err)\n            raise ProviderQueryError('Invalid query parameter')\n\n        return cj\n\n    def _get_coverage_properties(self):\n        \"\"\"\n        Helper function to normalize coverage properties\n\n        :returns: `dict` of coverage properties\n        \"\"\"\n\n        time_var, y_var, x_var = [None, None, None]\n        for coord in self._data.coords:\n            if coord.lower() == 'time':\n                time_var = coord\n                continue\n            if self._data.coords[coord].attrs.get('units') == 'degrees_north':\n                y_var = coord\n                continue\n            if self._data.coords[coord].attrs.get('units') == 'degrees_east':\n                x_var = coord\n                continue\n\n        if self.x_field is None:\n            self.x_field = x_var\n        if self.y_field is None:\n            self.y_field = y_var\n        if self.time_field is None:\n            self.time_field = time_var\n\n        # It would be preferable to use CF attributes to get width,\n        # resolution etc. but for now a generic approach is used to assess\n        # all of the attributes based on lat lon vars\n\n        properties = {\n            'bbox': [\n                self._data.coords[self.x_field].values[0],\n                self._data.coords[self.y_field].values[0],\n                self._data.coords[self.x_field].values[-1],\n                self._data.coords[self.y_field].values[-1],\n            ],\n            'time_range': [\n                _to_datetime_string(\n                    self._data.coords[self.time_field].values[0]\n                ),\n                _to_datetime_string(\n                    self._data.coords[self.time_field].values[-1]\n                )\n            ],\n            'bbox_crs': 'http://www.opengis.net/def/crs/OGC/1.3/CRS84',\n            'crs_type': 'GeographicCRS',\n            'x_axis_label': self.x_field,\n            'y_axis_label': self.y_field,\n            'time_axis_label': self.time_field,\n            'width': self._data.dims[self.x_field],\n            'height': self._data.dims[self.y_field],\n            'time': self._data.dims[self.time_field],\n            'time_duration': self.get_time_coverage_duration(),\n            'bbox_units': 'degrees',\n            'resx': np.abs(self._data.coords[self.x_field].values[1]\n                           - self._data.coords[self.x_field].values[0]),\n            'resy': np.abs(self._data.coords[self.y_field].values[1]\n                           - self._data.coords[self.y_field].values[0]),\n            'restime': self.get_time_resolution()\n        }\n\n        if 'crs' in self._data.variables.keys():\n            properties['bbox_crs'] = '{}/{}'.format(\n                'http://www.opengis.net/def/crs/OGC/1.3',\n                self._data.crs.epsg_code)\n\n            properties['inverse_flattening'] = self._data.crs.\\\n                inverse_flattening\n\n            properties['crs_type'] = 'ProjectedCRS'\n\n        properties['axes'] = [\n            properties['x_axis_label'],\n            properties['y_axis_label'],\n            properties['time_axis_label']\n        ]\n\n        properties['fields'] = [name for name in self._data.variables\n                                if len(self._data.variables[name].shape) >= 3]\n\n        return properties\n\n    @staticmethod\n    def _get_parameter_metadata(name, attrs):\n        \"\"\"\n        Helper function to derive parameter name and units\n\n        :param name: name of variable\n        :param attrs: dictionary of variable attributes\n\n        :returns: dict of parameter metadata\n        \"\"\"\n\n        return {\n            'id': name,\n            'description': attrs.get('long_name', None),\n            'unit_label': attrs.get('units', None),\n            'unit_symbol': attrs.get('units', None),\n            'observed_property_id': name,\n            'observed_property_name': attrs.get('long_name', None)\n        }\n\n    def get_time_resolution(self):\n        \"\"\"\n        Helper function to derive time resolution\n\n        :returns: time resolution string\n        \"\"\"\n\n        if self._data[self.time_field].size > 1:\n            time_diff = (self._data[self.time_field][1] -\n                         self._data[self.time_field][0])\n\n            dt = np.array([time_diff.values.astype('timedelta64[{}]'.format(x))\n                           for x in ['Y', 'M', 'D', 'h', 'm', 's', 'ms']])\n\n            return str(dt[np.array([x.astype(int) for x in dt]) > 0][0])\n        else:\n            return None\n\n    def get_time_coverage_duration(self):\n        
\"\"\"\n        Helper function to derive time coverage duration\n\n        :returns: time coverage duration string\n        \"\"\"\n\n        dur = self._data[self.time_field][-1] - self._data[self.time_field][0]\n        ms_difference = dur.values.astype('timedelta64[ms]').astype(np.double)\n\n        time_dict = {\n            'days': int(ms_difference / 1000 / 60 / 60 / 24),\n            'hours': int((ms_difference / 1000 / 60 / 60) % 24),\n            'minutes': int((ms_difference / 1000 / 60) % 60),\n            'seconds': int(ms_difference / 1000) % 60\n        }\n\n        times = ['{} {}'.format(val, key) for key, val\n                 in time_dict.items() if val > 0]\n\n        return ', '.join(times)\n\n\ndef _to_datetime_string(datetime_obj):\n    \"\"\"\n    Convenience function to formulate string from various datetime objects\n\n    :param datetime_obj: datetime object (native datetime, cftime)\n\n    :returns: str representation of datetime\n    \"\"\"\n\n    try:\n        value = np.datetime_as_string(datetime_obj)\n    except Exception as err:\n        LOGGER.warning(err)\n        value = datetime_obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n\n    return value\n\n\ndef _zip_dir(path, ziph, cwd):\n    \"\"\"\n    Convenience function to zip directory with sub directories\n    (based on source: https://stackoverflow.com/questions/1855095/)\n\n    :param path: str directory to zip\n    :param ziph: zipfile file\n    :param cwd: current working directory\n    \"\"\"\n    for root, dirs, files in os.walk(path):\n        for file in files:\n\n            if len(dirs) < 1:\n                new_root = '/'.join(root.split('/')[:-1])\n                new_path = os.path.join(root.split('/')[-1], file)\n            else:\n                new_root = root\n                new_path = file\n\n            os.chdir(new_root)\n            ziph.write(new_path)\n            os.chdir(cwd)\n\n\ndef _get_zarr_data(data):\n    \"\"\"\n    Returns bytes to read from Zarr directory zip\n\n    :param data: Xarray dataset of coverage data\n\n    :returns: byte array of zip data\n    \"\"\"\n\n    tmp_dir = tempfile.mkdtemp()\n    zarr_path = os.path.join(tmp_dir, 'zarr.zarr')\n    data.to_zarr(zarr_path, mode='w')\n    with zipfile.ZipFile('{}.zip'.format(zarr_path),\n                         'w', zipfile.ZIP_DEFLATED) as zipf:\n        _zip_dir(zarr_path, zipf, os.getcwd())\n    with open('{}.zip'.format(zarr_path), 'rb') as zip_file:\n        return zip_file.read()\n\n\ndef _convert_float32_to_float64(data):\n    \"\"\"\n    Converts DataArray values of float32 to float64\n\n    :param data: Xarray dataset of coverage data\n\n    :returns: Xarray dataset of coverage data\n    \"\"\"\n\n    for var_name in data.variables:\n        if data[var_name].dtype == 'float32':\n            og_attrs = data[var_name].attrs\n            data[var_name] = data[var_name].astype('float64')\n            data[var_name].attrs = og_attrs\n\n    return data\n" ]
[ [ "numpy.abs", "numpy.datetime_as_string" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
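A note on the time-resolution lookup in the provider above: get_time_resolution() casts the first time delta to a ladder of timedelta64 units and reports the first unit with a nonzero count. Below is a minimal standalone sketch of that selection trick with hypothetical six-hour data; it mirrors the method's own unit list, relying on the fact that year/month and day-based units cannot be promoted to a common dtype, so numpy keeps an object array in which each element retains its own unit.

import numpy as np

# two samples six hours apart, standing in for consecutive time coordinates
times = np.array(['2020-01-01T00:00', '2020-01-01T06:00'], dtype='datetime64[s]')
time_diff = times[1] - times[0]
# same unit ladder as get_time_resolution(); the mix of 'Y'/'M' with
# day-based units forces an object array, preserving per-element units
dt = np.array([time_diff.astype('timedelta64[{}]'.format(x))
               for x in ['Y', 'M', 'D', 'h', 'm', 's', 'ms']])
# the first unit with a nonzero count is the reported resolution: '6 hours'
print(str(dt[np.array([x.astype(int) for x in dt]) > 0][0]))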
mcuttler/pyTMD
[ "40ec2ec4c170c821a2558f52c1aeac6c5f9dfeff", "40ec2ec4c170c821a2558f52c1aeac6c5f9dfeff" ]
[ "pyTMD/load_nodal_corrections.py", "pyTMD/tidal_ellipse.py" ]
[ "#!/usr/bin/env python\nu\"\"\"\nload_nodal_corrections.py (12/2020)\nCalculates the nodal corrections for tidal constituents\nModification of ARGUMENTS fortran subroutine by Richard Ray 03/1999\n\nCALLING SEQUENCE:\n pu,pf,G = load_nodal_corrections(MJD,constituents)\n\nINPUTS:\n MJD: Modified Julian Day of input date\n constituents: tidal constituent IDs\n\nOUTPUTS:\n pu,pf: nodal corrections for the constituents\n G: phase correction in degrees\n\nOPTIONS:\n DELTAT: time correction for converting to Ephemeris Time (days)\n CORRECTIONS: use nodal corrections from OTIS/ATLAS or GOT models\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n\nPROGRAM DEPENDENCIES:\n calc_astrol_longitudes.py: computes the basic astronomical mean longitudes\n\nREFERENCES:\n A. T. Doodson and H. Warburg, \"Admiralty Manual of Tides\", HMSO, (1941).\n P. Schureman, \"Manual of Harmonic Analysis and Prediction of Tides\"\n US Coast and Geodetic Survey, Special Publication, 98, (1958).\n M. G. G. Foreman and R. F. Henry, \"The harmonic analysis of tidal model\n time series\", Advances in Water Resources, 12, (1989).\n\nUPDATE HISTORY:\n Updated 12/2020: fix k1 for FES models\n Updated 08/2020: change time variable names to not overwrite functions\n update nodal corrections for FES models\n Updated 07/2020: added function docstrings. add shallow water constituents\n Updated 09/2019: added netcdf option to CORRECTIONS option\n Updated 08/2018: added correction option ATLAS for localized OTIS solutions\n Updated 07/2018: added option to use GSFC GOT nodal corrections\n Updated 09/2017: Rewritten in Python\n Rewritten in Matlab by Lana Erofeeva 01/2003\n Written by Richard Ray 03/1999\n\"\"\"\nimport numpy as np\nfrom pyTMD.calc_astrol_longitudes import calc_astrol_longitudes\n\ndef load_nodal_corrections(MJD,constituents,DELTAT=0.0,CORRECTIONS='OTIS'):\n \"\"\"\n Calculates the nodal corrections for tidal constituents\n\n Arguments\n ---------\n MJD: modified julian day of input date\n constituents: tidal constituent IDs\n\n Keyword arguments\n -----------------\n DELTAT: time correction for converting to Ephemeris Time (days)\n CORRECTIONS: use nodal corrections from OTIS/ATLAS or GOT models\n\n Returns\n -------\n pu,pf: nodal corrections for the constituents\n G: phase correction in degrees\n \"\"\"\n\n #-- constituents array (not all are included in tidal program)\n cindex = ['sa','ssa','mm','msf','mf','mt','alpha1','2q1','sigma1','q1',\n 'rho1','o1','tau1','m1','chi1','pi1','p1','s1','k1','psi1','phi1',\n 'theta1','j1','oo1','2n2','mu2','n2','nu2','m2a','m2','m2b','lambda2',\n 'l2','t2','s2','r2','k2','eta2','mns2','2sm2','m3','mk3','s3','mn4',\n 'm4','ms4','mk4','s4','s5','m6','s6','s7','s8','m8','mks2','msqm','mtm',\n 'n4','eps2','z0']\n\n #-- degrees to radians\n dtr = np.pi/180.0\n\n #-- set function for astronomical longitudes\n ASTRO5 = True if CORRECTIONS in ('GOT','FES') else False\n #-- convert from Modified Julian Dates into Ephemeris Time\n s,h,p,omega,pp = calc_astrol_longitudes(MJD+DELTAT, ASTRO5=ASTRO5)\n hour = (MJD % 1)*24.0\n t1 = 15.0*hour\n t2 = 30.0*hour\n nt = len(np.atleast_1d(MJD))\n\n #-- Determine equilibrium arguments\n arg = np.zeros((nt,60))\n arg[:,0] = h - pp #-- Sa\n arg[:,1] = 2.0*h #-- Ssa\n arg[:,2] = s - p #-- Mm\n arg[:,3] = 2.0*s - 2.0*h #-- MSf\n arg[:,4] = 2.0*s #-- Mf\n arg[:,5] = 3.0*s - p #-- Mt\n arg[:,6] = t1 - 5.0*s + 3.0*h + p - 90.0 #-- alpha1\n arg[:,7] = t1 
- 4.0*s + h + 2.0*p - 90.0 #-- 2Q1\n arg[:,8] = t1 - 4.0*s + 3.0*h - 90.0 #-- sigma1\n arg[:,9] = t1 - 3.0*s + h + p - 90.0 #-- q1\n arg[:,10] = t1 - 3.0*s + 3.0*h - p - 90.0 #-- rho1\n arg[:,11] = t1 - 2.0*s + h - 90.0 #-- o1\n arg[:,12] = t1 - 2.0*s + 3.0*h + 90.0 #-- tau1\n arg[:,13] = t1 - s + h + 90.0 #-- M1\n arg[:,14] = t1 - s + 3.0*h - p + 90.0 #-- chi1\n arg[:,15] = t1 - 2.0*h + pp - 90.0 #-- pi1\n arg[:,16] = t1 - h - 90.0 #-- p1\n if CORRECTIONS in ('OTIS','ATLAS','netcdf'):\n arg[:,17] = t1 + 90.0 #-- s1\n elif CORRECTIONS in ('GOT','FES'):\n arg[:,17] = t1 + 180.0 #-- s1 (Doodson's phase)\n arg[:,18] = t1 + h + 90.0 #-- k1\n arg[:,19] = t1 + 2.0*h - pp + 90.0 #-- psi1\n arg[:,20] = t1 + 3.0*h + 90.0 #-- phi1\n arg[:,21] = t1 + s - h + p + 90.0 #-- theta1\n arg[:,22] = t1 + s + h - p + 90.0 #-- J1\n arg[:,23] = t1 + 2.0*s + h + 90.0 #-- OO1\n arg[:,24] = t2 - 4.0*s + 2.0*h + 2.0*p #-- 2N2\n arg[:,25] = t2 - 4.0*s + 4.0*h #-- mu2\n arg[:,26] = t2 - 3.0*s + 2.0*h + p #-- n2\n arg[:,27] = t2 - 3.0*s + 4.0*h - p #-- nu2\n arg[:,28] = t2 - 2.0*s + h + pp #-- M2a\n arg[:,29] = t2 - 2.0*s + 2.0*h #-- M2\n arg[:,30] = t2 - 2.0*s + 3.0*h - pp #-- M2b\n arg[:,31] = t2 - s + p + 180.0 #-- lambda2\n arg[:,32] = t2 - s + 2.0*h - p + 180.0 #-- L2\n arg[:,33] = t2 - h + pp #-- T2\n arg[:,34] = t2 #-- S2\n arg[:,35] = t2 + h - pp + 180.0 #-- R2\n arg[:,36] = t2 + 2.0*h #-- K2\n arg[:,37] = t2 + s + 2.0*h - pp #-- eta2\n arg[:,38] = t2 - 5.0*s + 4.0*h + p #-- MNS2\n arg[:,39] = t2 + 2.0*s - 2.0*h #-- 2SM2\n arg[:,40] = 1.5*arg[:,29] #-- M3\n arg[:,41] = arg[:,18] + arg[:,29] #-- MK3\n arg[:,42] = 3.0*t1 #-- S3\n arg[:,43] = arg[:,26] + arg[:,29] #-- MN4\n arg[:,44] = 2.0*arg[:,29] #-- M4\n arg[:,45] = arg[:,29] + arg[:,34] #-- MS4\n arg[:,46] = arg[:,29] + arg[:,36] #-- MK4\n arg[:,47] = 4.0*t1 #-- S4\n arg[:,48] = 5.0*t1 #-- S5\n arg[:,49] = 3.0*arg[:,29] #-- M6\n arg[:,50] = 3.0*t2 #-- S6\n arg[:,51] = 7.0*t1 #-- S7\n arg[:,52] = 4.0*t2 #-- S8\n #-- shallow water constituents\n arg[:,53] = 4.0*arg[:,29] #-- m8\n arg[:,54] = arg[:,29] + arg[:,36] - arg[:,34] #-- mks2\n arg[:,55] = 4.0*s - 2.0*h #-- msqm\n arg[:,56] = 3.0*s - p #-- mtm\n arg[:,57] = 2.0*arg[:,26] #-- n4\n arg[:,58] = t2 - 5.0*s + 4.0*h + p #-- eps2\n #-- mean sea level\n arg[:,59] = 0.0 #-- Z0\n\n #-- determine nodal corrections f and u\n sinn = np.sin(omega*dtr)\n cosn = np.cos(omega*dtr)\n sin2n = np.sin(2.0*omega*dtr)\n cos2n = np.cos(2.0*omega*dtr)\n sin3n = np.sin(3.0*omega*dtr)\n\n #-- set nodal corrections\n f = np.zeros((nt,60))\n u = np.zeros((nt,60))\n #-- determine nodal corrections f and u for each model type\n if CORRECTIONS in ('OTIS','ATLAS','netcdf'):\n f[:,0] = 1.0 #-- Sa\n f[:,1] = 1.0 #-- Ssa\n f[:,2] = 1.0 - 0.130*cosn #-- Mm\n f[:,3] = 1.0 #-- MSf\n f[:,4] = 1.043 + 0.414*cosn #-- Mf\n temp1 = (1.0 + 0.203*cosn + 0.040*cos2n)**2\n temp2 = (0.203*sinn + 0.040*sin2n)**2\n f[:,5] = np.sqrt(temp1 + temp2) #-- Mt\n f[:,6] = 1.0 #-- alpha1\n f[:,7] = np.sqrt((1.0 + 0.188*cosn)**2 + (0.188*sinn)**2) #-- 2Q1\n f[:,8] = f[:,7] #-- sigma1\n f[:,9] = f[:,7] #-- q1\n f[:,10] = f[:,7] #-- rho1\n temp1 = (1.0 + 0.189*cosn - 0.0058*cos2n)**2\n temp2 = (0.189*sinn - 0.0058*sin2n)**2\n f[:,11] = np.sqrt(temp1 + temp2) #-- O1\n f[:,12] = 1.0 #-- tau1\n #-- Doodson's\n # Mtmp1 = 2.0*np.cos(p*dtr) + 0.4*np.cos((p-omega)*dtr)\n # Mtmp2 = np.sin(p*dtr) + 0.2*np.sin((p-omega)*dtr)\n #-- Ray's\n Mtmp1 = 1.36*np.cos(p*dtr) + 0.267*np.cos((p-omega)*dtr)\n Mtmp2 = 0.64*np.sin(p*dtr) + 0.135*np.sin((p-omega)*dtr)\n f[:,13] = 
np.sqrt(Mtmp1**2 + Mtmp2**2) #-- M1\n f[:,14] = np.sqrt((1.0+0.221*cosn)**2+(0.221*sinn)**2) #-- chi1\n f[:,15] = 1.0 #-- pi1\n f[:,16] = 1.0 #-- P1\n f[:,17] = 1.0 #-- S1\n temp1 = (1.0 + 0.1158*cosn - 0.0029*cos2n)**2\n temp2 = (0.1554*sinn - 0.0029*sin2n)**2\n f[:,18] = np.sqrt(temp1 + temp2) #-- K1\n f[:,19] = 1.0 #-- psi1\n f[:,20] = 1.0 #-- phi1\n f[:,21] = 1.0 #-- theta1\n f[:,22] = np.sqrt((1.0+0.169*cosn)**2 + (0.227*sinn)**2) #-- J1\n temp1 = (1.0 + 0.640*cosn + 0.134*cos2n)**2\n temp2 = (0.640*sinn + 0.134*sin2n)**2\n f[:,23] = np.sqrt(temp1 + temp2) #-- OO1\n temp1 = (1.0 - 0.03731*cosn + 0.00052*cos2n)**2\n temp2 = (0.03731*sinn - 0.00052*sin2n)**2\n f[:,24] = np.sqrt(temp1 + temp2) #-- 2N2\n f[:,25] = f[:,24] #-- mu2\n f[:,26] = f[:,24] #-- N2\n f[:,27] = f[:,24] #-- nu2\n f[:,28] = 1.0 #-- M2a\n f[:,29] = f[:,24] #-- M2\n f[:,30] = 1.0 #-- M2b\n f[:,31] = 1.0 #-- lambda2\n Ltmp1 = 1.0 - 0.25*np.cos(2*p*dtr) - 0.11*np.cos((2.0*p-omega)*dtr) - 0.04*cosn\n Ltmp2 = 0.25*np.sin(2*p*dtr) + 0.11*np.sin((2.0*p-omega)*dtr) + 0.04*sinn\n f[:,32] = np.sqrt(Ltmp1**2 + Ltmp2**2) #-- L2\n f[:,33] = 1.0 #-- T2\n f[:,34] = 1.0 #-- S2\n f[:,35] = 1.0 #-- R2\n temp1 = (1.0 + 0.2852*cosn + 0.0324*cos2n)**2\n temp2 = (0.3108*sinn + 0.0324*sin2n)**2\n f[:,36] = np.sqrt(temp1 + temp2) #-- K2\n f[:,37] = np.sqrt((1.0 + 0.436*cosn)**2 + (0.436*sinn)**2) #-- eta2\n f[:,38] = f[:,29]**2 #-- MNS2\n f[:,39] = f[:,29] #-- 2SM2\n f[:,40] = 1.0 #-- M3 (wrong)\n f[:,41] = f[:,18]*f[:,29] #-- MK3\n f[:,42] = 1.0 #-- S3\n f[:,43] = f[:,29]**2 #-- MN4\n f[:,44] = f[:,43] #-- M4\n f[:,45] = f[:,43] #-- MS4\n f[:,46] = f[:,29]*f[:,36] #-- MK4\n f[:,47] = 1.0 #-- S4\n f[:,48] = 1.0 #-- S5\n f[:,49] = f[:,29]**3 #-- M6\n f[:,50] = 1.0 #-- S6\n f[:,51] = 1.0 #-- S7\n f[:,52] = 1.0 #-- S8\n #-- shallow water constituents\n f[:,53] = f[:,29]**4 #-- m8\n f[:,54] = f[:,29]*f[:,36] #-- mks2\n f[:,55] = f[:,4] #-- msqm\n f[:,56] = f[:,4] #-- mtm\n f[:,57] = f[:,29]**2 #-- n4\n f[:,58] = f[:,29] #-- eps2\n #-- mean sea level\n f[:,59] = 1.0 #-- Z0\n\n u[:,0] = 0.0 #-- Sa\n u[:,1] = 0.0 #-- Ssa\n u[:,2] = 0.0 #-- Mm\n u[:,3] = 0.0 #-- MSf\n u[:,4] = -23.7*sinn + 2.7*sin2n - 0.4*sin3n #-- Mf\n temp1 = -(0.203*sinn + 0.040*sin2n)\n temp2 = (1.0 + 0.203*cosn + 0.040*cos2n)\n u[:,5] = np.arctan(temp1/temp2)/dtr #-- Mt\n u[:,6] = 0.0 #-- alpha1\n u[:,7] = np.arctan(0.189*sinn/(1.0 + 0.189*cosn))/dtr #-- 2Q1\n u[:,8] = u[:,7] #-- sigma1\n u[:,9] = u[:,7] #-- q1\n u[:,10] = u[:,7] #-- rho1\n u[:,11] = 10.8*sinn - 1.3*sin2n + 0.2*sin3n #-- O1\n u[:,12] = 0.0 #-- tau1\n u[:,13] = np.arctan2(Mtmp2,Mtmp1)/dtr #-- M1\n u[:,14] = np.arctan(-0.221*sinn/(1.0+0.221*cosn))/dtr #-- chi1\n u[:,15] = 0.0 #-- pi1\n u[:,16] = 0.0 #-- P1\n u[:,17] = 0.0 #-- S1\n temp1 = (-0.1554*sinn + 0.0029*sin2n)\n temp2 = (1.0 + 0.1158*cosn - 0.0029*cos2n)\n u[:,18] = np.arctan(temp1/temp2)/dtr #-- K1\n u[:,19] = 0.0 #-- psi1\n u[:,20] = 0.0 #-- phi1\n u[:,21] = 0.0 #-- theta1\n u[:,22] = np.arctan(-0.227*sinn/(1.0+0.169*cosn))/dtr #-- J1\n temp1 = -(0.640*sinn + 0.134*sin2n)\n temp2 = (1.0 + 0.640*cosn + 0.134*cos2n)\n u[:,23] = np.arctan(temp1/temp2)/dtr #-- OO1\n temp1 = (-0.03731*sinn + 0.00052*sin2n)\n temp2 = (1.0 - 0.03731*cosn + 0.00052*cos2n)\n u[:,24] = np.arctan(temp1/temp2)/dtr #-- 2N2\n u[:,25] = u[:,24] #-- mu2\n u[:,26] = u[:,24] #-- N2\n u[:,27] = u[:,24] #-- nu2\n u[:,28] = 0.0 #-- M2a\n u[:,29] = u[:,24] #-- M2\n u[:,30] = 0.0 #-- M2b\n u[:,31] = 0.0 #-- lambda2\n u[:,32] = np.arctan(-Ltmp2/Ltmp1)/dtr #-- L2\n u[:,33] = 0.0 #-- T2\n 
u[:,34] = 0.0 #-- S2\n u[:,35] = 0.0 #-- R2\n temp1 = -(0.3108*sinn+0.0324*sin2n)\n temp2 = (1.0 + 0.2852*cosn + 0.0324*cos2n)\n u[:,36] = np.arctan(temp1/temp2)/dtr #-- K2\n u[:,37] = np.arctan(-0.436*sinn/(1.0 + 0.436*cosn))/dtr #-- eta2\n u[:,38] = u[:,29]*2.0 #-- MNS2\n u[:,39] = u[:,29] #-- 2SM2\n u[:,40] = 1.50*u[:,29] #-- M3\n u[:,41] = u[:,29] + u[:,18] #-- MK3\n u[:,42] = 0.0 #-- S3\n u[:,43] = 2.0*u[:,29] #-- MN4\n u[:,44] = u[:,43] #-- M4\n u[:,45] = u[:,29] #-- MS4\n u[:,46] = u[:,29] + u[:,36] #-- MK4\n u[:,47] = 0.0 #-- S4\n u[:,48] = 0.0 #-- S5\n u[:,49] = 3.0*u[:,29] #-- M6\n u[:,50] = 0.0 #-- S6\n u[:,51] = 0.0 #-- S7\n u[:,52] = 0.0 #-- S8\n #-- mean sea level\n u[:,59] = 0.0 #-- Z0\n\n elif CORRECTIONS in ('FES',):\n #-- additional astronomical terms for FES models\n II = np.arccos(0.913694997 - 0.035692561*np.cos(omega*dtr))\n at1 = np.arctan(1.01883*np.tan(omega*dtr/2.0))\n at2 = np.arctan(0.64412*np.tan(omega*dtr/2.0))\n xi = -at1 - at2 + omega*dtr\n xi[xi > np.pi] -= 2.0*np.pi\n nu = at1 - at2\n I2 = np.tan(II/2.0)\n Ra1 = np.sqrt(1.0 - 12.0*(I2**2)*np.cos(2.0*(p - xi)) + 36.0*(I2**4))\n P2 = np.sin(2.0*(p - xi))\n Q2 = 1.0/(6.0*(I2**2)) - np.cos(2.0*(p - xi))\n R = np.arctan(P2/Q2)\n P_prime = np.sin(2.0*II)*np.sin(nu)\n Q_prime = np.sin(2.0*II)*np.cos(nu) + 0.3347\n nu_prime = np.arctan(P_prime/Q_prime)\n P_sec = (np.sin(II)**2)*np.sin(2.0*nu)\n Q_sec = (np.sin(II)**2)*np.cos(2.0*nu) + 0.0727\n nu_sec = 0.5*np.arctan(P_sec/Q_sec)\n\n f[:,0] = 1.0 #-- Sa\n f[:,1] = 1.0 #-- Ssa\n f[:,2] = (2.0/3.0 - np.power(np.sin(II),2.0))/0.5021 #-- Mm\n f[:,3] = 1.0 #-- MSf\n f[:,4] = np.power(np.sin(II),2.0)/0.1578 #-- Mf\n f[:,7] = np.sin(II)*(np.cos(II/2.0)**2)/0.38 #-- 2Q1\n f[:,8] = f[:,7] #-- sigma1\n f[:,9] = f[:,7] #-- q1\n f[:,10] = f[:,7] #-- rho1\n f[:,11] = f[:,7] #-- O1\n #-- Ray's\n Mtmp1 = 1.36*np.cos(p*dtr) + 0.267*np.cos((p-omega)*dtr)\n Mtmp2 = 0.64*np.sin(p*dtr) + 0.135*np.sin((p-omega)*dtr)\n f[:,13] = np.sqrt(Mtmp1**2 + Mtmp2**2) #-- M1\n f[:,14] = np.sin(2.0*II) / 0.7214 #-- chi1\n f[:,15] = 1.0 #-- pi1\n f[:,16] = 1.0 #-- P1\n f[:,17] = 1.0 #-- S1\n temp1 = 0.8965*np.power(np.sin(2.0*II),2.0)\n temp2 = 0.6001*np.sin(2.0*II)*np.cos(nu)\n f[:,18] = np.sqrt(temp1 + temp2 + 0.1006) #-- K1\n f[:,19] = 1.0 #-- psi1\n f[:,20] = 1.0 #-- phi1\n f[:,21] = f[:,14] #-- theta1\n f[:,22] = f[:,14] #-- J1\n f[:,23] = np.sin(II)*np.power(np.sin(II/2.0),2.0)/0.01640 #-- OO1\n f[:,24] = np.power(np.cos(II/2.0),4.0)/0.9154 #-- 2N2\n f[:,25] = f[:,24] #-- mu2\n f[:,26] = f[:,24] #-- N2\n f[:,27] = f[:,24] #-- nu2\n f[:,28] = 1.0 #-- M2a\n f[:,29] = f[:,24] #-- M2\n f[:,30] = 1.0 #-- M2b\n f[:,31] = f[:,29] #-- lambda2\n f[:,32] = f[:,29]*Ra1 #-- L2\n f[:,33] = 1.0 #-- T2\n f[:,34] = 1.0 #-- S2\n f[:,35] = 1.0 #-- R2\n temp1 = 19.0444 * np.power(np.sin(II),4.0)\n temp2 = 2.7702 * np.power(np.sin(II),2.0) * np.cos(2.0*nu)\n f[:,36] = np.sqrt(temp1 + temp2 + 0.0981) #-- K2\n f[:,37] = np.power(np.sin(II),2.0)/0.1565 #-- eta2\n f[:,38] = f[:,29]**2 #-- MNS2\n f[:,39] = f[:,29] #-- 2SM2\n f[:,40] = np.power(np.cos(II/2.0), 6.0) / 0.8758 #-- M3\n f[:,41] = f[:,18]*f[:,29] #-- MK3\n f[:,42] = 1.0 #-- S3\n f[:,43] = f[:,29]**2 #-- MN4\n f[:,44] = f[:,43] #-- M4\n f[:,45] = f[:,29] #-- MS4\n f[:,46] = f[:,29]*f[:,36] #-- MK4\n f[:,47] = 1.0 #-- S4\n f[:,48] = 1.0 #-- S5\n f[:,49] = f[:,29]**3 #-- M6\n f[:,50] = 1.0 #-- S6\n f[:,51] = 1.0 #-- S7\n f[:,52] = 1.0 #-- S8\n #-- shallow water constituents\n f[:,53] = f[:,29]**4 #-- m8\n f[:,54] = f[:,29]*f[:,36] #-- mks2\n f[:,55] = f[:,4] 
#-- msqm\n f[:,56] = f[:,4] #-- mtm\n f[:,57] = f[:,29]**2 #-- n4\n f[:,58] = f[:,29] #-- eps2\n #-- mean sea level\n f[:,59] = 1.0 #-- Z0\n\n u[:,0] = 0.0 #-- Sa\n u[:,1] = 0.0 #-- Ssa\n u[:,2] = 0.0 #-- Mm\n u[:,3] = (2.0*xi - 2.0*nu)/dtr #-- MSf\n u[:,4] = -2.0*xi/dtr #-- Mf\n u[:,7] = (2.0*xi - nu)/dtr #-- 2Q1\n u[:,8] = u[:,7] #-- sigma1\n u[:,9] = u[:,7] #-- q1\n u[:,10] = u[:,7] #-- rho1\n u[:,11] = u[:,7] #-- O1\n u[:,13] = np.arctan2(Mtmp2,Mtmp1)/dtr #-- M1\n u[:,14] = -nu/dtr #-- chi1\n u[:,15] = 0.0 #-- pi1\n u[:,16] = 0.0 #-- P1\n u[:,17] = 0.0 #-- S1\n u[:,18] = -nu_prime/dtr #-- K1\n u[:,19] = 0.0 #-- psi1\n u[:,20] = 0.0 #-- phi1\n u[:,21] = -nu/dtr #-- theta1\n u[:,22] = u[:,21] #-- J1\n u[:,23] = (-2.0*xi - nu)/dtr #-- OO1\n u[:,24] = (2.0*xi - 2.0*nu)/dtr #-- 2N2\n u[:,25] = u[:,24] #-- mu2\n u[:,26] = u[:,24] #-- N2\n u[:,27] = u[:,24] #-- nu2\n u[:,29] = u[:,24] #-- M2\n u[:,31] = (2.0*xi - 2.0*nu)/dtr #-- lambda2\n u[:,32] = (2.0*xi - 2.0*nu - R)/dtr #-- L2\n u[:,33] = 0.0 #-- T2\n u[:,34] = 0.0 #-- S2\n u[:,35] = 0.0 #-- R2\n u[:,36] = -2.0*nu_sec/dtr #-- K2\n u[:,37] = -2.0*nu/dtr #-- eta2\n u[:,38] = (4.0*xi - 4.0*nu)/dtr #-- mns2\n u[:,39] = (2.0*xi - 2.0*nu)/dtr #-- 2SM2\n u[:,40] = (3.0*xi - 3.0*nu)/dtr #-- M3\n u[:,41] = (2.0*xi - 2.0*nu - 2.0*nu_prime)/dtr #-- MK3\n u[:,42] = 0.0 #-- S3\n u[:,43] = (4.0*xi - 4.0*nu)/dtr #-- MN4\n u[:,44] = (4.0*xi - 4.0*nu)/dtr #-- M4\n u[:,45] = (2.0*xi - 2.0*nu)/dtr #-- MS4\n u[:,46] = (2.0*xi - 2.0*nu - 2.0*nu_sec)/dtr #-- MK4\n u[:,47] = 0.0 #-- S4\n u[:,48] = 0.0 #-- S5\n u[:,49] = (6.0*xi - 6.0*nu)/dtr #-- M6\n u[:,50] = 0.0 #-- S6\n u[:,51] = 0.0 #-- S7\n u[:,52] = 0.0 #-- S8\n #-- shallow water constituents\n u[:,53] = (8.0*xi - 8.0*nu)/dtr #-- m8\n u[:,54] = (2.0*xi - 2.0*nu - 2.0*nu_sec)/dtr #-- mks2\n u[:,55] = u[:,4] #-- msqm\n u[:,56] = u[:,4] #-- mtm\n u[:,57] = (4.0*xi - 4.0*nu)/dtr #-- n4\n u[:,58] = u[:,29] #-- eps2\n #-- mean sea level\n u[:,59] = 0.0 #-- Z0\n\n elif CORRECTIONS in ('GOT',):\n f[:,9] = 1.009 + 0.187*cosn - 0.015*cos2n#-- Q1\n f[:,11] = f[:,9]#-- O1\n f[:,16] = 1.0 #-- P1\n f[:,17] = 1.0 #-- S1\n f[:,18] = 1.006 + 0.115*cosn - 0.009*cos2n#-- K1\n f[:,26] = 1.000 - 0.037*cosn#-- N2\n f[:,29] = f[:,26]#-- M2\n f[:,34] = 1.0 #-- S2\n f[:,36] = 1.024 + 0.286*cosn + 0.008*cos2n#-- K2\n f[:,44] = f[:,29]**2#-- M4\n\n u[:,9] = 10.8*sinn - 1.3*sin2n#-- Q1\n u[:,11] = u[:,9]#-- O1\n u[:,16] = 0.0 #-- P1\n u[:,17] = 0.0 #-- S1\n u[:,18] = -8.9*sinn + 0.7*sin2n#-- K1\n u[:,26] = -2.1*sinn#-- N2\n u[:,29] = u[:,26]#-- M2\n u[:,34] = 0.0 #-- S2\n u[:,36] = -17.7*sinn + 0.7*sin2n#-- K2\n u[:,44] = -4.2*sinn#-- M4\n\n #-- take pu,pf,G for the set of given constituents\n nc = len(constituents)\n pu = np.zeros((nt,nc))\n pf = np.zeros((nt,nc))\n G = np.zeros((nt,nc))\n for i,c in enumerate(constituents):\n #-- map between given constituents and supported in tidal program\n j, = [j for j,val in enumerate(cindex) if (val == c)]\n pu[:,i] = u[:,j]*dtr\n pf[:,i] = f[:,j]\n G[:,i] = arg[:,j]\n\n #-- return values as tuple\n return (pu,pf,G)\n", "#!/usr/bin/env python\nu\"\"\"\ntidal_ellipse.py (07/2020)\nExpresses the amplitudes and phases for the u and v components in terms of\n four ellipse parameters using Foreman's formula\n\nCALLING SEQUENCE:\n umajor,uminor,uincl,uphase = tidal_ellipse(u,v)\n\nINPUTS:\n u: zonal current (EW)\n v: meridional current (NS)\n\nOUTPUTS:\n umajor: amplitude of the semimajor semi-axis\n uminor: amplitude of the semiminor semi-axis\n uincl: angle of inclination of the northern 
semimajor semi-axis\n uphase: phase lag of the maximum current behind the maximum tidal potential\n of the individual constituent\n\nREFERENCE:\n M. G. G. Foreman and R. F. Henry, \"The harmonic analysis of tidal model time\n series\", Advances in Water Resources, 12(3), 109-120, (1989).\n https://doi.org/10.1016/0309-1708(89)90017-1\n\nUPDATE HISTORY:\n Written 07/2020\n\"\"\"\nimport numpy as np\n\ndef tidal_ellipse(u,v):\n \"\"\"\n Expresses the amplitudes and phases for the u and v components in terms of\n four ellipse parameters using Foreman's formula\n\n Arguments\n ---------\n u: zonal current (EW)\n v: meridional current (NS)\n\n Returns\n -------\n umajor: amplitude of the semimajor semi-axis\n uminor: amplitude of the semiminor semi-axis\n uincl: angle of inclination of the northern semimajor semi-axis\n uphase: phase lag of the maximum current behind the maximum tidal potential\n of the individual constituent\n \"\"\"\n #-- change to polar coordinates\n t1p = u.real - v.imag\n t2p = v.real + u.imag\n t1m = u.real + v.imag\n t2m = v.real - u.imag\n #-- ap, am: amplitudes of positively and negatively rotating vectors\n ap = np.sqrt(t1p**2 + t2p**2)/2.0\n am = np.sqrt(t1m**2 + t2m**2)/2.0\n #-- ep, em: phases of positively and negatively rotating vectors\n ep = 180.0*np.arctan2(t2p, t1p)/np.pi\n ep[ep < 0.0] += 360.0\n em = 180.0*np.arctan2(t2m, t1m)/np.pi\n em[em < 0.0] += 360.0\n #-- determine the amplitudes of the semimajor and semiminor axes\n #-- using Foreman's formula\n umajor = (ap + am)\n uminor = (ap - am)\n #-- determine the inclination and phase using Foreman's formula\n uincl = 0.5 * (em + ep)\n uincl[uincl > 180.0] -= 180.0\n uphase = -0.5*(ep - em)\n uphase[uphase < 0.0] += 360.0\n uphase[uphase >= 360.0] -= 360.0\n #-- return values\n return (umajor,uminor,uincl,uphase)\n" ]
[ [ "numpy.sqrt", "numpy.arctan", "numpy.cos", "numpy.sin", "numpy.atleast_1d", "numpy.arctan2", "numpy.tan", "numpy.zeros" ], [ "numpy.arctan2", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
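A minimal usage sketch for the two pyTMD routines above, following the calling sequences given in their docstrings. The epoch and the complex current amplitudes are made-up example values; constituent ids must come from the cindex list inside load_nodal_corrections.

import numpy as np
from pyTMD.load_nodal_corrections import load_nodal_corrections
from pyTMD.tidal_ellipse import tidal_ellipse

# MJD 58849 corresponds to 2020-01-01T00:00 UTC (an arbitrary example epoch)
pu, pf, G = load_nodal_corrections(np.array([58849.0]), ['m2', 's2'],
    CORRECTIONS='OTIS')
# pu: nodal phase corrections (radians); pf: nodal amplitude factors;
# G: equilibrium arguments (degrees); each has shape (n_times, n_constituents)

# hypothetical complex current amplitudes for a single constituent
u = np.array([0.50 + 0.10j])  # zonal (EW)
v = np.array([0.20 - 0.30j])  # meridional (NS)
umajor, uminor, uincl, uphase = tidal_ellipse(u, v)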
vishalbelsare/textwiser
[ "2c5bdd73c26bd3fb7bd2f324f57d99233aa9c17f", "2c5bdd73c26bd3fb7bd2f324f57d99233aa9c17f" ]
[ "textwiser/transformations/base.py", "textwiser/factory.py" ]
[ "# Copyright 2019 FMR LLC <[email protected]>\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nimport torch\n\nfrom textwiser.base import BaseFeaturizer\nfrom textwiser.utils import convert, OutputType\n\n\nclass _BaseTransformation(BaseFeaturizer):\n    def __init__(self, wrap_list_input=True):\n        \"\"\"Initializes a Transformation.\n\n        Subclasses must call this __init__ method.\n\n        Parameters\n        ----------\n        wrap_list_input : bool\n            If true, any list input to fit, forward, or fit_transform functions will\n            be stacked to a 2D tensor before the functions are called, and will be\n            converted back to a list before being returned.\n        \"\"\"\n\n        super(_BaseTransformation, self).__init__()\n        self.wrap_list_input = wrap_list_input\n\n    @property\n    def input_types(self):\n        return OutputType.tensor,\n\n    def _check_input(self, x):\n        if not isinstance(x, tuple(t.value for t in self.input_types)):\n            return convert(x, self.input_types[0])\n        return x\n\n    def _forward(self, x):\n        raise NotImplementedError(\"Transformations should implement the `_forward` method.\")\n\n    def fit(self, x, y=None):\n        x = self._check_input(x)\n        if self.wrap_list_input:\n            if isinstance(x, list): # happens after WordEmbedding\n                x = torch.cat(x, 0)\n        self._fit(x, y)\n\n    def _wrap_list_input(self, fn, uses_y, x, y=None):\n        sizes = None\n        if isinstance(x, list): # happens after WordEmbedding\n            if len(x) == 0:\n                return []\n            sizes = [0]\n            sizes.extend([doc.shape[0] for doc in x])\n            x = torch.cat(x, 0)\n        vec = fn(x, y) if uses_y else fn(x)\n        if sizes:\n            cs = np.cumsum(sizes)\n            vec = [vec[cs[i]:cs[i + 1], :] for i in range(cs.shape[0] - 1)]\n        return vec\n\n    def forward(self, x):\n        x = self._check_input(x)\n        return self._wrap_list_input(self._forward, False, x) if self.wrap_list_input else self._forward(x)\n\n    def fit_transform(self, x, y=None):\n        x = self._check_input(x)\n        return self._wrap_list_input(self._fit_transform, True, x, y) if self.wrap_list_input else self._fit_transform(x, y)\n\n    def _fit_transform(self, x, y=None):\n        x = self._check_input(x)\n        self._fit(x, y)\n        return self._forward(x)\n", "# Copyright 2019 FMR LLC <[email protected]>\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\nfrom scipy import sparse\nimport torch\nimport torch.nn as nn\nfrom typing import Union\n\nfrom textwiser.base import BaseFeaturizer\nfrom textwiser.embeddings import (\n    _BOWEmbeddings,\n    _CompoundEmbeddings,\n    _Doc2VecEmbeddings,\n    _RandomEmbeddings,\n    _TfIdfEmbeddings,\n    _USEEmbeddings,\n    _WordEmbeddings,\n)\nfrom textwiser.transformations import _LDATransformation, _NMFTransformation, _PoolTransformation, _SVDTransformation, _UMAPTransformation\nfrom textwiser.options import _ArgBase, Embedding, WordOptions, Transformation, PoolOptions, Embedding_Type, Transformation_Type\nfrom textwiser.utils import convert, OutputType\n\n\nclass _Concat(BaseFeaturizer):\n    def __init__(self, embeddings):\n        super(_Concat, self).__init__()\n        self.embeddings = nn.ModuleList(embeddings)\n\n    def fit(self, x, y=None):\n        [embedding.fit(x, y) for embedding in self.embeddings]\n\n    def forward(self, x):\n        embeds = [embedding(x) for embedding in self.embeddings]\n        is_list = isinstance(embeds[0], list) # happens for word embeddings before pooling\n        types = set([OutputType.from_object(embed[0]) if is_list else OutputType.from_object(embed) for embed in embeds])\n        if OutputType.tensor in types: # need to convert everything to torch\n            embeds = convert(embeds, OutputType.tensor)\n            cat_fn = _Concat._tensor_concat\n        elif len(types) == 2: # both numpy and 
sparse\n embeds = convert(embeds, OutputType.array)\n cat_fn = _Concat._array_concat\n elif OutputType.array in types: # only numpy\n cat_fn = _Concat._array_concat\n else: # only sparse\n cat_fn = _Concat._sparse_concat\n if is_list:\n return [cat_fn(embed) for embed in zip(*embeds)]\n return cat_fn(embeds)\n\n @staticmethod\n def _tensor_concat(x):\n return torch.cat(x, -1)\n\n @staticmethod\n def _array_concat(x):\n return np.concatenate(x, -1)\n\n @staticmethod\n def _sparse_concat(x):\n return sparse.hstack(x)\n\n\nclass _Sequential(BaseFeaturizer, nn.Sequential):\n def fit(self, x, y=None):\n if len(self) == 1 or (len(self) == 2 and isinstance(self[1], _PoolTransformation)):\n # If there's only one model, it is an embedding, and we can just fit\n # If there's more than one, we have to do fit transforms. The exception is\n # when the only transformation is pooling, which is nonparametric and doesn't need to fit.\n self[0].fit(x, y)\n else:\n for model in self[:-1]:\n x = model.fit_transform(x, y)\n # No need for transform for the last model\n self[-1].fit(x, y)\n\n def fit_transform(self, x, y=None):\n for model in self:\n x = model.fit_transform(x, y)\n return x\n\n def forward(self, x):\n return nn.Sequential.forward(self, x)\n\n\nModelType = Union[Embedding_Type, Transformation_Type, str]\n\n\nfactory = {\n Embedding.Compound: _CompoundEmbeddings,\n Embedding.BOW: _BOWEmbeddings,\n Embedding.Doc2Vec: _Doc2VecEmbeddings,\n Embedding.Random: _RandomEmbeddings,\n Embedding.TfIdf: _TfIdfEmbeddings,\n Embedding.USE: _USEEmbeddings,\n Embedding.Word: _WordEmbeddings,\n Transformation.LDA: _LDATransformation,\n Transformation.NMF: _NMFTransformation,\n Transformation.Pool: _PoolTransformation,\n Transformation.SVD: _SVDTransformation,\n Transformation.UMAP: _UMAPTransformation,\n}\n\n\ndef _get_and_init_doc_embeddings(model: ModelType, params):\n \"\"\"Initializes a single document embedding object with the given params.\n Note that different models have different params; check Flair documentation\n for an up to date list. 
https://github.com/zalandoresearch/flair/blob/master/flair/embeddings.py\n \"\"\"\n def to_word_option(model):\n if isinstance(model, str):\n if model in WordOptions.__members__:\n return WordOptions[model]\n elif isinstance(model, WordOptions):\n return model\n else:\n raise ValueError(\"The specified word option %s is not supported.\" % model)\n\n if 'word_option' in params:\n params['word_option'] = to_word_option(params['word_option'])\n\n if 'pool_option' in params and params['pool_option'] in PoolOptions.__members__:\n params['pool_option'] = PoolOptions[params['pool_option']]\n\n if 'inline_pool_option' in params and params['inline_pool_option'] in PoolOptions.__members__:\n params['inline_pool_option'] = PoolOptions[params['inline_pool_option']]\n\n # string to Embedding or Transformation conversion\n if isinstance(model, str):\n if model in WordOptions.__members__: # is a WordOption\n model = Embedding.Word(word_option=to_word_option(model), **params)\n else: # is a supported transformation or embedding\n model = _ArgBase.from_string(model, params)\n\n # model is now a _ArgBase type\n params = {**model._get_attrs(), **model.kwargs}\n # word or document level embedding\n return factory.get(model.__class__)(**params)\n\n\ndef get_standalone_document_embeddings(embedding):\n\n if isinstance(embedding, str): # embedding type, like \"tfidf\"\n return _get_and_init_doc_embeddings(embedding, dict())\n\n if isinstance(embedding, (tuple, list)): # type with arguments, like ('tfidf', { 'min_df': 3 }):\n return _get_and_init_doc_embeddings(embedding[0], embedding[1])\n\n if isinstance(embedding, _ArgBase): # Embedding or Transformation type\n return _get_and_init_doc_embeddings(embedding, dict())\n\n\ndef get_document_embeddings(embeddings):\n\n if isinstance(embeddings, (str, tuple, list, _ArgBase)):\n return get_standalone_document_embeddings(embeddings)\n\n if 'transform' in embeddings:\n transforms = embeddings['transform']\n return _Sequential(get_document_embeddings(transforms[0]),\n *[get_document_embeddings(transformation) for transformation in transforms[1:]])\n\n if 'concat' in embeddings:\n concats = embeddings['concat']\n return _Concat([get_document_embeddings(embedding) for embedding in concats])\n" ]
[ [ "numpy.cumsum", "torch.cat" ], [ "torch.cat", "torch.nn.ModuleList", "numpy.concatenate", "torch.nn.Sequential.forward", "scipy.sparse.hstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
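The list handling in _BaseTransformation._wrap_list_input above is worth seeing in isolation: per-document word embeddings of different lengths are stacked into one 2D tensor for the wrapped transformation, then split back per document using a cumulative-sum index. A self-contained sketch with hypothetical shapes; the doubling stands in for the actual transformation fn(x).

import numpy as np
import torch

docs = [torch.rand(3, 4), torch.rand(5, 4)]  # (n_words, dim) per document
x = torch.cat(docs, 0)                       # stacked to (8, 4)
vec = x * 2.0                                # stand-in for fn(x)
sizes = [0] + [doc.shape[0] for doc in docs]
cs = np.cumsum(sizes)                        # [0, 3, 8]
# split back into per-document tensors, exactly as _wrap_list_input does
out = [vec[cs[i]:cs[i + 1], :] for i in range(cs.shape[0] - 1)]
assert [tuple(o.shape) for o in out] == [(3, 4), (5, 4)]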
jonrbates/ignite
[ "15eeb8791a2e0c2f55265e1f6b91f91dc35286c5", "15eeb8791a2e0c2f55265e1f6b91f91dc35286c5" ]
[ "tests/ignite/contrib/handlers/test_param_scheduler.py", "ignite/contrib/metrics/regression/geometric_mean_absolute_error.py" ]
[ "import numpy as np\nimport pytest\nimport torch\n\nfrom ignite.contrib.handlers.param_scheduler import (\n ConcatScheduler,\n CosineAnnealingScheduler,\n LinearCyclicalScheduler,\n LRScheduler,\n ParamGroupScheduler,\n PiecewiseLinear,\n create_lr_scheduler_with_warmup,\n)\nfrom ignite.engine import Engine, Events\n\n\ndef test_linear_scheduler():\n\n with pytest.raises(TypeError, match=r\"Argument optimizer should be torch.optim.Optimizer\"):\n LinearCyclicalScheduler({}, \"lr\", 1, 0, cycle_size=0)\n\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0.0)\n\n with pytest.raises(ValueError, match=r\"Argument cycle_size should be positive and larger than 1\"):\n LinearCyclicalScheduler(optimizer, \"lr\", 1, 0, cycle_size=0)\n\n with pytest.raises(ValueError, match=r\"Argument cycle_size should be positive and larger than 1\"):\n LinearCyclicalScheduler(optimizer, \"lr\", 1, 0, cycle_size=1)\n\n scheduler = LinearCyclicalScheduler(optimizer, \"lr\", 1, 0, 10)\n state_dict = scheduler.state_dict()\n\n def save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n trainer = Engine(lambda engine, batch: None)\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)\n\n for _ in range(2):\n lrs = []\n trainer.run([0] * 9, max_epochs=2)\n\n assert lrs == list(\n map(\n pytest.approx,\n [\n # Cycle 1\n 1.0,\n 0.8,\n 0.6,\n 0.4,\n 0.2,\n 0.0,\n 0.2,\n 0.4,\n 0.6,\n 0.8,\n # Cycle 2\n 1.0,\n 0.8,\n 0.6,\n 0.4,\n 0.2,\n 0.0,\n 0.2,\n 0.4, # 0.6, 0.8,\n ],\n )\n )\n scheduler.load_state_dict(state_dict)\n\n optimizer = torch.optim.SGD([tensor], lr=0)\n scheduler = LinearCyclicalScheduler(optimizer, \"lr\", 1, 0, 10, cycle_mult=2)\n state_dict = scheduler.state_dict()\n\n trainer = Engine(lambda engine, batch: None)\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)\n\n for _ in range(2):\n lrs = []\n trainer.run([0] * 10, max_epochs=3)\n\n assert lrs == list(\n map(\n pytest.approx,\n [\n # Cycle 1\n 1.0,\n 0.8,\n 0.6,\n 0.4,\n 0.2,\n 0.0,\n 0.2,\n 0.4,\n 0.6,\n 0.8,\n # Cycle 2\n 1.0,\n 0.9,\n 0.8,\n 0.7,\n 0.6,\n 0.5,\n 0.4,\n 0.3,\n 0.2,\n 0.1,\n 0.0,\n 0.1,\n 0.2,\n 0.3,\n 0.4,\n 0.5,\n 0.6,\n 0.7,\n 0.8,\n 0.9,\n ],\n )\n )\n scheduler.load_state_dict(state_dict)\n\n # With float cycle_size\n optimizer = torch.optim.SGD([tensor], lr=0)\n scheduler = LinearCyclicalScheduler(\n optimizer, \"lr\", start_value=1.2, end_value=0.2, cycle_size=10.00000012, cycle_mult=1.0\n )\n state_dict = scheduler.state_dict()\n\n trainer = Engine(lambda engine, batch: None)\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)\n\n for _ in range(2):\n lrs = []\n trainer.run([0] * 9, max_epochs=2)\n assert lrs == list(\n map(\n pytest.approx,\n [\n # Cycle 1\n 1.2,\n 1.0,\n 0.8,\n 0.6,\n 0.4,\n 0.2,\n 0.4,\n 0.6,\n 0.8,\n 1.0,\n # Cycle 2\n 1.2,\n 1.0,\n 0.8,\n 0.6,\n 0.4,\n 0.2,\n 0.4,\n 0.6, # 0.8, 1.0,\n ],\n )\n )\n scheduler.load_state_dict(state_dict)\n\n\ndef test_linear_scheduler_cycle_size_two():\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0)\n\n scheduler = LinearCyclicalScheduler(optimizer, \"lr\", 1, 0, cycle_size=2)\n\n data = [0] * 10\n max_epochs = 2\n simulated_values = LinearCyclicalScheduler.simulate_values(\n num_events=len(data) * max_epochs, param_name=\"lr\", start_value=1, 
end_value=0, cycle_size=2\n )\n\n def save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n trainer = Engine(lambda engine, batch: None)\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)\n\n lrs = []\n trainer.run(data, max_epochs=max_epochs)\n assert lrs == list(\n map(\n pytest.approx,\n [1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],\n )\n )\n\n assert lrs == pytest.approx([v for i, v in simulated_values])\n\n\ndef test_cosine_annealing_scheduler():\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0)\n\n scheduler = CosineAnnealingScheduler(optimizer, \"lr\", 0, 1, 10)\n state_dict = scheduler.state_dict()\n\n data = [0] * 9\n max_epochs = 2\n simulated_values = CosineAnnealingScheduler.simulate_values(\n num_events=len(data) * max_epochs, param_name=\"lr\", start_value=0, end_value=1, cycle_size=10\n )\n\n def save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n trainer = Engine(lambda engine, batch: None)\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)\n\n for _ in range(2):\n lrs = []\n trainer.run(data, max_epochs=max_epochs)\n\n assert lrs == list(\n map(\n pytest.approx,\n [\n 0.0,\n 0.02447174185242318,\n 0.09549150281252627,\n 0.20610737385376332,\n 0.3454915028125263,\n 0.5,\n 0.6545084971874737,\n 0.7938926261462365,\n 0.9045084971874737,\n 0.9755282581475768,\n 0.0,\n 0.02447174185242318,\n 0.09549150281252627,\n 0.20610737385376332,\n 0.3454915028125263,\n 0.5,\n 0.6545084971874737,\n 0.7938926261462365, # 0.9045084971874737, 0.9755282581475768\n ],\n )\n )\n scheduler.load_state_dict(state_dict)\n\n assert lrs == pytest.approx([v for i, v in simulated_values])\n\n\ndef test_concat_scheduler_asserts():\n\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0)\n\n scheduler_1 = LinearCyclicalScheduler(optimizer, \"lr\", start_value=1.0, end_value=0.0, cycle_size=10)\n scheduler_2 = CosineAnnealingScheduler(optimizer, \"lr\", start_value=0.0, end_value=1.0, cycle_size=10)\n\n with pytest.raises(ValueError):\n ConcatScheduler(schedulers=[], durations=[])\n\n with pytest.raises(ValueError):\n ConcatScheduler(schedulers=[scheduler_1], durations=[10])\n\n with pytest.raises(TypeError):\n ConcatScheduler(schedulers=[scheduler_1, 12], durations=[10])\n\n with pytest.raises(ValueError):\n ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[10, 5])\n\n with pytest.raises(ValueError):\n ConcatScheduler(schedulers=[scheduler_1, scheduler_2, scheduler_2], durations=[15, 12.0])\n\n with pytest.raises(ValueError):\n ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=\"abc\")\n\n with pytest.raises(ValueError):\n ConcatScheduler.simulate_values(\n num_events=123, schedulers=[scheduler_1, scheduler_2], durations=[15], param_names=\"abc\"\n )\n\n\ndef test_concat_scheduler_state_dict():\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0)\n scheduler_1 = LinearCyclicalScheduler(optimizer, \"lr\", start_value=1.0, end_value=0.0, cycle_size=10)\n scheduler_2 = CosineAnnealingScheduler(optimizer, \"lr\", start_value=0.0, end_value=1.0, cycle_size=10)\n durations = [10]\n concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=False)\n state_dict = 
concat_scheduler.state_dict()\n\n assert state_dict[\"durations\"] == durations\n assert state_dict[\"_current_duration\"] == durations[0]\n assert state_dict[\"_scheduler_index\"] == 0\n\n for _ in range(20):\n concat_scheduler(None, None)\n\n concat_scheduler.load_state_dict(state_dict)\n assert concat_scheduler.durations == durations\n assert concat_scheduler._current_duration == durations[0]\n assert id(concat_scheduler._current_scheduler) == id(scheduler_1)\n\n with pytest.raises(ValueError, match=r\"Required state attribute 'schedulers' is absent in provided state_dict\"):\n concat_scheduler.load_state_dict({\"a\": 1})\n\n with pytest.raises(ValueError, match=r\"Input state_dict contains 0 state_dicts of concatenated schedulers\"):\n concat_scheduler.load_state_dict({\"schedulers\": []})\n\n\ndef test_concat_scheduler_two_schedulers():\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0)\n\n def _test(duration_vals_as_np_int):\n scheduler_1 = LinearCyclicalScheduler(optimizer, \"lr\", start_value=1.0, end_value=0.0, cycle_size=10)\n scheduler_2 = CosineAnnealingScheduler(optimizer, \"lr\", start_value=0.0, end_value=1.0, cycle_size=10)\n\n durations = [10]\n if duration_vals_as_np_int:\n durations = [np.int64(t) for t in durations]\n\n concat_scheduler = ConcatScheduler(\n schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=True\n )\n state_dict = concat_scheduler.state_dict()\n\n data = [0] * 10\n max_epochs = 2\n simulated_values = ConcatScheduler.simulate_values(\n num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2], durations=durations\n )\n\n def save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n trainer = Engine(lambda engine, batch: None)\n trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)\n\n for _ in range(2):\n lrs = []\n trainer.run(data, max_epochs=max_epochs)\n\n assert lrs == list(\n map(\n pytest.approx,\n [\n # Cycle 1 of the LinearCyclicalScheduler\n 1.0,\n 0.8,\n 0.6,\n 0.4,\n 0.2,\n 0.0,\n 0.2,\n 0.4,\n 0.6,\n 0.8,\n # Cycle 1 of the CosineAnnealingScheduler\n 0.0,\n 0.02447174185242318,\n 0.09549150281252627,\n 0.20610737385376332,\n 0.3454915028125263,\n 0.5,\n 0.6545084971874737,\n 0.7938926261462365,\n 0.9045084971874737,\n 0.9755282581475768,\n ],\n )\n )\n\n state_lrs = trainer.state.param_history[\"lr\"]\n assert len(state_lrs) == len(lrs)\n # Unpack singleton lists\n assert [group[0] for group in state_lrs] == lrs\n assert lrs == pytest.approx([v for i, v in simulated_values])\n concat_scheduler.load_state_dict(state_dict)\n\n _test(duration_vals_as_np_int=False)\n _test(duration_vals_as_np_int=True)\n\n\ndef test_concat_scheduler_two_linear():\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0)\n\n scheduler_1 = LinearCyclicalScheduler(optimizer, \"lr\", start_value=0.0, end_value=0.1, cycle_size=2)\n scheduler_2 = LinearCyclicalScheduler(optimizer, \"lr\", start_value=0.2, end_value=1.0, cycle_size=2)\n\n durations = [5]\n concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=True)\n state_dict = concat_scheduler.state_dict()\n\n assert concat_scheduler.get_param() == 0.0\n\n data = [0] * 10\n max_epochs = 2\n simulated_values = ConcatScheduler.simulate_values(\n num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2], durations=durations\n )\n\n def 
save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n trainer = Engine(lambda engine, batch: None)\n trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)\n\n for _ in range(2):\n lrs = []\n trainer.run(data, max_epochs=max_epochs)\n\n assert lrs == list(\n map(\n pytest.approx,\n [\n # first LinearCyclicalScheduler\n 0.0,\n 0.1,\n 0.0,\n 0.1,\n 0.0,\n # second LinearCyclicalScheduler\n 0.2,\n 1.0,\n 0.2,\n 1.0,\n 0.2,\n 1.0,\n 0.2,\n 1.0,\n 0.2,\n 1.0,\n 0.2,\n 1.0,\n 0.2,\n 1.0,\n 0.2,\n ],\n )\n )\n\n state_lrs = trainer.state.param_history[\"lr\"]\n assert len(state_lrs) == len(lrs)\n # Unpack singleton lists\n assert [group[0] for group in state_lrs] == lrs\n\n assert lrs == pytest.approx([v for i, v in simulated_values])\n concat_scheduler.load_state_dict(state_dict)\n\n\ndef test_concat_scheduler_3_schedulers():\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0)\n\n scheduler_1 = LinearCyclicalScheduler(optimizer, \"lr\", start_value=1.0, end_value=0.5, cycle_size=20)\n scheduler_2 = LinearCyclicalScheduler(optimizer, \"lr\", start_value=0.5, end_value=0.45, cycle_size=10)\n scheduler_3 = LinearCyclicalScheduler(optimizer, \"lr\", start_value=0.5, end_value=0.0, cycle_size=20)\n durations = [10, 5]\n\n concat_scheduler = ConcatScheduler(\n schedulers=[scheduler_1, scheduler_2, scheduler_3], durations=durations, save_history=True\n )\n state_dict = concat_scheduler.state_dict()\n\n data = [0] * 10\n max_epochs = 2\n simulated_values = ConcatScheduler.simulate_values(\n num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2, scheduler_3], durations=durations\n )\n\n def save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n trainer = Engine(lambda engine, batch: None)\n trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)\n\n for _ in range(2):\n lrs = []\n trainer.run(data, max_epochs=max_epochs)\n\n assert lrs == list(\n map(\n pytest.approx,\n [\n # Cycle 1 of the first LinearCyclicalScheduler\n 1.0,\n 0.95,\n 0.9,\n 0.85,\n 0.8,\n 0.75,\n 0.7,\n 0.65,\n 0.6,\n 0.55,\n # Cycle 1 of the second LinearCyclicalScheduler\n 0.5,\n 0.49,\n 0.48,\n 0.47,\n 0.46,\n # Cycle 1 of the third LinearCyclicalScheduler\n 0.5,\n 0.45,\n 0.4,\n 0.35,\n 0.3,\n ],\n )\n )\n\n state_lrs = trainer.state.param_history[\"lr\"]\n assert len(state_lrs) == len(lrs)\n # Unpack singleton lists\n assert [group[0] for group in state_lrs] == lrs\n\n assert lrs == pytest.approx([v for i, v in simulated_values])\n concat_scheduler.load_state_dict(state_dict)\n\n\ndef test_save_param_history():\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0)\n\n scheduler = LinearCyclicalScheduler(optimizer, \"lr\", 1, 0, 10, save_history=True)\n lrs = []\n\n def save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n trainer = Engine(lambda engine, batch: None)\n assert not hasattr(trainer.state, \"param_history\")\n\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)\n trainer.run([0] * 10, max_epochs=2)\n\n state_lrs = trainer.state.param_history[\"lr\"]\n assert len(state_lrs) == len(lrs)\n # Unpack singleton lists\n assert [group[0] for group in state_lrs] == lrs\n\n\ndef test_lr_scheduler_asserts():\n\n t1 = torch.zeros([1], requires_grad=True)\n 
t2 = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([{\"params\": t1, \"lr\": 0.1}, {\"params\": t2, \"lr\": 0.1}])\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)\n\n with pytest.raises(ValueError):\n LRScheduler(lr_scheduler)\n\n with pytest.raises(ValueError):\n LRScheduler.simulate_values(num_events=100, lr_scheduler=lr_scheduler)\n\n with pytest.raises(TypeError):\n LRScheduler(123)\n\n\ndef test_lr_scheduler():\n def _test(torch_lr_scheduler_cls, **kwargs):\n\n tensor = torch.zeros([1], requires_grad=True)\n optimizer1 = torch.optim.SGD([tensor], lr=0.01)\n optimizer2 = torch.optim.SGD([tensor], lr=0.01)\n opt_state_dict1 = optimizer1.state_dict()\n opt_state_dict2 = optimizer2.state_dict()\n\n torch_lr_scheduler1 = torch_lr_scheduler_cls(optimizer=optimizer1, **kwargs)\n scheduler = LRScheduler(torch_lr_scheduler1)\n state_dict1 = scheduler.state_dict()\n\n torch_lr_scheduler2 = torch_lr_scheduler_cls(optimizer=optimizer2, **kwargs)\n state_dict2 = torch_lr_scheduler2.state_dict()\n\n def dummy_update(engine, batch):\n optimizer1.step()\n optimizer2.step()\n\n trainer = Engine(dummy_update)\n\n @trainer.on(Events.ITERATION_STARTED)\n def save_lr(engine):\n lrs.append(optimizer1.param_groups[0][\"lr\"])\n\n @trainer.on(Events.ITERATION_STARTED)\n def save_true_lr(engine):\n lrs_true.append(optimizer2.param_groups[0][\"lr\"])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def torch_lr_scheduler_step(engine):\n torch_lr_scheduler2.step()\n\n trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)\n\n for _ in range(2):\n lrs = []\n lrs_true = []\n data = [0] * 10\n max_epochs = 2\n trainer.run(data, max_epochs=max_epochs)\n assert lrs_true == pytest.approx(lrs), \"{}: {} ({}) vs {} ({})\".format(\n _, lrs_true, len(lrs_true), lrs, len(lrs)\n )\n optimizer1.load_state_dict(opt_state_dict1)\n scheduler.load_state_dict(state_dict1)\n optimizer2.load_state_dict(opt_state_dict2)\n torch_lr_scheduler2.load_state_dict(state_dict2)\n\n optimizer3 = torch.optim.SGD([tensor], lr=0.01)\n torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs)\n\n simulated_values = LRScheduler.simulate_values(\n num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler3\n )\n assert lrs == pytest.approx([v for i, v in simulated_values])\n\n _test(torch.optim.lr_scheduler.StepLR, step_size=5, gamma=0.5)\n _test(torch.optim.lr_scheduler.ExponentialLR, gamma=0.78)\n\n # test _replicate_lr_scheduler\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0.01)\n lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.78)\n init_lr_scheduler_state = dict(lr_scheduler.state_dict())\n copy_lr_scheduler = LRScheduler._replicate_lr_scheduler(lr_scheduler)\n for _ in range(10):\n optimizer.step()\n lr_scheduler.step()\n\n assert copy_lr_scheduler.state_dict() == init_lr_scheduler_state\n\n with pytest.raises(TypeError):\n LRScheduler._replicate_lr_scheduler(12)\n\n\ndef test_piecewiselinear_asserts():\n\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0)\n\n with pytest.raises(ValueError):\n PiecewiseLinear(optimizer, \"lr\", milestones_values=[])\n\n with pytest.raises(ValueError):\n PiecewiseLinear(optimizer, \"lr\", milestones_values=[(0.5,)])\n\n with pytest.raises(ValueError):\n PiecewiseLinear(optimizer, \"lr\", milestones_values=[(10, 0.5), (0.6,)])\n\n with pytest.raises(ValueError):\n PiecewiseLinear(optimizer, 
\"lr\", milestones_values=[(10, 0.5), (5, 0.6)])\n\n\ndef test_piecewiselinear():\n def _test(milestones_as_np_int):\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0)\n\n milestones_values = [(5, 0.5), (15, 1.0), (25, 0.0), (35, 1.0), (40, 0.5)]\n if milestones_as_np_int:\n milestones_values = [(np.int64(t), v) for t, v in milestones_values]\n\n scheduler = PiecewiseLinear(optimizer, \"lr\", milestones_values=milestones_values)\n state_dict = scheduler.state_dict()\n\n def save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n trainer = Engine(lambda engine, batch: None)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)\n\n for _ in range(2):\n lrs = []\n trainer.run([0] * 25, max_epochs=2)\n\n assert lrs == list(\n map(\n pytest.approx,\n [\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.55,\n 0.6,\n 0.65,\n 0.7,\n 0.75,\n 0.8,\n 0.85,\n 0.9,\n 0.95,\n 1.0,\n 0.9,\n 0.8,\n 0.7,\n 0.6,\n 0.5,\n 0.4,\n 0.3,\n 0.2,\n 0.1,\n 0.0,\n 0.1,\n 0.2,\n 0.3,\n 0.4,\n 0.5,\n 0.6,\n 0.7,\n 0.8,\n 0.9,\n 1.0,\n 0.9,\n 0.8,\n 0.7,\n 0.6,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n 0.5,\n ],\n )\n )\n scheduler.load_state_dict(state_dict)\n\n _test(milestones_as_np_int=True)\n _test(milestones_as_np_int=False)\n\n\ndef test_simulate_and_plot_values():\n\n import matplotlib\n\n matplotlib.use(\"Agg\")\n\n def _test(scheduler_cls, **scheduler_kwargs):\n\n optimizer = None\n event = Events.ITERATION_STARTED\n if scheduler_cls == LRScheduler:\n scheduler_kwargs[\"optimizer\"] = scheduler_kwargs[\"lr_scheduler\"].optimizer\n optimizer = scheduler_kwargs[\"optimizer\"]\n event = Events.ITERATION_COMPLETED\n elif scheduler_cls == ConcatScheduler:\n optimizer = scheduler_kwargs[\"optimizer\"]\n del scheduler_kwargs[\"optimizer\"]\n else:\n tensor = torch.zeros([1], requires_grad=True)\n scheduler_kwargs[\"optimizer\"] = torch.optim.SGD([tensor], lr=0.1)\n optimizer = scheduler_kwargs[\"optimizer\"]\n\n max_epochs = 2\n data = [0] * 10\n # simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs, **scheduler_kwargs)\n\n scheduler = scheduler_cls(**scheduler_kwargs)\n\n lrs = []\n\n def save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n trainer = Engine(lambda engine, batch: None)\n trainer.add_event_handler(event, scheduler)\n trainer.add_event_handler(Events.ITERATION_STARTED, save_lr)\n trainer.run(data, max_epochs=max_epochs)\n\n # assert lrs == pytest.approx([v for i, v in simulated_values])\n\n if scheduler_cls == LRScheduler or scheduler_cls == ConcatScheduler:\n # As internal state of torch lr scheduler has been changed the following checks will fail\n return\n\n # reexecute to check if no internal changes\n # simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs,\n # save_history=True, # this will be removed\n # **scheduler_kwargs)\n # assert lrs == pytest.approx([v for i, v in simulated_values])\n\n # launch plot values\n scheduler_cls.plot_values(num_events=len(data) * max_epochs, **scheduler_kwargs)\n\n # LinearCyclicalScheduler\n _test(LinearCyclicalScheduler, param_name=\"lr\", start_value=1.0, end_value=0.0, cycle_size=10)\n\n # CosineAnnealingScheduler\n _test(CosineAnnealingScheduler, param_name=\"lr\", start_value=1.0, end_value=0.0, cycle_size=10)\n\n # LRScheduler\n tensor = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0.1)\n 
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.5)\n\n _test(LRScheduler, lr_scheduler=torch_lr_scheduler)\n\n # ConcatScheduler = [LinearCyclicalScheduler, CosineAnnealingScheduler]\n scheduler_1 = LinearCyclicalScheduler(optimizer, \"lr\", start_value=1.0, end_value=0.0, cycle_size=20)\n scheduler_2 = CosineAnnealingScheduler(optimizer, \"lr\", start_value=0.0, end_value=1.0, cycle_size=10)\n durations = [10]\n _test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations)\n\n # ConcatScheduler = [LinearCyclicalScheduler, LRScheduler]\n tensor = torch.ones([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0.001)\n torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=1.5)\n scheduler_1 = LRScheduler(torch_lr_scheduler)\n scheduler_2 = LinearCyclicalScheduler(optimizer, \"lr\", start_value=0.1, end_value=0.0, cycle_size=10)\n durations = [10]\n _test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations)\n\n # PiecewiseLinear\n tensor = torch.ones([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0.001)\n _test(\n PiecewiseLinear,\n optimizer=optimizer,\n param_name=\"lr\",\n milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)],\n )\n\n\ndef test_create_lr_scheduler_with_warmup():\n\n with pytest.raises(TypeError, match=r\"Argument lr_scheduler should be a subclass of\"):\n create_lr_scheduler_with_warmup(12, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=10)\n\n t1 = torch.zeros([1], requires_grad=True)\n # A) opt lr != warmup_end_value\n optimizer = torch.optim.SGD([t1], lr=0.2)\n torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)\n\n with pytest.raises(ValueError, match=r\"Argument warmup_duration should be at least 2 events\"):\n create_lr_scheduler_with_warmup(\n torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=1\n )\n\n with pytest.raises(ValueError, match=r\"Argument warmup_duration should be at least 2 events\"):\n create_lr_scheduler_with_warmup(\n torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=\"abc\"\n )\n\n with pytest.raises(TypeError, match=r\"Argument output_simulated_values should be a list of None\"):\n simulated_values = ()\n create_lr_scheduler_with_warmup(\n torch_lr_scheduler,\n warmup_start_value=0.0,\n warmup_end_value=0.1,\n warmup_duration=10,\n output_simulated_values=simulated_values,\n )\n\n def _test(lr_scheduler, optimizer, warmup_start_value, warmup_end_value, warmup_duration, warmup_end_next_value):\n num_iterations = 10\n max_epochs = 20\n\n simulated_values = [None] * (num_iterations * max_epochs)\n scheduler = create_lr_scheduler_with_warmup(\n lr_scheduler,\n warmup_start_value=warmup_start_value,\n warmup_end_value=warmup_end_value,\n warmup_duration=warmup_duration,\n output_simulated_values=simulated_values,\n )\n if warmup_end_value is None:\n warmup_end_value = optimizer.param_groups[0][\"lr\"]\n\n state_dict = scheduler.state_dict()\n trainer = Engine(lambda engine, batch: None)\n\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n\n @trainer.on(Events.ITERATION_STARTED)\n def save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n data = [0] * num_iterations\n\n for _ in range(2):\n lrs = []\n trainer.run(data, max_epochs=max_epochs)\n\n assert lrs == pytest.approx([v for i, v in 
simulated_values])\n\n assert lrs[0] == pytest.approx(warmup_start_value), \"lrs={}\".format(lrs[: warmup_duration + num_iterations])\n assert lrs[warmup_duration - 1] == pytest.approx(warmup_end_value), \"lrs={}\".format(\n lrs[: warmup_duration + num_iterations]\n )\n assert lrs[warmup_duration] == pytest.approx(warmup_end_next_value), \"lrs={}\".format(\n lrs[: warmup_duration + num_iterations]\n )\n scheduler.load_state_dict(state_dict)\n\n t1 = torch.zeros([1], requires_grad=True)\n # A) opt lr != warmup_end_value\n optimizer = torch.optim.SGD([t1], lr=0.2)\n torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)\n _test(torch_lr_scheduler, optimizer, 0.01, 0.05, 10, 0.2)\n optimizer = torch.optim.SGD([t1], lr=0.2)\n torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)\n _test(torch_lr_scheduler, optimizer, 0.01, 0.05, 2, 0.2)\n\n # B) opt lr == warmup_end_value\n optimizer = torch.optim.SGD([t1], lr=0.2)\n torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)\n _test(torch_lr_scheduler, optimizer, 0.01, 0.2, 10, 0.2 * 0.98)\n optimizer = torch.optim.SGD([t1], lr=0.2)\n torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)\n _test(torch_lr_scheduler, optimizer, 0.01, 0.2, 2, 0.2 * 0.98)\n\n # C) lr_scheduler start_value != warmup_end_value\n t1 = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([t1], lr=0.0)\n lr_scheduler = LinearCyclicalScheduler(\n optimizer=optimizer, param_name=\"lr\", start_value=0.8, end_value=0.0, cycle_size=10\n )\n _test(lr_scheduler, optimizer, 0.01, 0.05, 10, 0.8)\n optimizer = torch.optim.SGD([t1], lr=0.0)\n lr_scheduler = LinearCyclicalScheduler(\n optimizer=optimizer, param_name=\"lr\", start_value=0.8, end_value=0.0, cycle_size=10\n )\n _test(lr_scheduler, optimizer, 0.01, 0.05, 2, 0.8)\n\n # D) lr_scheduler start_value == warmup_end_value\n t1 = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([t1], lr=0.0)\n lr_scheduler = LinearCyclicalScheduler(\n optimizer=optimizer, param_name=\"lr\", start_value=0.8, end_value=0.0, cycle_size=10\n )\n _test(lr_scheduler, optimizer, 0.01, 0.8, 10, 0.8 - (0.8 / 5.0))\n optimizer = torch.optim.SGD([t1], lr=0.0)\n lr_scheduler = LinearCyclicalScheduler(\n optimizer=optimizer, param_name=\"lr\", start_value=0.8, end_value=0.0, cycle_size=10\n )\n _test(lr_scheduler, optimizer, 0.01, 0.8, 2, 0.8 - (0.8 / 5.0))\n\n # E) warmup_end_value is None: fall back to case B)\n optimizer = torch.optim.SGD([t1], lr=0.2)\n torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)\n _test(torch_lr_scheduler, optimizer, 0.01, None, 10, 0.2 * 0.98)\n\n\ndef test_create_lr_scheduler_with_warmup_on_combined_scheduler():\n # Test with a complex scheduler\n def _test(save_history):\n tensor = torch.ones([1], requires_grad=True)\n optimizer = torch.optim.SGD([tensor], lr=0.001)\n\n max_epochs = 25\n lr_max_value = 0.4\n num_iterations_per_epoch = 128\n num_iterations = max_epochs * num_iterations_per_epoch\n warmup_duration = 5 * num_iterations_per_epoch\n cooldown_duration = 5 * num_iterations_per_epoch\n\n scheduler_1 = LinearCyclicalScheduler(\n optimizer,\n \"lr\",\n start_value=lr_max_value,\n end_value=lr_max_value * 0.9,\n cycle_size=(num_iterations - warmup_duration - cooldown_duration) * 2,\n )\n\n scheduler_2 = LinearCyclicalScheduler(\n optimizer, \"lr\", start_value=lr_max_value, end_value=0.0, 
cycle_size=cooldown_duration * 2\n )\n\n lr_scheduler = ConcatScheduler(\n schedulers=[scheduler_1, scheduler_2],\n durations=[num_iterations - warmup_duration - cooldown_duration],\n save_history=False,\n )\n lr_values = [None] * num_iterations\n scheduler = create_lr_scheduler_with_warmup(\n lr_scheduler,\n warmup_start_value=0.0,\n warmup_end_value=lr_max_value,\n warmup_duration=warmup_duration,\n save_history=save_history,\n output_simulated_values=lr_values,\n )\n state_dict = scheduler.state_dict()\n\n trainer = Engine(lambda engine, batch: None)\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def save_lr(engine):\n lrs.append(optimizer.param_groups[0][\"lr\"])\n\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n\n data = [0] * num_iterations_per_epoch\n\n for _ in range(2):\n lrs = []\n trainer.run(data, max_epochs=max_epochs)\n\n assert lrs == pytest.approx([v for i, v in lr_values])\n\n if save_history:\n param_history = trainer.state.param_history[\"lr\"]\n assert lrs == pytest.approx([v[0] for v in param_history])\n\n scheduler.load_state_dict(state_dict)\n\n _test(save_history=False)\n _test(save_history=True)\n\n\ndef test_create_lr_scheduler_with_warmup_with_real_model(dummy_model_factory):\n\n model = dummy_model_factory(with_grads=False, with_frozen_layer=False)\n init_lr = 0.01\n optimizer = torch.optim.SGD(model.parameters(), lr=init_lr)\n scaled_lr = 0.02\n warmup_duration = 5\n step_size = 2\n gamma = 0.97\n\n output_simulated_values = [None] * 50\n\n create_lr_scheduler_with_warmup(\n torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma),\n warmup_start_value=0.0,\n warmup_end_value=scaled_lr,\n warmup_duration=warmup_duration,\n output_simulated_values=output_simulated_values,\n )\n\n assert output_simulated_values[0] == [0, 0.0]\n assert output_simulated_values[warmup_duration - 1] == [warmup_duration - 1, scaled_lr]\n assert output_simulated_values[warmup_duration] == [warmup_duration, init_lr]\n v = [warmup_duration + step_size, init_lr * gamma]\n assert output_simulated_values[warmup_duration + step_size] == v\n\n\ndef test_param_group_scheduler_asserts():\n\n t1 = torch.zeros([1], requires_grad=True)\n t2 = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([{\"params\": t1, \"lr\": 0.1}, {\"params\": t2, \"lr\": 0.1}])\n\n lr_scheduler1 = LinearCyclicalScheduler(\n optimizer, \"lr\", param_group_index=0, start_value=1.0, end_value=0.0, cycle_size=10\n )\n lr_scheduler2 = LinearCyclicalScheduler(\n optimizer, \"lr\", param_group_index=1, start_value=1.0, end_value=0.0, cycle_size=10\n )\n\n with pytest.raises(ValueError):\n ParamGroupScheduler(schedulers=[0, 1, 2], names=[\"a\", \"b\", \"c\"])\n\n with pytest.raises(ValueError):\n ParamGroupScheduler(schedulers=[lr_scheduler1, \"2\"], names=[\"a\", \"b\"])\n\n with pytest.raises(ValueError):\n ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=\"ab\")\n\n with pytest.raises(ValueError):\n ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=[\"a\"])\n\n scheduler = ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=[\"a\", \"b\"])\n with pytest.raises(ValueError, match=r\"Required state attribute 'schedulers' is absent in provided state_dict\"):\n scheduler.load_state_dict({\"a\": 1})\n\n with pytest.raises(ValueError, match=r\"Input state_dict contains 0 state_dicts of param group schedulers\"):\n scheduler.load_state_dict({\"schedulers\": []})\n\n with pytest.raises(\n ValueError, match=r\"Name of scheduler 
from input state dict does not \" r\"correspond to required one\"\n ):\n scheduler.load_state_dict({\"schedulers\": [(\"a\", lr_scheduler1.state_dict()), (\"bad_name\", {})]})\n\n\ndef test_param_group_scheduler():\n def _test(lr_schedulers, optimizer):\n num_iterations = 10\n max_epochs = 20\n\n scheduler = ParamGroupScheduler(lr_schedulers, names=[\"s_{}\".format(i) for i in range(len(lr_schedulers))])\n state_dict = scheduler.state_dict()\n\n trainer = Engine(lambda engine, batch: None)\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def save_lr(engine):\n lrs.append((optimizer.param_groups[0][\"lr\"], optimizer.param_groups[1][\"lr\"]))\n\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n\n data = [0] * num_iterations\n\n for _ in range(2):\n lrs = []\n trainer.run(data, max_epochs=max_epochs)\n assert [lr[0] for lr in lrs] == pytest.approx([lr[1] for lr in lrs])\n scheduler.load_state_dict(state_dict)\n\n t1 = torch.zeros([1], requires_grad=True)\n t2 = torch.zeros([1], requires_grad=True)\n optimizer = torch.optim.SGD([{\"params\": t1, \"lr\": 0.1}, {\"params\": t2, \"lr\": 0.1}])\n\n lr_scheduler1 = LinearCyclicalScheduler(\n optimizer, \"lr\", param_group_index=0, start_value=1.0, end_value=0.0, cycle_size=10\n )\n lr_scheduler2 = LinearCyclicalScheduler(\n optimizer, \"lr\", param_group_index=1, start_value=1.0, end_value=0.0, cycle_size=10\n )\n _test([lr_scheduler1, lr_scheduler2], optimizer)\n", "import torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\nfrom ignite.exceptions import NotComputableError\n\n\nclass GeometricMeanAbsoluteError(_BaseRegression):\n r\"\"\"\n Calculates the Geometric Mean Absolute Error.\n\n :math:`\\text{GMAE} = \\exp(\\frac{1}{n}\\sum_{j=1}^n\\ln(|A_j - P_j|))`\n\n where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n\n More details can be found in `Botchkarev 2018`__.\n\n - `update` must receive output of the form `(y_pred, y)` or `{'y_pred': y_pred, 'y': y}`.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n\n __ https://arxiv.org/abs/1809.03006\n \"\"\"\n\n def reset(self):\n self._sum_of_errors = 0.0\n self._num_examples = 0\n\n def _update(self, output):\n y_pred, y = output\n errors = torch.log(torch.abs(y.view_as(y_pred) - y_pred))\n self._sum_of_errors += torch.sum(errors)\n self._num_examples += y.shape[0]\n\n def compute(self):\n if self._num_examples == 0:\n raise NotComputableError(\n \"GeometricMeanAbsoluteError must have at \" \"least one example before it can be computed.\"\n )\n return torch.exp(self._sum_of_errors / self._num_examples).item()\n" ]
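A minimal sketch of the GMAE formula quoted in the metric's docstring above, checked by hand against the running-sum form used in `_update`/`compute`; the tensors and values are hypothetical, not part of the test suite.

import torch

# Hypothetical predictions and ground truth of shape (N,)
y_pred = torch.tensor([2.0, 4.0, 6.0])
y_true = torch.tensor([1.0, 5.0, 9.0])

# GMAE = exp(mean(ln |A_j - P_j|)), i.e. the geometric mean of the absolute errors
errors = torch.log(torch.abs(y_true - y_pred))  # ln of absolute errors: 0, 0, ln(3)
gmae = torch.exp(errors.mean()).item()          # 3 ** (1/3) ~= 1.442

# Same number via the metric's running sum divided by the example count
assert abs(gmae - torch.exp(errors.sum() / 3).item()) < 1e-6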
[ [ "torch.ones", "torch.zeros", "matplotlib.use", "torch.optim.lr_scheduler.ExponentialLR", "numpy.int64", "torch.optim.SGD", "torch.optim.lr_scheduler.StepLR" ], [ "torch.exp", "torch.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nicolahunfeld/CLMM
[ "a431649713e56b907a7366bdf21693c30851dee7" ]
[ "clmm/theory/parent_class.py" ]
[ "\"\"\"@file parent_class.py\nCLMModeling abstract class\n\"\"\"\nimport numpy as np\n\n# functions for the 2h term\nfrom scipy.integrate import simps\nfrom scipy.special import jv\nfrom scipy.interpolate import interp1d\n\nfrom .generic import compute_reduced_shear_from_convergence\nimport warnings\nfrom .generic import compute_reduced_shear_from_convergence, compute_magnification_bias_from_magnification\nfrom ..utils import validate_argument\n\n\nclass CLMModeling:\n r\"\"\"Object with functions for halo mass modeling\n\n Attributes\n ----------\n backend: str\n Name of the backend being used\n massdef : str\n Profile mass definition (`mean`, `critical`, `virial` - letter case independent)\n delta_mdef : int\n Mass overdensity definition.\n halo_profile_model : str\n Profile model parameterization (`nfw`, `einasto`, `hernquist` - letter case independent)\n cosmo: Cosmology\n Cosmology object\n hdpm: Object\n Backend object with halo profiles\n mdef_dict: dict\n Dictionary with the definitions for mass\n hdpm_dict: dict\n Dictionary with the definitions for profile\n validate_input: bool\n Validade each input argument\n cosmo_class: type\n Type of used cosmology objects\n \"\"\"\n # pylint: disable=too-many-instance-attributes\n\n def __init__(self, validate_input=True):\n self.backend = None\n\n self.massdef = ''\n self.delta_mdef = 0\n self.halo_profile_model = ''\n\n self.cosmo = None\n\n self.hdpm = None\n self.mdef_dict = {}\n self.hdpm_dict = {}\n\n self.validate_input = validate_input\n self.cosmo_class = None\n\n\n def set_cosmo(self, cosmo):\n r\"\"\" Sets the cosmology to the internal cosmology object\n\n Parameters\n ----------\n cosmo: clmm.Comology object, None\n CLMM Cosmology object. If is None, creates a new instance of self.cosmo_class().\n \"\"\"\n if self.validate_input:\n if self.cosmo_class is None:\n raise NotImplementedError\n validate_argument(locals(), 'cosmo', self.cosmo_class, none_ok=True)\n self._set_cosmo(cosmo)\n self.cosmo.validate_input = self.validate_input\n\n def _set_cosmo(self, cosmo):\n r\"\"\" Sets the cosmology to the internal cosmology object\"\"\"\n self.cosmo = cosmo if cosmo is not None else self.cosmo_class()\n\n def set_halo_density_profile(self, halo_profile_model='nfw', massdef='mean', delta_mdef=200):\n r\"\"\" Sets the definitions for the halo profile\n\n Parameters\n ----------\n halo_profile_model: str\n Halo mass profile, current options are 'nfw' (letter case independent)\n massdef: str\n Mass definition, current options are 'mean' (letter case independent)\n delta_mdef: int\n Overdensity number\n \"\"\"\n # make case independent\n massdef, halo_profile_model = massdef.lower(), halo_profile_model.lower()\n if self.validate_input:\n validate_argument(locals(), 'massdef', str)\n validate_argument(locals(), 'halo_profile_model', str)\n validate_argument(locals(), 'delta_mdef', int, argmin=0)\n if not massdef in self.mdef_dict:\n raise ValueError(\n f\"Halo density profile mass definition {massdef} not currently supported\")\n if not halo_profile_model in self.hdpm_dict:\n raise ValueError(\n f\"Halo density profile model {halo_profile_model} not currently supported\")\n return self._set_halo_density_profile(halo_profile_model=halo_profile_model,\n massdef=massdef, delta_mdef=delta_mdef)\n\n def _set_halo_density_profile(self, halo_profile_model='nfw', massdef='mean', delta_mdef=200):\n raise NotImplementedError\n\n def set_mass(self, mdelta):\n r\"\"\" Sets the value of the :math:`M_\\Delta`\n\n Parameters\n ----------\n mdelta : 
float\n Galaxy cluster mass :math:`M_\\Delta` in units of :math:`M_\\odot`\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'mdelta', float, argmin=0)\n self._set_mass(mdelta)\n\n def _set_mass(self, mdelta):\n r\"\"\" Actually sets the value of the :math:`M_\\Delta` (without value check)\"\"\"\n raise NotImplementedError\n\n def set_einasto_alpha(self, alpha):\n r\"\"\" Sets the value of the :math:`\\alpha` parameter for the Einasto profile\n\n Parameters\n ----------\n alpha : float\n Einasto profile slope parameter\n \"\"\"\n if self.halo_profile_model!='einasto' or self.backend!='nc':\n raise NotImplementedError(\"The Einasto slope cannot be set for your combination of profile choice or modeling backend.\")\n else:\n if self.validate_input:\n validate_argument(locals(), 'alpha', float)\n self._set_einasto_alpha(alpha)\n\n def _set_einasto_alpha(self, alpha):\n r\"\"\" Actually sets the value of the :math:`\\alpha` parameter for the Einasto profile\"\"\"\n raise NotImplementedError\n\n def get_einasto_alpha(self, z_cl=None):\n r\"\"\" Returns the value of the :math:`\\alpha` parameter for the Einasto profile, if defined\n\n Parameters\n ----------\n z_cl : float\n Cluster redshift (required for Einasto with the CCL backend, will be ignored for NC)\n \"\"\"\n if self.halo_profile_model!='einasto':\n raise ValueError(f\"Wrong profile model. Current profile = {self.halo_profile_model}\")\n else:\n return self._get_einasto_alpha(z_cl)\n\n def _get_einasto_alpha(self, z_cl=None):\n r\"\"\" Returns the value of the :math:`\\alpha` parameter for the Einasto profile, if defined\"\"\"\n raise NotImplementedError\n\n def set_concentration(self, cdelta):\n r\"\"\" Sets the concentration\n\n Parameters\n ----------\n cdelta: float\n Concentration\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'cdelta', float, argmin=0)\n self._set_concentration(cdelta)\n\n def _set_concentration(self, cdelta):\n r\"\"\" Actually sets the value of the concentration (without value check)\"\"\"\n raise NotImplementedError\n\n def eval_3d_density(self, r3d, z_cl, verbose=False):\n r\"\"\"Retrieve the 3d density :math:`\\rho(r)`.\n\n Parameters\n ----------\n r3d : array_like, float\n Radial position from the cluster center in :math:`M\\!pc`.\n z_cl: float\n Redshift of the cluster\n\n Returns\n -------\n array_like, float\n 3-dimensional mass density in units of :math:`M_\\odot\\ Mpc^{-3}`\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'r3d', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', 'float_array', argmin=0)\n\n if self.halo_profile_model=='einasto' and verbose:\n print(f\"Einasto alpha = {self._get_einasto_alpha(z_cl=z_cl)}\")\n\n return self._eval_3d_density(r3d=r3d, z_cl=z_cl)\n\n def _eval_3d_density(self, r3d, z_cl):\n raise NotImplementedError\n\n def eval_critical_surface_density(self, z_len, z_src):\n r\"\"\"Computes the critical surface density\n\n Parameters\n ----------\n z_len : float\n Lens redshift\n z_src : array_like, float\n Background source galaxy redshift(s)\n\n Returns\n -------\n float\n Cosmology-dependent critical surface density in units of :math:`M_\\odot\\ Mpc^{-2}`\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'z_len', float, argmin=0)\n validate_argument(locals(), 'z_src', 'float_array', argmin=0)\n return self._eval_critical_surface_density(z_len=z_len, z_src=z_src)\n\n def _eval_critical_surface_density(self, z_len, z_src):\n return self.cosmo.eval_sigma_crit(z_len, z_src)\n\n def eval_surface_density(self, r_proj, z_cl, 
verbose=False):\n r\"\"\" Computes the surface mass density\n\n Parameters\n ----------\n r_proj : array_like\n Projected radial position from the cluster center in :math:`M\\!pc`.\n z_cl: float\n Redshift of the cluster\n\n Returns\n -------\n array_like, float\n 2D projected surface density in units of :math:`M_\\odot\\ Mpc^{-2}`\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'r_proj', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', float, argmin=0)\n\n if self.halo_profile_model=='einasto' and verbose:\n print(f\"Einasto alpha = {self._get_einasto_alpha(z_cl=z_cl)}\")\n\n return self._eval_surface_density(r_proj=r_proj, z_cl=z_cl)\n\n\n def _eval_surface_density(self, r_proj, z_cl):\n raise NotImplementedError\n\n def eval_mean_surface_density(self, r_proj, z_cl, verbose=False):\n r\"\"\" Computes the mean value of surface density inside radius r_proj\n\n Parameters\n ----------\n r_proj : array_like\n Projected radial position from the cluster center in :math:`M\\!pc`.\n z_cl: float\n Redshift of the cluster\n\n Returns\n -------\n array_like, float\n Mean surface density inside radius r_proj in units of :math:`M_\\odot\\ Mpc^{-2}`.\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'r_proj', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', float, argmin=0)\n\n if self.halo_profile_model=='einasto' and verbose:\n print(f\"Einasto alpha = {self._get_einasto_alpha(z_cl=z_cl)}\")\n\n return self._eval_mean_surface_density(r_proj=r_proj, z_cl=z_cl)\n\n def _eval_mean_surface_density(self, r_proj, z_cl):\n raise NotImplementedError\n\n def eval_excess_surface_density(self, r_proj, z_cl, verbose=False):\n r\"\"\" Computes the excess surface density\n\n Parameters\n ----------\n r_proj : array_like\n Projected radial position from the cluster center in :math:`M\\!pc`.\n z_cl: float\n Redshift of the cluster\n\n Returns\n -------\n array_like, float\n Excess surface density in units of :math:`M_\\odot\\ Mpc^{-2}`.\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'r_proj', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', float, argmin=0)\n\n if self.halo_profile_model=='einasto' and verbose:\n print(f\"Einasto alpha = {self._get_einasto_alpha(z_cl=z_cl)}\")\n\n return self._eval_excess_surface_density(r_proj=r_proj, z_cl=z_cl)\n\n def _eval_excess_surface_density(self, r_proj, z_cl):\n raise NotImplementedError\n\n def eval_excess_surface_density_2h(self, r_proj, z_cl, halobias=1., lsteps=500):\n r\"\"\" Computes the 2-halo term excess surface density (CCL and NumCosmo backends only)\n\n Parameters\n ----------\n r_proj : array_like\n Projected radial position from the cluster center in :math:`M\\!pc`.\n z_cl: float\n Redshift of the cluster\n halobias : float, optional\n Value of the halo bias\n lsteps: int (optional)\n Number of steps for numerical integration\n\n Returns\n -------\n array_like, float\n Excess surface density from the 2-halo term in units of :math:`M_\\odot\\ Mpc^{-2}`.\n \"\"\"\n\n if self.validate_input:\n validate_argument(locals(), 'r_proj', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', float, argmin=0)\n validate_argument(locals(), 'lsteps', int, argmin=1)\n validate_argument(locals(), 'halobias', float, argmin=0)\n\n\n if self.backend not in ('ccl', 'nc'):\n raise NotImplementedError(\n f\"2-halo term not currently supported with the {self.backend} backend. 
\"\n \"Use the CCL or NumCosmo backend instead\")\n else:\n return self._eval_excess_surface_density_2h(r_proj, z_cl, halobias=halobias, lsteps=lsteps)\n\n def _eval_excess_surface_density_2h(self, r_proj, z_cl, halobias=1.,lsteps=500):\n \"\"\"\"eval excess surface density from the 2-halo term\"\"\"\n da = self.cosmo.eval_da(z_cl)\n rho_m = self.cosmo._get_rho_m(z_cl)\n\n kk = np.logspace(-5.,5.,1000)\n pk = self.cosmo._eval_linear_matter_powerspectrum(kk, z_cl)\n interp_pk = interp1d(kk, pk, kind='cubic')\n theta = r_proj / da\n\n # calculate integral, units [Mpc]**-3\n def __integrand__( l , theta ):\n k = l / ((1 + z_cl) * da)\n return l * jv( 2 , l * theta ) * interp_pk( k )\n\n ll = np.logspace( 0 , 6 , lsteps )\n val = np.array( [ simps( __integrand__( ll , t ) , ll ) for t in theta ] )\n return halobias * val * rho_m / ( 2 * np.pi * ( 1 + z_cl )**3 * da**2 )\n\n def eval_surface_density_2h(self, r_proj, z_cl, halobias=1., lsteps=500):\n r\"\"\" Computes the 2-halo term surface density (CCL backend only)\n\n Parameters\n ----------\n r_proj : array_like\n Projected radial position from the cluster center in :math:`M\\!pc`.\n z_cl: float\n Redshift of the cluster\n halobias : float, optional\n Value of the halo bias\n lsteps: int (optional)\n Number of steps for numerical integration\n\n Returns\n -------\n array_like, float\n Excess surface density from the 2-halo term in units of :math:`M_\\odot\\ Mpc^{-2}`.\n \"\"\"\n\n if self.validate_input:\n validate_argument(locals(), 'r_proj', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', float, argmin=0)\n validate_argument(locals(), 'lsteps', int, argmin=1)\n validate_argument(locals(), 'halobias', float, argmin=0)\n\n if self.backend not in ('ccl', 'nc'):\n raise NotImplementedError(\n f\"2-halo term not currently supported with the {self.backend} backend. 
\"\n \"Use the CCL or NumCosmo backend instead\")\n else:\n return self._eval_surface_density_2h(r_proj, z_cl, halobias=halobias, lsteps=lsteps)\n\n def _eval_surface_density_2h(self, r_proj, z_cl, halobias=1., lsteps=500):\n \"\"\"\"eval surface density from the 2-halo term\"\"\"\n da = self.cosmo.eval_da(z_cl)\n rho_m = self.cosmo._get_rho_m(z_cl)\n\n kk = np.logspace(-5.,5.,1000)\n pk = self.cosmo._eval_linear_matter_powerspectrum(kk, z_cl)\n interp_pk = interp1d(kk, pk, kind='cubic')\n theta = r_proj / da\n\n # calculate integral, units [Mpc]**-3\n def __integrand__( l , theta ):\n k = l / ((1 + z_cl) * da)\n return l * jv( 0 , l * theta ) * interp_pk( k )\n\n ll = np.logspace( 0 , 6 , lsteps )\n val = np.array( [ simps( __integrand__( ll , t ) , ll ) for t in theta ] )\n return halobias * val * rho_m / ( 2 * np.pi * ( 1 + z_cl )**3 * da**2 )\n \n \n def eval_tangential_shear(self, r_proj, z_cl, z_src, verbose=False):\n r\"\"\"Computes the tangential shear\n\n Parameters\n ----------\n r_proj : array_like\n The projected radial positions in :math:`M\\!pc`.\n z_cl : float\n Galaxy cluster redshift\n z_src : array_like, float\n Background source galaxy redshift(s)\n\n Returns\n -------\n array_like, float\n tangential shear\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'r_proj', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', float, argmin=0)\n validate_argument(locals(), 'z_src', 'float_array', argmin=0)\n\n if self.halo_profile_model=='einasto' and verbose:\n print(f\"Einasto alpha = {self._get_einasto_alpha(z_cl=z_cl)}\")\n\n return self._eval_tangential_shear(r_proj=r_proj, z_cl=z_cl, z_src=z_src)\n\n def _eval_tangential_shear(self, r_proj, z_cl, z_src):\n delta_sigma = self.eval_excess_surface_density(r_proj, z_cl)\n sigma_c = self.eval_critical_surface_density(z_cl, z_src)\n return delta_sigma/sigma_c\n\n def eval_convergence(self, r_proj, z_cl, z_src, verbose=False):\n r\"\"\"Computes the mass convergence\n\n .. math::\n \\kappa = \\frac{\\Sigma}{\\Sigma_{crit}}\n\n or\n\n .. 
math::\n \\kappa = \\kappa_\\infty \\times \\beta_s\n\n Parameters\n ----------\n r_proj : array_like\n The projected radial positions in :math:`M\\!pc`.\n z_cl : float\n Galaxy cluster redshift\n z_src : array_like, float\n Background source galaxy redshift(s)\n\n Returns\n -------\n array_like, float\n Mass convergence, kappa.\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'r_proj', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', float, argmin=0)\n validate_argument(locals(), 'z_src', 'float_array', argmin=0)\n\n if self.halo_profile_model=='einasto' and verbose:\n print(f\"Einasto alpha = {self._get_einasto_alpha(z_cl=z_cl)}\")\n\n return self._eval_convergence(r_proj=r_proj, z_cl=z_cl, z_src=z_src)\n\n def _eval_convergence(self, r_proj, z_cl, z_src, verbose=False):\n sigma = self.eval_surface_density(r_proj, z_cl, verbose=verbose)\n sigma_c = self.eval_critical_surface_density(z_cl, z_src)\n return sigma/sigma_c\n\n def eval_reduced_tangential_shear(self, r_proj, z_cl, z_src, z_src_model='single_plane',\n beta_s_mean=None, beta_s_square_mean=None, verbose=False):\n r\"\"\"Computes the reduced tangential shear :math:`g_t = \\frac{\\gamma_t}{1-\\kappa}`.\n\n Parameters\n ----------\n r_proj : array_like\n The projected radial positions in :math:`M\\!pc`.\n z_cl : float\n Galaxy cluster redshift\n z_src : array_like, float\n Background source galaxy redshift(s)\n z_src_model : str, optional\n Source redshift model, with the following supported options:\n\n * `single_plane` (default): all sources at one redshift (if `z_source` is a float) \\\n or known individual source galaxy redshifts (if `z_source` is an array and \\\n `r_proj` is a float);\n * `applegate14`: use the equation (6) in Weighing the Giants - III \\\n (Applegate et al. 2014; https://arxiv.org/abs/1208.0605) to evaluate tangential reduced shear;\n * `schrabback18`: use the equation (12) in Cluster Mass Calibration at High Redshift \\\n (Schrabback et al. 2017; https://arxiv.org/abs/1611.03866) to evaluate tangential reduced shear;\n \n beta_s_mean: array_like, float\n Lensing efficiency averaged over the galaxy redshift distribution \n\n .. math::\n \\langle \\beta_s \\rangle = \\left\\langle \\frac{D_{LS}}{D_S}\\frac{D_\\infty}{D_{L,\\infty}}\\right\\rangle\n \n beta_s_square_mean: array_like, float\n Square of the lensing efficiency averaged over the galaxy redshift distribution \n\n .. 
math::\n \\langle \\beta_s^2 \\rangle = \\left\\langle \\left(\\frac{D_{LS}}{D_S}\\frac{D_\\infty}{D_{L,\\infty}}\\right)^2 \\right\\rangle\n\n Returns\n -------\n gt : array_like, float\n Reduced tangential shear\n\n Notes\n -----\n Need to figure out if we want to raise exceptions rather than errors here?\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'r_proj', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', float, argmin=0)\n validate_argument(locals(), 'z_src', 'float_array', argmin=0)\n\n if self.halo_profile_model=='einasto' and verbose:\n print(f\"Einasto alpha = {self._get_einasto_alpha(z_cl=z_cl)}\")\n\n if z_src_model == 'single_plane':\n gt = self._eval_reduced_tangential_shear_sp(r_proj, z_cl, z_src)\n # elif z_src_model == 'known_z_src': # Discrete case\n # raise NotImplementedError('Need to implemnt Beta_s functionality, or average'+\n # 'sigma/sigma_c kappa_t = Beta_s*kappa_inf')\n # elif z_src_model == 'z_src_distribution': # Continuous ( from a distribution) case\n # raise NotImplementedError('Need to implement Beta_s and Beta_s2 calculation from'+\n # 'integrating distribution of redshifts in each radial bin')\n elif z_src_model == 'applegate14':\n if beta_s_mean is None or beta_s_square_mean is None:\n raise ValueError(\"beta_s_mean or beta_s_square_mean is not given.\")\n else:\n z_source = 1000. #np.inf # INF or a very large number\n gammat = self._eval_tangential_shear(r_proj, z_cl, z_source)\n kappa = self._eval_convergence(r_proj, z_cl, z_source)\n gt = beta_s_mean * gammat / (1. - beta_s_square_mean / beta_s_mean * kappa)\n \n elif z_src_model == 'schrabback18':\n if beta_s_mean is None or beta_s_square_mean is None:\n raise ValueError(\"beta_s_mean or beta_s_square_mean is not given.\")\n else:\n z_source = 1000. #np.inf # INF or a very large number\n gammat = self._eval_tangential_shear(r_proj, z_cl, z_source)\n kappa = self._eval_convergence(r_proj, z_cl, z_source)\n gt = (1. + (beta_s_square_mean / (beta_s_mean * beta_s_mean) - 1.) * beta_s_mean * kappa) * (beta_s_mean * gammat / (1. - beta_s_mean * kappa))\n \n else:\n raise ValueError(\"Unsupported z_src_model\")\n return gt\n\n def _eval_reduced_tangential_shear_sp(self, r_proj, z_cl, z_src):\n kappa = self.eval_convergence(r_proj, z_cl, z_src)\n gamma_t = self.eval_tangential_shear(r_proj, z_cl, z_src)\n return compute_reduced_shear_from_convergence(gamma_t, kappa)\n\n def eval_magnification(self, r_proj, z_cl, z_src, verbose=False):\n r\"\"\"Computes the magnification\n\n .. math::\n \\mu = \\frac{1}{(1-\\kappa)^2-|\\gamma_t|^2}\n\n Parameters\n ----------\n r_proj : array_like\n The projected radial positions in :math:`M\\!pc`.\n z_cl : float\n Galaxy cluster redshift\n z_src : array_like, float\n Background source galaxy redshift(s)\n\n Returns\n -------\n mu : array_like, float\n magnification, mu.\n\n Notes\n -----\n The magnification is computed taking into account just the tangential\n shear. 
This is valid for spherically averaged profiles, e.g., NFW and\n Einasto (by construction the cross shear is zero).\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'r_proj', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', float, argmin=0)\n validate_argument(locals(), 'z_src', 'float_array', argmin=0)\n\n if self.halo_profile_model=='einasto' and verbose:\n print(f\"Einasto alpha = {self._get_einasto_alpha(z_cl=z_cl)}\")\n\n return self._eval_magnification(r_proj=r_proj, z_cl=z_cl, z_src=z_src)\n\n def _eval_magnification(self, r_proj, z_cl, z_src):\n kappa = self.eval_convergence(r_proj, z_cl, z_src)\n gamma_t = self.eval_tangential_shear(r_proj, z_cl, z_src)\n return 1./((1-kappa)**2-abs(gamma_t)**2)\n\n def eval_magnification_bias(self, r_proj, z_cl, z_src, alpha):\n r\"\"\"Computes the magnification bias\n\n .. math::\n \\mu^{\\alpha - 1}\n\n Parameters\n ----------\n r_proj : array_like\n The projected radial positions in :math:`M\\!pc`.\n z_cl : float\n Galaxy cluster redshift\n z_src : array_like, float\n Background source galaxy redshift(s)\n alpha : float\n Slope of the cumulative number count of background sources at a given magnitude\n\n Returns\n -------\n mu_bias : array_like, float\n magnification bias.\n\n Notes\n -----\n The magnification is computed taking into account just the tangential\n shear. This is valid for spherically averaged profiles, e.g., NFW and\n Einasto (by construction the cross shear is zero).\n \"\"\"\n if self.validate_input:\n validate_argument(locals(), 'r_proj', 'float_array', argmin=0)\n validate_argument(locals(), 'z_cl', float, argmin=0)\n validate_argument(locals(), 'z_src', 'float_array', argmin=0)\n validate_argument(locals(), 'alpha', 'float_array')\n return self._eval_magnification_bias(r_proj=r_proj, z_cl=z_cl, z_src=z_src, alpha=alpha)\n\n def _eval_magnification_bias(self, r_proj, z_cl, z_src, alpha):\n magnification = self.eval_magnification(r_proj, z_cl, z_src)\n return compute_magnification_bias_from_magnification(magnification, alpha)\n" ]
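A minimal sketch (hypothetical scalar inputs, not outputs of a CLMModeling backend) of how the single-plane quantities above combine, mirroring `_eval_reduced_tangential_shear_sp`, `_eval_magnification` and `_eval_magnification_bias`:

# Hypothetical tangential shear and convergence at some projected radius
gamma_t, kappa = 0.05, 0.02

g_t = gamma_t / (1.0 - kappa)                    # reduced shear: gamma_t / (1 - kappa)
mu = 1.0 / ((1.0 - kappa)**2 - abs(gamma_t)**2)  # magnification: 1 / ((1-kappa)^2 - |gamma_t|^2)
alpha = 2.5                                      # hypothetical slope of the cumulative number count
mu_bias = mu**(alpha - 1.0)                      # magnification bias: mu^(alpha - 1)

print(g_t, mu, mu_bias)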
[ [ "numpy.logspace", "scipy.interpolate.interp1d", "scipy.special.jv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PrzemyslawSwiderski/PLCOpythonTool
[ "513db830e2b6b396393896735dd81bd2dd845ba2", "513db830e2b6b396393896735dd81bd2dd845ba2" ]
[ "src/classes/mysql_fetcher.py", "src/utils/unpickle_training_results.py" ]
[ "import logging\n\nimport pandas\nimport pymysql\n\nfrom classes.file_query_loader import FileQueryLoader\n\n\nclass MySqlFetcher:\n def __init__(self, query_loader=FileQueryLoader()):\n self.__db_connection = self.open_connection()\n self.data_set = pandas.DataFrame()\n self.query_loader = query_loader\n\n def open_connection(self):\n return pymysql.connect(host='localhost',\n user='root',\n password='root',\n db='prostate_screening',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\n def close_connection(self):\n self.__db_connection.close()\n\n def run_select_query(self, query, arguments=None):\n self.data_set = pandas.read_sql_query(query, self.__db_connection, params=arguments)\n logging.info(f\"\\nFetched data from DB: \"\n f\"\\n{self.data_set}\")\n return self.data_set\n\n def run_select_query_from_file(self, file_name):\n query = self.query_loader.load_query(file_name)\n return self.run_select_query(query)\n", "from sklearn.externals import joblib\n\nfrom helpers import get_file_names_by_ext\n\npickles = get_file_names_by_ext(\"pickles\")\npickles_sorted = sorted(pickles)\nrs = joblib.load(pickles_sorted[-1])\n\nprint(\"Best estimator:\")\nprint(rs.best_estimator_)\nprint(\"Best params:\")\nprint(rs.best_params_)\n" ]
[ [ "pandas.read_sql_query", "pandas.DataFrame" ], [ "sklearn.externals.joblib.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FeynmanDNA/singa-auto
[ "e96982adc689335a323a5a32d03b23942e01d09f", "e96982adc689335a323a5a32d03b23942e01d09f", "e96982adc689335a323a5a32d03b23942e01d09f", "e96982adc689335a323a5a32d03b23942e01d09f" ]
[ "examples/models/tabular_classification/RandomForestClf.py", "examples/models/image_classification/TfEnas.py", "examples/models/tabular_classification/GaussianNBClf.py", "examples/models/tabular_classification/XgbClf.py" ]
[ "from sklearn.ensemble import RandomForestClassifier\nimport pickle\nimport base64\nimport pandas as pd\nimport numpy as np\nimport json\n\nfrom singa_auto.model import BaseModel, IntegerKnob, CategoricalKnob, logger\nfrom singa_auto.model.dev import test_model_class\nfrom singa_auto.constants import ModelDependency\n\nclass RandomForestClf(BaseModel):\n '''\n Implements Random Forest Classifier for tabular data classification task\n '''\n @staticmethod\n def get_knob_config():\n return {\n 'n_estimators': IntegerKnob(50, 200),\n 'oob_score': CategoricalKnob([True, False]),\n 'max_depth': IntegerKnob(10, 100),\n 'max_features': CategoricalKnob(['auto', 'sqrt', 'log2'])\n }\n\n\n def __init__(self, **knobs):\n self.__dict__.update(knobs)\n self._clf = self._build_classifier(self.n_estimators, self.max_depth, self.oob_score, self.max_features)\n\n\n def train(self, dataset_path, features=None, target=None, **kwargs):\n # Record features & target\n self._features = features\n self._target = target\n\n # Load CSV file as pandas dataframe\n csv_path = dataset_path\n data = pd.read_csv(csv_path)\n\n # Extract X & y from dataframe\n (X, y) = self._extract_xy(data)\n\n # Encode categorical features\n X = self._encoding_categorical_type(X)\n\n self._clf.fit(X, y)\n\n # Compute train accuracy\n score = self._clf.score(X, y)\n logger.log('Train accuracy: {}'.format(score))\n\n\n def evaluate(self, dataset_path):\n # Load CSV file as pandas dataframe\n csv_path = dataset_path\n data = pd.read_csv(csv_path)\n\n # Extract X & y from dataframe\n (X, y) = self._extract_xy(data)\n\n # Encode categorical features\n X = self._encoding_categorical_type(X)\n\n accuracy = self._clf.score(X, y)\n return accuracy\n\n\n def predict(self, queries):\n queries = [pd.DataFrame(query, index=[0]) for query in queries]\n probs = [self._clf.predict_proba(self._features_mapping(query)).tolist()[0] for query in queries]\n return probs\n\n\n def destroy(self):\n pass\n\n\n def dump_parameters(self):\n params = {}\n\n # Put model parameters\n clf_bytes = pickle.dumps(self._clf)\n clf_base64 = base64.b64encode(clf_bytes).decode('utf-8')\n params['clf_base64'] = clf_base64\n params['encoding_dict'] = json.dumps(self._encoding_dict)\n params['features'] = json.dumps(self._features)\n params['target'] = self._target\n\n return params\n\n\n def load_parameters(self, params):\n # Load model parameters\n assert 'clf_base64' in params\n clf_base64 = params['clf_base64']\n clf_bytes = base64.b64decode(clf_base64.encode('utf-8'))\n\n self._clf = pickle.loads(clf_bytes)\n self._encoding_dict = json.loads(params['encoding_dict'])\n self._features = json.loads(params['features'])\n self._target = params['target']\n\n\n def _extract_xy(self, data):\n features = self._features\n target = self._target\n\n if features is None:\n X = data.iloc[:,:-1]\n else:\n X = data[features]\n\n if target is None:\n y = data.iloc[:,-1]\n else:\n y = data[target]\n\n return (X, y)\n\n\n def _encoding_categorical_type(self, cols):\n # Apply label encoding for those categorical columns\n cat_cols = list(filter(lambda x: cols[x].dtype == 'object', cols.columns))\n encoded_cols = pd.DataFrame({col: cols[col].astype('category').cat.codes \\\n if cols[col].dtype == 'object' else cols[col] for col in cols}, index=cols.index)\n\n # Recover the missing elements (Use XGBoost to automatically handle them)\n encoded_cols = encoded_cols.replace(to_replace = -1, value = np.nan)\n\n # Generate the dict that maps categorical features to numerical\n encoding_dict = {col: 
{cat: n for n, cat in enumerate(cols[col].astype('category'). \\\n cat.categories)} for col in cat_cols}\n self._encoding_dict = encoding_dict\n\n return encoded_cols\n\n\n def _features_mapping(self, df):\n # Encode the categorical features with pre saved encoding dict\n cat_cols = list(filter(lambda x: df[x].dtype == 'object', df.columns))\n df_temp = df.copy()\n for col in cat_cols:\n df_temp[col] = df[col].map(self._encoding_dict[col])\n df = df_temp\n return df\n\n\n def _build_classifier(self, n_estimators, max_depth, oob_score, max_features):\n clf = RandomForestClassifier(\n n_estimators=n_estimators,\n max_depth=max_depth,\n oob_score=oob_score,\n max_features=max_features\n )\n return clf\n\n\nif __name__ == '__main__':\n test_model_class(\n model_file_path=__file__,\n model_class='RandomForestClf',\n task='TABULAR_CLASSIFICATION',\n dependencies={\n ModelDependency.SCIKIT_LEARN: '0.20.0'\n },\n train_dataset_path='data/titanic_train.csv',\n val_dataset_path='data/titanic_test.csv',\n train_args={\n 'features': ['Pclass', 'Sex', 'Age'],\n 'target':'Survived'\n },\n queries=[\n { 'Pclass': 1, 'Sex': 'female', 'Age': 16.0 }\n ]\n )\n", "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\nfrom tensorflow.python.training import moving_averages\nimport os\nimport math\nimport json\nimport random\nfrom datetime import datetime\nfrom collections import namedtuple\nimport numpy as np\nimport argparse\n\nfrom singa_auto.constants import ModelDependency\nfrom singa_auto.model import utils, BaseModel, IntegerKnob, CategoricalKnob, FloatKnob, \\\n FixedKnob, ArchKnob, KnobValue, PolicyKnob\nfrom singa_auto.model.dev import test_model_class\n\n_Model = namedtuple('_Model',\n ['train_dataset_init_op', 'pred_dataset_init_op',\n 'train_op', 'summary_op', 'pred_probs', 'pred_corrects',\n 'train_corrects', 'step', 'vars_assign_op', 'ph', 'var_phs'])\n_ModelMemo = namedtuple('_ModelMemo',\n ['train_params', 'use_dynamic_arch', 'knobs',\n 'model', 'graph', 'sess', 'saver', 'monitored_values'])\n_ModelPlaceholder = namedtuple('_ModelPlaceholder',\n ['train_images', 'train_classes', 'pred_images',\n 'pred_classes', 'normal_arch', 'reduction_arch'])\n\nOPS = [0, 1, 2, 3, 4]\nCELL_NUM_BLOCKS = 5 # No. 
of blocks in a cell\nTF_COLLECTION_MONITORED = 'MONITORED'\n\nclass TfEnas(BaseModel):\n '''\n Implements the child model of cell-based \"Efficient Neural Architecture Search via Parameter Sharing\" (ENAS)\n for IMAGE_CLASSIFICATION, configured for *architecture tuning with ENAS* .\n\n Original paper: https://arxiv.org/abs/1802.03268\n Implementation is with credits to https://github.com/melodyguan/enas\n '''\n # Memoise across trials to speed up training\n _datasets_memo = {} # { <dataset_path> -> <dataset> }\n _model_memo = None # of class `_MemoModel`\n _loaded_tf_vars_id_memo = None # ID of TF vars loaded\n _loaded_train_dataset_memo = None # Train dataset <dataset_path> loaded into the graph\n _loaded_pred_dataset_memo = None # Predict dataset <dataset_path> loaded into the graph\n\n @staticmethod\n def get_knob_config():\n return {\n 'cell_archs': TfEnas.make_arch_knob(),\n 'max_image_size': FixedKnob(32),\n 'epochs': FixedKnob(310), # Total no. of epochs during a standard train\n 'batch_size': FixedKnob(128),\n 'learning_rate': FixedKnob(0.05),\n 'initial_block_ch': FixedKnob(36),\n 'stem_ch_mul': FixedKnob(3),\n 'reg_decay': FixedKnob(4e-4),\n 'dropout_keep_prob': FixedKnob(0.8),\n 'opt_momentum': FixedKnob(0.9),\n 'use_sgdr': FixedKnob(True),\n 'sgdr_alpha': FixedKnob(0.002),\n 'sgdr_decay_epochs': FixedKnob(10),\n 'sgdr_t_mul': FixedKnob(2),\n 'num_layers': FixedKnob(15),\n 'aux_loss_mul': FixedKnob(0.4),\n 'drop_path_keep_prob': FixedKnob(0.6),\n 'drop_path_decay_epochs': FixedKnob(310),\n 'cutout_size': FixedKnob(0),\n 'grad_clip_norm': FixedKnob(0),\n 'use_aux_head': FixedKnob(False),\n 'share_params': PolicyKnob('SHARE_PARAMS'),\n\n # Affects whether model constructed is a scaled-down version with fewer layers\n 'downscale': PolicyKnob('DOWNSCALE'),\n 'enas_num_layers': FixedKnob(6),\n 'enas_initial_block_ch': FixedKnob(20),\n 'enas_dropout_keep_prob': FixedKnob(0.9),\n 'enas_sgdr_alpha': FixedKnob(0.01),\n 'enas_drop_path_keep_prob': FixedKnob(0.9),\n 'enas_drop_path_decay_epochs': FixedKnob(150),\n\n # Affects whether training is shortened using a reduced no. of epochs\n 'quick_train': PolicyKnob('EARLY_STOP'),\n 'enas_epochs': FixedKnob(1),\n\n # Affects whether training is skipped\n 'skip_train': PolicyKnob('SKIP_TRAIN'),\n\n # Affects whether evaluation is done on only a batch of the validation dataset\n 'quick_eval': PolicyKnob('QUICK_EVAL')\n }\n\n @staticmethod\n def make_arch_knob():\n # Make knob values for ops\n # Operations across blocks are considered identical for the purposes of architecture search\n # E.g. operation \"conv3x3\" with op code 0 has the same meaning across blocks\n ops = [KnobValue(i) for i in OPS]\n\n # Build list of knobs for ``cell_archs``\n cell_archs = []\n for c in range(2): # 1 for normal cell, 1 for reduction cell\n\n # Make knob values for inputs\n # Input indices across blocks in the same cell are considered identical for the purposes of architecture search\n # E.g. 
input from block 0 with index 2 has the same meaning across blocks in the same cell\n input_knob_values = [KnobValue(i) for i in range(CELL_NUM_BLOCKS + 2)]\n\n # For each block\n for b in range(CELL_NUM_BLOCKS):\n # Input 1 & input 2 can only can take input from prev prev cell, prev cell, or one of prev blocks\n inputs = input_knob_values[:(b + 2)]\n cell_archs.extend([inputs, ops, inputs, ops])\n\n return ArchKnob(cell_archs)\n\n def __init__(self, **knobs):\n super().__init__(**knobs)\n self._model = None\n self._graph = None\n self._sess = None\n self._saver = None\n self._monitored_values = None\n self._train_params = None\n self._knobs = self._process_knobs(knobs)\n\n def train(self, dataset_path, shared_params=None):\n knobs = self._knobs\n\n # Load dataset\n (images, classes, self._train_params) = self._maybe_load_dataset(dataset_path, **knobs)\n\n # Build model\n (self._model, self._graph, self._sess, self._saver,\n self._monitored_values) = self._maybe_build_model(**knobs)\n\n if not knobs['skip_train']:\n # Maybe load shared variables, then train model\n with self._graph.as_default():\n if knobs['share_params'] and shared_params is not None:\n self._maybe_load_tf_vars(shared_params)\n self._train_model(images, classes, dataset_path=dataset_path, **knobs)\n\n def evaluate(self, dataset_path):\n (images, classes, _) = self._maybe_load_dataset(dataset_path, train_params=self._train_params, **self._knobs)\n\n with self._graph.as_default():\n acc = self._evaluate_model(images, classes, dataset_path=dataset_path, **self._knobs)\n\n return acc\n\n def predict(self, queries):\n image_size = self._train_params['image_size']\n images = utils.dataset.transform_images(queries, image_size=image_size, mode='RGB')\n with self._graph.as_default():\n probs = self._predict_with_model(images, **self._knobs)\n return probs.tolist()\n\n def dump_parameters(self):\n params = {}\n\n # Add train params\n params['train_params'] = json.dumps(self._train_params)\n\n # Add model parameters\n with self._graph.as_default():\n tf_vars = tf.global_variables()\n values = self._sess.run(tf_vars)\n\n for (tf_var, value) in zip(tf_vars, values):\n params[tf_var.name] = np.asarray(value)\n\n # Add an ID for diffing\n vars_id = np.random.rand()\n params['vars_id'] = vars_id\n\n # Memo ID\n TfEnas._loaded_tf_vars_id_memo = vars_id\n\n return params\n\n def load_parameters(self, params):\n # Add train params\n self._train_params = json.loads(params['train_params'])\n\n # Build model\n (self._model, self._graph, self._sess,\n self._saver, self._monitored_values) = self._maybe_build_model(**self._knobs)\n\n # Add model parameters\n with self._graph.as_default():\n self._maybe_load_tf_vars(params)\n\n @staticmethod\n def teardown():\n if TfEnas._model_memo is not None:\n TfEnas._model_memo.sess.close()\n TfEnas._model_memo = None\n\n ####################################\n # Memoized methods\n ####################################\n\n def _maybe_load_dataset(self, dataset_path, train_params=None, **knobs):\n # Try to use memoized dataset\n if dataset_path in TfEnas._datasets_memo:\n dataset = TfEnas._datasets_memo[dataset_path]\n return dataset\n\n dataset = self._load_dataset(dataset_path, train_params, **knobs)\n TfEnas._datasets_memo[dataset_path] = dataset\n return dataset\n\n def _maybe_load_tf_vars(self, params):\n # If same TF vars has been loaded in previous trial, don't bother loading again\n vars_id = params['vars_id']\n\n if TfEnas._loaded_tf_vars_id_memo == vars_id:\n return # Skipping loading of vars\n\n 
self._load_tf_vars(params)\n\n # Memo ID\n TfEnas._loaded_tf_vars_id_memo = vars_id\n\n def _maybe_feed_dataset_to_model(self, images, classes=None, dataset_path=None, is_train=False):\n memo = TfEnas._loaded_train_dataset_memo if is_train else TfEnas._loaded_pred_dataset_memo\n if dataset_path is None or memo != dataset_path:\n # To load new dataset\n self._feed_dataset_to_model(images, classes, is_train=is_train)\n if is_train:\n TfEnas._loaded_train_dataset_memo = dataset_path\n else:\n TfEnas._loaded_pred_dataset_memo = dataset_path\n else:\n # Otherwise, dataset has previously been loaded, so do nothing\n pass\n\n def _maybe_build_model(self, **knobs):\n train_params = self._train_params\n use_dynamic_arch = knobs['downscale']\n\n # Use memoized model when possible\n if not self._if_model_same(TfEnas._model_memo, knobs, train_params, use_dynamic_arch):\n\n (model, graph, sess, saver, monitored_values) = \\\n self._build_model(**knobs)\n\n TfEnas._model_memo = _ModelMemo(\n train_params, use_dynamic_arch, knobs,\n model, graph, sess, saver, monitored_values\n )\n\n model_memo = TfEnas._model_memo\n return (model_memo.model, model_memo.graph, model_memo.sess, model_memo.saver,\n model_memo.monitored_values)\n\n def _if_model_same(self, model_memo, knobs, train_params, use_dynamic_arch):\n if model_memo is None:\n return False\n\n # Must have the same `train_params` & `use_dynamic_arch`\n if (train_params, use_dynamic_arch) != (model_memo.train_params, model_memo.use_dynamic_arch):\n return False\n\n # Knobs must be the same except for some that doesn't affect model construction\n # If arch is dynamic, knobs can differ by `cell_archs`\n ignored_knobs = ['skip_train', 'quick_train', 'quick_eval', 'downscale', 'epochs']\n if use_dynamic_arch:\n ignored_knobs.append('cell_archs')\n\n for (name, value) in knobs.items():\n if name not in ignored_knobs and value != model_memo.knobs.get(name):\n utils.logger.log('Detected that knob \"{}\" is different!'.format(name))\n return False\n\n return True\n\n ####################################\n # Private methods\n ####################################\n\n def _process_knobs(self, knobs):\n # Activates dynamic architecture with fewer layers\n if knobs['downscale']:\n knobs = {\n **knobs,\n 'num_layers': knobs['enas_num_layers'],\n 'initial_block_ch': knobs['enas_initial_block_ch'],\n 'dropout_keep_prob': knobs['enas_dropout_keep_prob'],\n 'sgdr_alpha': knobs['enas_sgdr_alpha'],\n 'drop_path_keep_prob': knobs['enas_drop_path_keep_prob'],\n 'drop_path_decay_epochs': knobs['enas_drop_path_decay_epochs'],\n }\n\n # Activates mode where training finishes with fewer epochs\n if knobs['quick_train']:\n knobs = {\n **knobs,\n 'epochs': knobs['enas_epochs']\n }\n\n return knobs\n\n def _load_dataset(self, dataset_path, train_params=None, **knobs):\n max_image_size = knobs['max_image_size']\n image_size = train_params['image_size'] if train_params is not None else max_image_size\n\n utils.logger.log('Loading dataset...')\n dataset = utils.dataset.load_dataset_of_image_files(dataset_path, max_image_size=image_size,\n mode='RGB')\n (images, classes) = zip(*[(image, image_class) for (image, image_class) in dataset])\n norm_mean = np.mean(images, axis=(0, 1, 2)).tolist()\n norm_std = np.std(images, axis=(0, 1, 2)).tolist()\n\n train_params = {\n 'N': len(images),\n 'image_size': dataset.image_size,\n 'K': dataset.classes,\n 'norm_mean': norm_mean,\n 'norm_std': norm_std\n }\n\n return (images, classes, train_params)\n\n def _build_model(self, **knobs):\n 
use_dynamic_arch = knobs['downscale']\n\n # Create graph\n graph = tf.Graph()\n\n with graph.as_default():\n # Define input placeholders to graph\n ph = self._make_placeholders()\n\n # Use fixed archs if specified, otherwise use placeholders'\n (normal_arch, reduction_arch) = self._get_fixed_cell_archs(**knobs)\n normal_arch = normal_arch if not use_dynamic_arch else ph.normal_arch\n reduction_arch = reduction_arch if not use_dynamic_arch else ph.reduction_arch\n\n # Initialize steps variable\n step = self._make_var('step', (), dtype=tf.int32, trainable=False, initializer=tf.initializers.constant(0))\n\n # For train dataset, preprocess & do inference\n utils.logger.log('Building model for training...')\n (train_X, train_classes, train_dataset_init_op) = \\\n self._preprocess(ph.train_images, ph.train_classes, is_train=True, **knobs)\n (train_logits, train_aux_logits_list) = self._forward(train_X, step, normal_arch, reduction_arch, is_train=True, **knobs)\n\n # Compute training loss\n total_loss = self._compute_loss(train_logits, train_aux_logits_list, train_classes, **knobs)\n\n # Optimize training loss\n train_op = self._optimize(total_loss, step, **knobs)\n\n # Compute predictions\n (_, train_corrects) = self._compute_predictions(train_logits, train_classes)\n\n # For pred dataset, preprocess & do inference\n utils.logger.log('Building model for predictions...')\n (pred_X, pred_classes, pred_dataset_init_op) = \\\n self._preprocess(ph.pred_images, ph.pred_classes, is_train=False, **knobs)\n (pred_logits, _) = self._forward(pred_X, step, normal_arch, reduction_arch, is_train=False,\n **knobs)\n\n # Compute predictions\n (pred_probs, pred_corrects) = self._compute_predictions(pred_logits, pred_classes)\n\n # Count model parameters\n model_params_count = self._count_model_parameters()\n\n # Monitor values\n (summary_op, monitored_values) = self._add_monitoring_of_values()\n\n # Add saver\n tf_vars = tf.global_variables()\n saver = tf.train.Saver(tf_vars)\n\n # Allow loading of model variables\n (var_phs, vars_assign_op) = self._add_vars_assign_op(tf_vars)\n\n model = _Model(train_dataset_init_op, pred_dataset_init_op, train_op, summary_op,\n pred_probs, pred_corrects, train_corrects, step, vars_assign_op, ph, var_phs)\n\n # Make session\n sess = self._make_session()\n self._init_session(sess, model)\n\n return (model, graph, sess, saver, monitored_values)\n\n def _load_tf_vars(self, params):\n m = self._model\n\n utils.logger.log('Loading TF vars...')\n\n tf_vars = tf.global_variables()\n values = self._sess.run(tf_vars) # Get current values for vars\n\n # Build feed dict for op for loading vars\n # For each var, use current value of param in session if not in params\n var_feeddict = {\n m.var_phs[tf_var.name]: params[tf_var.name]\n if tf_var.name in params else values[i]\n for (i, tf_var) in enumerate(tf_vars)\n }\n\n self._sess.run(m.vars_assign_op, feed_dict=var_feeddict)\n\n def _make_placeholders(self):\n w = self._train_params['image_size']\n h = self._train_params['image_size']\n in_ch = 3 # Num channels of input images\n\n train_images_ph = tf.placeholder(tf.int32, name='train_images_ph', shape=(None, w, h, in_ch)) # Train images\n pred_images_ph = tf.placeholder(tf.int32, name='pred_images_ph', shape=(None, w, h, in_ch)) # Predict images\n train_classes_ph = tf.placeholder(tf.int32, name='train_classes_ph', shape=(None,)) # Train classes\n pred_classes_ph = tf.placeholder(tf.int32, name='pred_classes_ph', shape=(None,)) # Predict classes\n normal_arch_ph = tf.placeholder(tf.int32, 
name='normal_arch_ph', shape=(CELL_NUM_BLOCKS, 4))\n reduction_arch_ph = tf.placeholder(tf.int32, name='reduction_arch_ph', shape=(CELL_NUM_BLOCKS, 4))\n\n return _ModelPlaceholder(train_images_ph, train_classes_ph, pred_images_ph, pred_classes_ph,\n normal_arch_ph, reduction_arch_ph)\n\n def _forward(self, X, step, normal_arch, reduction_arch, is_train=False, **knobs):\n K = self._train_params['K'] # No. of classes\n in_ch = 3 # Num channels of input images\n w = self._train_params['image_size'] # Initial input width\n h = self._train_params['image_size'] # Initial input height\n dropout_keep_prob = knobs['dropout_keep_prob']\n use_dynamic_arch = knobs['downscale']\n L = knobs['num_layers'] # Total number of layers\n initial_block_ch = knobs['initial_block_ch'] # Initial no. of channels for operations in block\n stem_ch_mul = knobs['stem_ch_mul'] # No. of channels for stem convolution as multiple of initial block channels\n use_aux_head = knobs['use_aux_head'] # Whether to use auxiliary head\n stem_ch = initial_block_ch * stem_ch_mul\n\n # Layers with reduction cells (otherwise, normal cells)\n reduction_layers = [L // 3, L // 3 * 2 + 1]\n\n # Layers with auxiliary heads\n # Aux heads speed up training of good feature representations early in the network\n # Add aux heads only if enabled and the input width is divisible by 16 (i.e. can be halved enough times)\n aux_head_layers = []\n if use_aux_head and w % (2 << 3) == 0:\n aux_head_layers.append(reduction_layers[-1] + 1)\n\n with tf.variable_scope('model', reuse=(not is_train)):\n\n # \"Stem\" convolution layer (layer -1)\n with tf.variable_scope('layer_stem'):\n X = self._do_conv(X, w, h, in_ch, stem_ch, filter_size=3, no_relu=True, is_train=is_train) # 3x3 convolution\n stem = (X, w, h, stem_ch)\n\n # Core layers of cells\n block_ch = initial_block_ch\n aux_logits_list = [] # Stores list of logits from aux heads\n layers = [stem, stem] # Stores previous layers. 
layers[i] = (<layer (i + 1)>, <width>, <height>, <channels>)\n for l in range(L + 2):\n utils.logger.log('Building layer {}...'.format(l))\n\n with tf.variable_scope('layer_{}'.format(l)):\n layers_ratio = (l + 1) / (L + 2)\n drop_path_keep_prob = self._get_drop_path_keep_prob(layers_ratio, step, is_train, **knobs)\n\n # Either add a reduction cell or normal cell\n if l in reduction_layers:\n block_ch *= 2\n w >>= 1\n h >>= 1\n\n with tf.variable_scope('reduction_cell'):\n if use_dynamic_arch:\n self._add_dynamic_cell(reduction_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train)\n else:\n self._add_static_cell(reduction_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train,\n is_reduction=True)\n else:\n with tf.variable_scope('normal_cell'):\n if use_dynamic_arch:\n self._add_dynamic_cell(normal_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train)\n else:\n self._add_static_cell(normal_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train)\n\n # Maybe add auxiliary heads\n if l in aux_head_layers:\n with tf.variable_scope('aux_head'):\n aux_logits = self._add_aux_head(*layers[-1], K, is_train)\n aux_logits_list.append(aux_logits)\n\n # Global average pooling\n (X, w, h, ch) = layers[-1]\n X = self._add_global_avg_pool(X, w, h, ch)\n\n # Add dropout if training\n if is_train:\n X = tf.nn.dropout(X, dropout_keep_prob)\n\n # Compute logits from X\n with tf.variable_scope('fully_connected'):\n logits = self._add_fully_connected(X, (ch,), K)\n\n return (logits, aux_logits_list)\n\n def _optimize(self, loss, step, **knobs):\n opt_momentum = knobs['opt_momentum'] # Momentum optimizer momentum\n grad_clip_norm = knobs['grad_clip_norm'] # L2 norm to clip gradients by\n\n # Compute learning rate, gradients\n tf_trainable_vars = tf.trainable_variables()\n lr = self._get_learning_rate(step, **knobs)\n grads = tf.gradients(loss, tf_trainable_vars)\n self._mark_for_monitoring('lr', lr)\n\n # Clip gradients\n if grad_clip_norm > 0:\n grads = [tf.clip_by_norm(x, grad_clip_norm) for x in grads]\n\n # Init optimizer\n opt = tf.train.MomentumOptimizer(lr, opt_momentum, use_locking=True, use_nesterov=True)\n train_op = opt.apply_gradients(zip(grads, tf_trainable_vars), global_step=step)\n\n return train_op\n\n def _preprocess(self, images, classes, is_train=False, **knobs):\n batch_size = knobs['batch_size']\n cutout_size = knobs['cutout_size']\n image_norm_mean = self._train_params['norm_mean']\n image_norm_std = self._train_params['norm_std']\n w = self._train_params['image_size']\n h = self._train_params['image_size']\n in_ch = 3 # Num channels of input images\n\n def _prepare(images, classes):\n # Bulk preprocessing of images\n images = tf.cast(images, tf.float32)\n images = images / 255 # Convert to [0, 1]\n images = (images - image_norm_mean) / image_norm_std # Normalize (assumes norm_mean/norm_std were computed on [0, 1]-scaled images)\n return (images, classes)\n\n # Prepare train dataset\n def _preprocess_train(image, clazz):\n # Do random crop + horizontal flip for each train image\n image = tf.pad(image, [[4, 4], [4, 4], [0, 0]])\n image = tf.image.random_crop(image, (w, h, in_ch))\n image = tf.image.random_flip_left_right(image)\n\n if cutout_size > 0:\n image = self._do_cutout(image, w, h, cutout_size)\n\n return (image, clazz)\n\n (images, classes) = _prepare(images, classes)\n dataset = tf.data.Dataset.from_tensor_slices((images, classes)).repeat()\n if is_train:\n dataset = dataset.apply(tf.data.experimental.map_and_batch(map_func=_preprocess_train, batch_size=batch_size))\n else:\n dataset = dataset.batch(batch_size)\n 
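# NOTE: tf.data.experimental.map_and_batch fuses the per-image augmentation map with batching for throughput in TF 1.x; later 1.x releases deprecate it in favour of an equivalent dataset.map(_preprocess_train, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(batch_size) chain.\n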
dataset_itr = dataset.make_initializable_iterator()\n (images_batch, classes_batch) = dataset_itr.get_next()\n dataset_init_op = dataset_itr.initializer\n\n return (images_batch, classes_batch, dataset_init_op)\n\n def _get_drop_path_keep_prob(self, layers_ratio, step, is_train=False, **knobs):\n batch_size = knobs['batch_size']\n drop_path_keep_prob = knobs['drop_path_keep_prob'] # Base keep prob for drop path\n drop_path_decay_epochs = knobs['drop_path_decay_epochs']\n N = self._train_params['N']\n\n # Only drop path during training\n keep_prob = tf.constant(1, dtype=tf.float32)\n if is_train:\n # Decrease keep prob deeper into network\n keep_prob = 1 - layers_ratio * (1 - drop_path_keep_prob)\n\n # Decrease keep prob with increasing steps\n steps_per_epoch = math.ceil(N / batch_size)\n steps_ratio = tf.minimum(((step + 1) / steps_per_epoch) / drop_path_decay_epochs, 1)\n keep_prob = 1 - steps_ratio * (1 - keep_prob)\n keep_prob = tf.cast(keep_prob, tf.float32)\n\n # Monitor last layer's keep prob\n if layers_ratio == 1:\n self._mark_for_monitoring('drop_path_keep_prob', keep_prob)\n\n return keep_prob\n\n def _get_learning_rate(self, step, **knobs):\n N = self._train_params['N']\n batch_size = knobs['batch_size']\n lr = knobs['learning_rate'] # Learning rate\n use_sgdr = knobs['use_sgdr']\n sgdr_decay_epochs = knobs['sgdr_decay_epochs']\n sgdr_alpha = knobs['sgdr_alpha']\n sgdr_t_mul = knobs['sgdr_t_mul']\n\n # Compute epoch from step\n steps_per_epoch = math.ceil(N / batch_size)\n epoch = step // steps_per_epoch\n\n if use_sgdr:\n # Apply Stochastic Gradient Descent with Warm Restarts (SGDR)\n lr = tf.train.cosine_decay_restarts(lr, epoch, sgdr_decay_epochs, t_mul=sgdr_t_mul, alpha=sgdr_alpha)\n\n return lr\n\n def _init_session(self, sess, model):\n w = self._train_params['image_size']\n h = self._train_params['image_size']\n in_ch = 3\n m = model\n\n # Do initialization of all variables\n sess.run(tf.global_variables_initializer())\n\n # Load datasets with defaults\n sess.run([m.train_dataset_init_op, m.pred_dataset_init_op], feed_dict={\n m.ph.train_images: np.zeros((1, w, h, in_ch)),\n m.ph.train_classes: np.zeros((1,)),\n m.ph.pred_images: np.zeros((1, w, h, in_ch)),\n m.ph.pred_classes: np.zeros((1,))\n })\n\n def _make_session(self):\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n return sess\n\n def _feed_dataset_to_model(self, images, classes=None, is_train=False):\n m = self._model\n utils.logger.log('Feeding dataset to model...')\n\n # Mock classes if required\n classes = classes or [0 for _ in range(len(images))]\n\n if is_train:\n self._sess.run(m.train_dataset_init_op, feed_dict={\n m.ph.train_images: images,\n m.ph.train_classes: classes\n })\n else:\n self._sess.run(m.pred_dataset_init_op, feed_dict={\n m.ph.pred_images: images,\n m.ph.pred_classes: classes,\n })\n\n def _train_model(self, images, classes, dataset_path=None, **knobs):\n num_epochs = knobs['epochs']\n m = self._model\n N = len(images)\n\n self._maybe_feed_dataset_to_model(images, classes, dataset_path=dataset_path, is_train=True)\n\n # Define plots\n # TODO: Investigate bug where plots for acc and loss are always 1 and 0\n utils.logger.define_plot('Train accuracy over Epochs', ['mean_acc'], 'epoch')\n for (name, _) in self._monitored_values.items():\n utils.logger.define_plot('\"{}\" Over Time'.format(name), [name])\n\n log_condition = TimedRepeatCondition()\n for epoch in range(num_epochs):\n 
utils.logger.log('Running epoch {}...'.format(epoch))\n\n corrects = []\n itr = self._get_dataset_iterator(N,[m.train_op, m.train_corrects, m.step, m.pred_probs,\n *self._monitored_values.values()], **knobs)\n for (_, batch_corrects, batch_steps, batch_probs, *values) in itr:\n # To track mean batch accuracy\n corrects.extend(batch_corrects)\n\n # Periodically, log monitored values\n if log_condition.check():\n utils.logger.log(step=batch_steps,\n **{ name: v for (name, v) in zip(self._monitored_values.keys(), values) })\n\n # Log mean batch accuracy and epoch\n mean_acc = np.mean(corrects)\n utils.logger.log(epoch=epoch, mean_acc=mean_acc)\n\n def _evaluate_model(self, images, classes, dataset_path=None, **knobs):\n batch_size = self._knobs['batch_size']\n m = self._model\n N = batch_size if self._knobs['quick_eval'] else len(images)\n\n self._maybe_feed_dataset_to_model(images, classes, dataset_path=dataset_path)\n\n corrects = []\n itr = self._get_dataset_iterator(N, [m.pred_corrects], **knobs)\n for (batch_corrects,) in itr:\n corrects.extend(batch_corrects)\n\n acc = np.mean(corrects)\n\n return acc\n\n def _predict_with_model(self, images, **knobs):\n m = self._model\n N = len(images)\n\n self._maybe_feed_dataset_to_model(images)\n\n all_probs = []\n itr = self._get_dataset_iterator(N, [m.pred_probs], **knobs)\n for (batch_probs,) in itr:\n all_probs.extend(batch_probs)\n\n all_probs = np.asarray(all_probs)\n\n return all_probs\n\n def _get_dataset_iterator(self, N, run_ops, **knobs):\n batch_size = knobs['batch_size']\n steps_per_epoch = math.ceil(N / batch_size)\n m = self._model\n\n (normal_arch, reduction_arch) = self._get_fixed_cell_archs(**knobs)\n feed_dict = {\n m.ph.normal_arch: normal_arch,\n m.ph.reduction_arch: reduction_arch\n }\n\n for itr_step in range(steps_per_epoch):\n results = self._sess.run(run_ops, feed_dict=feed_dict)\n yield results\n\n def _get_fixed_cell_archs(self, **knobs):\n cell_archs = knobs['cell_archs']\n b = CELL_NUM_BLOCKS\n normal_arch = [cell_archs[(4 * i):(4 * i + 4)] for i in range(b)]\n reduction_arch = [cell_archs[(4 * i):(4 * i + 4)] for i in range(b, b + b)]\n return (normal_arch, reduction_arch)\n\n def _add_aux_head(self, X, in_w, in_h, in_ch, K, is_train):\n pool_ksize = 5\n pool_stride = 3\n conv_ch = 128\n global_conv_ch = 768\n\n w = in_w\n h = in_h\n ch = in_ch\n\n # Pool\n with tf.variable_scope('pool'):\n X = tf.nn.relu(X)\n X = tf.nn.avg_pool(X, ksize=(1, pool_ksize, pool_ksize, 1), strides=(1, pool_stride, pool_stride, 1),\n padding='VALID')\n w //= pool_stride\n h //= pool_stride\n\n # Conv 1x1\n with tf.variable_scope('conv_0'):\n X = self._do_conv(X, w, h, ch, conv_ch, filter_size=1, no_reg=True, is_train=is_train)\n ch = conv_ch\n\n # Global conv\n with tf.variable_scope('conv_1'):\n X = self._do_conv(X, w, h, ch, global_conv_ch, filter_size=w, no_reg=True, is_train=is_train)\n ch = global_conv_ch\n\n # Global pooling\n X = self._add_global_avg_pool(X, w, h, ch)\n\n # Fully connected\n with tf.variable_scope('fully_connected'):\n aux_logits = self._add_fully_connected(X, (ch,), K, no_reg=True)\n\n return aux_logits\n\n def _compute_predictions(self, logits, classes):\n probs = tf.nn.softmax(logits)\n preds = tf.argmax(logits, axis=1, output_type=tf.int32)\n corrects = tf.equal(preds, classes)\n return (probs, corrects)\n\n def _compute_loss(self, logits, aux_logits_list, classes, **knobs):\n reg_decay = knobs['reg_decay']\n aux_loss_mul = knobs['aux_loss_mul'] # Multiplier for auxiliary loss\n\n # Compute sparse softmax cross 
entropy loss from logits & labels\n log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=classes)\n loss = tf.reduce_mean(log_probs)\n self._mark_for_monitoring('loss', loss)\n\n # Add regularization loss\n reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n reg_loss = reg_decay * tf.add_n(reg_losses)\n self._mark_for_monitoring('reg_loss', reg_loss)\n\n # Add loss from auxiliary logits\n aux_loss = tf.constant(0, dtype=tf.float32)\n for aux_logits in aux_logits_list:\n log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=aux_logits, labels=classes)\n aux_loss += aux_loss_mul * tf.reduce_mean(log_probs)\n\n total_loss = loss + reg_loss + aux_loss\n\n return total_loss\n\n def _add_global_avg_pool(self, X, in_w, in_h, in_ch):\n X = tf.nn.relu(X)\n X = tf.reduce_mean(X, (1, 2))\n X = tf.reshape(X, (-1, in_ch)) # Sanity shape check\n return X\n\n def _count_model_parameters(self):\n tf_trainable_vars = tf.trainable_variables()\n num_params = 0\n # utils.logger.log('Model parameters:')\n for var in tf_trainable_vars:\n # utils.logger.log(str(var))\n num_params += np.prod([dim.value for dim in var.get_shape()])\n\n utils.logger.log('Model has {} parameters'.format(num_params))\n return num_params\n\n def _add_vars_assign_op(self, vars):\n var_phs = {\n tf_var.name: tf.placeholder(dtype=tf_var.dtype, shape=tf_var.shape)\n for tf_var in vars\n }\n vars_assign_op = tf.group([\n tf.assign(tf_var, ph)\n for (tf_var, ph) in zip(vars, var_phs.values())\n ], name='vars_assign_op')\n\n return (var_phs, vars_assign_op)\n\n ####################################\n # Cells\n ####################################\n\n def _add_dynamic_cell(self, cell_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train=False):\n b = CELL_NUM_BLOCKS\n\n # Downsample inputs to have same dimensions as blocks\n with tf.variable_scope('layer_-1_calibrate'):\n layers[-1] = (self._calibrate(*layers[-1], w, h, block_ch, is_train=is_train), w, h, block_ch)\n\n with tf.variable_scope('layer_-2_calibrate'):\n layers[-2] = (self._calibrate(*layers[-2], w, h, block_ch, is_train=is_train), w, h, block_ch)\n\n cell_inputs = [layers[-2][0] if len(layers) > 1 else layers[-1][0], layers[-1][0]]\n blocks = []\n for bi in range(b):\n with tf.variable_scope('block_{}'.format(bi)):\n idx1 = cell_arch[bi][0]\n op1 = cell_arch[bi][1]\n idx2 = cell_arch[bi][2]\n op2 = cell_arch[bi][3]\n\n with tf.variable_scope('X1'):\n X1 = self._add_op_dynamic(cell_inputs, blocks, idx1, op1, w, h, block_ch, is_train=is_train)\n X1 = self._add_drop_path(X1, drop_path_keep_prob)\n\n with tf.variable_scope('X2'):\n X2 = self._add_op_dynamic(cell_inputs, blocks, idx2, op2, w, h, block_ch, is_train=is_train)\n X2 = self._add_drop_path(X2, drop_path_keep_prob)\n\n X = tf.add_n([X1, X2])\n\n blocks.append(X)\n\n (X, comb_ch) = self._combine_cell_blocks_dynamic(cell_inputs, blocks, cell_arch, w, h, block_ch, is_train)\n\n X = tf.reshape(X, (-1, w, h, comb_ch)) # Sanity shape check\n\n layers.append((X, w, h, comb_ch))\n\n def _add_static_cell(self, cell_arch, layers, w, h, block_ch, drop_path_keep_prob, is_train=False, is_reduction=False):\n b = CELL_NUM_BLOCKS\n\n # Calibrate inputs as necessary to last input layer's dimensions and add them to hidden states\n cell_inputs = [layers[-2] if len(layers) > 1 else layers[-1], layers[-1]]\n (_, w_inp_last, h_inp_last, _) = cell_inputs[-1]\n for (i, (inp, w_inp, h_inp, ch_inp)) in enumerate(cell_inputs):\n with tf.variable_scope('input_{}_calibrate'.format(i)):\n inp 
= self._calibrate(inp, w_inp, h_inp, ch_inp, w_inp_last, h_inp_last, block_ch, is_train=is_train)\n\n # Apply conv 1x1 on last input\n if i == len(cell_inputs) - 1:\n with tf.variable_scope('input_{}_conv'.format(i)):\n inp = self._do_conv(inp, w_inp_last, h_inp_last, block_ch, block_ch, is_train=is_train)\n\n cell_inputs[i] = inp\n\n blocks = []\n for bi in range(b):\n with tf.variable_scope('block_{}'.format(bi)):\n idx1 = cell_arch[bi][0]\n op1 = cell_arch[bi][1]\n idx2 = cell_arch[bi][2]\n op2 = cell_arch[bi][3]\n\n with tf.variable_scope('X1'):\n X1 = self._add_op(cell_inputs, blocks, idx1, op1, w, h, block_ch,\n is_reduction=is_reduction, is_train=is_train)\n X1 = self._add_drop_path(X1, drop_path_keep_prob)\n\n with tf.variable_scope('X2'):\n X2 = self._add_op(cell_inputs, blocks, idx2, op2, w, h, block_ch,\n is_reduction=is_reduction, is_train=is_train)\n X2 = self._add_drop_path(X2, drop_path_keep_prob)\n\n X = tf.add_n([X1, X2])\n\n blocks.append(X)\n\n (X, comb_ch) = self._combine_cell_blocks(cell_inputs, blocks, cell_arch, w, h, block_ch, is_train)\n\n X = tf.reshape(X, (-1, w, h, comb_ch)) # Sanity shape check\n\n layers.append((X, w, h, comb_ch))\n\n def _combine_cell_blocks(self, cell_inputs, blocks, cell_arch, w, h, block_ch, is_train=False):\n # Count usage of inputs\n input_use_counts = [0] * len(cell_inputs + blocks)\n for (idx1, _, idx2, _) in cell_arch:\n input_use_counts[idx1] += 1\n input_use_counts[idx2] += 1\n\n # Concat only unused blocks\n with tf.variable_scope('combine'):\n block_use_counts = input_use_counts[len(cell_inputs):]\n out_blocks = [block for (block, use_count) in zip(blocks, block_use_counts) if use_count == 0]\n comb_ch = len(out_blocks) * block_ch\n X = tf.concat(out_blocks, axis=3)\n\n return (X, comb_ch)\n\n def _combine_cell_blocks_dynamic(self, cell_inputs, blocks, cell_arch, w, h, block_ch, is_train=False):\n ni = len(cell_inputs + blocks)\n b = len(blocks)\n\n # Count usage of inputs\n block_uses = []\n for bi in range(b):\n idx1 = cell_arch[bi][0]\n idx2 = cell_arch[bi][2]\n block_use = tf.one_hot(idx1, ni, dtype=tf.int32) + tf.one_hot(idx2, ni, dtype=tf.int32)\n block_uses.append(block_use)\n block_uses = tf.add_n(block_uses)\n unused_indices = tf.reshape(tf.cast(tf.where(tf.equal(block_uses, 0)), tf.int32), [-1])\n num_out_blocks = tf.size(unused_indices)\n\n # Select only unused blocks\n with tf.variable_scope('select'):\n stacked_blocks = tf.stack(cell_inputs + blocks)\n out_blocks = tf.gather(stacked_blocks, unused_indices, axis=0)\n out_blocks = tf.transpose(out_blocks, (1, 2, 3, 0, 4))\n\n # Combine to constant channels\n with tf.variable_scope('combine'):\n W = self._make_var('W', (ni, block_ch * block_ch))\n W = tf.gather(W, unused_indices, axis=0)\n W = tf.reshape(W, (1, 1, num_out_blocks * block_ch, block_ch))\n X = tf.reshape(out_blocks, (-1, w, h, num_out_blocks * block_ch))\n X = tf.nn.relu(X)\n X = tf.nn.conv2d(X, W, (1, 1, 1, 1), padding='SAME')\n X = self._add_batch_norm(X, block_ch, is_train=is_train)\n\n return (X, block_ch)\n\n def _add_op(self, cell_inputs, blocks, input_idx, op, w, h, ch, is_reduction=False, is_train=False):\n ni = len(cell_inputs + blocks)\n inputs = cell_inputs + blocks\n op_map = self._get_op_map()\n\n # Just build output for select operation\n X = inputs[input_idx]\n op_no = OPS[op]\n op_method = op_map[op_no]\n\n # If we were to account for reduction\n if is_reduction and input_idx < len(cell_inputs):\n X = op_method(X, input_idx, ni, w << 1, h << 1, ch, is_reduction=True, is_dynamic=False, 
is_train=is_train)\n else:\n X = op_method(X, input_idx, ni, w, h, ch, is_reduction=False, is_dynamic=False, is_train=is_train)\n\n return X\n\n def _add_op_dynamic(self, cell_inputs, blocks, input_idx, op, w, h, ch, is_train=False):\n ni = len(cell_inputs + blocks)\n inputs = tf.stack(cell_inputs + blocks, axis=0)\n op_map = self._get_op_map()\n\n # Build output for each available operation\n X = inputs[input_idx]\n op_Xs = []\n for op_no in OPS:\n op_method = op_map[op_no]\n op_X = op_method(X, input_idx, ni, w, h, ch, is_reduction=False, is_dynamic=True, is_train=is_train)\n op_Xs.append(op_X)\n\n # Stack operation outputs and index by op\n op_Xs = tf.stack(op_Xs)\n X = op_Xs[op]\n\n return X\n\n ####################################\n # Block Ops\n ####################################\n\n def _get_op_map(self):\n # List of all possible operations and their associated numbers\n return {\n 0: self._add_separable_conv_3x3_op,\n 1: self._add_separable_conv_5x5_op,\n 2: self._add_avg_pool_3x3_op,\n 3: self._add_max_pool_3x3_op,\n 4: self._add_identity_op,\n 5: self._add_separable_conv_7x7_op\n }\n\n def _add_avg_pool_3x3_op(self, X, input_idx, ni, w, h, ch, is_reduction, is_dynamic, is_train):\n filter_size = 3\n stride = 2 if is_reduction else 1\n with tf.variable_scope('avg_pool_3x3_op'):\n X = tf.nn.avg_pool(X, ksize=(1, filter_size, filter_size, 1), strides=[1, stride, stride, 1], padding='SAME')\n X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check\n return X\n\n def _add_identity_op(self, X, input_idx, ni, w, h, ch, is_reduction, is_dynamic, is_train):\n stride = 2 if is_reduction else 1\n with tf.variable_scope('identity_op'):\n # If stride > 1, calibrate, else, just return itself\n if stride > 1:\n X = self._calibrate(X, w, h, ch, w // stride, h // stride, ch, is_train=is_train)\n X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check\n return X\n\n def _add_max_pool_3x3_op(self, X, input_idx, ni, w, h, ch, is_reduction, is_dynamic, is_train):\n filter_size = 3\n stride = 2 if is_reduction else 1\n with tf.variable_scope('max_pool_3x3_op'):\n X = tf.nn.max_pool(X, ksize=(1, filter_size, filter_size, 1), strides=[1, stride, stride, 1], padding='SAME')\n X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check\n return X\n\n def _add_separable_conv_3x3_op(self, *args, **kwargs):\n return self._add_separable_conv_op(*args, **kwargs, filter_size=3)\n\n def _add_separable_conv_5x5_op(self, *args, **kwargs):\n return self._add_separable_conv_op(*args, **kwargs, filter_size=5)\n\n def _add_separable_conv_7x7_op(self, *args, **kwargs):\n return self._add_separable_conv_op(*args, **kwargs, filter_size=7)\n\n def _add_separable_conv_op(self, X, input_idx, ni, w, h, ch, is_reduction, is_dynamic, is_train, filter_size=3):\n num_stacks = 2\n stride = 2 if is_reduction else 1\n\n with tf.variable_scope('separable_conv_{}x{}_op'.format(filter_size, filter_size)):\n # For each stack of separable convolution (default of 2)\n for stack_no in range(num_stacks):\n # Only have > 1 stride for first stack\n stack_stride = stride if stack_no == 0 else 1\n with tf.variable_scope('stack_{}'.format(stack_no)):\n W_d = None\n W_p = None\n batch_norm_offset = None\n batch_norm_scale = None\n if is_dynamic:\n # Select weights corresponding to input index\n W_d = self._make_var('W_d', (ni, filter_size, filter_size, ch, 1))\n W_d = W_d[input_idx]\n W_p = self._make_var('W_p', (ni, 1, 1, ch, ch))\n W_p = W_p[input_idx]\n batch_norm_offset = 
self._make_var('batch_norm_offset', (ni, ch), init_constant=0)\n batch_norm_offset = batch_norm_offset[input_idx]\n batch_norm_scale = self._make_var('batch_norm_scale', (ni, ch), init_constant=1)\n batch_norm_scale = batch_norm_scale[input_idx]\n\n X = self._do_separable_conv(X, w, h, ch, filter_size=filter_size, stride=stack_stride,\n W_d=W_d, W_p=W_p, no_batch_norm=True)\n X = self._add_batch_norm(X, ch, offset=batch_norm_offset, scale=batch_norm_scale,\n no_moving_average=is_dynamic, is_train=is_train)\n\n X = tf.reshape(X, (-1, w // stride, h // stride, ch)) # Sanity shape check\n return X\n\n ####################################\n # Utils\n ####################################\n\n def _do_cutout(self, image, im_width, im_height, cutout_size):\n mask = tf.ones([cutout_size, cutout_size], dtype=tf.int32)\n start_x = tf.random.uniform(shape=(1,), minval=0, maxval=im_width, dtype=tf.int32)\n start_y = tf.random.uniform(shape=(1,), minval=0, maxval=im_height, dtype=tf.int32)\n mask = tf.pad(mask, [[cutout_size + start_y[0], im_height - start_y[0]],\n [cutout_size + start_x[0], im_width - start_x[0]]])\n mask = mask[cutout_size: cutout_size + im_height,\n cutout_size: cutout_size + im_width]\n mask = tf.tile(tf.reshape(mask, (im_height, im_width, 1)), (1, 1, 3))\n image = tf.where(tf.equal(mask, 0), x=image, y=tf.zeros_like(image))\n return image\n\n def _add_drop_path(self, X, keep_prob):\n with tf.variable_scope('drop_path'):\n batch_size = tf.shape(X)[0]\n noise_shape = (batch_size, 1, 1, 1)\n random_tensor = keep_prob + tf.random_uniform(noise_shape, dtype=tf.float32)\n binary_tensor = tf.floor(random_tensor)\n X = (X / keep_prob) * binary_tensor\n return X\n\n def _do_conv(self, X, w, h, in_ch, out_ch, filter_size=1, no_relu=False, no_reg=False, is_train=False):\n W = self._make_var('W', (filter_size, filter_size, in_ch, out_ch), no_reg=no_reg)\n if not no_relu:\n X = tf.nn.relu(X)\n X = tf.nn.conv2d(X, W, (1, 1, 1, 1), padding='SAME')\n X = self._add_batch_norm(X, out_ch, is_train=is_train)\n X = tf.reshape(X, (-1, w, h, out_ch)) # Sanity shape check\n return X\n\n def _do_separable_conv(self, X, w, h, ch, filter_size=3, stride=1, ch_mul=1,\n no_batch_norm=False, W_d=None, W_p=None, is_train=False):\n if W_d is None:\n W_d = self._make_var('W_d', (filter_size, filter_size, ch, ch_mul))\n if W_p is None:\n W_p = self._make_var('W_p', (1, 1, ch_mul * ch, ch))\n X = tf.nn.relu(X)\n X = tf.nn.separable_conv2d(X, W_d, W_p, strides=(1, stride, stride, 1), padding='SAME')\n if not no_batch_norm:\n X = self._add_batch_norm(X, ch, is_train=is_train)\n return X\n\n def _calibrate(self, X, w, h, ch, w_out, h_out, ch_out, is_train=False):\n '''\n Calibrate input of shape (-1, w, h, ch) to (-1, w_out, h_out, ch_out), assuming (w, h) / (w_out, h_out) is power of 2\n '''\n # Downsample with factorized reduction\n downsample_no = 0\n while w > w_out or h > h_out:\n downsample_no += 1\n with tf.variable_scope('downsample_{}x'.format(downsample_no)):\n X = tf.nn.relu(X)\n X = self._add_factorized_reduction(X, w, h, ch, ch_out, is_train=is_train)\n ch = ch_out\n w >>= 1\n h >>= 1\n\n # If channel counts finally don't match, convert channel counts with 1x1 conv\n if ch != ch_out:\n with tf.variable_scope('convert_conv'):\n X = self._do_conv(X, w, h, ch, ch_out, filter_size=1, is_train=is_train)\n\n X = tf.reshape(X, (-1, w_out, h_out, ch_out)) # Sanity shape check\n return X\n\n def _add_fully_connected(self, X, in_shape, out_ch, no_reg=False):\n ch = np.prod(in_shape)\n X = tf.reshape(X, (-1, ch))\n W 
= self._make_var('W', (ch, out_ch), no_reg=no_reg)\n X = tf.matmul(X, W)\n X = tf.reshape(X, (-1, out_ch)) # Sanity shape check\n return X\n\n def _add_factorized_reduction(self, X, in_w, in_h, in_ch, out_ch, is_train=False):\n '''\n Output is of shape (in_w // 2, in_h // 2, out_ch)\n '''\n assert in_w % 2 == 0 and in_h % 2 == 0, 'Width & height ({} & {}) must both be even!'.format(in_w, in_h)\n\n with tf.variable_scope('fac_reduc'):\n # Split area into 2 halves\n half_1 = tf.nn.avg_pool(X, ksize=(1, 1, 1, 1), strides=(1, 2, 2, 1), padding='VALID')\n shifted_X = tf.pad(X, ((0, 0), (0, 1), (0, 1), (0, 0)))[:, 1:, 1:, :]\n half_2 = tf.nn.avg_pool(shifted_X, ksize=(1, 1, 1, 1), strides=(1, 2, 2, 1), padding='VALID')\n\n # Apply 1 x 1 convolution to each half separately\n W_half_1 = self._make_var('W_half_1', (1, 1, in_ch, out_ch >> 1))\n X_half_1 = tf.nn.conv2d(half_1, W_half_1, (1, 1, 1, 1), padding='VALID')\n W_half_2 = self._make_var('W_half_2', (1, 1, in_ch, out_ch >> 1))\n X_half_2 = tf.nn.conv2d(half_2, W_half_2, (1, 1, 1, 1), padding='VALID')\n\n # Concat both halves across channels\n X = tf.concat([X_half_1, X_half_2], axis=3)\n\n # Apply batch normalization\n X = self._add_batch_norm(X, out_ch, is_train=is_train)\n\n X = tf.reshape(X, (-1, in_w // 2, in_h // 2, out_ch)) # Sanity shape check\n\n return X\n\n def _add_batch_norm(self, X, in_ch, decay=0.9, epsilon=1e-5, offset=None, scale=None, is_train=False,\n no_moving_average=False):\n with tf.variable_scope('batch_norm'):\n if offset is None:\n offset = self._make_var('offset', (in_ch,), init_constant=0)\n if scale is None:\n scale = self._make_var('scale', (in_ch,), init_constant=1)\n\n if not no_moving_average:\n moving_mean = self._make_var('moving_mean', (in_ch,), trainable=False, init_constant=0)\n moving_variance = self._make_var('moving_variance', (in_ch,), trainable=False, init_constant=1)\n\n if is_train:\n # For training, do batch norm with batch mean & variance\n # Update moving averages if training\n (X, mean, variance) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True)\n update_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)\n update_variance = moving_averages.assign_moving_average(moving_variance, variance, decay)\n with tf.control_dependencies([update_mean, update_variance]):\n X = tf.identity(X)\n else:\n # For prediction, do batch norm with computed moving mean & variance from training\n # Don't update moving averages if predicting\n (X, _, _) = tf.nn.fused_batch_norm(X, scale, offset, mean=moving_mean, variance=moving_variance,\n epsilon=epsilon, is_training=False)\n else:\n (X, _, _) = tf.nn.fused_batch_norm(X, scale, offset, epsilon=epsilon, is_training=True)\n\n return X\n\n def _mark_for_monitoring(self, name, value):\n tf.add_to_collection(TF_COLLECTION_MONITORED, tf.identity(value, name))\n\n def _add_monitoring_of_values(self):\n monitored_values = tf.get_collection(TF_COLLECTION_MONITORED)\n monitored_values = {\n value.name.split(':')[0]: value # Get rid of ':0' from name\n for value in monitored_values\n }\n\n for (name, value) in monitored_values.items():\n tf.summary.scalar(name, value)\n\n summary_op = tf.summary.merge_all()\n\n return (summary_op, monitored_values)\n\n def _make_var(self, name, shape, dtype=None, no_reg=False, initializer=None, init_constant=None, trainable=True):\n if initializer is None:\n if init_constant is not None:\n initializer = tf.constant_initializer(init_constant, dtype=tf.float32)\n else:\n initializer = 
tf.contrib.keras.initializers.he_normal()\n\n # Ensure that name is unique by shape too\n name += '-shape-{}'.format('x'.join([str(x) for x in shape]))\n\n var = tf.get_variable(name, shape=shape, dtype=dtype, initializer=initializer, trainable=trainable)\n\n # Add L2 regularization node for trainable var\n if trainable and not no_reg:\n l2_loss = tf.nn.l2_loss(var)\n tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, l2_loss)\n\n return var\n\nclass TimedRepeatCondition():\n def __init__(self, every_secs=60):\n self._every_secs = every_secs\n self._last_trigger_time = datetime.now()\n\n def check(self) -> bool:\n if (datetime.now() - self._last_trigger_time).total_seconds() >= self._every_secs:\n self._last_trigger_time = datetime.now()\n return True\n else:\n return False\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--train_path', type=str, default='data/cifar10_train.zip', help='Path to train dataset')\n parser.add_argument('--val_path', type=str, default='data/cifar10_val.zip', help='Path to validation dataset')\n parser.add_argument('--test_path', type=str, default='data/cifar10_test.zip', help='Path to test dataset')\n parser.add_argument('--query_path', type=str, default='examples/data/image_classification/cifar10_test_1.png',\n help='Path(s) to query image(s), delimited by commas')\n (args, _) = parser.parse_known_args()\n\n queries = utils.dataset.load_images(args.query_path.split(',')).tolist()\n test_model_class(\n model_file_path=__file__,\n model_class='TfEnas',\n task='IMAGE_CLASSIFICATION',\n dependencies={\n ModelDependency.TENSORFLOW: '1.12.0'\n },\n train_dataset_path=args.train_path,\n val_dataset_path=args.val_path,\n test_dataset_path=args.test_path,\n queries=queries\n )\n", "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport base64\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.preprocessing import StandardScaler\n\nfrom singa_auto.model import BaseModel, IntegerKnob, FloatKnob, logger\nfrom singa_auto.model.dev import test_model_class\nfrom singa_auto.constants import ModelDependency\n\nclass GaussianClf(BaseModel):\n '''\n Implements a Gaussian Naive Bayes classifier for the tabular data classification task\n '''\n @staticmethod\n def get_knob_config():\n return {\n 'var_smoothing': FloatKnob(1e-07, 1e-05, is_exp=True),\n }\n\n def __init__(self, **knobs):\n self.__dict__.update(knobs)\n self._clf = self._build_classifier(self.var_smoothing)\n\n\n def train(self, dataset_path, **kwargs):\n # Load CSV file as pandas dataframe\n csv_path = dataset_path\n data = pd.read_csv(csv_path)\n\n # Fit the scaler on train features only (the last column is the target)\n X_train = self.prepare_X(data.iloc[:, :-1], fit=True)\n y_train = data.iloc[:, -1]\n\n self._clf.fit(X_train, y_train)\n\n # Compute train accuracy\n score = self._clf.score(X_train, y_train)\n logger.log('Train accuracy: {}'.format(score))\n\n\n def evaluate(self, dataset_path):\n csv_path = dataset_path\n data = pd.read_csv(csv_path)\n\n # Reuse the scaler fitted during training to avoid leaking validation statistics\n X_val = self.prepare_X(data.iloc[:, :-1])\n y_val = data.iloc[:, -1]\n\n accuracy = self._clf.score(X_val, y_val)\n return accuracy\n\n\n def predict(self, queries):\n # Combine queries into a single dataframe & apply the fitted scaler\n queries = pd.concat([pd.DataFrame(query, index=[0]) for query in queries], ignore_index=True)\n probs = self._clf.predict_proba(self.prepare_X(queries))\n return probs.tolist()\n\n\n def destroy(self):\n pass\n\n\n def dump_parameters(self):\n params = {}\n # Put model parameters (classifier & fitted scaler)\n clf_bytes = pickle.dumps((self._clf, self._scaler))\n clf_base64 = base64.b64encode(clf_bytes).decode('utf-8')\n params['clf_base64'] = clf_base64\n\n return params\n\n\n def load_parameters(self, params):\n # Load model parameters\n assert 'clf_base64' in params\n clf_base64 = params['clf_base64']\n clf_bytes = base64.b64decode(clf_base64.encode('utf-8'))\n (self._clf, self._scaler) = pickle.loads(clf_bytes)\n\n\n def prepare_X(self, data, fit=False):\n # Standard-scale features, fitting the scaler only on training data\n if fit:\n self._scaler = StandardScaler().fit(data)\n return self._scaler.transform(data)\n\n\n def _build_classifier(self, var_smoothing):\n clf = GaussianNB(priors=None, var_smoothing=var_smoothing)\n return clf\n\n\nif __name__ == '__main__':\n test_model_class(\n model_file_path=__file__,\n model_class='GaussianClf',\n task='TABULAR_CLASSIFICATION',\n dependencies={\n ModelDependency.SCIKIT_LEARN: '0.20.0'\n },\n train_dataset_path='data/heart_train.csv',\n val_dataset_path='data/heart_test.csv',\n queries=[\n { 'age': 50, 'Sex': '0', 'cp': 3, 'trestbps': 130, 'chol': 220, 'fbs': 1, 'restecg': 0, 'thalach': 170, 'exang': 1, 'oldpeak': 1.7, 'slope': 2, 'ca': 0, 'thal': 3 }\n ]\n )\n", "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport xgboost as xgb\nimport pickle\nimport base64\nimport pandas as pd\nimport numpy as np\nimport json\n\nfrom singa_auto.model import BaseModel, IntegerKnob, FloatKnob, logger\nfrom singa_auto.model.dev import test_model_class\nfrom singa_auto.constants import ModelDependency\n\nclass XgbClf(BaseModel):\n '''\n Implements an XGBoost classifier for the tabular data classification task\n '''\n @staticmethod\n def get_knob_config():\n return {\n 'n_estimators': IntegerKnob(50, 200),\n 'min_child_weight': IntegerKnob(1, 6),\n 'max_depth': IntegerKnob(2, 8),\n 'gamma': FloatKnob(0.0, 1.0, is_exp=False),\n 'subsample': FloatKnob(0.5, 1.0, is_exp=False),\n 'colsample_bytree': FloatKnob(0.1, 0.7, is_exp=False)\n }\n\n def __init__(self, **knobs):\n self.__dict__.update(knobs)\n\n def train(self, dataset_path, features=None, target=None, **kwargs):\n # Record features & target\n self._features = features\n self._target = target\n\n # Load CSV file as pandas dataframe\n csv_path = dataset_path\n data = pd.read_csv(csv_path)\n\n # Extract X & y from dataframe\n (X, y) = self._extract_xy(data)\n\n # Encode categorical features\n X = self._encoding_categorical_type(X)\n\n num_class = y.unique().size\n\n self._clf = self._build_classifier(self.n_estimators, self.min_child_weight, \\\n self.max_depth, self.gamma, self.subsample, self.colsample_bytree, num_class)\n\n self._clf.fit(X, y)\n\n # Compute train accuracy\n score = self._clf.score(X, y)\n logger.log('Train accuracy: {}'.format(score))\n\n def evaluate(self, dataset_path):\n # Load CSV file as pandas dataframe\n csv_path = dataset_path\n data = pd.read_csv(csv_path)\n\n # Extract X & y from dataframe\n (X, y) = self._extract_xy(data)\n\n # Encode categorical features\n X = self._encoding_categorical_type(X)\n\n accuracy = self._clf.score(X, y)\n return accuracy\n\n def predict(self, queries):\n queries = [pd.DataFrame(query, index=[0]) for query in queries]\n probs = [self._clf.predict_proba(self._features_mapping(query)).tolist()[0] for query in queries]\n return probs\n\n def destroy(self):\n pass\n\n def dump_parameters(self):\n params = {}\n\n # Put model parameters\n clf_bytes = pickle.dumps(self._clf)\n clf_base64 = base64.b64encode(clf_bytes).decode('utf-8')\n params['clf_base64'] = clf_base64\n params['encoding_dict'] = json.dumps(self._encoding_dict)\n params['features'] = json.dumps(self._features)\n params['target'] = self._target\n\n return params\n\n def load_parameters(self, params):\n # Load model parameters\n assert 'clf_base64' in params\n clf_base64 = params['clf_base64']\n clf_bytes = base64.b64decode(clf_base64.encode('utf-8'))\n\n self._clf = pickle.loads(clf_bytes)\n self._encoding_dict = json.loads(params['encoding_dict'])\n self._features = json.loads(params['features'])\n self._target = params['target']\n\n def _extract_xy(self, data):\n features = self._features\n target = self._target\n\n if features is None:\n X = data.iloc[:,:-1]\n else:\n X = data[features]\n\n if target is None:\n y = data.iloc[:,-1]\n else:\n y = data[target]\n\n return (X, y)\n\n def _encoding_categorical_type(self, cols):\n # Apply label encoding for those categorical columns\n cat_cols = list(filter(lambda x: cols[x].dtype == 'object', cols.columns))\n encoded_cols = pd.DataFrame({col: cols[col].astype('category').cat.codes \\\n if cols[col].dtype == 'object' else cols[col] for col in cols}, index=cols.index)\n\n # Recover the missing 
elements (Use XGBoost to automatically handle them)\n encoded_cols = encoded_cols.replace(to_replace=-1, value=np.nan)\n\n # Generate the dict that maps categorical features to numerical\n encoding_dict = {col: {cat: n for n, cat in enumerate(cols[col].astype('category'). \\\n cat.categories)} for col in cat_cols}\n self._encoding_dict = encoding_dict\n\n return encoded_cols\n\n def _features_mapping(self, df):\n # Encode the categorical features with the pre-saved encoding dict\n cat_cols = list(filter(lambda x: df[x].dtype == 'object', df.columns))\n df_temp = df.copy()\n for col in cat_cols:\n df_temp[col] = df[col].map(self._encoding_dict[col])\n df = df_temp\n return df\n\n def _build_classifier(self, n_estimators, min_child_weight, max_depth, gamma, subsample, colsample_bytree, num_class):\n assert num_class >= 2\n\n if num_class == 2:\n clf = xgb.XGBClassifier(\n n_estimators=n_estimators,\n min_child_weight=min_child_weight,\n max_depth=max_depth,\n gamma=gamma,\n subsample=subsample,\n colsample_bytree=colsample_bytree\n )\n else:\n clf = xgb.XGBClassifier(\n n_estimators=n_estimators,\n min_child_weight=min_child_weight,\n max_depth=max_depth,\n gamma=gamma,\n subsample=subsample,\n colsample_bytree=colsample_bytree,\n objective='multi:softprob', # 'softprob' (not 'softmax') so that predict_proba yields class probabilities\n num_class=num_class\n )\n return clf\n\nif __name__ == '__main__':\n test_model_class(\n model_file_path=__file__,\n model_class='XgbClf',\n task='TABULAR_CLASSIFICATION',\n dependencies={\n ModelDependency.XGBOOST: '0.90'\n },\n train_dataset_path='data/titanic_train.csv',\n val_dataset_path='data/titanic_val.csv',\n train_args={\n 'features': ['Pclass', 'Sex', 'Age'],\n 'target':'Survived'\n },\n queries=[\n { 'Pclass': 1, 'Sex': 'female', 'Age': 2.0 }\n ]\n )\n"
]
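The categorical handling in XgbClf above is the subtlest part of that file: object-dtype columns are label-encoded with pandas category codes at train time, the category-to-code mapping is saved in self._encoding_dict, and raw queries are pushed through the saved mapping at predict time, with unseen or missing categories falling out as NaN, which XGBoost treats as a missing value. Below is a minimal, self-contained sketch of that round trip; it is illustrative only, with made-up column names and values, and is not part of the model files above.

import numpy as np
import pandas as pd

# Toy training frame with one categorical and one numeric column (hypothetical data)
train = pd.DataFrame({'Sex': ['male', 'female', 'male'], 'Age': [22.0, 38.0, 26.0]})

# Encode object-dtype columns to category codes, as _encoding_categorical_type does;
# pandas assigns -1 to missing values, which is then mapped to NaN for XGBoost
encoded = train.assign(Sex=train['Sex'].astype('category').cat.codes)
encoded = encoded.replace(to_replace=-1, value=np.nan)

# Persist the category -> code mapping, mirroring self._encoding_dict
encoding_dict = {'Sex': {cat: n for n, cat in
                         enumerate(train['Sex'].astype('category').cat.categories)}}

# At predict time, map raw query strings through the saved dict, as _features_mapping does;
# a category never seen during training maps to NaN and is handled as missing by XGBoost
query = pd.DataFrame({'Sex': ['female'], 'Age': [2.0]})
query['Sex'] = query['Sex'].map(encoding_dict['Sex'])
print(encoded)
print(query)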
[ [ "pandas.read_csv", "sklearn.ensemble.RandomForestClassifier", "pandas.DataFrame" ], [ "tensorflow.get_variable", "tensorflow.concat", "tensorflow.contrib.keras.initializers.he_normal", "tensorflow.control_dependencies", "numpy.asarray", "tensorflow.stack", "tensorflow.nn.max_pool", "tensorflow.global_variables", "tensorflow.equal", "tensorflow.cast", "tensorflow.minimum", "tensorflow.train.cosine_decay_restarts", "tensorflow.image.random_crop", "tensorflow.data.experimental.map_and_batch", "tensorflow.nn.l2_loss", "numpy.mean", "tensorflow.pad", "tensorflow.initializers.constant", "tensorflow.add_n", "tensorflow.summary.scalar", "tensorflow.nn.conv2d", "tensorflow.Graph", "tensorflow.python.training.moving_averages.assign_moving_average", "tensorflow.image.random_flip_left_right", "tensorflow.get_collection", "tensorflow.floor", "tensorflow.gradients", "tensorflow.ConfigProto", "tensorflow.gather", "numpy.std", "tensorflow.train.MomentumOptimizer", "tensorflow.clip_by_norm", "tensorflow.Session", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.argmax", "numpy.zeros", "tensorflow.nn.dropout", "tensorflow.nn.fused_batch_norm", "tensorflow.matmul", "tensorflow.shape", "tensorflow.random.uniform", "tensorflow.identity", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.zeros_like", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "numpy.random.rand", "tensorflow.summary.merge_all", "tensorflow.nn.avg_pool", "tensorflow.one_hot", "tensorflow.add_to_collection", "tensorflow.size", "tensorflow.nn.relu", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.transpose", "tensorflow.reduce_mean", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.reshape", "tensorflow.nn.separable_conv2d", "tensorflow.ones", "tensorflow.assign", "tensorflow.constant_initializer", "numpy.prod", "tensorflow.variable_scope", "tensorflow.random_uniform" ], [ "sklearn.preprocessing.StandardScaler", "pandas.read_csv", "sklearn.naive_bayes.GaussianNB", "pandas.DataFrame" ], [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
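For orientation, each record in this dump pairs one repository (repo_name, hexsha) with parallel file_path, code, apis and possible_versions lists, as in the fields above and below. The following is a minimal, hypothetical loader sketch; it assumes the records are serialized as JSON Lines, which is an assumption about the storage format rather than something this document states, and the file name is made up.

import json

def iter_records(path):
    # Yield one dict per repository record
    with open(path, encoding='utf-8') as f:
        for line in f:
            yield json.loads(line)

for rec in iter_records('code_dump.jsonl'):  # hypothetical file name
    print(rec['repo_name'])
    for file_path, apis in zip(rec['file_path'], rec['apis']):
        # 'apis' lists the numpy/pandas/sklearn/tensorflow calls used in each file
        print(' ', file_path, '->', len(apis), 'calls')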
ElmerJeanpierreLopez/wradlib
[ "ae6aa24c68f431b735a742510cea3475fb55059d", "ae6aa24c68f431b735a742510cea3475fb55059d" ]
[ "wradlib/georef/vector.py", "wradlib/georef/rect.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# Copyright (c) 2011-2019, wradlib developers.\n# Distributed under the MIT License. See LICENSE.txt for more info.\n\n\"\"\"\nVector Functions (GDAL)\n^^^^^^^^^^^^^^^^^^^^^^^\n\n.. autosummary::\n :nosignatures:\n :toctree: generated/\n\n get_vector_coordinates\n get_vector_points\n transform_geometry\n ogr_create_layer\n ogr_copy_layer\n ogr_copy_layer_by_name\n ogr_add_feature\n ogr_add_geometry\n numpy_to_ogr\n ogr_to_numpy\n ogr_geocol_to_numpy\n get_centroid\n\"\"\"\nimport json\nimport warnings\n\nimport numpy as np\nfrom osgeo import gdal, ogr, osr\n\nfrom wradlib.georef import projection\n\nogr.UseExceptions()\ngdal.UseExceptions()\n\n\ndef get_vector_points(geom):\n \"\"\"Extract coordinate points from given ogr geometry as generator object\n\n If geometries are nested, function recurses.\n\n Parameters\n ----------\n geom : ogr.Geometry\n\n Returns\n -------\n result : generator object\n expands to Nx2 dimensional nested point arrays\n \"\"\"\n geomtype = geom.GetGeometryType()\n if geomtype > 1:\n # 1D Geometries, LINESTRINGS\n if geomtype == 2:\n result = np.array(geom.GetPoints())\n yield result\n # RINGS, POLYGONS, MULTIPOLYGONS, MULTILINESTRINGS\n elif geomtype > 2:\n # iterate over geometries and recurse\n for item in geom:\n for result in get_vector_points(item):\n yield result\n else:\n warnings.warn(\"unsupported geometry type detected in \"\n \"wradlib.georef.get_vector_points - skipping\")\n\n\ndef transform_geometry(geom, dest_srs, **kwargs):\n \"\"\"Perform geotransformation to given destination SpatialReferenceSystem\n\n It transforms coordinates to a given destination osr spatial reference\n if a geotransform is necessary.\n\n Parameters\n ----------\n geom : ogr.geometry\n dest_srs : osr.SpatialReference\n Destination Projection\n\n Keyword Arguments\n -----------------\n source_srs : osr.SpatialReference\n Source Projection\n\n Returns\n -------\n geom : ogr.Geometry\n Transformed Geometry\n \"\"\"\n gsrs = geom.GetSpatialReference()\n srs = kwargs.get('source_srs', gsrs)\n\n # srs is None assume wgs84 lonlat, but warn too\n if srs is None:\n srs = projection.get_default_projection()\n warnings.warn(\"geometry without spatial reference - assuming wgs84\")\n\n # transform if not the same spatial reference system\n if not srs.IsSame(dest_srs):\n if gsrs is None:\n geom.AssignSpatialReference(srs)\n gsrs = geom.GetSpatialReference()\n if gdal.VersionInfo()[0] >= '3':\n dest_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n gsrs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)\n geom.TransformTo(dest_srs)\n\n return geom\n\n\ndef get_vector_coordinates(layer, **kwargs):\n \"\"\"Function iterates over gdal ogr layer features and packs extracted \\\n vector coordinate points into nested ndarray\n\n It transforms coordinates to a given destination osr spatial reference if\n dest_srs is given and a geotransform is necessary.\n\n Parameters\n ----------\n layer : ogr.Layer\n\n Keyword Arguments\n -----------------\n source_srs : osr.SpatialReference\n Source Projection\n dest_srs: osr.SpatialReference\n Destination Projection\n key : string\n attribute key to extract from layer feature\n\n Returns\n -------\n shp : nested :class:`numpy:numpy.ndarray`\n Dimension of subarrays Nx2\n extracted shape coordinate points\n attrs : list\n List of attributes extracted from features\n \"\"\"\n\n shp = []\n\n source_srs = kwargs.get('source_srs', None)\n dest_srs = kwargs.get('dest_srs', None)\n key = kwargs.get('key', 
None)\n if key:\n attrs = []\n else:\n attrs = None\n\n for i in range(layer.GetFeatureCount()):\n feature = layer.GetNextFeature()\n if feature:\n if key:\n attrs.append(feature[key])\n geom = feature.GetGeometryRef()\n if dest_srs:\n transform_geometry(geom, dest_srs, source_srs=source_srs)\n # get list of xy-coordinates\n reslist = list(get_vector_points(geom))\n shp.append(np.squeeze(np.array(reslist)))\n\n shp = np.squeeze(np.array(shp))\n\n return shp, attrs\n\n\ndef ogr_create_layer(ds, name, srs=None, geom_type=None, fields=None):\n \"\"\"Creates OGR.Layer objects in gdal.Dataset object.\n\n Creates one OGR.Layer with given name in given gdal.Dataset object\n using given OGR.GeometryType and FieldDefinitions\n\n Parameters\n ----------\n ds : gdal.Dataset\n object\n name : string\n OGRLayer name\n srs : OSR.SpatialReference\n object\n geom_type : OGR GeometryType\n (eg. ogr.wkbPolygon)\n fields : list of 2 element tuples\n (strings, OGR.DataType) field name, field type\n\n Returns\n -------\n out : OGR.Layer\n object\n \"\"\"\n if geom_type is None:\n raise TypeError(\"geometry_type needed\")\n\n lyr = ds.CreateLayer(name, srs=srs, geom_type=geom_type)\n if fields is not None:\n for fname, fvalue in fields:\n lyr.CreateField(ogr.FieldDefn(fname, fvalue))\n\n return lyr\n\n\ndef ogr_copy_layer(src_ds, index, dst_ds, reset=True):\n \"\"\" Copy OGR.Layer object.\n\n Copy OGR.Layer object from src_ds gdal.Dataset to dst_ds gdal.Dataset\n\n Parameters\n ----------\n src_ds : gdal.Dataset\n object\n index : int\n layer index\n dst_ds : gdal.Dataset\n object\n reset : bool\n if True resets src_layer\n \"\"\"\n # get and copy src geometry layer\n\n src_lyr = src_ds.GetLayerByIndex(index)\n if reset:\n src_lyr.ResetReading()\n src_lyr.SetSpatialFilter(None)\n src_lyr.SetAttributeFilter(None)\n dst_ds.CopyLayer(src_lyr, src_lyr.GetName())\n\n\ndef ogr_copy_layer_by_name(src_ds, name, dst_ds, reset=True):\n \"\"\" Copy OGR.Layer object.\n\n Copy OGR.Layer object from src_ds gdal.Dataset to dst_ds gdal.Dataset\n\n Parameters\n ----------\n src_ds : gdal.Dataset\n object\n name : string\n layer name\n dst_ds : gdal.Dataset\n object\n reset : bool\n if True resets src_layer\n \"\"\"\n # get and copy src geometry layer\n\n src_lyr = src_ds.GetLayerByName(name)\n if src_lyr is None:\n raise ValueError(\"OGR layer '{}' not found in dataset\".format(name))\n if reset:\n src_lyr.ResetReading()\n src_lyr.SetSpatialFilter(None)\n src_lyr.SetAttributeFilter(None)\n dst_ds.CopyLayer(src_lyr, src_lyr.GetName())\n\n\ndef ogr_add_feature(ds, src, name=None):\n \"\"\" Creates OGR.Feature objects in OGR.Layer object.\n\n OGR.Features are built from numpy src points or polygons.\n\n OGR.Features 'FID' and 'index' correspond to the source data element\n\n Parameters\n ----------\n ds : gdal.Dataset\n object\n src : :func:`numpy:numpy.array`\n source data\n name : string\n name of wanted Layer\n \"\"\"\n\n if name is not None:\n lyr = ds.GetLayerByName(name)\n else:\n lyr = ds.GetLayer()\n\n defn = lyr.GetLayerDefn()\n geom_name = ogr.GeometryTypeToName(lyr.GetGeomType())\n fields = [defn.GetFieldDefn(i).GetName()\n for i in range(defn.GetFieldCount())]\n feat = ogr.Feature(defn)\n\n for index, src_item in enumerate(src):\n geom = numpy_to_ogr(src_item, geom_name)\n\n if 'index' in fields:\n feat.SetField('index', index)\n\n feat.SetGeometry(geom)\n lyr.CreateFeature(feat)\n\n\ndef ogr_add_geometry(layer, geom, attrs):\n \"\"\" Copies single OGR.Geometry object to an OGR.Layer object.\n\n Given OGR.Geometry is copied to new 
OGR.Feature and\n written to given OGR.Layer by given index. Attributes are attached.\n\n Parameters\n ----------\n layer : OGR.Layer\n object\n geom : OGR.Geometry\n object\n attrs : list\n attributes referring to layer fields\n\n \"\"\"\n defn = layer.GetLayerDefn()\n feat = ogr.Feature(defn)\n\n for i, item in enumerate(attrs):\n feat.SetField(i, item)\n feat.SetGeometry(geom)\n layer.CreateFeature(feat)\n\n\ndef numpy_to_ogr(vert, geom_name):\n \"\"\"Convert a vertex array to gdal/ogr geometry.\n\n Using JSON as a vehicle to efficiently deal with numpy arrays.\n\n Parameters\n ----------\n vert : array_like\n a numpy array of vertices of shape (num vertices, 2)\n geom_name : string\n Name of Geometry\n\n Returns\n -------\n out : ogr.Geometry\n object of type geom_name\n \"\"\"\n\n if geom_name in ['Polygon', 'MultiPolygon']:\n json_str = \"{{'type':{0!r},'coordinates':[{1!r}]}}\".\\\n format(geom_name, vert.tolist())\n else:\n json_str = \"{{'type':{0!r},'coordinates':{1!r}}}\".\\\n format(geom_name, vert.tolist())\n\n return ogr.CreateGeometryFromJson(json_str)\n\n\ndef ogr_to_numpy(ogrobj):\n \"\"\"Backconvert a gdal/ogr geometry to a numpy vertex array.\n\n Using JSON as a vehicle to efficiently deal with numpy arrays.\n\n Parameters\n ----------\n ogrobj : ogr.Geometry\n object\n\n Returns\n -------\n out : :class:`numpy:numpy.ndarray`\n a nested ndarray of vertices of shape (num vertices, 2)\n\n \"\"\"\n jsonobj = json.loads(ogrobj.ExportToJson())\n\n return np.squeeze(jsonobj['coordinates'])\n\n\ndef ogr_geocol_to_numpy(ogrobj):\n \"\"\"Backconvert a gdal/ogr geometry Collection to a numpy vertex array.\n\n This extracts only Polygon geometries!\n\n Using JSON as a vehicle to efficiently deal with numpy arrays.\n\n Parameters\n ----------\n ogrobj : ogr.Geometry\n Collection object\n\n Returns\n -------\n out : :class:`numpy:numpy.ndarray`\n a nested ndarray of vertices of shape (num vertices, 2)\n\n \"\"\"\n jsonobj = json.loads(ogrobj.ExportToJson())\n mpol = []\n for item in jsonobj['geometries']:\n if item['type'] == 'Polygon':\n mpol.append(item['coordinates'])\n\n return np.squeeze(mpol)\n\n\ndef get_centroid(polyg):\n \"\"\"Return centroid of a polygon\n\n Parameters\n ----------\n polyg : :class:`numpy:numpy.ndarray`\n of shape (num vertices, 2) or ogr.Geometry object\n\n Returns\n -------\n out : x and y coordinate of the centroid\n\n \"\"\"\n if not isinstance(polyg, ogr.Geometry):\n polyg = numpy_to_ogr(polyg, 'Polygon')\n return polyg.Centroid().GetPoint()[0:2]\n", "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# Copyright (c) 2011-2019, wradlib developers.\n# Distributed under the MIT License. See LICENSE.txt for more info.\n\n\"\"\"\nRectangular Grid Functions\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. 
autosummary::\n :nosignatures:\n :toctree: generated/\n\n get_radolan_coords\n get_radolan_grid\n xyz_to_spherical\n\"\"\"\nimport numpy as np\n\nfrom wradlib.georef import projection\n\n\ndef get_radolan_coords(lon, lat, trig=False):\n \"\"\"\n Calculates x,y coordinates of radolan grid from lon, lat\n\n Parameters\n ----------\n\n lon : float, :class:`numpy:numpy.ndarray` of floats\n longitude\n lat : float, :class:`numpy:numpy.ndarray` of floats\n latitude\n trig : boolean\n if True, uses trigonometric formulas for calculation\n if False, uses osr spatial reference system to transform\n between projections\n `trig` is recommended to be False, however, the two ways of\n computation are expected to be equivalent.\n \"\"\"\n\n if trig:\n # calculation of x_0 and y_0 coordinates of radolan grid\n # as described in the format description\n phi_0 = np.radians(60)\n phi_m = np.radians(lat)\n lam_0 = 10\n lam_m = lon\n lam = np.radians(lam_m - lam_0)\n er = 6370.040\n m_phi = (1 + np.sin(phi_0)) / (1 + np.sin(phi_m))\n x = er * m_phi * np.cos(phi_m) * np.sin(lam)\n y = - er * m_phi * np.cos(phi_m) * np.cos(lam)\n else:\n # create radolan projection osr object\n proj_stereo = projection.create_osr(\"dwd-radolan\")\n\n # create wgs84 projection osr object\n proj_wgs = projection.get_default_projection()\n\n x, y = projection.reproject(lon, lat, projection_source=proj_wgs,\n projection_target=proj_stereo)\n\n return x, y\n\n\ndef get_radolan_grid(nrows=None, ncols=None, trig=False, wgs84=False):\n \"\"\"Calculates x/y coordinates of radolan grid of the German Weather Service\n\n Returns the x,y coordinates of the radolan grid positions\n (lower left corner of every pixel). The radolan grid is a\n polarstereographic projection, the projection information was taken from\n RADOLAN-RADVOR-OP Kompositformat_2.2.2 :cite:`DWD2009`\n\n .. table:: Coordinates for 900km x 900km grid\n\n +------------+-----------+------------+-----------+-----------+\n | Coordinate | lon | lat | x | y |\n +============+===========+============+===========+===========+\n | LowerLeft | 3.5889E | 46.9526N | -523.4622 | -4658.645 |\n +------------+-----------+------------+-----------+-----------+\n | LowerRight | 14.6209E | 47.0705N | 376.5378 | -4658.645 |\n +------------+-----------+------------+-----------+-----------+\n | UpperRight | 15.7208E | 54.7405N | 376.5378 | -3758.645 |\n +------------+-----------+------------+-----------+-----------+\n | UpperLeft | 2.0715E | 54.5877N | -523.4622 | -3758.645 |\n +------------+-----------+------------+-----------+-----------+\n\n .. table:: Coordinates for 1100km x 900km grid\n\n +------------+-----------+------------+-----------+-----------+\n | Coordinate | lon | lat | x | y |\n +============+===========+============+===========+===========+\n | LowerLeft | 4.6759E | 46.1929N | -443.4622 | -4758.645 |\n +------------+-----------+------------+-----------+-----------+\n | LowerRight | 15.4801E | 46.1827N | 456.5378 | -4758.645 |\n +------------+-----------+------------+-----------+-----------+\n | UpperRight | 17.1128E | 55.5342N | 456.5378 | -3658.645 |\n +------------+-----------+------------+-----------+-----------+\n | UpperLeft | 3.0889E | 55.5482N | -443.4622 | -3658.645 |\n +------------+-----------+------------+-----------+-----------+\n\n .. 
table:: Coordinates for 1500km x 1400km grid\n\n +------------+-----------+------------+-----------+-----------+\n | Coordinate | lon | lat | x | y |\n +============+===========+============+===========+===========+\n | LowerLeft | 2.3419E | 43.9336N | -673.4622 | -5008.645 |\n +------------+-----------+------------+-----------+-----------+\n\n Parameters\n ----------\n nrows : int\n number of rows (460, 900 by default, 1100, 1500)\n ncols : int\n number of columns (460, 900 by default, 1400)\n trig : boolean\n if True, uses trigonometric formulas for calculation\n if False, uses osr spatial reference system to transform between\n projections\n `trig` is recommended to be False, however, the two ways of computation\n are expected to be equivalent.\n wgs84 : boolean\n if True, output coordinates are in wgs84 lonlat format (default: False)\n\n Returns\n -------\n radolan_grid : :class:`numpy:numpy.ndarray`\n Array of shape (rows, cols, 2) xy- or lonlat-grid.\n\n Examples\n --------\n\n >>> # using osr spatial reference transformation\n >>> import wradlib.georef as georef # noqa\n >>> radolan_grid = georef.get_radolan_grid()\n >>> print(\"{0}, ({1:.4f}, {2:.4f})\".format(radolan_grid.shape, *radolan_grid[0,0,:])) # noqa\n (900, 900, 2), (-523.4622, -4658.6447)\n\n >>> # using pure trigonometric transformations\n >>> import wradlib.georef as georef\n >>> radolan_grid = georef.get_radolan_grid(trig=True)\n >>> print(\"{0}, ({1:.4f}, {2:.4f})\".format(radolan_grid.shape, *radolan_grid[0,0,:])) # noqa\n (900, 900, 2), (-523.4622, -4658.6447)\n\n >>> # using osr spatial reference transformation\n >>> import wradlib.georef as georef\n >>> radolan_grid = georef.get_radolan_grid(1500, 1400)\n >>> print(\"{0}, ({1:.4f}, {2:.4f})\".format(radolan_grid.shape, *radolan_grid[0,0,:])) # noqa\n (1500, 1400, 2), (-673.4622, -5008.6447)\n\n >>> # using osr spatial reference transformation\n >>> import wradlib.georef as georef\n >>> radolan_grid = georef.get_radolan_grid(900, 900, wgs84=True)\n >>> print(\"{0}, ({1:.4f}, {2:.4f})\".format(radolan_grid.shape, *radolan_grid[0,0,:])) # noqa\n (900, 900, 2), (3.5889, 46.9526)\n\n See :ref:`/notebooks/radolan/radolan_grid.ipynb#\\\nPolar-Stereographic-Projection`.\n\n Raises\n ------\n TypeError, ValueError\n \"\"\"\n\n # setup default parameters in dicts\n tiny = {'j_0': 450, 'i_0': 450, 'res': 2}\n small = {'j_0': 460, 'i_0': 460, 'res': 2}\n normal = {'j_0': 450, 'i_0': 450, 'res': 1}\n normal_wx = {'j_0': 370, 'i_0': 550, 'res': 1}\n extended = {'j_0': 600, 'i_0': 800, 'res': 1}\n griddefs = {(450, 450): tiny, (460, 460): small,\n (900, 900): normal, (1100, 900): normal_wx,\n (1500, 1400): extended}\n\n # type and value checking\n if nrows and ncols:\n if not (isinstance(nrows, int) and isinstance(ncols, int)):\n raise TypeError(\"wradlib.georef: Parameter *nrows* \"\n \"and *ncols* not integer\")\n if (nrows, ncols) not in griddefs.keys():\n raise ValueError(\"wradlib.georef: Parameter *nrows* \"\n \"and *ncols* mismatch.\")\n else:\n # fallback for call without parameters\n nrows = 900\n ncols = 900\n\n # tiny, small, normal or extended grid check\n # reference point changes according to radolan composit format\n j_0 = griddefs[(nrows, ncols)]['j_0']\n i_0 = griddefs[(nrows, ncols)]['i_0']\n res = griddefs[(nrows, ncols)]['res']\n\n x_0, y_0 = get_radolan_coords(9.0, 51.0, trig=trig)\n\n x_arr = np.arange(x_0 - j_0, x_0 - j_0 + ncols * res, res)\n y_arr = np.arange(y_0 - i_0, y_0 - i_0 + nrows * res, res)\n x, y = np.meshgrid(x_arr, y_arr)\n\n radolan_grid = 
np.dstack((x, y))\n\n if wgs84:\n\n if trig:\n # inverse projection\n lon0 = 10. # central meridian of projection\n lat0 = 60. # standard parallel of projection\n\n sinlat0 = np.sin(np.radians(lat0))\n\n fac = (6370.040 ** 2.) * ((1. + sinlat0) ** 2.)\n lon = np.degrees(np.arctan((-x / y))) + lon0\n lat = np.degrees(np.arcsin((fac - (x ** 2. + y ** 2.)) /\n (fac + (x ** 2. + y ** 2.))))\n radolan_grid = np.dstack((lon, lat))\n else:\n # create radolan projection osr object\n proj_stereo = projection.create_osr(\"dwd-radolan\")\n\n # create wgs84 projection osr object\n proj_wgs = projection.get_default_projection()\n\n radolan_grid = projection.reproject(radolan_grid,\n projection_source=proj_stereo,\n projection_target=proj_wgs)\n\n return radolan_grid\n\n\ndef xyz_to_spherical(xyz, alt=0, proj=None, ke=4. / 3.):\n \"\"\"Returns spherical representation (r, theta, phi) of given cartesian\n coordinates (x, y, z) with respect to the reference altitude (asl)\n considering earth's geometry (proj).\n\n Parameters\n ----------\n xyz : :class:`numpy:numpy.ndarray`\n Array of shape (..., 3). Contains cartesian coordinates.\n alt : float\n Altitude (in meters)\n defaults to 0.\n proj : osr object\n projection of the source coordinates (aeqd) with spheroid model\n defaults to None.\n ke : float\n Adjustment factor to account for the refractivity gradient that\n affects radar beam propagation. In principle this is wavelength-\n dependent. The default of 4/3 is a good approximation for most\n weather radar wavelengths\n\n Returns\n -------\n r : :class:`numpy:numpy.ndarray`\n Array of xyz.shape. Contains the radial distances.\n theta: :class:`numpy:numpy.ndarray`\n Array of xyz.shape. Contains the elevation angles.\n phi : :class:`numpy:numpy.ndarray`\n Array of xyz.shape. Contains the azimuthal angles.\n \"\"\"\n\n # get the approximate radius of the projection's ellipsoid\n # for the latitude_of_center, if no projection is given assume\n # spherical earth\n try:\n lat0 = proj.GetProjParm('latitude_of_center')\n re = projection.get_earth_radius(lat0, proj)\n except Exception:\n re = 6370040.\n\n # calculate xy-distance\n s = np.sqrt(np.sum(xyz[..., 0:2] ** 2, axis=-1))\n\n # calculate earth's arc angle\n gamma = s / (re * ke)\n\n # calculate elevation angle theta\n numer = np.cos(gamma) - (re * ke + alt) / (re * ke + xyz[..., 2])\n denom = np.sin(gamma)\n theta = np.arctan(numer / denom)\n\n # calculate radial distance r\n r = (re * ke + xyz[..., 2]) * denom / np.cos(theta)\n # another method using gamma only, but slower\n # keep it here for reference\n # f1 = (re * ke + xyz[..., 2])\n # f2 = (re * ke + alt)\n # r = np.sqrt(f1**2 + f2**2 - 2 * f1 * f2 * np.cos(gamma))\n\n # calculate azimuth angle phi\n phi = 90 - np.rad2deg(np.arctan2(xyz[..., 1], xyz[..., 0]))\n phi[phi <= 0] += 360\n\n return r, phi, np.degrees(theta)\n" ]
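The trig=True branch of get_radolan_coords above is compact enough to verify by hand. Below is a minimal standalone sketch of the same polar-stereographic forward transform (the function name radolan_xy_trig is introduced here for illustration); applied to the grid origin (9.0 deg E, 51.0 deg N) used by get_radolan_grid and shifted by the 900x900 offsets j_0 = i_0 = 450, it reproduces the LowerLeft corner of the first coordinate table.

import numpy as np

def radolan_xy_trig(lon, lat):
    """Polar-stereographic forward transform, mirroring the trig=True
    branch of get_radolan_coords: standard parallel 60 deg N, central
    meridian 10 deg E, spherical earth of radius 6370.040 km."""
    phi_0 = np.radians(60.0)                  # standard parallel
    phi_m = np.radians(lat)
    lam = np.radians(np.asarray(lon) - 10.0)  # offset from central meridian
    er = 6370.040                             # earth radius [km]
    m_phi = (1.0 + np.sin(phi_0)) / (1.0 + np.sin(phi_m))  # stereographic scale
    x = er * m_phi * np.cos(phi_m) * np.sin(lam)
    y = -er * m_phi * np.cos(phi_m) * np.cos(lam)
    return x, y

x_0, y_0 = radolan_xy_trig(9.0, 51.0)  # approx. (-73.46, -4208.65)
print(x_0 - 450, y_0 - 450)            # approx. -523.46 -4658.65, cf. LowerLeft above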
[ [ "numpy.squeeze", "numpy.array" ], [ "numpy.radians", "numpy.arctan", "numpy.arcsin", "numpy.arange", "numpy.degrees", "numpy.cos", "numpy.dstack", "numpy.sin", "numpy.arctan2", "numpy.meshgrid", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tadyvn/Orchid_combine
[ "adbb372c6fd2719d9c2377a5249c58bfeea5132e" ]
[ "data/config.py" ]
[ "from backbone import ResNetBackbone, VGGBackbone, ResNetBackboneGN, DarkNetBackbone, MobileNetV2Backbone\nfrom math import sqrt\nimport torch\n\n# for making bounding boxes pretty\nCOLORS = ((244, 67, 54),\n (233, 30, 99),\n (156, 39, 176),\n (103, 58, 183),\n ( 63, 81, 181),\n ( 33, 150, 243),\n ( 3, 169, 244),\n ( 0, 188, 212),\n ( 0, 150, 136),\n ( 76, 175, 80),\n (139, 195, 74),\n (205, 220, 57),\n (255, 235, 59),\n (255, 193, 7),\n (255, 152, 0),\n (255, 87, 34),\n (121, 85, 72),\n (158, 158, 158),\n ( 96, 125, 139))\n\n\n# These are in BGR and are for ImageNet\nMEANS = (103.94, 116.78, 123.68)\nSTD = (57.38, 57.12, 58.40)\n\nCOCO_CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',\n 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',\n 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',\n 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\n 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\n 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',\n 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',\n 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',\n 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n 'scissors', 'teddy bear', 'hair drier', 'toothbrush')\n\nCOCO_LABEL_MAP = { 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8,\n 9: 9, 10: 10, 11: 11, 13: 12, 14: 13, 15: 14, 16: 15, 17: 16,\n 18: 17, 19: 18, 20: 19, 21: 20, 22: 21, 23: 22, 24: 23, 25: 24,\n 27: 25, 28: 26, 31: 27, 32: 28, 33: 29, 34: 30, 35: 31, 36: 32,\n 37: 33, 38: 34, 39: 35, 40: 36, 41: 37, 42: 38, 43: 39, 44: 40,\n 46: 41, 47: 42, 48: 43, 49: 44, 50: 45, 51: 46, 52: 47, 53: 48,\n 54: 49, 55: 50, 56: 51, 57: 52, 58: 53, 59: 54, 60: 55, 61: 56,\n 62: 57, 63: 58, 64: 59, 65: 60, 67: 61, 70: 62, 72: 63, 73: 64,\n 74: 65, 75: 66, 76: 67, 77: 68, 78: 69, 79: 70, 80: 71, 81: 72,\n 82: 73, 84: 74, 85: 75, 86: 76, 87: 77, 88: 78, 89: 79, 90: 80}\n\nYOUTUBE_VIS_CLASSES = ('person', 'giant_panda', 'lizard', 'parrot', 'skateboard',\n 'sedan', 'ape', 'dog', 'snake', 'monkey', 'hand', 'rabbit',\n 'duck', 'cat', 'cow', 'fish', 'train', 'horse', 'turtle',\n 'bear', 'motorbike', 'giraffe', 'leopard', 'fox', 'deer',\n 'owl', 'surfboard', 'airplane', 'truck', 'zebra', 'tiger',\n 'elephant', 'snowboard', 'boat', 'shark', 'mouse', 'frog',\n 'eagle', 'earless_seal', 'tennis_racket')\n\nYOUTUBE_VIS_LABEL_MAP = { 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7,\n 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14,\n 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21,\n 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28,\n 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 34: 34, 35: 35,\n 36: 36, 37: 37, 38: 38, 39: 39, 40: 40}\n\nCOCO_INV_LABEL_MAP = {t: s for s, t in COCO_LABEL_MAP.items()}\n\nYTVIS_COCO_CLASS_MAP = {'person': 'person', 'skateboard': 'skateboard', 'sedan': 'car',\n 'dog': 'dog', 'cat': 'cat', 'cow': 'cow', 'train': 'train',\n 'horse': 'horse', 'bear': 'bear', 'motorbike': 'motorcycle',\n 'giraffe': 'giraffe', 'surfboard': 'surfboard', 'airplane': 'airplane',\n 'truck': 'truck', 'zebra': 'zebra', 'elephant': 'elephant',\n 'snowboard': 'snowboard', 'boat': 'boat', 'tennis_racket': 'tennis racket'}\n\nCOCO_YTVIS_CLASS_MAP = {coco: ytvis for ytvis, coco in 
YTVIS_COCO_CLASS_MAP.items()}\nCOCO_YTVIS_LABEL_MAP = {COCO_INV_LABEL_MAP[COCO_CLASSES.index(coco) + 1]: YOUTUBE_VIS_CLASSES.index(ytvis) + 1 for coco, ytvis in COCO_YTVIS_CLASS_MAP.items()}\nCOCO_INTER_LABEL_MAP = {COCO_INV_LABEL_MAP[COCO_CLASSES.index(coco) + 1]: COCO_CLASSES.index(coco) + 1 for coco in COCO_YTVIS_CLASS_MAP}\n\nMOTS_CLASSES = ('car', 'pedestrian')\nMOTS_LABEL_MAP = {1: 1, 2: 2}\n\n# ----------------------- CONFIG CLASS ----------------------- #\n\nclass Config(object):\n \"\"\"\n Holds the configuration for anything you want it to.\n To get the currently active config, call get_cfg().\n\n To use, just do cfg.x instead of cfg['x'].\n I made this because doing cfg['x'] all the time is dumb.\n \"\"\"\n\n def __init__(self, config_dict):\n for key, val in config_dict.items():\n self.__setattr__(key, val)\n\n def copy(self, new_config_dict={}):\n \"\"\"\n Copies this config into a new config object, making\n the changes given by new_config_dict.\n \"\"\"\n\n ret = Config(vars(self))\n \n for key, val in new_config_dict.items():\n ret.__setattr__(key, val)\n\n return ret\n\n def replace(self, new_config_dict):\n \"\"\"\n Copies new_config_dict into this config object.\n Note: new_config_dict can also be a config object.\n \"\"\"\n if isinstance(new_config_dict, Config):\n new_config_dict = vars(new_config_dict)\n\n for key, val in new_config_dict.items():\n self.__setattr__(key, val)\n \n def print(self):\n for k, v in vars(self).items():\n print(k, ' = ', v)\n\n# ----------------------- DATASETS ----------------------- #\n\ndataset_base = Config({\n 'name': 'Base Dataset',\n\n # Training images and annotations\n 'train_images': './data/coco/images/',\n 'train_info': 'path_to_annotation_file',\n\n # Calibration image folder for TensorRT INT8 conversion.\n 'calib_images': './data/coco/calib_images/',\n \n # Validation images and annotations.\n 'valid_images': './data/coco/images/',\n 'valid_info': 'path_to_annotation_file',\n\n # Whether or not to load GT. If this is False, eval.py quantitative evaluation won't work.\n 'has_gt': True,\n\n # Whether the dataset is a video dataset\n 'is_video': False,\n\n # A list of names for each of you classes.\n 'class_names': COCO_CLASSES,\n\n # COCO class ids aren't sequential, so this is a bandage fix. 
If your ids aren't sequential,\n # provide a map from category_id -> index in class_names + 1 (the +1 is there because it's 1-indexed).\n # If not specified, this just assumes category ids start at 1 and increase sequentially.\n 'label_map': None,\n\n # Dataset Map\n 'dataset_map': None,\n\n # Joint training\n 'joint': None\n})\n\ncoco2014_dataset = dataset_base.copy({\n 'name': 'COCO 2014',\n \n 'train_info': './data/coco/annotations/instances_train2014.json',\n 'valid_info': './data/coco/annotations/instances_val2014.json',\n\n 'label_map': COCO_LABEL_MAP\n})\n\ncoco2017_dataset = dataset_base.copy({\n 'name': 'COCO 2017',\n \n 'train_info': './data/coco/annotations/instances_train2017.json',\n 'valid_info': './data/coco/annotations/instances_val2017.json',\n\n 'label_map': COCO_LABEL_MAP\n})\n\ncoco2017_testdev_dataset = dataset_base.copy({\n 'name': 'COCO 2017 Test-Dev',\n\n 'valid_info': './data/coco/annotations/image_info_test-dev2017.json',\n 'has_gt': False,\n\n 'label_map': COCO_LABEL_MAP\n})\n\nflying_chairs_dataset = dataset_base.copy({\n 'name': 'FlyingChairs',\n\n 'trainval_info': './data/FlyingChairs/train_val.txt',\n 'trainval_images': './data/FlyingChairs/data/',\n})\n\nyoutube_vis_dataset = dataset_base.copy({\n 'name': 'YouTube VIS',\n\n 'class_names': YOUTUBE_VIS_CLASSES,\n 'label_map': YOUTUBE_VIS_LABEL_MAP,\n\n 'train_info': './data/YoutubeVIS/annotations/train.v4.json',\n 'train_images': './data/YoutubeVIS/train_all_frames/JPEGImages/',\n 'use_all_frames': False,\n\n # Calibration image folder for TensorRT INT8 conversion.\n # Because we need two frames (prev, next) to estimate flows and calibrate the warping module, we need to specify a parent folder for calibration images, and two sub-folders for previous and next frames correspondingly.\n # Use colon(:) to split folder (sub-folders).\n 'calib_images': './data/YoutubeVIS/calib_images/:prev:next',\n\n 'frame_offset_lb': 1,\n 'frame_offset_ub': 4,\n 'frame_offset_multiplier': 1,\n 'all_frame_direction': 'allway',\n\n 'valid_info': './data/YoutubeVIS/annotations/valid.v4.json',\n 'valid_images': './data/YoutubeVIS/valid_all_frames/v4/',\n\n 'images_per_video': 5,\n 'is_video': True\n})\n\n\nmy_custom_dataset = dataset_base.copy({\n 'name': 'My Dataset',\n\n 'train_images': '/home/tang/Downloads/YOLACTEDGE/yolact_edge_orchid/training_data/train_img',\n 'train_info': '/home/tang/Downloads/YOLACTEDGE/yolact_edge_orchid/training_data/train_annotation/train.json',\n\n 'valid_images': '//home/tang/Downloads/YOLACTEDGE/yolact_edge_orchid/training_data/validate_img',\n 'valid_info': '/home/tang/Downloads/YOLACTEDGE/yolact_edge_orchid/training_data/validate_annotation/validate.json',\n\n 'has_gt': True,\n #'class_names': ('bud', 'root', 'fullbud'),\n 'class_names': ('bud', 'root'),\n #'class_names': ('agl_1', 'agl_2', 'agl_3', 'agl_4', 'agl_5', 'agl_6','agl_7', 'agl_8', 'agl_9','agl_10', 'agl_11', 'agl_12'),\n\n # below is only needed for YTVIS-style video dataset.\n\n})\n\n# ----------------------- TRANSFORMS ----------------------- #\n\nresnet_transform = Config({\n 'channel_order': 'RGB',\n 'normalize': True,\n 'subtract_means': False,\n 'to_float': False,\n})\n\nvgg_transform = Config({\n # Note that though vgg is traditionally BGR,\n # the channel order of vgg_reducedfc.pth is RGB.\n 'channel_order': 'RGB',\n 'normalize': False,\n 'subtract_means': True,\n 'to_float': False,\n})\n\ndarknet_transform = Config({\n 'channel_order': 'RGB',\n 'normalize': False,\n 'subtract_means': False,\n 'to_float': 
True,\n})\n\nmobilenetv2_transform = Config({\n 'channel_order': 'RGB',\n 'normalize': True,\n 'subtract_means': False,\n 'to_float': False,\n})\n\n\n\n# ----------------------- BACKBONES ----------------------- #\n\nbackbone_base = Config({\n 'name': 'Base Backbone',\n 'path': 'path/to/pretrained/weights',\n 'type': object,\n 'args': tuple(),\n 'transform': resnet_transform,\n\n 'selected_layers': list(),\n 'pred_scales': list(),\n 'pred_aspect_ratios': list(),\n\n 'use_pixel_scales': False,\n 'preapply_sqrt': True,\n 'use_square_anchors': False,\n})\n\nresnet101_backbone = backbone_base.copy({\n 'name': 'ResNet101',\n 'path': 'resnet101_reducedfc.pth',\n 'type': ResNetBackbone,\n 'args': ([3, 4, 23, 3],),\n 'transform': resnet_transform,\n\n 'selected_layers': list(range(2, 8)),\n 'pred_scales': [[1]]*6,\n 'pred_aspect_ratios': [ [[0.66685089, 1.7073535, 0.87508774, 1.16524493, 0.49059086]] ] * 6,\n})\n\nresnet101_gn_backbone = backbone_base.copy({\n 'name': 'ResNet101_GN',\n 'path': 'R-101-GN.pkl',\n 'type': ResNetBackboneGN,\n 'args': ([3, 4, 23, 3],),\n 'transform': resnet_transform,\n\n 'selected_layers': list(range(2, 8)),\n 'pred_scales': [[1]]*6,\n 'pred_aspect_ratios': [ [[0.66685089, 1.7073535, 0.87508774, 1.16524493, 0.49059086]] ] * 6,\n})\n\nresnet152_backbone = resnet101_backbone.copy({\n 'name': 'ResNet152',\n 'path': 'resnet152-b121ed2d.pth',\n 'type': ResNetBackbone,\n 'args': ([3, 8, 36, 3],),\n 'transform': resnet_transform,\n})\n\nresnet50_backbone = resnet101_backbone.copy({\n 'name': 'ResNet50',\n 'path': 'resnet50-19c8e357.pth',\n 'type': ResNetBackbone,\n 'args': ([3, 4, 6, 3],),\n 'transform': resnet_transform,\n})\n\ndarknet53_backbone = backbone_base.copy({\n 'name': 'DarkNet53',\n 'path': 'darknet53.pth',\n 'type': DarkNetBackbone,\n 'args': ([1, 2, 8, 8, 4],),\n 'transform': darknet_transform,\n\n 'selected_layers': list(range(3, 9)),\n 'pred_scales': [[3.5, 4.95], [3.6, 4.90], [3.3, 4.02], [2.7, 3.10], [2.1, 2.37], [1.8, 1.92]],\n 'pred_aspect_ratios': [ [[1, sqrt(2), 1/sqrt(2), sqrt(3), 1/sqrt(3)][:n], [1]] for n in [3, 5, 5, 5, 3, 3] ],\n})\n\nvgg16_arch = [[64, 64],\n [ 'M', 128, 128],\n [ 'M', 256, 256, 256],\n [('M', {'kernel_size': 2, 'stride': 2, 'ceil_mode': True}), 512, 512, 512],\n [ 'M', 512, 512, 512],\n [('M', {'kernel_size': 3, 'stride': 1, 'padding': 1}),\n (1024, {'kernel_size': 3, 'padding': 6, 'dilation': 6}),\n (1024, {'kernel_size': 1})]]\n\nvgg16_backbone = backbone_base.copy({\n 'name': 'VGG16',\n 'path': 'vgg16_reducedfc.pth',\n 'type': VGGBackbone,\n 'args': (vgg16_arch, [(256, 2), (128, 2), (128, 1), (128, 1)], [3]),\n 'transform': vgg_transform,\n\n 'selected_layers': [3] + list(range(5, 10)),\n 'pred_scales': [[5, 4]]*6,\n 'pred_aspect_ratios': [ [[1], [1, sqrt(2), 1/sqrt(2), sqrt(3), 1/sqrt(3)][:n]] for n in [3, 5, 5, 5, 3, 3] ],\n})\n\nmobilenetv2_arch = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n]\n\nmobilenetv2_backbone = backbone_base.copy({\n 'name': 'MobileNetV2',\n 'path': 'mobilenet_v2-b0353104.pth',\n 'type': MobileNetV2Backbone,\n 'args': (1.0, mobilenetv2_arch, 8),\n 'transform': mobilenetv2_transform,\n\n 'selected_layers': [3, 4, 6],\n \n 'pred_aspect_ratios': [ [[1, 1/2, 2]] ]*5,\n 'pred_scales': [[24], [48], [96], [192], [384]],\n\n 'use_pixel_scales': True,\n 'preapply_sqrt': False,\n 'use_square_anchors': True,\n})\n\n\n\n# ----------------------- MASK BRANCH TYPES ----------------------- #\n\nmask_type = Config({\n # 
Direct produces masks directly as the output of each pred module.\n # This is denoted as fc-mask in the paper.\n # Parameters: mask_size, use_gt_bboxes\n 'direct': 0,\n\n # Lincomb produces coefficients as the output of each pred module then uses those coefficients\n # to linearly combine features from a prototype network to create image-sized masks.\n # Parameters:\n # - masks_to_train (int): Since we're producing (near) full image masks, it'd take too much\n # vram to backprop on every single mask. Thus we select only a subset.\n # - mask_proto_src (int): The input layer to the mask prototype generation network. This is an\n # index in backbone.layers. Use to use the image itself instead.\n # - mask_proto_net (list<tuple>): A list of layers in the mask proto network with the last one\n # being where the masks are taken from. Each conv layer is in\n # the form (num_features, kernel_size, **kwdargs). An empty\n # list means to use the source for prototype masks. If the\n # kernel_size is negative, this creates a deconv layer instead.\n # If the kernel_size is negative and the num_features is None,\n # this creates a simple bilinear interpolation layer instead.\n # - mask_proto_bias (bool): Whether to include an extra coefficient that corresponds to a proto\n # mask of all ones.\n # - mask_proto_prototype_activation (func): The activation to apply to each prototype mask.\n # - mask_proto_mask_activation (func): After summing the prototype masks with the predicted\n # coeffs, what activation to apply to the final mask.\n # - mask_proto_coeff_activation (func): The activation to apply to the mask coefficients.\n # - mask_proto_crop (bool): If True, crop the mask with the predicted bbox during training.\n # - mask_proto_crop_expand (float): If cropping, the percent to expand the cropping bbox by\n # in each direction. This is to make the model less reliant\n # on perfect bbox predictions.\n # - mask_proto_loss (str [l1|disj]): If not None, apply an l1 or disjunctive regularization\n # loss directly to the prototype masks.\n # - mask_proto_binarize_downsampled_gt (bool): Binarize GT after dowsnampling during training?\n # - mask_proto_normalize_mask_loss_by_sqrt_area (bool): Whether to normalize mask loss by sqrt(sum(gt))\n # - mask_proto_reweight_mask_loss (bool): Reweight mask loss such that background is divided by\n # #background and foreground is divided by #foreground.\n # - mask_proto_grid_file (str): The path to the grid file to use with the next option.\n # This should be a numpy.dump file with shape [numgrids, h, w]\n # where h and w are w.r.t. 
the mask_proto_src convout.\n # - mask_proto_use_grid (bool): Whether to add extra grid features to the proto_net input.\n # - mask_proto_coeff_gate (bool): Add an extra set of sigmoided coefficients that is multiplied\n # into the predicted coefficients in order to \"gate\" them.\n # - mask_proto_prototypes_as_features (bool): For each prediction module, downsample the prototypes\n # to the convout size of that module and supply the prototypes as input\n # in addition to the already supplied backbone features.\n # - mask_proto_prototypes_as_features_no_grad (bool): If the above is set, don't backprop gradients to\n # to the prototypes from the network head.\n # - mask_proto_remove_empty_masks (bool): Remove masks that are downsampled to 0 during loss calculations.\n # - mask_proto_reweight_coeff (float): The coefficient to multiple the forground pixels with if reweighting.\n # - mask_proto_coeff_diversity_loss (bool): Apply coefficient diversity loss on the coefficients so that the same\n # instance has similar coefficients.\n # - mask_proto_coeff_diversity_alpha (float): The weight to use for the coefficient diversity loss.\n # - mask_proto_normalize_emulate_roi_pooling (bool): Normalize the mask loss to emulate roi pooling's affect on loss.\n # - mask_proto_double_loss (bool): Whether to use the old loss in addition to any special new losses.\n # - mask_proto_double_loss_alpha (float): The alpha to weight the above loss.\n 'lincomb': 1,\n})\n\n\n\n\n\n# ----------------------- ACTIVATION FUNCTIONS ----------------------- #\n\nactivation_func = Config({\n 'tanh': torch.tanh,\n 'sigmoid': torch.sigmoid,\n 'softmax': lambda x: torch.nn.functional.softmax(x, dim=-1),\n 'relu': lambda x: torch.nn.functional.relu(x, inplace=True),\n 'none': lambda x: x,\n})\n\n\n\n\n\n# ----------------------- FPN DEFAULTS ----------------------- #\n\nfpn_base = Config({\n # The number of features to have in each FPN layer\n 'num_features': 256,\n\n # The upsampling mode used\n 'interpolation_mode': 'bilinear',\n\n # The number of extra layers to be produced by downsampling starting at P5\n 'num_downsample': 1,\n\n # Whether to down sample with a 3x3 stride 2 conv layer instead of just a stride 2 selection\n 'use_conv_downsample': False,\n\n # Whether to pad the pred layers with 1 on each side (I forgot to add this at the start)\n # This is just here for backwards compatibility\n 'pad': True,\n})\n\n\n\n# ------------------------ FLOW DEFAULTS ------------------------ #\nflow_base = Config({\n 'encode_layers': [[4, 1], [2], [4]],\n 'encode_channels': 256,\n 'fine_tune_layers': None,\n 'warp_layers': \"P4P5\",\n 'use_spa': False,\n 'use_normalized_spa': False,\n 'use_shuffle_cat': False,\n 'num_groups': 1,\n 'use_scale_factor': True,\n 'use_scale_bias': True,\n 'reduce_channels': [],\n 'warp_mode': 'none',\n 'flow_layer': 'each',\n 'base_backward': True,\n 'feature_matching_loss': None,\n 'fm_loss_loc': 'L',\n 'fm_loss_alpha': 1.0,\n 'train_flow': False,\n 'model': 'none',\n})\n\n\n# ----------------------- CONFIG DEFAULTS ----------------------- #\n\ncoco_base_config = Config({\n 'dataset': coco2014_dataset,\n 'joint_dataset': None,\n 'num_classes': 2, # This should include the background class\n\n 'max_iter': 400000,\n\n # The maximum number of detections for evaluation\n 'max_num_detections': 100,\n\n # dw' = momentum * dw - lr * (grad + decay * w)\n 'lr': 1e-3,\n 'momentum': 0.9,\n 'decay': 5e-4,\n\n # For each lr step, what to multiply the lr with\n 'gamma': 0.1,\n 'lr_steps': (280000, 360000, 
400000),\n\n # Initial learning rate to linearly warmup from (if until > 0)\n 'lr_warmup_init': 1e-4,\n\n # If > 0 then increase the lr linearly from warmup_init to lr each iter for until iters\n 'lr_warmup_until': 500,\n\n # The terms to scale the respective loss by\n 'conf_alpha': 1,\n 'bbox_alpha': 1.5,\n 'mask_alpha': 0.4 / 256 * 140 * 140, # Some funky equation. Don't worry about it.\n\n # Eval.py sets this if you just want to run YOLACT as a detector\n 'eval_mask_branch': True,\n\n # See mask_type for details.\n 'mask_type': mask_type.direct,\n 'mask_size': 16,\n 'masks_to_train': 100,\n 'mask_proto_src': None,\n 'mask_proto_net': [(256, 3, {}), (256, 3, {})],\n 'mask_proto_bias': False,\n 'mask_proto_prototype_activation': activation_func.relu,\n 'mask_proto_mask_activation': activation_func.sigmoid,\n 'mask_proto_coeff_activation': activation_func.tanh,\n 'mask_proto_crop': True,\n 'mask_proto_crop_expand': 0,\n 'mask_proto_loss': None,\n 'mask_proto_binarize_downsampled_gt': True,\n 'mask_proto_normalize_mask_loss_by_sqrt_area': False,\n 'mask_proto_reweight_mask_loss': False,\n 'mask_proto_grid_file': 'data/grid.npy',\n 'mask_proto_use_grid': False,\n 'mask_proto_coeff_gate': False,\n 'mask_proto_prototypes_as_features': False,\n 'mask_proto_prototypes_as_features_no_grad': False,\n 'mask_proto_remove_empty_masks': False,\n 'mask_proto_reweight_coeff': 1,\n 'mask_proto_coeff_diversity_loss': False,\n 'mask_proto_coeff_diversity_alpha': 1,\n 'mask_proto_normalize_emulate_roi_pooling': False,\n 'mask_proto_double_loss': False,\n 'mask_proto_double_loss_alpha': 1,\n\n # SSD data augmentation parameters\n # Randomize hue, vibrance, etc.\n 'augment_photometric_distort': True,\n # Have a chance to scale down the image and pad (to emulate smaller detections)\n 'augment_expand': True,\n # Potentialy sample a random crop from the image and put it in a random place\n 'augment_random_sample_crop': True,\n # Mirror the image with a probability of 1/2\n 'augment_random_mirror': True,\n # Flip the image vertically with a probability of 1/2\n 'augment_random_flip': False,\n # With uniform probability, rotate the image [0,90,180,270] degrees\n 'augment_random_rot90': False,\n\n # Discard detections with width and height smaller than this (in absolute width and height)\n 'discard_box_width': 4 / 550,\n 'discard_box_height': 4 / 550,\n\n # If using batchnorm anywhere in the backbone, freeze the batchnorm layer during training.\n # Note: any additional batch norm layers after the backbone will not be frozen.\n 'freeze_bn': False,\n\n # Set this to a config object if you want an FPN (inherit from fpn_base). 
See fpn_base for details.\n 'fpn': None,\n\n # Use the same weights for each network head\n 'share_prediction_module': False,\n\n # For hard negative mining, instead of using the negatives that are leastl confidently background,\n # use negatives that are most confidently not background.\n 'ohem_use_most_confident': False,\n\n # Use focal loss as described in https://arxiv.org/pdf/1708.02002.pdf instead of OHEM\n 'use_focal_loss': False,\n 'focal_loss_alpha': 0.25,\n 'focal_loss_gamma': 2,\n \n # The initial bias toward forground objects, as specified in the focal loss paper\n 'focal_loss_init_pi': 0.01,\n\n # Whether to use sigmoid focal loss instead of softmax, all else being the same.\n 'use_sigmoid_focal_loss': False,\n\n # Use class[0] to be the objectness score and class[1:] to be the softmax predicted class.\n # Note: at the moment this is only implemented if use_focal_loss is on.\n 'use_objectness_score': False,\n\n # Adds a global pool + fc layer to the smallest selected layer that predicts the existence of each of the 80 classes.\n # This branch is only evaluated during training time and is just there for multitask learning.\n 'use_class_existence_loss': False,\n 'class_existence_alpha': 1,\n\n # Adds a 1x1 convolution directly to the biggest selected layer that predicts a semantic segmentations for each of the 80 classes.\n # This branch is only evaluated during training time and is just there for multitask learning.\n 'use_semantic_segmentation_loss': False,\n 'semantic_segmentation_alpha': 1,\n\n # Match gt boxes using the Box2Pix change metric instead of the standard IoU metric.\n # Note that the threshold you set for iou_threshold should be negative with this setting on.\n 'use_change_matching': False,\n\n # Uses the same network format as mask_proto_net, except this time it's for adding extra head layers before the final\n # prediction in prediction modules. If this is none, no extra layers will be added.\n 'extra_head_net': None,\n\n # What params should the final head layers have (the ones that predict box, confidence, and mask coeffs)\n 'head_layer_params': {'kernel_size': 3, 'padding': 1},\n\n # Add extra layers between the backbone and the network heads\n # The order is (bbox, conf, mask)\n 'extra_layers': (0, 0, 0),\n\n # During training, to match detections with gt, first compute the maximum gt IoU for each prior.\n # Then, any of those priors whose maximum overlap is over the positive threshold, mark as positive.\n # For any priors whose maximum is less than the negative iou threshold, mark them as negative.\n # The rest are neutral and not used in calculating the loss.\n 'positive_iou_threshold': 0.5,\n 'negative_iou_threshold': 0.5,\n\n # If less than 1, anchors treated as a negative that have a crowd iou over this threshold with\n # the crowd boxes will be treated as a neutral.\n 'crowd_iou_threshold': 1,\n\n # This is filled in at runtime by Yolact's __init__, so don't touch it\n 'mask_dim': None,\n\n # Input image size. 
If preserve_aspect_ratio is False, min_size is ignored.\n 'min_size': 200,\n 'max_size': 300,\n \n # Whether or not to do post processing on the cpu at test time\n 'force_cpu_nms': True,\n\n # Whether to use mask coefficient cosine similarity nms instead of bbox iou nms\n 'use_coeff_nms': False,\n\n # Whether or not to have a separate branch whose sole purpose is to act as the coefficients for coeff_diversity_loss\n # Remember to turn on coeff_diversity_loss, or these extra coefficients won't do anything!\n # To see their effect, also remember to turn on use_coeff_nms.\n 'use_instance_coeff': False,\n 'num_instance_coeffs': 64,\n\n # Whether or not to tie the mask loss / box loss to 0\n 'train_masks': True,\n 'train_boxes': True,\n # If enabled, the gt masks will be cropped using the gt bboxes instead of the predicted ones.\n # This speeds up training time considerably but results in much worse mAP at test time.\n 'use_gt_bboxes': False,\n\n # Whether or not to preserve aspect ratio when resizing the image.\n # If True, uses the faster r-cnn resizing scheme.\n # If False, all images are resized to max_size x max_size\n 'preserve_aspect_ratio': False,\n\n # Whether or not to use the prediction module (c) from DSSD\n 'use_prediction_module': False,\n\n # Whether or not to use the predicted coordinate scheme from Yolo v2\n 'use_yolo_regressors': False,\n \n # For training, bboxes are considered \"positive\" if their anchors have a 0.5 IoU overlap\n # or greater with a ground truth box. If this is true, instead of using the anchor boxes\n # for this IoU computation, the matching function will use the predicted bbox coordinates.\n # Don't turn this on if you're not using yolo regressors!\n 'use_prediction_matching': False,\n\n # A list of settings to apply after the specified iteration. 
Each element of the list should look like\n # (iteration, config_dict) where config_dict is a dictionary you'd pass into a config object's init.\n 'delayed_settings': [],\n\n # Use command-line arguments to set this.\n 'no_jit': False,\n\n 'backbone': None,\n 'name': 'base_config',\n})\n\n\n# ----------------------- YOLACT v1.0 CONFIGS ----------------------- #\n\nyolact_base_config = coco_base_config.copy({\n 'name': 'yolact_base',\n\n # Dataset stuff\n 'dataset': coco2017_dataset,\n 'num_classes': len(coco2017_dataset.class_names) + 1,\n\n # Image Size\n #'max_size': 550,\n 'max_size': 640,\n #'max_size': 320, \n # Training params\n 'lr_schedule': 'step',\n 'lr_steps': (280000, 600000, 700000, 750000),\n 'max_iter': 800000,\n\n 'flow': flow_base,\n \n # Backbone Settings\n 'backbone': resnet101_backbone.copy({\n 'selected_layers': list(range(1, 4)),\n 'use_pixel_scales': True,\n 'preapply_sqrt': False,\n 'use_square_anchors': True, # This is for backward compatability with a bug\n\n 'pred_aspect_ratios': [ [[1, 1/2, 2]] ]*5,\n 'pred_scales': [[24], [48], [96], [192], [384]],\n }),\n\n # FPN Settings\n 'fpn': fpn_base.copy({\n 'use_conv_downsample': True,\n 'num_downsample': 2,\n }),\n\n # Mask Settings\n 'mask_type': mask_type.lincomb,\n 'mask_alpha': 6.125,\n 'mask_proto_src': 0,\n 'mask_proto_net': [(256, 3, {'padding': 1})] * 3 + [(None, -2, {}), (256, 3, {'padding': 1})] + [(32, 1, {})],\n 'mask_proto_normalize_emulate_roi_pooling': True,\n\n # Other stuff\n 'share_prediction_module': True,\n 'extra_head_net': [(256, 3, {'padding': 1})],\n\n 'positive_iou_threshold': 0.5,\n 'negative_iou_threshold': 0.4,\n\n 'crowd_iou_threshold': 0.7,\n\n 'use_semantic_segmentation_loss': True,\n\n 'torch2trt_backbone': False,\n 'torch2trt_backbone_int8': False,\n 'torch2trt_protonet': False,\n 'torch2trt_protonet_int8': False,\n 'torch2trt_fpn': False,\n 'torch2trt_fpn_int8': False,\n 'torch2trt_prediction_module': False,\n 'torch2trt_prediction_module_int8': False,\n 'torch2trt_spa': False,\n 'torch2trt_spa_int8': False,\n 'torch2trt_flow_net': False,\n 'torch2trt_flow_net_int8': False,\n\n 'use_tensorrt_safe_mode': False,\n})\n\nyolact_edge_config = yolact_base_config.copy({\n 'name': 'yolact_edge',\n 'torch2trt_max_calibration_images': 100,\n 'torch2trt_backbone_int8': True,\n 'torch2trt_protonet_int8': True,\n 'torch2trt_fpn': True,\n 'torch2trt_prediction_module': True,\n 'dataset': my_custom_dataset.copy(),\n 'num_classes': 3, # two/three classes + 1 BG\n #'num_classes': 13, # 13 due to the first class start from 1, not zero. 
Edit in the next version\n})\n\nyolact_edge_mobilenetv2_config = yolact_edge_config.copy({\n 'name': 'yolact_edge_mobilenetv2',\n\n 'backbone': mobilenetv2_backbone\n})\n\nyolact_edge_vid_config = yolact_edge_config.copy({\n 'name': 'yolact_edge_vid',\n 'dataset': youtube_vis_dataset.copy({\n 'joint': 'coco',\n 'use_all_frames': True,\n 'images_per_video': 1,\n 'frame_offset_lb': 2,\n 'frame_offset_ub': 5,\n 'frame_offset_multiplier': 1,\n 'all_frame_direction': 'forward',\n }),\n\n 'torch2trt_spa': True,\n 'torch2trt_spa_int8': False,\n 'torch2trt_flow_net': False,\n 'torch2trt_flow_net_int8': True,\n\n 'joint_dataset': yolact_edge_config.dataset.copy({\n 'dataset_map': 'ytvis'\n }),\n 'lr': 2e-4,\n 'lr_warmup_init': 0,\n 'lr_schedule': 'cosine',\n 'max_iter': 200000,\n 'num_classes': len(youtube_vis_dataset.class_names) + 1,\n 'augment_expand': False,\n 'flow': flow_base.copy({\n 'encode_layers': [[1], [2], [4]],\n 'reduce_channels': [64],\n 'encode_channels': 64,\n 'num_groups': 1,\n 'use_shuffle_cat': False,\n 'base_backward': True,\n 'fine_tune_layers': 'flow_net,flow_net_pre_convs,spa,fpn_phase_2,proto_net,prediction_layers,semantic_seg_conv',\n 'selected_layers': [1, 2],\n 'warp_mode': 'flow',\n 'model': 'mini',\n 'use_pseudo_gt_flow_loss': False,\n 'feature_matching_loss': 'cosine',\n 'use_spa': True,\n 'fm_loss_loc': 'L+P',\n })\n})\n\nyolact_edge_vid_minimal_config = yolact_edge_vid_config.copy({\n 'name': 'yolact_edge_vid_minimal',\n 'torch2trt_spa': False,\n 'flow': yolact_edge_vid_config.flow.copy({\n 'fine_tune_layers': 'flow_net,flow_net_pre_convs,fpn_phase_2,proto_net,prediction_layers,semantic_seg_conv',\n 'use_spa': False,\n 'feature_matching_loss': None,\n })\n})\n\nyolact_edge_vid_trainflow_config = yolact_edge_vid_config.copy({\n 'name': 'yolact_edge_vid_trainflow',\n 'dataset': flying_chairs_dataset,\n 'lr': 2e-4,\n 'max_iter': 400000,\n 'flow': yolact_edge_vid_config.flow.copy({\n 'train_flow': True,\n 'base_backward': False,\n 'fine_tune_layers': 'flow_net,flow_net_pre_convs'\n })\n})\n\nyolact_edge_youtubevis_config = yolact_edge_vid_config.copy({\n 'name': 'yolact_edge_youtubevis',\n 'dataset': yolact_edge_vid_config.dataset.copy({\n 'use_all_frames': False,\n 'images_per_video': 1,\n }),\n\n 'torch2trt_spa': False,\n 'torch2trt_flow_net_int8': False,\n\n 'lr': 5e-4,\n 'lr_schedule': 'cosine',\n 'max_iter': 500000,\n 'augment_expand': True,\n 'flow': yolact_edge_vid_config.flow.copy({\n 'warp_mode': 'none',\n 'fine_tune_layers': None,\n 'use_spa': False\n })\n})\n\nyolact_resnet50_config = yolact_base_config.copy({\n 'name': 'yolact_resnet50',\n\n 'backbone': resnet50_backbone.copy({\n 'selected_layers': list(range(1, 4)),\n\n 'pred_scales': yolact_base_config.backbone.pred_scales,\n 'pred_aspect_ratios': yolact_base_config.backbone.pred_aspect_ratios,\n 'use_pixel_scales': True,\n 'preapply_sqrt': False,\n 'use_square_anchors': True, # This is for backward compatability with a bug\n }),\n})\n\nyolact_resnet152_config = yolact_base_config.copy({\n 'name': 'yolact_resnet152',\n\n 'backbone': resnet152_backbone.copy({\n 'selected_layers': list(range(1, 4)),\n\n 'pred_scales': yolact_base_config.backbone.pred_scales,\n 'pred_aspect_ratios': yolact_base_config.backbone.pred_aspect_ratios,\n 'use_pixel_scales': True,\n 'preapply_sqrt': False,\n 'use_square_anchors': True, # This is for backward compatability with a bug\n }),\n})\n\nyolact_edge_resnet50_config = yolact_edge_config.copy({\n 'name': 'yolact_edge_resnet50',\n 'backbone': 
yolact_resnet50_config.backbone\n})\n\nyolact_edge_vid_resnet50_config = yolact_edge_vid_config.copy({\n 'name': 'yolact_edge_vid_resnet50',\n 'backbone': yolact_resnet50_config.backbone\n})\n\nyolact_edge_vid_trainflow_resnet50_config = yolact_edge_vid_trainflow_config.copy({\n 'name': 'yolact_edge_vid_trainflow_resnet50',\n 'backbone': yolact_resnet50_config.backbone\n})\n\nyolact_edge_youtubevis_resnet50_config = yolact_edge_youtubevis_config.copy({\n 'name': 'yolact_edge_youtubevis_resnet50',\n 'backbone': yolact_resnet50_config.backbone\n})\n\n# Default config\ncfg = yolact_edge_config.copy()\n\ndef set_cfg(config_name:str):\n \"\"\" Sets the active config. Works even if cfg is already imported! \"\"\"\n global cfg\n\n # Note this is not just an eval because I'm lazy, but also because it can\n # be used like ssd300_config.copy({'max_size': 400}) for extreme fine-tuning\n cfg.replace(eval(config_name))\n\ndef set_dataset(dataset_name:str):\n \"\"\" Sets the dataset of the current config. \"\"\"\n cfg.dataset = eval(dataset_name)\n \n" ]
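The copy/replace idiom above is the entire configuration mechanism of this file: every *_config object is a shallow copy of an ancestor plus a dict of overrides. A minimal sketch of that idiom, condensed from the Config class defined above (the config names base and derived are illustrative only):

class Config(object):
    def __init__(self, config_dict):
        for key, val in config_dict.items():
            setattr(self, key, val)

    def copy(self, new_config_dict={}):
        # shallow-copy this config, then apply the overrides
        ret = Config(vars(self))
        for key, val in new_config_dict.items():
            setattr(ret, key, val)
        return ret

base = Config({'name': 'base', 'lr': 1e-3, 'max_size': 550})
derived = base.copy({'name': 'derived', 'max_size': 640})  # override only what changes

assert base.max_size == 550    # the parent config is untouched
assert derived.lr == 1e-3      # unspecified keys are inherited
assert derived.max_size == 640

One caveat worth noting: copy is shallow, so nested Config objects (backbone, fpn, flow, dataset) are shared between parent and derived configs. That is why the configs above always pass a fresh backbone.copy({...}) or flow_base.copy({...}) whenever a child needs different nested settings.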
[ [ "torch.nn.functional.softmax", "torch.nn.functional.relu" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cmlab-mira/Efficient-and-Phase-aware-Video-Super-resolution-for-Cardiac-MRI
[ "ec01b783f8acd41a7056431bad615896b8495f95", "ec01b783f8acd41a7056431bad615896b8495f95" ]
[ "src/data/datasets/dsb15_vsr_dataset.py", "src/runner/predictors/dsb15_misr_predictor.py" ]
[ "import numpy as np\nimport nibabel as nib\n\nfrom src.data.datasets.base_dataset import BaseDataset\nfrom src.data.transforms import compose\n\n\nclass Dsb15VSRDataset(BaseDataset):\n \"\"\"The dataset of the 2015 Data Science Bowl challenge for the Video Super-Resolution.\n \n Ref: https://www.kaggle.com/c/second-annual-data-science-bowl\n \n Args:\n downscale_factor (int): The downscale factor (2, 3, 4).\n transforms (list of Box): The preprocessing techniques applied to the data.\n augments (list of Box): The augmentation techniques applied to the training data (default: None).\n num_frames (int): The number of the frames of a sequence (default: 5).\n temporal_order (str): The order to form the sequence (default: 'last').\n 'last': The sequence would be {t-n+1, ..., t-1, t}.\n 'middle': The sequence would be {t-(n-1)//2, ..., t-1, t, t+1, ..., t+[(n-1)-(n-1)//2]}.\n \"\"\"\n def __init__(self, downscale_factor, transforms, augments=None, num_frames=5, temporal_order='last', **kwargs):\n super().__init__(**kwargs)\n if downscale_factor not in [2, 3, 4]:\n raise ValueError(f'The downscale factor should be 2, 3, 4. Got {downscale_factor}.')\n self.downscale_factor = downscale_factor\n\n self.transforms = compose(transforms)\n self.augments = compose(augments) \n self.num_frames = num_frames\n \n if temporal_order not in ['last', 'middle']:\n raise ValueError(f\"The temporal order should be 'last' or 'middle'. Got {temporal_order}.\")\n self.temporal_order = temporal_order \n\n # Save the data paths and the target frame index for training; only need to save the data paths\n # for validation to process dynamic length of the sequences.\n lr_paths = sorted((self.data_dir / self.type / 'LR' / f'X{downscale_factor}').glob('**/*2d+1d*.nii.gz'))\n hr_paths = sorted((self.data_dir / self.type / 'HR').glob('**/*2d+1d*.nii.gz'))\n if self.type == 'train':\n self.data = []\n for lr_path, hr_path in zip(lr_paths, hr_paths):\n T = nib.load(str(lr_path)).header.get_data_shape()[-1]\n self.data.extend([(lr_path, hr_path, t) for t in range(T)])\n else:\n self.data = [(lr_path, hr_path) for lr_path, hr_path in zip(lr_paths, hr_paths)]\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n if self.type == 'train':\n lr_path, hr_path, t = self.data[index]\n else:\n lr_path, hr_path = self.data[index]\n lr_imgs = nib.load(str(lr_path)).get_data() # (H, W, C, T)\n hr_imgs = nib.load(str(hr_path)).get_data() # (H, W, C, T)\n \n if self.type == 'train':\n # Compute the start and the end index of the sequence according to the temporal order.\n n = self.num_frames\n T = lr_imgs.shape[-1]\n if self.temporal_order == 'last':\n start, end = t - n + 1, t + 1\n elif self.temporal_order == 'middle':\n start, end = t - (n - 1) // 2, t + ((n - 1) - (n - 1) // 2) + 1\n if start < 0:\n lr_imgs = np.concatenate((lr_imgs[..., start:], lr_imgs[..., :end]), axis=-1)\n hr_imgs = np.concatenate((hr_imgs[..., start:], hr_imgs[..., :end]), axis=-1)\n elif end > T:\n end %= T\n lr_imgs = np.concatenate((lr_imgs[..., start:], lr_imgs[..., :end]), axis=-1)\n hr_imgs = np.concatenate((hr_imgs[..., start:], hr_imgs[..., :end]), axis=-1)\n else:\n lr_imgs = lr_imgs[..., start:end]\n hr_imgs = hr_imgs[..., start:end]\n imgs = [lr_imgs[..., t] for t in range(lr_imgs.shape[-1])] + \\\n [hr_imgs[..., t] for t in range(hr_imgs.shape[-1])] # list of (H, W, C)\n else:\n imgs = [lr_imgs[..., t] for t in range(lr_imgs.shape[-1])] + \\\n [hr_imgs[..., t] for t in range(hr_imgs.shape[-1])] # list of (H, W, C)\n\n if 
self.type == 'train':\n imgs = self.augments(*imgs)\n imgs = self.transforms(*imgs)\n imgs = [img.permute(2, 0, 1).contiguous() for img in imgs]\n lr_imgs, hr_imgs = imgs[:len(imgs) // 2], imgs[len(imgs) // 2:]\n return {'lr_imgs': lr_imgs, 'hr_imgs': hr_imgs, 'index': index}\n ", "import csv\nimport torch\nimport logging\nimport imageio\nimport numpy as np\nimport functools\nfrom scipy.misc import imsave\nfrom tqdm import tqdm\nfrom pathlib import Path\n\nfrom src.runner.predictors.base_predictor import BasePredictor\nfrom src.utils import denormalize\n\n\nclass Dsb15MISRPredictor(BasePredictor):\n \"\"\"The DSB15 predictor for the Multi-Images Super-Resolution.\n Args:\n saved_dir (str): The directory to save the predicted videos, images and metrics (default: None).\n exported (bool): Whether to export the predicted video, images and metrics (default: False).\n \"\"\"\n def __init__(self, saved_dir=None, exported=False, **kwargs):\n super().__init__(**kwargs)\n if self.test_dataloader.batch_size != 1:\n raise ValueError(f'The testing batch size should be 1. Got {self.test_dataloader.batch_size}.')\n\n if exported:\n self.saved_dir = Path(saved_dir)\n self.exported = exported\n self._denormalize = functools.partial(denormalize, dataset='dsb15')\n \n def predict(self):\n \"\"\"The testing process.\n \"\"\"\n self.net.eval()\n trange = tqdm(self.test_dataloader,\n total=len(self.test_dataloader),\n desc='testing')\n\n if self.exported:\n videos_dir = self.saved_dir / 'videos'\n imgs_dir = self.saved_dir / 'imgs'\n csv_path = self.saved_dir / 'results.csv'\n\n sr_imgs = []\n tmp_sid = None\n header = ['name'] + \\\n [metric_fn.__class__.__name__ for metric_fn in self.metric_fns] + \\\n [loss_fns.__class__.__name__ for loss_fns in self.loss_fns]\n results = [header]\n\n log = self._init_log()\n count = 0\n for batch in trange:\n batch = self._allocate_data(batch)\n inputs, target, index = self._get_inputs_targets(batch)\n with torch.no_grad():\n lr_path, hr_path, t = self.test_dataloader.dataset.data[index]\n filename = lr_path.parts[-1].split('.')[0]\n patient, _, sid = filename.split('_')\n \n output = self.net(inputs)\n losses = self._compute_losses(output, target)\n loss = (torch.stack(losses) * self.loss_weights).sum()\n metrics = self._compute_metrics(output, target, patient)\n\n if self.exported:\n fid = f'frame{t+1:0>2d}'\n _losses = [loss.item() for loss in losses]\n _metrics = [metric.item() for metric in metrics]\n filename = filename.replace('2d+1d', '2d').replace('sequence', 'slice') + f'_{fid}'\n results.append([filename, *_metrics, *_losses])\n\n # Save the video.\n if sid != tmp_sid and index != 0:\n output_dir = videos_dir / patient\n if not output_dir.is_dir():\n output_dir.mkdir(parents=True)\n self._dump_video(output_dir / f'{tmp_sid}.gif', sr_imgs)\n sr_imgs = []\n\n output = self._denormalize(output)\n sr_img = output.squeeze().detach().cpu().numpy().astype(np.uint8)\n sr_imgs.append(sr_img)\n tmp_sid = sid\n\n # Save the image.\n output_dir = imgs_dir / patient\n if not output_dir.is_dir():\n output_dir.mkdir(parents=True)\n img_name = sid.replace('sequence', 'slice') + f'_{fid}.png'\n imsave(output_dir / img_name, sr_img)\n\n batch_size = self.test_dataloader.batch_size\n self._update_log(log, batch_size, loss, losses, metrics)\n count += batch_size\n trange.set_postfix(**dict((key, f'{value / count: .3f}') for key, value in log.items()))\n\n # Save the results.\n if self.exported:\n with open(csv_path, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n 
writer.writerows(results)\n\n for key in log:\n log[key] /= count\n logging.info(f'Test log: {log}.')\n\n def _get_inputs_targets(self, batch):\n \"\"\"Specify the data input and target.\n Args:\n batch (dict): A batch of data.\n\n Returns:\n inputs (list of torch.Tensor): The data inputs.\n target (torch.Tensor): The data target.\n index (int): The index of the target path in the `dataloder.data`.\n \"\"\"\n return batch['lr_imgs'], batch['hr_img'], batch['index']\n\n def _compute_losses(self, output, target):\n \"\"\"Compute the losses.\n Args:\n output (torch.Tensor): The model output.\n target (torch.Tensor): The data target.\n\n Returns:\n losses (list of torch.Tensor): The computed losses.\n \"\"\"\n losses = [loss_fn(output, target) for loss_fn in self.loss_fns]\n return losses\n\n def _compute_metrics(self, output, target, name):\n \"\"\"Compute the metrics.\n Args:\n output (torch.Tensor): The model output.\n target (torch.Tensor): The data target.\n name (str): The patient name.\n\n Returns:\n metrics (list of torch.Tensor): The computed metrics.\n \"\"\"\n output, target = self._denormalize(output), self._denormalize(target)\n metrics = []\n for metric_fn in self.metric_fns:\n if 'Cardiac' in metric_fn.__class__.__name__:\n metrics.append(metric_fn(output, target, name))\n else:\n metrics.append(metric_fn(output, target))\n return metrics\n \n def _dump_video(self, path, imgs):\n \"\"\"To dump the video by concatenate the images.\n Args:\n path (Path): The path to save the video.\n imgs (list): The images to form the video.\n \"\"\"\n with imageio.get_writer(path) as writer:\n for img in imgs:\n writer.append_data(img)" ]
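The temporal windowing in Dsb15VSRDataset.__getitem__ above is easiest to see with concrete indices. Below is a minimal sketch of the index arithmetic (window_indices is a helper name introduced here for illustration); the modulo expresses the same cyclic wrap-around that the two np.concatenate branches implement:

def window_indices(t, T, n, temporal_order='last'):
    """Frame indices forming the training window for target frame t,
    given sequence length T and window size n, with cyclic wrap-around."""
    if temporal_order == 'last':
        start, end = t - n + 1, t + 1                   # {t-n+1, ..., t}
    elif temporal_order == 'middle':
        half = (n - 1) // 2
        start, end = t - half, t + (n - 1 - half) + 1   # centred on t
    else:
        raise ValueError(f"unknown temporal_order: {temporal_order}")
    return [i % T for i in range(start, end)]

print(window_indices(t=0, T=10, n=5, temporal_order='last'))    # [6, 7, 8, 9, 0]
print(window_indices(t=9, T=10, n=5, temporal_order='middle'))  # [7, 8, 9, 0, 1]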
[ [ "numpy.concatenate" ], [ "scipy.misc.imsave", "torch.no_grad", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.0", "0.19", "0.18", "1.2", "0.12", "0.10", "0.17", "0.16" ], "tensorflow": [] } ]