repo_name | hexsha | file_path | code | apis |
---|---|---|---|---|
hammer/sgkit | [
"d3b77ed878ce421c9363b948a9796f46572fa44a"
]
| [
"sgkit/tests/test_vcfzarr_reader.py"
]
| [
"import allel\nimport numpy as np\nimport pytest\nimport xarray as xr\nimport zarr\nfrom numpy.testing import assert_array_equal\n\nfrom sgkit import read_vcfzarr\nfrom sgkit.io.vcfzarr_reader import _ensure_2d, vcfzarr_to_zarr\n\n\ndef create_vcfzarr(\n shared_datadir, tmpdir, *, fields=None, grouped_by_contig=False, consolidated=False\n):\n \"\"\"Create a vcfzarr file using scikit-allel\"\"\"\n vcf_path = shared_datadir / \"sample.vcf\"\n output_path = tmpdir / \"sample.vcf.zarr\"\n if grouped_by_contig:\n for contig in [\"19\", \"20\", \"X\"]:\n allel.vcf_to_zarr(\n str(vcf_path),\n str(output_path),\n fields=fields,\n group=contig,\n region=contig,\n )\n else:\n allel.vcf_to_zarr(str(vcf_path), str(output_path), fields=fields)\n if consolidated:\n zarr.consolidate_metadata(str(output_path))\n return output_path\n\n\ndef test_ensure_2d():\n assert_array_equal(_ensure_2d(np.array([0, 2, 1])), np.array([[0], [2], [1]]))\n assert_array_equal(_ensure_2d(np.array([[0], [2], [1]])), np.array([[0], [2], [1]]))\n\n\ndef test_read_vcfzarr(shared_datadir, tmpdir):\n vcfzarr_path = create_vcfzarr(shared_datadir, tmpdir)\n ds = read_vcfzarr(vcfzarr_path)\n\n assert ds.attrs[\"contigs\"] == [\"19\", \"20\", \"X\"]\n assert_array_equal(ds[\"variant_contig\"], [0, 0, 1, 1, 1, 1, 1, 1, 2])\n assert_array_equal(\n ds[\"variant_position\"],\n [111, 112, 14370, 17330, 1110696, 1230237, 1234567, 1235237, 10],\n )\n assert_array_equal(\n ds[\"variant_allele\"],\n [\n [\"A\", \"C\", \"\", \"\"],\n [\"A\", \"G\", \"\", \"\"],\n [\"G\", \"A\", \"\", \"\"],\n [\"T\", \"A\", \"\", \"\"],\n [\"A\", \"G\", \"T\", \"\"],\n [\"T\", \"\", \"\", \"\"],\n [\"G\", \"GA\", \"GAC\", \"\"],\n [\"T\", \"\", \"\", \"\"],\n [\"AC\", \"A\", \"ATG\", \"C\"],\n ],\n )\n assert_array_equal(\n ds[\"variant_id\"],\n [\".\", \".\", \"rs6054257\", \".\", \"rs6040355\", \".\", \"microsat1\", \".\", \"rsTest\"],\n )\n assert_array_equal(\n ds[\"variant_id_mask\"],\n [True, True, False, True, False, True, False, True, False],\n )\n\n assert_array_equal(ds[\"sample_id\"], [\"NA00001\", \"NA00002\", \"NA00003\"])\n\n call_genotype = np.array(\n [\n [[0, 0], [0, 0], [0, 1]],\n [[0, 0], [0, 0], [0, 1]],\n [[0, 0], [1, 0], [1, 1]],\n [[0, 0], [0, 1], [0, 0]],\n [[1, 2], [2, 1], [2, 2]],\n [[0, 0], [0, 0], [0, 0]],\n [[0, 1], [0, 2], [-1, -1]],\n [[0, 0], [0, 0], [-1, -1]],\n [[0, -1], [0, 1], [0, 2]],\n ],\n dtype=\"i1\",\n )\n assert_array_equal(ds[\"call_genotype\"], call_genotype)\n assert_array_equal(ds[\"call_genotype_mask\"], call_genotype < 0)\n assert \"call_genotype_phased\" not in ds\n\n\[email protected](\n \"grouped_by_contig, consolidated, has_variant_id\",\n [\n (False, False, False),\n (False, False, True),\n (True, False, True),\n (True, True, False),\n ],\n)\[email protected](\n \"concat_algorithm\",\n [None, \"xarray_internal\"],\n)\ndef test_vcfzarr_to_zarr(\n shared_datadir,\n tmpdir,\n grouped_by_contig,\n consolidated,\n has_variant_id,\n concat_algorithm,\n):\n if has_variant_id:\n fields = None\n else:\n fields = [\n \"variants/CHROM\",\n \"variants/POS\",\n \"variants/REF\",\n \"variants/ALT\",\n \"calldata/GT\",\n \"samples\",\n ]\n\n vcfzarr_path = create_vcfzarr(\n shared_datadir,\n tmpdir,\n fields=fields,\n grouped_by_contig=grouped_by_contig,\n consolidated=consolidated,\n )\n\n output = str(tmpdir / \"vcf.zarr\")\n vcfzarr_to_zarr(\n vcfzarr_path,\n output,\n grouped_by_contig=grouped_by_contig,\n concat_algorithm=concat_algorithm,\n consolidated=consolidated,\n )\n\n ds = xr.open_zarr(output, 
concat_characters=False)\n\n # Note that variant_allele values are byte strings, not unicode strings (unlike for read_vcfzarr)\n # We should make the two consistent.\n\n assert ds.attrs[\"contigs\"] == [\"19\", \"20\", \"X\"]\n assert_array_equal(ds[\"variant_contig\"], [0, 0, 1, 1, 1, 1, 1, 1, 2])\n assert_array_equal(\n ds[\"variant_position\"],\n [111, 112, 14370, 17330, 1110696, 1230237, 1234567, 1235237, 10],\n )\n assert_array_equal(\n ds[\"variant_allele\"],\n [\n [b\"A\", b\"C\", b\"\", b\"\"],\n [b\"A\", b\"G\", b\"\", b\"\"],\n [b\"G\", b\"A\", b\"\", b\"\"],\n [b\"T\", b\"A\", b\"\", b\"\"],\n [b\"A\", b\"G\", b\"T\", b\"\"],\n [b\"T\", b\"\", b\"\", b\"\"],\n [b\"G\", b\"GA\", b\"GAC\", b\"\"],\n [b\"T\", b\"\", b\"\", b\"\"],\n [b\"AC\", b\"A\", b\"ATG\", b\"C\"],\n ],\n )\n if has_variant_id:\n assert_array_equal(\n ds[\"variant_id\"],\n [\n b\".\",\n b\".\",\n b\"rs6054257\",\n b\".\",\n b\"rs6040355\",\n b\".\",\n b\"microsat1\",\n b\".\",\n b\"rsTest\",\n ],\n )\n assert_array_equal(\n ds[\"variant_id_mask\"],\n [True, True, False, True, False, True, False, True, False],\n )\n else:\n assert \"variant_id\" not in ds\n assert \"variant_id_mask\" not in ds\n\n assert_array_equal(ds[\"sample_id\"], [\"NA00001\", \"NA00002\", \"NA00003\"])\n\n call_genotype = np.array(\n [\n [[0, 0], [0, 0], [0, 1]],\n [[0, 0], [0, 0], [0, 1]],\n [[0, 0], [1, 0], [1, 1]],\n [[0, 0], [0, 1], [0, 0]],\n [[1, 2], [2, 1], [2, 2]],\n [[0, 0], [0, 0], [0, 0]],\n [[0, 1], [0, 2], [-1, -1]],\n [[0, 0], [0, 0], [-1, -1]],\n [[0, -1], [0, 1], [0, 2]],\n ],\n dtype=\"i1\",\n )\n assert_array_equal(ds[\"call_genotype\"], call_genotype)\n assert_array_equal(ds[\"call_genotype_mask\"], call_genotype < 0)\n assert \"call_genotype_phased\" not in ds\n"
]
| [
[
"numpy.array",
"numpy.testing.assert_array_equal"
]
]
|
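
The `apis` column for this record lists only `numpy.array` and `numpy.testing.assert_array_equal`, which correspond to the assertion pattern used throughout the test file above. A minimal sketch of that pattern follows; the `ensure_2d` helper here is a hypothetical stand-in for sgkit's private `_ensure_2d`, with its behavior inferred solely from the record's own assertions:

```python
import numpy as np
from numpy.testing import assert_array_equal


def ensure_2d(arr: np.ndarray) -> np.ndarray:
    # Hypothetical stand-in for sgkit's private _ensure_2d: promote a 1-D
    # array to a column vector; pass 2-D arrays through unchanged.
    return arr.reshape(-1, 1) if arr.ndim == 1 else arr


# The same two assertions the record's test_ensure_2d makes.
assert_array_equal(ensure_2d(np.array([0, 2, 1])), np.array([[0], [2], [1]]))
assert_array_equal(ensure_2d(np.array([[0], [2], [1]])), np.array([[0], [2], [1]]))
```
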
Aidlab/aidlab-python-examples | [
"b392490d64985363a29d325a7b0eef817a556538"
]
| [
"example_chart_mac_windows.py"
]
| [
"import Aidlab\nimport numpy as np\nfrom multiprocessing import Process, Queue, Array\nimport matplotlib.pyplot as pyplot\nimport matplotlib.animation as animation\n\nbuffer_size = 500\nresult = None\nx = [i for i in range(buffer_size)]\ny = []\n\nfigure = pyplot.figure()\naxis = figure.add_subplot(1, 1, 1)\n\n\ndef animate(i):\n global y\n axis.clear()\n axis.plot(x, y)\n pyplot.ylim([np.min(y) - np.std(y), np.max(y) + np.std(y)])\n\n\ndef chart(result):\n global y\n y = result\n ani = animation.FuncAnimation(figure, animate, interval=2)\n pyplot.show()\n\n\nclass MainManager(Aidlab.Aidlab):\n\n def __init__(self):\n super().__init__()\n self.sample_index = 0\n\n def did_connect(self, aidlab):\n print(\"Connected to: \", aidlab.address)\n\n def did_disconnect(self, aidlab):\n print(\"Disconnected from: \", aidlab.address)\n\n def did_receive_ecg(self, aidlab, timestamp, values):\n global result, buffer_size\n self.sample_index += 1\n result[self.sample_index % buffer_size] = values[0]\n\n\nif __name__ == '__main__':\n # create process for Plot\n result = Array('d', buffer_size)\n Process(target=chart, args=(result,)).start()\n\n signals = [\"ecg\"]\n\n main_manager = MainManager()\n main_manager.connect(signals)\n\n # Start the connection\n while True:\n pass\n"
]
| [
[
"numpy.max",
"matplotlib.animation.FuncAnimation",
"numpy.min",
"matplotlib.pyplot.figure",
"numpy.std",
"matplotlib.pyplot.show"
]
]
|
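
The `apis` list for this record covers the live-plot half of the example: a `FuncAnimation` callback that redraws a fixed-size buffer and pads the y-limits by one standard deviation. Below is a minimal, self-contained sketch of that pattern, using a synthetic sine signal in place of the Aidlab ECG callback (the device connection is assumed away here):

```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

buffer_size = 500
x = np.arange(buffer_size)
y = np.zeros(buffer_size)

figure = plt.figure()
axis = figure.add_subplot(1, 1, 1)


def animate(frame):
    # Synthetic sample standing in for did_receive_ecg writing into the buffer.
    y[frame % buffer_size] = np.sin(frame / 10.0)
    axis.clear()
    axis.plot(x, y)
    # Pad the y-limits by one standard deviation, as in the example; fall back
    # to a unit pad while the buffer is still flat to avoid degenerate limits.
    pad = np.std(y) or 1.0
    axis.set_ylim(np.min(y) - pad, np.max(y) + pad)


ani = animation.FuncAnimation(figure, animate, interval=2, cache_frame_data=False)
plt.show()
```

The original script keeps the device I/O in a separate process and shares the buffer through `multiprocessing.Array`; the sketch collapses that to a single process for brevity.
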
izeye/tensorflow | [
"d4422ff4b2f142de1d0c626f73c734655d340e0d"
]
| [
"tensorflow/python/framework/ops.py"
]
| [
"# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Classes and functions used to construct graphs.\"\"\"\n# pylint: disable=g-bad-name\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport copy\nimport linecache\nimport re\nimport sys\nimport threading\nimport weakref\n\nimport tensorflow.python.platform\n\nimport six\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.framework import versions_pb2\nfrom tensorflow.python.framework import device as pydev\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import registry\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import versions\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.platform import logging\n\n\ndef _convert_stack(stack):\n \"\"\"Converts a stack extracted using _extract_stack() to a traceback stack.\n\n Args:\n stack: A list of n 4-tuples, (filename, lineno, name, frame_globals).\n\n Returns:\n A list of n 4-tuples (filename, lineno, name, code), where the code tuple\n element is calculated from the corresponding elements of the input tuple.\n \"\"\"\n ret = []\n for filename, lineno, name, frame_globals in stack:\n linecache.checkcache(filename)\n line = linecache.getline(filename, lineno, frame_globals)\n if line:\n line = line.strip()\n else:\n line = None\n ret.append((filename, lineno, name, line))\n return ret\n\n\n# pylint: disable=line-too-long\ndef _extract_stack():\n \"\"\"A lightweight re-implementation of traceback.extract_stack.\n\n NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for\n each stack frame using linecache, which results in an abundance of stat()\n calls. This implementation does not retrieve the code, and any consumer\n should apply _convert_stack to the result to obtain a traceback that can\n be formatted etc. 
using traceback methods.\n\n Returns:\n A list of 4-tuples (filename, lineno, name, frame_globals) corresponding to\n the call stack of the current thread.\n \"\"\"\n # pylint: enable=line-too-long\n try:\n raise ZeroDivisionError\n except ZeroDivisionError:\n f = sys.exc_info()[2].tb_frame.f_back\n ret = []\n while f is not None:\n lineno = f.f_lineno\n co = f.f_code\n filename = co.co_filename\n name = co.co_name\n frame_globals = f.f_globals\n ret.append((filename, lineno, name, frame_globals))\n f = f.f_back\n ret.reverse()\n return ret\n\n\ndef _as_graph_element(obj):\n \"\"\"Convert `obj` to a graph element if possible, otherwise return `None`.\n\n Args:\n obj: Object to convert.\n\n Returns:\n The result of `obj._as_graph_element()` if that method is available;\n otherwise `None`.\n \"\"\"\n conv_fn = getattr(obj, \"_as_graph_element\", None)\n if conv_fn and callable(conv_fn):\n return conv_fn()\n return None\n\n\nclass Tensor(object):\n \"\"\"Represents a value produced by an `Operation`.\n\n A `Tensor` is a symbolic handle to one of the outputs of an\n `Operation`. It does not hold the values of that operation's output,\n but instead provides a means of computing those values in a\n TensorFlow [`Session`](../../api_docs/python/client.md#Session).\n\n This class has two primary purposes:\n\n 1. A `Tensor` can be passed as an input to another `Operation`.\n This builds a dataflow connection between operations, which\n enables TensorFlow to execute an entire `Graph` that represents a\n large, multi-step computation.\n\n 2. After the graph has been launched in a session, the value of the\n `Tensor` can be computed by passing it to\n [`Session.run()`](../../api_docs/python/client.md#Session.run).\n `t.eval()` is a shortcut for calling\n `tf.get_default_session().run(t)`.\n\n In the following example, `c`, `d`, and `e` are symbolic `Tensor`\n objects, whereas `result` is a numpy array that stores a concrete\n value:\n\n ```python\n # Build a dataflow graph.\n c = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n d = tf.constant([[1.0, 1.0], [0.0, 1.0]])\n e = tf.matmul(c, d)\n\n # Construct a `Session` to execute the graph.\n sess = tf.Session()\n\n # Execute the graph and store the value that `e` represents in `result`.\n result = sess.run(e)\n ```\n\n @@dtype\n @@name\n @@value_index\n @@graph\n @@op\n @@consumers\n\n @@eval\n\n @@get_shape\n @@set_shape\n\n \"\"\"\n\n # List of Python operators that we allow to override.\n OVERLOADABLE_OPERATORS = {\n # Binary.\n \"__add__\",\n \"__radd__\",\n \"__sub__\",\n \"__rsub__\",\n \"__mul__\",\n \"__rmul__\",\n \"__div__\",\n \"__rdiv__\",\n \"__truediv__\",\n \"__rtruediv__\",\n \"__floordiv__\",\n \"__rfloordiv__\",\n \"__mod__\",\n \"__rmod__\",\n \"__lt__\",\n \"__le__\",\n \"__gt__\",\n \"__ge__\",\n \"__and__\",\n \"__rand__\",\n \"__or__\",\n \"__ror__\",\n \"__xor__\",\n \"__rxor__\",\n \"__getitem__\",\n \"__pow__\",\n \"__rpow__\",\n # Unary.\n \"__invert__\",\n \"__neg__\",\n \"__abs__\"\n }\n\n def __init__(self, op, value_index, dtype):\n \"\"\"Creates a new `Tensor`.\n\n Args:\n op: An `Operation`. `Operation` that computes this tensor.\n value_index: An `int`. Index of the operation's endpoint that produces\n this tensor.\n dtype: A `DType`. 
Type of elements stored in this tensor.\n\n Raises:\n TypeError: If the op is not an `Operation`.\n \"\"\"\n if not isinstance(op, Operation):\n raise TypeError(\"op needs to be an Operation: %s\" % op)\n self._op = op\n self._value_index = value_index\n self._dtype = dtypes.as_dtype(dtype)\n self._shape = tensor_shape.unknown_shape()\n # List of operations that use this Tensor as input. We maintain this list\n # to easily navigate a computation graph.\n self._consumers = []\n\n @property\n def op(self):\n \"\"\"The `Operation` that produces this tensor as an output.\"\"\"\n return self._op\n\n @property\n def dtype(self):\n \"\"\"The `DType` of elements in this tensor.\"\"\"\n return self._dtype\n\n @property\n def graph(self):\n \"\"\"The `Graph` that contains this tensor.\"\"\"\n return self._op.graph\n\n @property\n def name(self):\n \"\"\"The string name of this tensor.\"\"\"\n if not self._op.name:\n raise ValueError(\"Operation was not named: %s\" % self._op)\n return \"%s:%d\" % (self._op.name, self._value_index)\n\n @property\n def device(self):\n \"\"\"The name of the device on which this tensor will be produced, or None.\"\"\"\n return self._op.device\n\n def _shape_as_list(self):\n if self._shape.ndims is not None:\n return [dim.value for dim in self._shape.dims]\n else:\n return None\n\n def get_shape(self):\n \"\"\"Returns the `TensorShape` that represents the shape of this tensor.\n\n The shape is computed using shape inference functions that are\n registered for each `Operation` type using `tf.RegisterShape`.\n See [`TensorShape`](../../api_docs/python/framework.md#TensorShape) for more\n details of what a shape represents.\n\n The inferred shape of a tensor is used to provide shape\n information without having to launch the graph in a session. This\n can be used for debugging, and providing early error messages. For\n example:\n\n ```python\n c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n\n print(c.get_shape())\n ==> TensorShape([Dimension(2), Dimension(3)])\n\n d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])\n\n print(d.get_shape())\n ==> TensorShape([Dimension(4), Dimension(2)])\n\n # Raises a ValueError, because `c` and `d` do not have compatible\n # inner dimensions.\n e = tf.matmul(c, d)\n\n f = tf.matmul(c, d, transpose_a=True, transpose_b=True)\n\n print(f.get_shape())\n ==> TensorShape([Dimension(3), Dimension(4)])\n ```\n\n In some cases, the inferred shape may have unknown dimensions. If\n the caller has additional information about the values of these\n dimensions, `Tensor.set_shape()` can be used to augment the\n inferred shape.\n\n Returns:\n A `TensorShape` representing the shape of this tensor.\n \"\"\"\n return self._shape\n\n def set_shape(self, shape):\n \"\"\"Updates the shape of this tensor.\n\n This method can be called multiple times, and will merge the given\n `shape` with the current shape of this tensor. It can be used to\n provide additional information about the shape of this tensor that\n cannot be inferred from the graph alone. 
For example, this can be used\n to provide additional information about the shapes of images:\n\n ```python\n _, image_data = tf.TFRecordReader(...).read(...)\n image = tf.image.decode_png(image_data, channels=3)\n\n # The height and width dimensions of `image` are data dependent, and\n # cannot be computed without executing the op.\n print(image.get_shape())\n ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])\n\n # We know that each image in this dataset is 28 x 28 pixels.\n image.set_shape([28, 28, 3])\n print(image.get_shape())\n ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])\n ```\n\n Args:\n shape: A `TensorShape` representing the shape of this tensor.\n\n Raises:\n ValueError: If `shape` is not compatible with the current shape of\n this tensor.\n \"\"\"\n self._shape = self._shape.merge_with(shape)\n\n @property\n def value_index(self):\n \"\"\"The index of this tensor in the outputs of its `Operation`.\"\"\"\n return self._value_index\n\n def consumers(self):\n \"\"\"Returns a list of `Operation`s that consume this tensor.\n\n Returns:\n A list of `Operation`s.\n \"\"\"\n return self._consumers\n\n def _add_consumer(self, consumer):\n \"\"\"Add a consumer to this tensor.\n\n Args:\n consumer: an Operation.\n\n Raises:\n TypeError: if the consumer is not an Operation.\n \"\"\"\n if not isinstance(consumer, Operation):\n raise TypeError(\"Consumer must be an Operation: %s\" % consumer)\n self._consumers.append(consumer)\n\n def _as_node_def_input(self):\n \"\"\"Return a value to use for the NodeDef \"input\" attribute.\n\n The returned string can be used in a NodeDef \"input\" attribute\n to indicate that the NodeDef uses this Tensor as input.\n\n Raises:\n ValueError: if this Tensor's Operation does not have a name.\n\n Returns:\n a string.\n \"\"\"\n if not self._op.name:\n raise ValueError(\"Operation was not named: %s\" % self._op)\n if self._value_index == 0:\n return self._op.name\n else:\n return \"%s:%d\" % (self._op.name, self._value_index)\n\n def __str__(self):\n return \"Tensor(\\\"%s\\\"%s%s%s)\" % (\n self.name,\n (\", shape=%s\" % self.get_shape())\n if self.get_shape().ndims is not None else \"\",\n (\", dtype=%s\" % self._dtype.name) if self._dtype else \"\",\n (\", device=%s\" % self.device) if self.device else \"\")\n\n def __repr__(self):\n return \"<tf.Tensor '%s' shape=%s dtype=%s>\" % (\n self.name, self.get_shape(), self._dtype.name)\n\n def __hash__(self):\n # Necessary to support Python's collection membership operators\n return id(self)\n\n def __eq__(self, other):\n # Necessary to support Python's collection membership operators\n return id(self) == id(other)\n\n # NOTE(mrry): This enables the Tensor's overloaded \"right\" binary\n # operators to run when the left operand is an ndarray, because it\n # accords the Tensor class higher priority than an ndarray, or a\n # numpy matrix.\n # TODO(mrry): Convert this to using numpy's __numpy_ufunc__\n # mechanism, which allows more control over how Tensors interact\n # with ndarrays.\n __array_priority__ = 100\n\n @staticmethod\n def _override_operator(operator, func):\n \"\"\"Overrides (string) operator on Tensors to call func.\n\n Args:\n operator: the string name of the operator to override.\n func: the function that replaces the overriden operator.\n\n Raises:\n ValueError: If operator has already been overwritten,\n or if operator is not allowed to be overwritten.\n \"\"\"\n existing = getattr(Tensor, operator, None)\n if existing is not None:\n # Check to see if this is a 
default method-wrapper or slot wrapper which\n # will be true for the comparison operators.\n if not isinstance(existing, type(object.__lt__)):\n raise ValueError(\"operator %s cannot be overwritten again.\" % operator)\n if operator not in Tensor.OVERLOADABLE_OPERATORS:\n raise ValueError(\"Overriding %s is disallowed\" % operator)\n setattr(Tensor, operator, func)\n\n def __iter__(self):\n \"\"\"Dummy method to prevent iteration. Do not call.\n\n NOTE(mrry): If we register __getitem__ as an overloaded operator,\n Python will valiantly attempt to iterate over the Tensor from 0 to\n infinity. Declaring this method prevents this unintended\n behavior.\n\n Raises:\n TypeError: when invoked.\n \"\"\"\n raise TypeError(\"'Tensor' object is not iterable\")\n\n def eval(self, feed_dict=None, session=None):\n \"\"\"Evaluates this tensor in a `Session`.\n\n Calling this method will execute all preceding operations that\n produce the inputs needed for the operation that produces this\n tensor.\n\n *N.B.* Before invoking `Tensor.eval()`, its graph must have been\n launched in a session, and either a default session must be\n available, or `session` must be specified explicitly.\n\n Args:\n feed_dict: A dictionary that maps `Tensor` objects to feed values.\n See [`Session.run()`](../../api_docs/python/client.md#Session.run) for a\n description of the valid feed values.\n session: (Optional.) The `Session` to be used to evaluate this tensor. If\n none, the default session will be used.\n\n Returns:\n A numpy array corresponding to the value of this tensor.\n\n \"\"\"\n return _eval_using_default_session(self, feed_dict, self.graph, session)\n\n\ndef _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False):\n _ = name, as_ref\n if dtype and not dtype.is_compatible_with(t.dtype):\n raise ValueError(\n \"Tensor conversion requested dtype %s for Tensor with dtype %s: %r\"\n % (dtype.name, t.dtype.name, str(t)))\n return t\n\n\n_tensor_conversion_func_registry = {\n 0: [(Tensor, _TensorTensorConversionFunction)]}\n\n\ndef convert_to_tensor(value, dtype=None, name=None, as_ref=False):\n \"\"\"Converts the given `value` to a `Tensor`.\n\n This function converts Python objects of various types to `Tensor`\n objects. It accepts `Tensor` objects, numpy arrays, Python lists,\n and Python scalars. For example:\n\n ```python\n import numpy as np\n array = np.random.rand(32, 100, 100)\n\n def my_func(arg):\n arg = tf.convert_to_tensor(arg, dtype=tf.float32)\n return tf.matmul(arg, arg) + arg\n\n # The following calls are equivalent.\n value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))\n value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])\n value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))\n ```\n\n This function can be useful when composing a new operation in Python\n (such as `my_func` in the example above). All standard Python op\n constructors apply this function to each of their Tensor-valued\n inputs, which allows those ops to accept numpy arrays, Python lists,\n and scalars in addition to `Tensor` objects.\n\n Args:\n value: An object whose type has a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. 
If missing, the\n type is inferred from the type of `value`.\n name: Optional name to use if a new `Tensor` is created.\n as_ref: True if we want the result as a ref tensor.\n\n Returns:\n A `Tensor` based on `value`.\n\n Raises:\n TypeError: If no conversion function is registered for `value`.\n RuntimeError: If a registered conversion function returns an invalid value.\n\n \"\"\"\n error_prefix = \"\" if name is None else \"%s: \" % name\n if dtype is not None:\n dtype = dtypes.as_dtype(dtype)\n for _, funcs_at_priority in sorted(_tensor_conversion_func_registry.items()):\n for base_type, conversion_func in funcs_at_priority:\n if isinstance(value, base_type):\n ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)\n if not isinstance(ret, Tensor):\n raise RuntimeError(\n \"%sConversion function %r for type %s returned non-Tensor: %r\"\n % (error_prefix, conversion_func, base_type, ret))\n if dtype and not dtype.is_compatible_with(ret.dtype):\n raise RuntimeError(\n \"%sConversion function %r for type %s returned incompatible \"\n \"dtype: requested = %s, actual = %s\"\n % (error_prefix, conversion_func, base_type,\n dtype.name, ret.dtype.name))\n return ret\n raise TypeError(\"%sCannot convert %r with type %s to Tensor: \"\n \"no conversion function registered.\"\n % (error_prefix, value, type(value)))\n\n\ndef convert_n_to_tensor(values, dtype=None, name=None, as_ref=False):\n \"\"\"Converts `values` to a list of `Tensor` objects.\n\n Args:\n values: A list of objects that can be consumed by `tf.convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` objects.\n name: (Optional.) A name prefix to used when a new `Tensor` is\n created, in which case element `i` will be given the name `name\n + '_' + i`.\n as_ref: True if the caller wants the results as ref tensors.\n\n Returns:\n A list of `Tensor` and/or `IndexedSlices` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n if not isinstance(values, collections.Sequence):\n raise TypeError(\"values must be a list.\")\n ret = []\n for i, value in enumerate(values):\n n = None if name is None else \"%s_%d\" % (name, i)\n ret.append(convert_to_tensor(value, dtype=dtype, name=n, as_ref=as_ref))\n return ret\n\n\ndef convert_to_tensor_or_indexed_slices(value, dtype=None, name=None,\n as_ref=False):\n \"\"\"Converts the given object to a `Tensor` or an `IndexedSlices`.\n\n If `value` is an `IndexedSlices` it is returned\n unmodified. Otherwise, it is converted to a `Tensor` using\n `convert_to_tensor()`.\n\n Args:\n value: An `IndexedSlices` or an object that can be consumed by\n `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` or\n `IndexedSlices`.\n name: (Optional.) 
A name to use if a new `Tensor` is created.\n as_ref: True if the caller wants the results as ref tensors.\n\n Returns:\n An `Tensor` or an `IndexedSlices` based on `value`.\n\n Raises:\n ValueError: If `dtype` does not match the element type of `value`.\n \"\"\"\n if isinstance(value, IndexedSlices):\n if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):\n raise ValueError(\n \"Tensor conversion requested dtype %s for Tensor with dtype %s: %r\"\n % (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))\n return value\n else:\n return convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)\n\n\ndef convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None,\n as_ref=False):\n \"\"\"Converts `values` to a list of `Tensor` or `IndexedSlices` objects.\n\n Args:\n values: A list of `None`, `IndexedSlices`, or objects that can be consumed\n by `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor`\n `IndexedSlices`.\n name: (Optional.) A name prefix to used when a new `Tensor` is\n created, in which case element `i` will be given the name `name\n + '_' + i`.\n as_ref: True if the caller wants the results as ref tensors.\n\n Returns:\n A list of `Tensor` and/or `IndexedSlices` objects.\n\n Raises:\n TypeError: If no conversion function is registered for an element in\n `values`.\n RuntimeError: If a registered conversion function returns an invalid\n value.\n \"\"\"\n if not isinstance(values, collections.Sequence):\n raise TypeError(\"values must be a list.\")\n ret = []\n for i, value in enumerate(values):\n if value is None:\n ret.append(value)\n else:\n n = None if name is None else \"%s_%d\" % (name, i)\n ret.append(\n convert_to_tensor_or_indexed_slices(value, dtype=dtype, name=n,\n as_ref=as_ref))\n return ret\n\n\ndef register_tensor_conversion_function(base_type, conversion_func,\n priority=100):\n \"\"\"Registers a function for converting objects of `base_type` to `Tensor`.\n\n The conversion function must have the following signature:\n\n def conversion_func(value, dtype=None, name=None, as_ref=False):\n # ...\n\n It must return a `Tensor` with the given `dtype` if specified. If the\n conversion function creates a new `Tensor`, it should use the given\n `name` if specified. All exceptions will be propagated to the caller.\n\n If `as_ref` is true, the function must return a `Tensor` reference,\n such as a `Variable`.\n\n NOTE: The conversion functions will execute in order of priority,\n followed by order of registration. To ensure that a conversion function\n `F` runs before another conversion function `G`, ensure that `F` is\n registered with a smaller priority than `G`.\n\n Args:\n base_type: The base type or tuple of base types for all objects that\n `conversion_func` accepts.\n conversion_func: A function that converts instances of `base_type` to\n `Tensor`.\n priority: Optional integer that indicates the priority for applying this\n conversion function. 
Conversion functions with smaller priority values\n run earlier than conversion functions with larger priority values.\n Defaults to 100.\n\n Raises:\n TypeError: If the arguments do not have the appropriate type.\n\n \"\"\"\n if not (isinstance(base_type, type) or\n (isinstance(base_type, tuple)\n and all(isinstance(x, type) for x in base_type))):\n raise TypeError(\"base_type must be a type or a tuple of types.\")\n if not callable(conversion_func):\n raise TypeError(\"conversion_func must be callable.\")\n\n try:\n funcs_at_priority = _tensor_conversion_func_registry[priority]\n except KeyError:\n funcs_at_priority = []\n _tensor_conversion_func_registry[priority] = funcs_at_priority\n funcs_at_priority.append((base_type, conversion_func))\n\n\nclass IndexedSlices(object):\n \"\"\"A sparse representation of a set of tensor slices at given indices.\n\n This class is a simple wrapper for a pair of `Tensor` objects:\n\n * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.\n * `indices`: A 1-D integer `Tensor` with shape `[D0]`.\n\n An `IndexedSlices` is typically used to represent a subset of a larger\n tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.\n The values in `indices` are the indices in the first dimension of\n the slices that have been extracted from the larger tensor.\n\n The dense tensor `dense` represented by an `IndexedSlices` `slices` has\n\n ```python\n dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]\n ```\n\n The `IndexedSlices` class is used principally in the definition of\n gradients for operations that have sparse gradients\n (e.g. [`tf.gather`](../../api_docs/python/array_ops.md#gather)).\n\n Contrast this representation with\n [`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor),\n which uses multi-dimensional indices and scalar values.\n\n @@__init__\n\n @@values\n @@indices\n @@dense_shape\n\n @@name\n @@dtype\n @@device\n @@op\n \"\"\"\n\n def __init__(self, values, indices, dense_shape=None):\n \"\"\"Creates an `IndexedSlices`.\"\"\"\n _get_graph_from_inputs([values, indices, dense_shape])\n self._values = values\n self._indices = indices\n self._dense_shape = dense_shape\n\n @property\n def values(self):\n \"\"\"A `Tensor` containing the values of the slices.\"\"\"\n return self._values\n\n @property\n def indices(self):\n \"\"\"A 1-D `Tensor` containing the indices of the slices.\"\"\"\n return self._indices\n\n @property\n def dense_shape(self):\n \"\"\"A 1-D `Tensor` containing the shape of the corresponding dense tensor.\"\"\"\n return self._dense_shape\n\n @property\n def name(self):\n \"\"\"The name of this `IndexedSlices`.\"\"\"\n return self.values.name\n\n @property\n def device(self):\n \"\"\"The name of the device on which `values` will be produced, or `None`.\"\"\"\n return self.values.device\n\n @property\n def op(self):\n \"\"\"The `Operation` that produces `values` as an output.\"\"\"\n return self.values.op\n\n @property\n def dtype(self):\n \"\"\"The `DType` of elements in this tensor.\"\"\"\n return self.values.dtype\n\n @property\n def graph(self):\n \"\"\"The `Graph` that contains the values, indices, and shape tensors.\"\"\"\n return self._values.graph\n\n def __str__(self):\n return \"IndexedSlices(indices=%s, values=%s%s)\" % (\n self._indices, self._values,\n (\", dense_shape=%s\" % self._dense_shape) if self._dense_shape else \"\")\n\nIndexedSlicesValue = collections.namedtuple(\"IndexedSlicesValue\",\n [\"values\", \"indices\", 
\"dense_shape\"])\n\nIndexedSlicesValue = collections.namedtuple(\n \"IndexedSlicesValue\", [\"values\", \"indices\", \"dense_shape\"])\n\n\nclass SparseTensor(object):\n \"\"\"Represents a sparse tensor.\n\n Tensorflow represents a sparse tensor as three separate dense tensors:\n `indices`, `values`, and `shape`. In Python, the three tensors are\n collected into a `SparseTensor` class for ease of use. If you have separate\n `indices`, `values`, and `shape` tensors, wrap them in a `SparseTensor`\n object before passing to the ops below.\n\n Concretely, the sparse tensor `SparseTensor(values, indices, shape)` is\n\n * `indices`: A 2-D int64 tensor of shape `[N, ndims]`.\n * `values`: A 1-D tensor of any type and shape `[N]`.\n * `shape`: A 1-D int64 tensor of shape `[ndims]`.\n\n where `N` and `ndims` are the number of values, and number of dimensions in\n the `SparseTensor` respectively.\n\n The corresponding dense tensor satisfies\n\n ```python\n dense.shape = shape\n dense[tuple(indices[i])] = values[i]\n ```\n\n By convention, `indices` should be sorted in row-major order (or equivalently\n lexicographic order on the tuples `indices[i]`). This is not enforced when\n `SparseTensor` objects are constructed, but most ops assume correct ordering.\n If the ordering of sparse tensor `st` is wrong, a fixed version can be\n obtained by calling `tf.sparse_reorder(st)`.\n\n Example: The sparse tensor\n\n ```python\n SparseTensor(values=[1, 2], indices=[[0, 0], [1, 2]], shape=[3, 4])\n ```\n\n represents the dense tensor\n\n ```python\n [[1, 0, 0, 0]\n [0, 0, 2, 0]\n [0, 0, 0, 0]]\n ```\n\n @@__init__\n @@indices\n @@values\n @@dtype\n @@shape\n @@graph\n \"\"\"\n\n def __init__(self, indices, values, shape):\n \"\"\"Creates a `SparseTensor`.\n\n Args:\n indices: A 2-D int64 tensor of shape `[N, ndims]`.\n values: A 1-D tensor of any type and shape `[N]`.\n shape: A 1-D int64 tensor of shape `[ndims]`.\n\n Returns:\n A `SparseTensor`\n \"\"\"\n with op_scope([indices, values, shape], None, \"SparseTensor\"):\n indices = convert_to_tensor(indices, name=\"indices\", dtype=dtypes.int64)\n # Always pass as_ref=True because we want to be able to update\n # values later if it is a VariableOp.\n # TODO(touts): Consider adding mutable_values() when 'values'\n # is a VariableOp and updating users of SparseTensor.\n values = convert_to_tensor(values, name=\"values\", as_ref=True)\n shape = convert_to_tensor(shape, name=\"shape\", dtype=dtypes.int64)\n self._indices = indices\n self._values = values\n self._shape = shape\n\n indices_shape = indices.get_shape().with_rank(2)\n values_shape = values.get_shape().with_rank(1)\n shape_shape = shape.get_shape().with_rank(1)\n\n # Assert number of rows in indices match the number of elements in values.\n indices_shape[0].merge_with(values_shape[0])\n # Assert number of columns in indices matches the number of elements in\n # shape.\n indices_shape[1].merge_with(shape_shape[0])\n\n @property\n def indices(self):\n \"\"\"The indices of non-zero values in the represented dense tensor.\n\n Returns:\n A 2-D Tensor of int64 with shape `[N, ndims]`, where `N` is the\n number of non-zero values in the tensor, and `ndims` is the rank.\n \"\"\"\n return self._indices\n\n @property\n def values(self):\n \"\"\"The non-zero values in the represented dense tensor.\n\n Returns:\n A 1-D Tensor of any data type.\n \"\"\"\n return self._values\n\n @property\n def dtype(self):\n \"\"\"The `DType` of elements in this tensor.\"\"\"\n return self._values.dtype\n\n @property\n def 
shape(self):\n \"\"\"A 1-D Tensor of int64 representing the shape of the dense tensor.\"\"\"\n return self._shape\n\n @property\n def graph(self):\n \"\"\"The `Graph` that contains the index, value, and shape tensors.\"\"\"\n return self._indices.graph\n\n def __str__(self):\n return \"SparseTensor(indices=%s, values=%s, shape=%s)\" % (\n self._indices, self._values, self._shape)\n\n\nSparseTensorValue = collections.namedtuple(\"SparseTensorValue\",\n [\"indices\", \"values\", \"shape\"])\n\n\ndef _device_string(dev_spec):\n if isinstance(dev_spec, pydev.Device):\n return dev_spec.to_string()\n else:\n return dev_spec\n\n\ndef _NodeDef(op_type, name, device=None, attrs=None):\n \"\"\"Create a NodeDef proto.\n\n Args:\n op_type: Value for the \"op\" attribute of the NodeDef proto.\n name: Value for the \"name\" attribute of the NodeDef proto.\n device: string, device, or function from NodeDef to string.\n Value for the \"device\" attribute of the NodeDef proto.\n attrs: Optional dictionary where the key is the attribute name (a string)\n and the value is the respective \"attr\" attribute of the NodeDef proto (an\n AttrValue).\n\n Returns:\n A graph_pb2.NodeDef protocol buffer.\n \"\"\"\n node_def = graph_pb2.NodeDef()\n node_def.op = compat.as_bytes(op_type)\n node_def.name = compat.as_bytes(name)\n if attrs is not None:\n for k, v in six.iteritems(attrs):\n node_def.attr[k].CopyFrom(v)\n if device is not None:\n if callable(device):\n node_def.device = device(node_def)\n else:\n node_def.device = _device_string(device)\n return node_def\n\n\n# Copied from core/framework/node_def_util.cc\n# TODO(mrry,josh11b): Consolidate this validation in C++ code.\n_VALID_OP_NAME_REGEX = re.compile(\"[A-Za-z0-9.][A-Za-z0-9_.\\\\-/]*\")\n\n\nclass Operation(object):\n \"\"\"Represents a graph node that performs computation on tensors.\n\n An `Operation` is a node in a TensorFlow `Graph` that takes zero or\n more `Tensor` objects as input, and produces zero or more `Tensor`\n objects as output. Objects of type `Operation` are created by\n calling a Python op constructor (such as\n [`tf.matmul()`](../../api_docs/python/math_ops.md#matmul))\n or [`Graph.create_op()`](../../api_docs/python/framework.md#Graph.create_op).\n\n For example `c = tf.matmul(a, b)` creates an `Operation` of type\n \"MatMul\" that takes tensors `a` and `b` as input, and produces `c`\n as output.\n\n After the graph has been launched in a session, an `Operation` can\n be executed by passing it to\n [`Session.run()`](../../api_docs/python/client.md#Session.run).\n `op.run()` is a shortcut for calling `tf.get_default_session().run(op)`.\n\n @@name\n @@type\n @@inputs\n @@control_inputs\n @@outputs\n @@device\n @@graph\n\n @@run\n\n @@get_attr\n @@traceback\n \"\"\"\n\n def __init__(self, node_def, g, inputs=None, output_types=None,\n control_inputs=None, input_types=None, original_op=None,\n op_def=None):\n \"\"\"Creates an `Operation`.\n\n NOTE: This constructor validates the name of the `Operation` (passed\n as `node_def.name`). Valid `Operation` names match the following\n regular expression:\n\n [A-Za-z0-9.][A-Za-z0-9_.\\\\-/]*\n\n Args:\n node_def: `graph_pb2.NodeDef`. `NodeDef` for the `Operation`.\n Used for attributes of `graph_pb2.NodeDef`, typically `name`,\n `op`, and `device`. The `input` attribute is irrelevant here\n as it will be computed when generating the model.\n g: `Graph`. The parent graph.\n inputs: list of `Tensor` objects. The inputs to this `Operation`.\n output_types: list of `DType` objects. 
List of the types of the\n `Tensors` computed by this operation. The length of this list indicates\n the number of output endpoints of the `Operation`.\n control_inputs: list of operations or tensors from which to have a\n control dependency.\n input_types: List of `DType` objects representing the\n types of the tensors accepted by the `Operation`. By default\n uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect\n reference-typed inputs must specify these explicitly.\n original_op: Optional. Used to associate the new `Operation` with an\n existing `Operation` (for example, a replica with the op that was\n replicated).\n op_def: Optional. The `op_def_pb2.OpDef` proto that describes the\n op type that this `Operation` represents.\n\n Raises:\n TypeError: if control inputs are not Operations or Tensors,\n or if `node_def` is not a `NodeDef`,\n or if `g` is not a `Graph`,\n or if `inputs` are not tensors,\n or if `inputs` and `input_types` are incompatible.\n ValueError: if the `node_def` name is not valid.\n \"\"\"\n if not isinstance(node_def, graph_pb2.NodeDef):\n raise TypeError(\"node_def needs to be a NodeDef: %s\" % node_def)\n if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:\n raise ValueError(\n \"Cannot create an Operation with a NodeDef larger than 2GB.\")\n if not _VALID_OP_NAME_REGEX.match(node_def.name):\n raise ValueError(\"'%s' is not a valid node name\" % node_def.name)\n if not isinstance(g, Graph):\n raise TypeError(\"g needs to be a Graph: %s\" % g)\n self._node_def = copy.deepcopy(node_def)\n self._graph = g\n if inputs is None:\n inputs = []\n elif not isinstance(inputs, list):\n raise TypeError(\"inputs needs to be a list of Tensors: %s\" % inputs)\n self._inputs = list(inputs) # Defensive copy.\n for a in self._inputs:\n if not isinstance(a, Tensor):\n raise TypeError(\"input needs to be a Tensor: %s\" % a)\n # Mark that we consume the inputs.\n a._add_consumer(self) # pylint: disable=protected-access\n if output_types is None:\n output_types = []\n self._output_types = output_types\n self._outputs = [Tensor(self, i, output_type)\n for i, output_type in enumerate(output_types)]\n if input_types is None:\n input_types = [i.dtype.base_dtype for i in self._inputs]\n else:\n if not all(x.is_compatible_with(i.dtype)\n for i, x in zip(self._inputs, input_types)):\n raise TypeError(\"Inputs are not compatible with input types\")\n self._input_types = input_types\n\n # Build the list of control inputs.\n self._control_inputs = []\n if control_inputs:\n for c in control_inputs:\n c_op = None\n if isinstance(c, Operation):\n c_op = c\n elif isinstance(c, (Tensor, IndexedSlices)):\n c_op = c.op\n else:\n raise TypeError(\"Control input must be an Operation, \"\n \"a Tensor, or IndexedSlices: %s\" % c)\n self._control_inputs.append(c_op)\n\n self._original_op = original_op\n self._op_def = op_def\n self._traceback = _extract_stack()\n # Add this op to the current control flow context:\n self._control_flow_context = g._get_control_flow_context()\n if self._control_flow_context is not None:\n self._control_flow_context.AddOp(self)\n # NOTE(keveman): Control flow context's AddOp could be creating new ops and\n # setting op.inputs[index] = new_op. Thus the new ops' id could be larger\n # than this op's id even though this op depend on them. 
Therefore, delaying\n # assigning id to this op until all ops this could be dependent on are\n # created.\n self._id_value = self._graph._next_id() # pylint: disable=protected-access\n self._recompute_node_def()\n\n def values(self):\n \"\"\"DEPRECATED: Use outputs.\"\"\"\n return tuple(self.outputs)\n\n def _get_control_flow_context(self):\n \"\"\"Returns the control flow context of this op.\n\n Returns:\n A context object.\n \"\"\"\n return self._control_flow_context\n\n def _set_control_flow_context(self, context):\n \"\"\"Sets the current control flow context of this op.\n\n Args:\n context: a context object.\n \"\"\"\n self._control_flow_context = context\n\n @property\n def name(self):\n \"\"\"The full name of this operation.\"\"\"\n return self._node_def.name\n\n @property\n def _id(self):\n \"\"\"The unique integer id of this operation.\"\"\"\n return self._id_value\n\n @property\n def device(self):\n \"\"\"The name of the device to which this op has been assigned, if any.\n\n Returns:\n The string name of the device to which this op has been\n assigned, or None if it has not been assigned to a device.\n \"\"\"\n dev = self._node_def.device\n return None if not dev else dev\n\n def _set_device(self, device):\n \"\"\"Set the device of this operation.\n\n Args:\n device: string or device.. The device to set.\n \"\"\"\n self._node_def.device = _device_string(device)\n\n def _add_input(self, tensor, dtype=None):\n \"\"\"Add a new input to this operation.\n\n Args:\n tensor: the Tensor to add as an input.\n dtype: tf.DType: type of the input; defaults to\n the tensor's dtype.\n\n Raises:\n TypeError: if tensor is not a Tensor,\n or if input tensor type is not convertible to dtype.\n ValueError: if the Tensor is from a different graph.\n \"\"\"\n if not isinstance(tensor, Tensor):\n raise TypeError(\"tensor must be a Tensor: %s\" % tensor)\n _assert_same_graph(self, tensor)\n if dtype is None:\n dtype = tensor.dtype\n else:\n dtype = dtypes.as_dtype(dtype)\n if not dtype.is_compatible_with(tensor.dtype):\n raise TypeError(\n \"Cannot convert a tensor of type %s to an input of type %s\"\n % (tensor.dtype.name, dtype.name))\n self._inputs.append(tensor)\n self._input_types.append(dtype)\n tensor._add_consumer(self) # pylint: disable=protected-access\n self._recompute_node_def()\n\n def _update_input(self, index, tensor, dtype=None):\n \"\"\"Update the input to this operation at the given index.\n\n NOTE: This is for TF internal use only. 
Please don't use it.\n\n Args:\n index: the index of the input to update.\n tensor: the Tensor to be used as the input at the given index.\n dtype: tf.DType: type of the input; defaults to\n the tensor's dtype.\n\n Raises:\n TypeError: if tensor is not a Tensor,\n or if input tensor type is not convertible to dtype.\n ValueError: if the Tensor is from a different graph.\n \"\"\"\n if not isinstance(tensor, Tensor):\n raise TypeError(\"tensor must be a Tensor: %s\" % tensor)\n _assert_same_graph(self, tensor)\n if dtype is None:\n dtype = tensor.dtype\n else:\n dtype = dtypes.as_dtype(dtype)\n if not dtype.is_compatible_with(tensor.dtype):\n raise TypeError(\n \"Cannot convert a tensor of type %s to an input of type %s\"\n % (tensor.dtype.name, dtype.name))\n\n self._inputs[index].consumers().remove(self)\n self._inputs[index] = tensor\n self._input_types[index] = dtype\n tensor._add_consumer(self) # pylint: disable=protected-access\n self._recompute_node_def()\n\n def _add_control_input(self, op):\n \"\"\"Add a new control input to this operation.\n\n Args:\n op: the Operation to add as control input.\n\n Raises:\n TypeError: if op is not an Operation.\n ValueError: if op is from a different graph.\n \"\"\"\n if not isinstance(op, Operation):\n raise TypeError(\"op must be an Operation: %s\" % op)\n _assert_same_graph(self, op)\n self._control_inputs.append(op)\n self._recompute_node_def()\n\n # Methods below are used when building the NodeDef and Graph proto.\n def _recompute_node_def(self):\n del self._node_def.input[:]\n self._node_def.input.extend([t._as_node_def_input() for t in self._inputs])\n if self._control_inputs:\n self._node_def.input.extend([\"^%s\" % op.name for op in\n self._control_inputs])\n\n def __str__(self):\n return str(self._node_def)\n\n @property\n def outputs(self):\n \"\"\"The list of `Tensor` objects representing the outputs of this op.\"\"\"\n return self._outputs\n\n# pylint: disable=protected-access\n class _InputList(object):\n \"\"\"Immutable input list wrapper.\"\"\"\n\n def __init__(self, op):\n self._op = op\n\n def __iter__(self):\n return iter(self._op._inputs)\n\n def __len__(self):\n return len(self._op._inputs)\n\n def __bool__(self):\n return bool(self._op._inputs)\n\n # Python 3 wants __bool__, Python 2.7 wants __nonzero__\n __nonzero__ = __bool__\n\n def __getitem__(self, i):\n return self._op._inputs[i]\n# pylint: enable=protected-access\n\n @property\n def inputs(self):\n \"\"\"The list of `Tensor` objects representing the data inputs of this op.\"\"\"\n return Operation._InputList(self)\n\n @property\n def _input_dtypes(self):\n return self._input_types\n\n @property\n def control_inputs(self):\n \"\"\"The `Operation` objects on which this op has a control dependency.\n\n Before this op is executed, TensorFlow will ensure that the\n operations in `self.control_inputs` have finished executing. This\n mechanism can be used to run ops sequentially for performance\n reasons, or to ensure that the side effects of an op are observed\n in the correct order.\n\n Returns:\n A list of `Operation` objects.\n\n \"\"\"\n return self._control_inputs\n\n @property\n def type(self):\n \"\"\"The type of the op (e.g. 
`\"MatMul\"`).\"\"\"\n return self._node_def.op\n\n @property\n def graph(self):\n \"\"\"The `Graph` that contains this operation.\"\"\"\n return self._graph\n\n @property\n def node_def(self):\n \"\"\"Returns a serialized `NodeDef` representation of this operation.\n\n Returns:\n A\n [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)\n protocol buffer.\n \"\"\"\n return self._node_def\n\n @property\n def op_def(self):\n \"\"\"Returns the `OpDef` proto that represents the type of this op.\n\n Returns:\n An\n [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)\n protocol buffer.\n \"\"\"\n return self._op_def\n\n @property\n def traceback(self):\n \"\"\"Returns the call stack from when this operation was constructed.\"\"\"\n return _convert_stack(self._traceback)\n\n def get_attr(self, name):\n \"\"\"Returns the value of the attr of this op with the given `name`.\n\n Args:\n name: The name of the attr to fetch.\n\n Returns:\n The value of the attr, as a Python object.\n\n Raises:\n ValueError: If this op does not have an attr with the given `name`.\n \"\"\"\n fields = [\"s\", \"i\", \"f\", \"b\", \"type\", \"shape\", \"tensor\"]\n if name not in self._node_def.attr:\n raise ValueError(\"No attr named '\" + name + \"' in \" +\n str(self._node_def))\n x = self._node_def.attr[name]\n # Treat an empty oneof value as an empty list.\n if not x.WhichOneof(\"value\"):\n return []\n if x.HasField(\"list\"):\n for f in fields:\n if getattr(x.list, f):\n return list(getattr(x.list, f))\n return []\n else:\n for f in fields:\n if x.HasField(f):\n return getattr(x, f)\n assert False, \"Unsupported field type in \" + str(x)\n\n def run(self, feed_dict=None, session=None):\n \"\"\"Runs this operation in a `Session`.\n\n Calling this method will execute all preceding operations that\n produce the inputs needed for this operation.\n\n *N.B.* Before invoking `Operation.run()`, its graph must have been\n launched in a session, and either a default session must be\n available, or `session` must be specified explicitly.\n\n Args:\n feed_dict: A dictionary that maps `Tensor` objects to feed values.\n See [`Session.run()`](../../api_docs/python/client.md#Session.run)\n for a description of the valid feed values.\n session: (Optional.) The `Session` to be used to run to this operation. If\n none, the default session will be used.\n \"\"\"\n _run_using_default_session(self, feed_dict, self.graph, session)\n\n\n_gradient_registry = registry.Registry(\"gradient\")\n\n\nclass RegisterGradient(object):\n \"\"\"A decorator for registering the gradient function for an op type.\n\n This decorator is only used when defining a new op type. For an op\n with `m` inputs and `n` outputs, the gradient function is a function\n that takes the original `Operation` and `n` `Tensor` objects\n (representing the gradients with respect to each output of the op),\n and returns `m` `Tensor` objects (representing the partial gradients\n with respect to each input of the op).\n\n For example, assuming that operations of type `\"Sub\"` take two\n inputs `x` and `y`, and return a single output `x - y`, the\n following gradient function would be registered:\n\n ```python\n @tf.RegisterGradient(\"Sub\")\n def _sub_grad(unused_op, grad):\n return grad, tf.neg(grad)\n ```\n\n The decorator argument `op_type` is the string type of an\n operation. 
This corresponds to the `OpDef.name` field for the proto\n that defines the operation.\n\n @@__init__\n \"\"\"\n\n def __init__(self, op_type):\n \"\"\"Creates a new decorator with `op_type` as the Operation type.\n\n Args:\n op_type: The string type of an operation. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n \"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string\")\n self._op_type = op_type\n\n def __call__(self, f):\n \"\"\"Registers the function `f` as gradient function for `op_type`.\"\"\"\n _gradient_registry.register(f, self._op_type)\n return f\n\n\ndef NoGradient(op_type):\n \"\"\"Specifies that ops of type `op_type` do not have a defined gradient.\n\n This function is only used when defining a new op type. It may be\n used for ops such as `tf.size()` that are not differentiable. For\n example:\n\n ```python\n tf.NoGradient(\"Size\")\n ```\n\n Args:\n op_type: The string type of an operation. This corresponds to the\n `OpDef.name` field for the proto that defines the operation.\n\n Raises:\n TypeError: If `op_type` is not a string.\n\n \"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string\")\n _gradient_registry.register(None, op_type)\n\n\ndef get_gradient_function(op):\n \"\"\"Returns the function that computes gradients for \"op\".\"\"\"\n if not op.inputs: return None\n try:\n op_type = op.get_attr(\"_gradient_op_type\")\n except ValueError:\n op_type = op.type\n return _gradient_registry.lookup(op_type)\n\n\n_shape_registry = registry.Registry(\"shape functions\")\n_default_shape_function_registry = registry.Registry(\"default shape functions\")\n\n\nclass RegisterShape(object):\n \"\"\"A decorator for registering the shape function for an op type.\n\n This decorator is only used when defining a new op type. A shape\n function is a function from an `Operation` object to a list of\n `TensorShape` objects, with one `TensorShape` for each output of the\n operation.\n\n For example, assuming that operations of type `\"Sub\"` take two\n inputs `x` and `y`, and return a single output `x - y`, all with the\n same shape, the following shape function would be registered:\n\n ```python\n @tf.RegisterShape(\"Sub\")\n def _sub_shape(op):\n return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]\n ```\n\n The decorator argument `op_type` is the string type of an\n operation. This corresponds to the `OpDef.name` field for the proto\n that defines the operation.\n\n \"\"\"\n\n def __init__(self, op_type):\n \"\"\"Saves the `op_type` as the `Operation` type.\"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string\")\n self._op_type = op_type\n\n def __call__(self, f):\n \"\"\"Registers \"f\" as the shape function for \"op_type\".\"\"\"\n if f is None:\n # None is a special \"weak\" value that provides a default shape function,\n # and can be overridden by a non-None registration.\n try:\n _default_shape_function_registry.register(_no_shape_function,\n self._op_type)\n except KeyError:\n # Ignore duplicate registrations of the weak value. 
This can\n # occur if the op library input to wrapper generation\n # inadvertently links in one or more of the standard op\n # libraries.\n pass\n else:\n _shape_registry.register(f, self._op_type)\n return f\n\n\ndef _no_shape_function(op):\n return [tensor_shape.unknown_shape() for _ in op.outputs]\n\n\ndef set_shapes_for_outputs(op):\n \"\"\"Uses the registered shape functions to set the shapes for op's outputs.\"\"\"\n try:\n shape_func = _shape_registry.lookup(op.type)\n except LookupError:\n try:\n shape_func = _default_shape_function_registry.lookup(op.type)\n except LookupError:\n raise RuntimeError(\"No shape function registered for standard op: %s\"\n % op.type)\n shapes = shape_func(op)\n if len(op.outputs) != len(shapes):\n raise RuntimeError(\n \"Shape function for op %s returned %d shapes but expected %d\" %\n (op, len(shapes), len(op.outputs)))\n for output, s in zip(op.outputs, shapes):\n output.set_shape(s)\n\n\nclass OpStats(object):\n \"\"\"A holder for statistics about an operator.\n\n This class holds information about the resource requirements for an op,\n including the size of its weight parameters on-disk and how many FLOPS it\n requires to execute forward inference.\n\n If you define a new operation, you can create a function that will return a\n set of information about its usage of the CPU and disk space when serialized.\n The function itself takes a Graph object that's been set up so you can call\n methods like get_tensor_by_name to help calculate the results, and a NodeDef\n argument.\n\n \"\"\"\n\n def __init__(self, statistic_type, value=None):\n \"\"\"Sets up the initial placeholders for the statistics.\"\"\"\n self.statistic_type = statistic_type\n self.value = value\n\n @property\n def statistic_type(self):\n return self._statistic_type\n\n @statistic_type.setter\n def statistic_type(self, statistic_type):\n self._statistic_type = statistic_type\n\n @property\n def value(self):\n return self._value\n\n @value.setter\n def value(self, value):\n self._value = value\n\n def __iadd__(self, other):\n if other.statistic_type != self.statistic_type:\n raise ValueError(\"Can't add an OpStat of type %s to one of %s.\",\n self.statistic_type, other.statistic_type)\n if self.value is None:\n self.value = other.value\n elif other.value is not None:\n self._value += other.value\n return self\n\n_stats_registry = registry.Registry(\"statistical functions\")\n\n\nclass RegisterStatistics(object):\n \"\"\"A decorator for registering the statistics function for an op type.\n\n This decorator is very similar to the RegisterShapes class, and can be defined\n for an op type so that it gives a report on the resources used by an instance\n of an operator, in the form of an OpStats object.\n\n Well-known types of statistics include these so far:\n\n - weight_parameters: For operations like MatMul, Conv, and BiasAdd that take\n learned weights as inputs, this statistic captures how many numerical values\n are used. This is good to know because the weights take up most of the size\n of a typical serialized graph on disk.\n\n - flops: When running a graph, the bulk of the computation happens doing\n numerical calculations like matrix multiplications. This type allows a node\n to return how many floating-point operations it takes to complete. 
The\n total number of FLOPs for a graph is a good guide to its expected latency.\n\n You can add your own statistics just by picking a new type string, registering\n functions for the ops you care about, and then calling something like\n python/tools/graph_metrics.py with the new type as an argument.\n\n If a statistic for an op is registered multiple times, a KeyError will be\n raised.\n\n For example, you can define a new metric called doohickey for a Foo operation\n by placing this in your code:\n\n ```python\n @ops.RegisterStatistics(\"Foo\", \"doohickey\")\n def _calc_foo_bojangles(unused_graph, unused_node_def):\n return ops.OpStats(\"doohickey\", 20)\n ```\n\n Then in client code you can retrieve the value by making this call:\n\n ```python\n doohickey = ops.get_stats_for_node_def(graph, node_def, \"doohickey\")\n ```\n\n If the NodeDef is for an op with a registered doohickey function, you'll get\n back the calculated amount in doohickey.value, or None if it's not defined.\n\n \"\"\"\n\n def __init__(self, op_type, statistic_type):\n \"\"\"Saves the `op_type` as the `Operation` type.\"\"\"\n if not isinstance(op_type, six.string_types):\n raise TypeError(\"op_type must be a string.\")\n if \",\" in op_type:\n raise TypeError(\"op_type must not contain a comma.\")\n self._op_type = op_type\n if not isinstance(statistic_type, six.string_types):\n raise TypeError(\"statistic_type must be a string.\")\n if \",\" in statistic_type:\n raise TypeError(\"statistic_type must not contain a comma.\")\n self._statistic_type = statistic_type\n\n def __call__(self, f):\n \"\"\"Registers \"f\" as the statistics function for \"op_type\".\"\"\"\n _stats_registry.register(f, self._op_type + \",\" + self._statistic_type)\n return f\n\n\ndef get_stats_for_node_def(graph, node, statistic_type):\n \"\"\"Looks up the node's statistics function in the registry and calls it.\n\n This function takes a Graph object and a NodeDef from a GraphDef, and if\n there's an associated statistics method, calls it and returns a result. 
If no\n function has been registered for the particular node type, it returns an empty\n statistics object.\n\n Args:\n graph: A Graph object that's been set up with the node's graph.\n node: A NodeDef describing the operator.\n statistic_type: A string identifying the statistic we're interested in.\n Returns:\n An OpStats object containing information about resource usage.\n \"\"\"\n\n try:\n stats_func = _stats_registry.lookup(node.op + \",\" + statistic_type)\n result = stats_func(graph, node)\n except LookupError:\n result = OpStats(statistic_type)\n return result\n\n\nclass Graph(object):\n \"\"\"A TensorFlow computation, represented as a dataflow graph.\n\n A `Graph` contains a set of\n [`Operation`](../../api_docs/python/framework.md#Operation) objects,\n which represent units of computation; and\n [`Tensor`](../../api_docs/python/framework.md#Tensor) objects, which represent\n the units of data that flow between operations.\n\n A default `Graph` is always registered, and accessible by calling\n [`tf.get_default_graph()`](../../api_docs/python/framework.md#get_default_graph).\n To add an operation to the default graph, simply call one of the functions\n that defines a new `Operation`:\n\n ```\n c = tf.constant(4.0)\n assert c.graph is tf.get_default_graph()\n ```\n\n Another typical usage involves the\n [`Graph.as_default()`](../../api_docs/python/framework.md#Graph.as_default)\n context manager, which overrides the current default graph for the\n lifetime of the context:\n\n ```python\n g = tf.Graph()\n with g.as_default():\n # Define operations and tensors in `g`.\n c = tf.constant(30.0)\n assert c.graph is g\n ```\n\n Important note: This class *is not* thread-safe for graph construction. All\n operations should be created from a single thread, or external\n synchronization must be provided. Unless otherwise specified, all methods\n are not thread-safe.\n\n @@__init__\n @@as_default\n @@as_graph_def\n @@finalize\n @@finalized\n\n @@control_dependencies\n @@device\n @@name_scope\n\n A `Graph` instance supports an arbitrary number of \"collections\"\n that are identified by name. For convenience when building a large\n graph, collections can store groups of related objects: for\n example, the `tf.Variable` uses a collection (named\n [`tf.GraphKeys.VARIABLES`](../../api_docs/python/framework.md#GraphKeys)) for\n all variables that are created during the construction of a graph. The caller\n may define additional collections by specifying a new name.\n\n @@add_to_collection\n @@get_collection\n\n @@as_graph_element\n @@get_operation_by_name\n @@get_tensor_by_name\n @@get_operations\n\n @@get_default_device\n @@seed\n @@unique_name\n @@version\n @@graph_def_versions\n\n @@create_op\n @@gradient_override_map\n \"\"\"\n\n def __init__(self):\n \"\"\"Creates a new, empty Graph.\"\"\"\n self._nodes_by_id = dict()\n self._next_node_id = [dict()]\n self._next_id_counter = 0\n self._nodes_by_name = dict()\n # Current name stack: a pair of uniquified names and plain names.\n self._name_stack = (\"\", \"\")\n # Maps a name used in the graph to the next id to use for that name.\n self._names_in_use = {}\n # Default device applied to new ops.\n self._default_device = None\n # Functions that will be applied to choose a device if none is specified.\n self._device_function_stack = []\n # Default original_op applied to new ops.\n self._default_original_op = None\n # Current control flow context. 
    # It could be either a CondContext or a
    # WhileContext, defined in ops/control_flow_ops.py.
    self._control_flow_context = None
    # A new node will depend on the union of all of the nodes in the stack.
    self._control_dependencies_stack = []
    # Arbitrary collections of objects.
    self._collections = {}
    # The graph-level random seed.
    self._seed = None
    # A map from op type to the kernel label that should be used.
    self._op_to_kernel_label_map = {}
    # A map from op type to an alternative op type that should be used when
    # computing gradients.
    self._gradient_override_map = {}
    # True if the graph is considered "finalized". In that case no
    # new operations can be added.
    self._finalized = False
    # Functions defined in the graph.
    self._functions = collections.OrderedDict()
    # Default GraphDef versions.
    self._graph_def_versions = versions_pb2.VersionDef(
        producer=versions.GRAPH_DEF_VERSION,
        min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)

  def _check_not_finalized(self):
    """Check if the graph is finalized.

    Raises:
      RuntimeError: If the graph is finalized.
    """
    if self._finalized:
      raise RuntimeError("Graph is finalized and cannot be modified.")

  def _add_op(self, op):
    """Adds 'op' to the graph.

    Args:
      op: the Operation or Tensor to add.

    Raises:
      TypeError: if op is not an Operation or Tensor.
      ValueError: if the op.name or op._id are already used.
    """
    self._check_not_finalized()
    if not isinstance(op, (Tensor, Operation)):
      raise TypeError("op must be a Tensor or Operation: %s" % op)

    if op._id in self._nodes_by_id:
      raise ValueError("cannot add an op with id %d as it already "
                       "exists in the graph" % op._id)
    if op.name in self._nodes_by_name:
      raise ValueError("cannot add op with name %s as that name "
                       "is already used" % op.name)
    self._nodes_by_id[op._id] = op
    self._nodes_by_name[op.name] = op

  @property
  def version(self):
    """Returns a version number that increases as ops are added to the graph.

    Note that this is unrelated to the
    [GraphDef version](#Graph.graph_def_versions).
    """
    return self._next_id_counter

  @property
  def graph_def_versions(self):
    """The GraphDef version information of this graph.

    For details on the meaning of each version, see [`GraphDef`]
    (https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).

    Returns:
      A `VersionDef`.
    """
    return self._graph_def_versions

  @property
  def seed(self):
    return self._seed

  @seed.setter
  def seed(self, seed):
    self._seed = seed

  @property
  def finalized(self):
    """True if this graph has been finalized."""
    return self._finalized

  def finalize(self):
    """Finalizes this graph, making it read-only.

    After calling `g.finalize()`, no new operations can be added to
    `g`.
This method is used to ensure that no operations are added\n to a graph when it is shared between multiple threads, for example\n when using a [`QueueRunner`](../../api_docs/python/train.md#QueueRunner).\n \"\"\"\n self._finalized = True\n\n def _get_control_flow_context(self):\n \"\"\"Returns the current control flow context.\n\n Returns:\n A context object.\n \"\"\"\n return self._control_flow_context\n\n def _set_control_flow_context(self, context):\n \"\"\"Sets the current control flow context.\n\n Args:\n context: a context object.\n \"\"\"\n self._control_flow_context = context\n\n def as_graph_def(self, from_version=None):\n \"\"\"Returns a serialized `GraphDef` representation of this graph.\n\n The serialized `GraphDef` can be imported into another `Graph`\n (using [`import_graph_def()`](#import_graph_def)) or used with the\n [C++ Session API](../../api_docs/cc/index.md).\n\n This method is thread-safe.\n\n Args:\n from_version: Optional. If this is set, returns a `GraphDef`\n containing only the nodes that were added to this graph since\n its `version` property had the given value.\n\n Returns:\n A [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)\n protocol buffer.\n\n Raises:\n ValueError: If the `graph_def` would be too large.\n \"\"\"\n graph = graph_pb2.GraphDef()\n graph.versions.CopyFrom(self._graph_def_versions)\n bytesize = 0\n for op_id in sorted(self._nodes_by_id):\n op = self._nodes_by_id[op_id]\n if from_version is None or op_id > from_version:\n graph.node.extend([op.node_def])\n bytesize += op.node_def.ByteSize()\n if bytesize >= (1 << 31) or bytesize < 0:\n raise ValueError(\"GraphDef cannot be larger than 2GB.\")\n if self._functions:\n for f in self._functions.values():\n bytesize += f.ByteSize()\n if bytesize >= (1 << 31) or bytesize < 0:\n raise ValueError(\"GraphDef cannot be larger than 2GB.\")\n graph.library.function.extend(self._functions.values())\n return graph\n\n def _is_function(self, name):\n \"\"\"Tests whether 'name' is registered in this graph's function library.\n\n Args:\n name: string op name.\n Returns:\n bool indicating whether or not 'name' is registered in function library.\n \"\"\"\n return name in self._functions\n\n def _get_function(self, name):\n \"\"\"Returns the function definition for 'name'.\n\n Args:\n name: string function name.\n Returns:\n The function def proto.\n \"\"\"\n return self._functions[name]\n\n def _add_function(self, function_def):\n \"\"\"Adds a function to the graph.\n\n The function is specified as a [`FunctionDef`]\n (https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)\n protocol buffer.\n\n After the function has been added, you can call to the function by\n passing the function name in place of an op name to\n `Graph.create_op()`.\n\n Args:\n function_def: A `FunctionDef` protocol buffer.\n \"\"\"\n previous_def = self._functions.get(function_def.signature.name, None)\n if previous_def:\n if previous_def != function_def:\n raise ValueError(\"Another function is already defined with that name\")\n else:\n # No need to add again.\n return\n self._functions[function_def.signature.name] = function_def\n\n # Helper functions to create operations.\n def create_op(self, op_type, inputs, dtypes,\n input_types=None, name=None, attrs=None, op_def=None,\n compute_shapes=True):\n \"\"\"Creates an `Operation` in this graph.\n\n This is a low-level interface for creating an `Operation`. 
Most
    programs will not call this method directly, and instead use the
    Python op constructors, such as `tf.constant()`, which add ops to
    the default graph.

    Args:
      op_type: The `Operation` type to create. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.
      inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
      dtypes: A list of `DType` objects that will be the types of the tensors
        that the operation produces.
      input_types: (Optional.) A list of `DType`s that will be the types of
        the tensors that the operation consumes. By default, uses the base
        `DType` of each input in `inputs`. Operations that expect
        reference-typed inputs must specify `input_types` explicitly.
      name: (Optional.) A string name for the operation. If not specified, a
        name is generated based on `op_type`.
      attrs: (Optional.) A dictionary where the key is the attribute name (a
        string) and the value is the respective `attr` attribute of the
        `NodeDef` proto that will represent the operation (an `AttrValue`
        proto).
      op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
        the operation will have.
      compute_shapes: (Optional.) If True, shape inference will be performed
        to compute the shapes of the outputs.

    Raises:
      TypeError: if any of the inputs is not a `Tensor`.

    Returns:
      An `Operation` object.
    """
    self._check_not_finalized()
    for idx, a in enumerate(inputs):
      if not isinstance(a, Tensor):
        raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
    if name is None:
      name = op_type
    # If a name ends with a '/' it is a "name scope" and we use it as-is,
    # after removing the trailing '/'.
    if name and name[-1] == "/":
      name = name[:-1]
    else:
      name = self.unique_name(name)

    node_def = _NodeDef(
        op_type, name, device=self._default_device or None, attrs=attrs)

    # Apply a kernel label if one has been specified for this op_type.
    try:
      kernel_label = self._op_to_kernel_label_map[op_type]
      node_def.attr["_kernel"].CopyFrom(
          attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
    except KeyError:
      pass

    # Apply the overriding op_type for gradients if one has been
    # specified for this op_type.
    try:
      mapped_op_type = self._gradient_override_map[op_type]
      node_def.attr["_gradient_op_type"].CopyFrom(
          attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
    except KeyError:
      pass

    control_inputs = self._control_dependencies_for_inputs(inputs)
    ret = Operation(node_def, self, inputs=inputs, output_types=dtypes,
                    control_inputs=control_inputs, input_types=input_types,
                    original_op=self._default_original_op, op_def=op_def)
    if compute_shapes:
      set_shapes_for_outputs(ret)
    self._add_op(ret)
    self._record_op_seen_by_control_dependencies(ret)
    # Apply any device functions in reverse order, so that the most recently
    # pushed function has the first chance to apply a device to the op.
    # We apply here because the result can depend on the Operation's
    # signature, which is computed in the Operation constructor.
    for device_function in reversed(self._device_function_stack):
      ret._set_device(device_function(ret))
    return ret

  def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
    """Returns the object referred to by `obj`, as an `Operation` or `Tensor`.

    This function validates that `obj` represents an element of this
    graph, and gives an informative error message if it is not.

    This function is the canonical way to get/validate an object of
    one of the allowed types from an external argument reference in the
    Session API.
    This method may be called concurrently from multiple threads.

    Args:
      obj: A `Tensor`, an `Operation`, or the name of a tensor or operation.
        Can also be any object with an `_as_graph_element()` method that
        returns a value of one of these types.
      allow_tensor: If true, `obj` may refer to a `Tensor`.
      allow_operation: If true, `obj` may refer to an `Operation`.

    Returns:
      The `Tensor` or `Operation` in the Graph corresponding to `obj`.

    Raises:
      TypeError: If `obj` is not one of the types we support attempting to
        convert to graph elements.
      ValueError: If `obj` is of an appropriate type but invalid. For
        example, an invalid string.
      KeyError: If `obj` is not an object in the graph.
    """

    # The vast majority of this function is figuring
    # out what an API user might be doing wrong, so
    # that we can give helpful error messages.
    #
    # Ideally, it would be nice to split it up, but we
    # need context to generate nice error messages.

    if allow_tensor and allow_operation:
      types_str = "Tensor or Operation"
    elif allow_tensor:
      types_str = "Tensor"
    elif allow_operation:
      types_str = "Operation"
    else:
      raise ValueError("allow_tensor and allow_operation can't both be False.")

    obj = _as_graph_element(obj) or obj

    # If obj appears to be a name...
    if isinstance(obj, compat.bytes_or_text_types):
      name = compat.as_str(obj)

      if ":" in name and allow_tensor:
        # Looks like a Tensor name and can be a Tensor.
        try:
          op_name, out_n = name.split(":")
          out_n = int(out_n)
        except ValueError:
          raise ValueError("The name %s looks like a Tensor name, but is "
                           "not a valid one. Tensor names must be of the "
                           "form \"<op_name>:<output_index>\"." % repr(name))
        if op_name in self._nodes_by_name:
          op = self._nodes_by_name[op_name]
        else:
          raise KeyError("The name %s refers to a Tensor which does not "
                         "exist. The operation, %s, does not exist in the "
                         "graph." % (repr(name), repr(op_name)))
        try:
          return op.outputs[out_n]
        except IndexError:
          raise KeyError("The name %s refers to a Tensor which does not "
                         "exist. The operation, %s, exists but only has "
                         "%d outputs."
                         % (repr(name), repr(op_name), len(op.outputs)))

      elif ":" in name and not allow_tensor:
        # Looks like a Tensor name but can't be a Tensor.
        raise ValueError("Name %s appears to refer to a Tensor, not a %s."
                         % (repr(name), types_str))

      elif ":" not in name and allow_operation:
        # Looks like an Operation name and can be an Operation.
        if name not in self._nodes_by_name:
          raise KeyError("The name %s refers to an Operation not in the "
                         "graph." % repr(name))
        return self._nodes_by_name[name]

      elif ":" not in name and not allow_operation:
        # Looks like an Operation name but can't be an Operation.
        if name in self._nodes_by_name:
          # Yep, it's an Operation name.
          err_msg = ("The name %s refers to an Operation, not a %s."
                     % (repr(name), types_str))
        else:
          err_msg = ("The name %s looks like an (invalid) Operation name, "
                     "not a %s." % (repr(name), types_str))
        err_msg += (" Tensor names must be of the form "
                    "\"<op_name>:<output_index>\".")
        raise ValueError(err_msg)

    elif isinstance(obj, Tensor) and allow_tensor:
      # Actually obj is just the object it's referring to.
      if obj.graph is not self:
        raise ValueError("Tensor %s is not an element of this graph." % obj)
      return obj
    elif isinstance(obj, Operation) and allow_operation:
      # Actually obj is just the object it's referring to.
      if obj.graph is not self:
        raise ValueError("Operation %s is not an element of this graph." % obj)
      return obj
    else:
      # We give up!
      raise TypeError("Can not convert a %s into a %s."
                      % (type(obj).__name__, types_str))

  def get_operations(self):
    """Return the list of operations in the graph.

    You can modify the operations in place, but modifications
    to the list such as inserts/delete have no effect on the
    list of operations known to the graph.

    This method may be called concurrently from multiple threads.

    Returns:
      A list of Operations.
    """
    return list(self._nodes_by_id.values())

  def get_operation_by_name(self, name):
    """Returns the `Operation` with the given `name`.

    This method may be called concurrently from multiple threads.

    Args:
      name: The name of the `Operation` to return.

    Returns:
      The `Operation` with the given `name`.

    Raises:
      TypeError: If `name` is not a string.
      KeyError: If `name` does not correspond to an operation in this graph.
    """
    if not isinstance(name, six.string_types):
      raise TypeError("Operation names are strings (or similar), not %s."
                      % type(name).__name__)
    return self.as_graph_element(name, allow_tensor=False,
                                 allow_operation=True)

  def get_tensor_by_name(self, name):
    """Returns the `Tensor` with the given `name`.

    This method may be called concurrently from multiple threads.

    Args:
      name: The name of the `Tensor` to return.

    Returns:
      The `Tensor` with the given `name`.

    Raises:
      TypeError: If `name` is not a string.
      KeyError: If `name` does not correspond to a tensor in this graph.
    """
    # Names should be strings.
    if not isinstance(name, six.string_types):
      raise TypeError("Tensor names are strings (or similar), not %s."
                      % type(name).__name__)
    return self.as_graph_element(name, allow_tensor=True,
                                 allow_operation=False)

  def _next_id(self):
    """Id for the next Operation instance. Also increments the internal id."""
    self._check_not_finalized()
    self._next_id_counter += 1
    return self._next_id_counter

  @property
  def _last_id(self):
    return self._next_id_counter

  def as_default(self):
    """Returns a context manager that makes this `Graph` the default graph.

    This method should be used if you want to create multiple graphs
    in the same process. For convenience, a global default graph is
    provided, and all ops will be added to this graph if you do not
    create a new graph explicitly. Use this method with the `with` keyword
    to specify that ops created within the scope of a block should be
    added to this graph.

    The default graph is a property of the current thread. If you
    create a new thread, and wish to use the default graph in that
    thread, you must explicitly add a `with g.as_default():` in that
    thread's function.

    The following code examples are equivalent:

    ```python
    # 1. Using Graph.as_default():
    g = tf.Graph()
    with g.as_default():
      c = tf.constant(5.0)
      assert c.graph is g

    # 2. Constructing and making default:
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0)
      assert c.graph is g
    ```

    Returns:
      A context manager for using this graph as the default graph.
    """
    return _default_graph_stack.get_controller(self)

  def add_to_collection(self, name, value):
    """Stores `value` in the collection with the given `name`.

    Args:
      name: The key for the collection. For example, the `GraphKeys` class
        contains many standard names for collections.
      value: The value to add to the collection.
    """
    self._check_not_finalized()
    if name not in self._collections:
      self._collections[name] = [value]
    else:
      self._collections[name].append(value)

  def get_collection(self, name, scope=None):
    """Returns a list of values in the collection with the given `name`.

    Args:
      name: The key for the collection. For example, the `GraphKeys` class
        contains many standard names for collections.
      scope: (Optional.) If supplied, the resulting list is filtered to include
        only items whose name begins with this string.

    Returns:
      The list of values in the collection with the given `name`, or
      an empty list if no value has been added to that collection. The
      list contains the values in the order under which they were
      collected.
    """
    if scope is None:
      return self._collections.get(name, list())
    else:
      c = []
      for item in self._collections.get(name, list()):
        if hasattr(item, "name") and item.name.startswith(scope):
          c.append(item)
      return c

  @contextlib.contextmanager
  def _original_op(self, op):
    """Python 'with' handler to help annotate ops with their originator.

    An op may have an 'original_op' property that indicates the op on which
    it was based.
    For example, a replica op is based on the op that was
    replicated and a gradient op is based on the op that was differentiated.

    All ops created in the scope of this 'with' handler will have
    the given 'op' as their original op.

    Args:
      op: The Operation that all ops created in this scope will have as their
        original op.

    Yields:
      Nothing.
    """
    old_original_op = self._default_original_op
    try:
      self._default_original_op = op
      yield
    finally:
      self._default_original_op = old_original_op

  # pylint: disable=g-doc-return-or-yield
  @contextlib.contextmanager
  def name_scope(self, name):
    """Returns a context manager that creates hierarchical names for operations.

    A graph maintains a stack of name scopes. A `with name_scope(...):`
    statement pushes a new name onto the stack for the lifetime of the context.

    The `name` argument will be interpreted as follows:

    * A string (not ending with '/') will create a new name scope, in which
      `name` is appended to the prefix of all operations created in the
      context. If `name` has been used before, it will be made unique by
      calling `self.unique_name(name)`.
    * A scope previously captured from a `with g.name_scope(...) as
      scope:` statement will be treated as an "absolute" name scope, which
      makes it possible to re-enter existing scopes.
    * A value of `None` or the empty string will reset the current name scope
      to the top-level (empty) name scope.

    For example:

    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(5.0, name="c")
      assert c.name == "c"
      c_1 = tf.constant(6.0, name="c")
      assert c_1.name == "c_1"

      # Creates a scope called "nested"
      with g.name_scope("nested") as scope:
        nested_c = tf.constant(10.0, name="c")
        assert nested_c.name == "nested/c"

        # Creates a nested scope called "inner".
        with g.name_scope("inner"):
          nested_inner_c = tf.constant(20.0, name="c")
          assert nested_inner_c.name == "nested/inner/c"

        # Creates a nested scope called "inner_1".
        with g.name_scope("inner"):
          nested_inner_1_c = tf.constant(30.0, name="c")
          assert nested_inner_1_c.name == "nested/inner_1/c"

      # Treats `scope` as an absolute name scope, and
      # switches to the "nested/" scope.
      with g.name_scope(scope):
        nested_d = tf.constant(40.0, name="d")
        assert nested_d.name == "nested/d"

      with g.name_scope(""):
        e = tf.constant(50.0, name="e")
        assert e.name == "e"
    ```

    The name of the scope itself can be captured by `with
    g.name_scope(...) as scope:`, which stores the name of the scope
    in the variable `scope`. This value can be used to name an
    operation that represents the overall result of executing the ops
    in a scope.
For example:\n\n ```python\n inputs = tf.constant(...)\n with g.name_scope('my_layer') as scope:\n weights = tf.Variable(..., name=\"weights\")\n biases = tf.Variable(..., name=\"biases\")\n affine = tf.matmul(inputs, weights) + biases\n output = tf.nn.relu(affine, name=scope)\n ```\n\n\n Args:\n name: A name for the scope.\n\n Returns:\n A context manager that installs `name` as a new name scope.\n \"\"\"\n try:\n old_stack = self._name_stack\n if not name: # Both for name=None and name=\"\" we re-set to empty scope.\n new_stack = (None, None)\n elif name and name[-1] == \"/\":\n new_stack = (name[:-1], name[:-1])\n else:\n new_stack = (self.unique_name(name), self._plain_name(name))\n self._name_stack = new_stack\n yield \"\" if new_stack[0] is None else new_stack[0] + \"/\"\n finally:\n self._name_stack = old_stack\n # pylint: enable=g-doc-return-or-yield\n\n def unique_name(self, name):\n \"\"\"Return a unique operation name for `name`.\n\n Note: You rarely need to call `unique_name()` directly. Most of\n the time you just need to create `with g.name_scope()` blocks to\n generate structured names.\n\n `unique_name` is used to generate structured names, separated by\n `\"/\"`, to help identify operations when debugging a graph.\n Operation names are displayed in error messages reported by the\n TensorFlow runtime, and in various visualization tools such as\n TensorBoard.\n\n Args:\n name: The name for an operation.\n\n Returns:\n A string to be passed to `create_op()` that will be used\n to name the operation being created.\n \"\"\"\n if self._name_stack[0]:\n name = self._name_stack[0] + \"/\" + name\n i = self._names_in_use.get(name, 0)\n # Increment the number for \"name\".\n self._names_in_use[name] = i + 1\n if i > 0:\n base_name = name\n # Make sure the composed name is not already used.\n while name in self._names_in_use:\n name = \"%s_%d\" % (base_name, i)\n i += 1\n # Mark the composed name as used in case someone wants\n # to call unique_name(\"name_1\").\n self._names_in_use[name] = 1\n return name\n\n # TODO(touts): remove\n def _plain_name(self, name):\n \"\"\"Return the fully scoped 'name'.\n\n Args:\n name: a string.\n\n Returns:\n 'name' scoped in the current name stack, without any uniquified\n elements.\n \"\"\"\n if self._name_stack[1]:\n return self._name_stack[1] + \"/\" + name\n else:\n return name\n\n def _set_default_device(self, dev):\n \"\"\"Set the default device properties.\n\n Args:\n dev: string or Device.\n \"\"\"\n self._default_device = _device_string(dev)\n\n def get_default_device(self):\n \"\"\"Returns the default device.\n\n Returns:\n A string.\n \"\"\"\n return self._default_device\n\n def _push_default_device_function(self, device_function):\n \"\"\"Pushes the given function onto the stack of device functions.\n\n See `Graph.device` for more details.\n\n Args:\n device_function: The function to be pushed onto the stack of device\n functions.\n \"\"\"\n self._device_function_stack.append(device_function)\n\n def _pop_default_device_function(self, device_function):\n \"\"\"Pops the given function from the stack of device functions.\n\n See `Graph.device` for more details.\n\n Args:\n device_function: The function to be popped from the stack of device\n functions.\n\n Raises:\n ValueError: if the device_function to be popped is not top of the stack,\n or if the stack is empty.\n \"\"\"\n if not self._device_function_stack:\n raise ValueError(\"Tried to pop, but the device function stack is empty\")\n if self._device_function_stack[-1] is not 
device_function:\n raise ValueError(\"Tried to pop device function, but it was not on top \"\n \"of the stack\")\n\n self._device_function_stack.pop()\n\n @contextlib.contextmanager\n def device(self, device_name_or_function):\n \"\"\"Returns a context manager that specifies the default device to use.\n\n The `device_name_or_function` argument may either be a device name\n string, a device function, or None:\n\n * If it is a device name string, all operations constructed in\n this context will be assigned to the device with that name.\n * If it is a function, it will be treated as function from\n Operation objects to device name strings, and invoked each time\n a new Operation is created. The Operation will be assigned to\n the device with the returned name.\n * If it is None, the default device will be cleared.\n\n For example:\n\n ```python\n with g.device('/gpu:0'):\n # All operations constructed in this context will be placed\n # on GPU 0.\n with g.device(None):\n # All operations constructed in this context will have no\n # assigned device.\n\n # Defines a function from `Operation` to device string.\n def matmul_on_gpu(n):\n if n.type == \"MatMul\":\n return \"/gpu:0\"\n else:\n return \"/cpu:0\"\n\n with g.device(matmul_on_gpu):\n # All operations of type \"MatMul\" constructed in this context\n # will be placed on GPU 0; all other operations will be placed\n # on CPU 0.\n ```\n\n Args:\n device_name_or_function: The device name or function to use in\n the context.\n\n Returns:\n A context manager that specifies the default device to use for newly\n created ops.\n \"\"\"\n if callable(device_name_or_function):\n try:\n self._push_default_device_function(device_name_or_function)\n yield\n finally:\n self._pop_default_device_function(device_name_or_function)\n else:\n try:\n old_dev = self.get_default_device()\n self._set_default_device(_device_string(device_name_or_function))\n yield\n finally:\n self._set_default_device(old_dev)\n\n class _ControlDependenciesController(object):\n \"\"\"Context manager for `control_dependencies()`.\"\"\"\n\n def __init__(self, graph, control_inputs):\n \"\"\"Create a new `_ControlDependenciesController`.\n\n A `_ControlDependenciesController` is the context manager for\n `with tf.control_dependencies()` blocks. These normally nest,\n as described in the documentation for `control_dependencies()`.\n\n The `control_inputs` argument list control dependencies that must be\n added to the current set of control dependencies. Because of\n uniquification the set can be empty even if the caller passed a list of\n ops. The special value `None` indicates that we want to start a new\n empty set of control dependencies instead of extending the current set.\n\n In that case we also clear the current control flow context, which is an\n additional mechanism to add control dependencies.\n\n Args:\n graph: The graph that this controller is managing.\n control_inputs: List of ops to use as control inputs in addition\n to the current control dependencies. 
None to indicate that\n the dependencies should be cleared.\n \"\"\"\n self._graph = graph\n if control_inputs is None:\n self._control_inputs = []\n self._new_stack = True\n else:\n self._control_inputs = control_inputs\n self._new_stack = False\n self._seen_nodes = set()\n self._old_stack = None\n self._old_control_flow_context = None\n\n# pylint: disable=protected-access\n def __enter__(self):\n if self._new_stack:\n # Clear the control_dependencies graph.\n self._old_stack = self._graph._control_dependencies_stack\n self._graph._control_dependencies_stack = []\n # Clear the control_flow_context too.\n self._old_control_flow_context = self._graph._get_control_flow_context()\n self._graph._set_control_flow_context(None)\n self._graph._push_control_dependencies_controller(self)\n\n def __exit__(self, unused_type, unused_value, unused_traceback):\n self._graph._pop_control_dependencies_controller(self)\n if self._new_stack:\n self._graph._control_dependencies_stack = self._old_stack\n self._graph._set_control_flow_context(self._old_control_flow_context)\n# pylint: enable=protected-access\n\n @property\n def control_inputs(self):\n return self._control_inputs\n\n def add_op(self, op):\n self._seen_nodes.add(op)\n\n def op_in_group(self, op):\n return op in self._seen_nodes\n\n def _push_control_dependencies_controller(self, controller):\n self._control_dependencies_stack.append(controller)\n\n def _pop_control_dependencies_controller(self, controller):\n assert self._control_dependencies_stack[-1] is controller\n self._control_dependencies_stack.pop()\n\n def _current_control_dependencies(self):\n ret = set()\n for controller in self._control_dependencies_stack:\n for op in controller.control_inputs:\n ret.add(op)\n return ret\n\n def _control_dependencies_for_inputs(self, input_tensors):\n \"\"\"For an op that takes `input_tensors` as inputs, compute control inputs.\n\n The returned control dependencies should yield an execution that\n is equivalent to adding all control inputs in\n self._control_dependencies_stack to a newly created op. 
However,\n this function attempts to prune the returned control dependencies\n by observing that nodes created within the same `with\n control_dependencies(...):` block may have data dependencies that make\n the explicit approach redundant.\n\n Args:\n input_tensors: The direct data dependencies for an op to be created.\n\n Returns:\n A list of control inputs for the op to be created.\n \"\"\"\n ret = []\n input_ops = set([t.op for t in input_tensors])\n for controller in self._control_dependencies_stack:\n # If any of the input_ops already depends on the inputs from controller,\n # we say that the new op is dominated (by that input), and we therefore\n # do not need to add control dependences for this controller's inputs.\n dominated = False\n for op in input_ops:\n if controller.op_in_group(op):\n dominated = True\n break\n if not dominated:\n # Don't add a control input if we already have a data dependency on i.\n # NOTE(mrry): We do not currently track transitive data dependencies,\n # so we may add redundant control inputs.\n ret.extend([c for c in controller.control_inputs if c not in input_ops])\n return ret\n\n def _record_op_seen_by_control_dependencies(self, op):\n \"\"\"Record that the given op depends on all registered control dependencies.\n\n Args:\n op: An Operation.\n \"\"\"\n for controller in self._control_dependencies_stack:\n controller.add_op(op)\n\n def control_dependencies(self, control_inputs):\n \"\"\"Returns a context manager that specifies control dependencies.\n\n Use with the `with` keyword to specify that all operations constructed\n within the context should have control dependencies on\n `control_inputs`. For example:\n\n ```python\n with g.control_dependencies([a, b, c]):\n # `d` and `e` will only run after `a`, `b`, and `c` have executed.\n d = ...\n e = ...\n ```\n\n Multiple calls to `control_dependencies()` can be nested, and in\n that case a new `Operation` will have control dependencies on the union\n of `control_inputs` from all active contexts.\n\n ```python\n with g.control_dependencies([a, b]):\n # Ops constructed here run after `a` and `b`.\n with g.control_dependencies([c, d]):\n # Ops constructed here run after `a`, `b`, `c`, and `d`.\n ```\n\n You can pass None to clear the control dependencies:\n\n ```python\n with g.control_dependencies([a, b]):\n # Ops constructed here run after `a` and `b`.\n with g.control_dependencies(None):\n # Ops constructed here run normally, not waiting for either `a` or `b`.\n with g.control_dependencies([c, d]):\n # Ops constructed here run after `c` and `d`, also not waiting\n # for either `a` or `b`.\n ```\n\n *N.B.* The control dependencies context applies *only* to ops that\n are constructed within the context. Merely using an op or tensor\n in the context does not add a control dependency. The following\n example illustrates this point:\n\n ```python\n # WRONG\n def my_func(pred, tensor):\n t = tf.matmul(tensor, tensor)\n with tf.control_dependencies([pred]):\n # The matmul op is created outside the context, so no control\n # dependency will be added.\n return t\n\n # RIGHT\n def my_func(pred, tensor):\n with tf.control_dependencies([pred]):\n # The matmul op is created in the context, so a control dependency\n # will be added.\n return tf.matmul(tensor, tensor)\n ```\n\n Args:\n control_inputs: A list of `Operation` or `Tensor` objects which\n must be executed or computed before running the operations\n defined in the context. 
Can also be `None` to clear the control\n dependencies.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n\n Raises:\n TypeError: If `control_inputs` is not a list of `Operation` or\n `Tensor` objects.\n \"\"\"\n if control_inputs is None:\n return self._ControlDependenciesController(self, None)\n # First convert the inputs to ops, and deduplicate them.\n # NOTE(mrry): Other than deduplication, we do not currently track direct\n # or indirect dependencies between control_inputs, which may result in\n # redundant control inputs.\n control_ops = []\n current = self._current_control_dependencies()\n for c in control_inputs:\n c = self.as_graph_element(c)\n if isinstance(c, Tensor):\n c = c.op\n elif not isinstance(c, Operation):\n raise TypeError(\"Control input must be Operation or Tensor: %s\" % c)\n if c not in current:\n control_ops.append(c)\n current.add(c)\n return self._ControlDependenciesController(self, control_ops)\n\n # pylint: disable=g-doc-return-or-yield\n @contextlib.contextmanager\n def _kernel_label_map(self, op_to_kernel_label_map):\n \"\"\"EXPERIMENTAL: A context manager for setting kernel labels.\n\n This context manager can be used to select particular\n implementations of kernels within the scope of the context.\n\n For example:\n\n with ops.Graph().as_default() as g:\n f_1 = Foo() # Uses the default registered kernel for the Foo op.\n with g.kernel_label_map({\"Foo\": \"v_2\"}):\n f_2 = Foo() # Uses the registered kernel with label \"v_2\"\n # for the Foo op.\n with g.kernel_label_map({\"Foo\": \"v_3\"}):\n f_3 = Foo() # Uses the registered kernel with label \"v_3\"\n # for the Foo op.\n with g.kernel_label_map({\"Foo\": \"\"}):\n f_4 = Foo() # Uses the default registered kernel\n # for the Foo op.\n\n Args:\n op_to_kernel_label_map: A dictionary mapping op type strings to\n kernel label strings.\n\n Returns:\n A context manager that sets the kernel label to be used for one or more\n ops created in that context.\n\n Raises:\n TypeError: If op_to_kernel_label_map is not a dictionary mapping\n strings to strings.\n \"\"\"\n if not isinstance(op_to_kernel_label_map, dict):\n raise TypeError(\"op_to_kernel_label_map must be a dictionary mapping \"\n \"strings to strings\")\n # The saved_labels dictionary stores any currently-set labels that\n # will be overridden by this context manager.\n saved_labels = {}\n # Install the given label\n for op_type, label in op_to_kernel_label_map.items():\n if not (isinstance(op_type, six.string_types)\n and isinstance(label, six.string_types)):\n raise TypeError(\"op_to_kernel_label_map must be a dictionary mapping \"\n \"strings to strings\")\n try:\n saved_labels[op_type] = self._op_to_kernel_label_map[op_type]\n except KeyError:\n pass\n self._op_to_kernel_label_map[op_type] = label\n try:\n yield # The code within the context runs here.\n finally:\n # Remove the labels set for this context, and restore any saved labels.\n for op_type, label in op_to_kernel_label_map.items():\n try:\n self._op_to_kernel_label_map[op_type] = saved_labels[op_type]\n except KeyError:\n del self._op_to_kernel_label_map[op_type]\n # pylint: enable=g-doc-return-or-yield\n\n # pylint: disable=g-doc-return-or-yield\n @contextlib.contextmanager\n def gradient_override_map(self, op_type_map):\n \"\"\"EXPERIMENTAL: A context manager for overriding gradient functions.\n\n This context manager can be used to override the gradient function\n that will be used for ops within the scope of the 
context.

    For example:

    ```python
    @tf.RegisterGradient("CustomSquare")
    def _custom_square_grad(op, grad):
      # ...

    with tf.Graph().as_default() as g:
      c = tf.constant(5.0)
      s_1 = tf.square(c)  # Uses the default gradient for tf.square.
      with g.gradient_override_map({"Square": "CustomSquare"}):
        s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
                            # gradient of s_2.
    ```

    Args:
      op_type_map: A dictionary mapping op type strings to alternative op
        type strings.

    Returns:
      A context manager that sets the alternative op type to be used for one
      or more ops created in that context.

    Raises:
      TypeError: If `op_type_map` is not a dictionary mapping strings to
        strings.
    """
    if not isinstance(op_type_map, dict):
      raise TypeError("op_type_map must be a dictionary mapping "
                      "strings to strings")
    # The saved_mappings dictionary stores any currently-set mappings that
    # will be overridden by this context manager.
    saved_mappings = {}
    # Install the given mappings.
    for op_type, mapped_op_type in op_type_map.items():
      if not (isinstance(op_type, six.string_types)
              and isinstance(mapped_op_type, six.string_types)):
        raise TypeError("op_type_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_mappings[op_type] = self._gradient_override_map[op_type]
      except KeyError:
        pass
      self._gradient_override_map[op_type] = mapped_op_type
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the mappings set for this context, and restore any saved
      # mappings.
      for op_type, mapped_op_type in op_type_map.items():
        try:
          self._gradient_override_map[op_type] = saved_mappings[op_type]
        except KeyError:
          del self._gradient_override_map[op_type]
  # pylint: enable=g-doc-return-or-yield


def device(dev):
  """Wrapper for `Graph.device()` using the default graph.

  See
  [`Graph.device()`](../../api_docs/python/framework.md#Graph.device)
  for more details.

  Args:
    dev: The device name or function to use in the context.

  Returns:
    A context manager that specifies the default device to use for newly
    created ops.
  """
  return get_default_graph().device(dev)


def name_scope(name):
  """Wrapper for `Graph.name_scope()` using the default graph.

  See
  [`Graph.name_scope()`](../../api_docs/python/framework.md#Graph.name_scope)
  for more details.

  Args:
    name: A name for the scope.

  Returns:
    A context manager that installs `name` as a new name scope in the
    default graph.
  """
  return get_default_graph().name_scope(name)


def control_dependencies(control_inputs):
  """Wrapper for `Graph.control_dependencies()` using the default graph.

  See [`Graph.control_dependencies()`](../../api_docs/python/framework.md#Graph.control_dependencies)
  for more details.

  Args:
    control_inputs: A list of `Operation` or `Tensor` objects which
      must be executed or computed before running the operations
      defined in the context.
Can also be `None` to clear the control\n dependencies.\n\n Returns:\n A context manager that specifies control dependencies for all\n operations constructed within the context.\n \"\"\"\n return get_default_graph().control_dependencies(control_inputs)\n\n\nclass _DefaultStack(threading.local):\n \"\"\"A thread-local stack of objects for providing implicit defaults.\"\"\"\n\n def __init__(self):\n super(_DefaultStack, self).__init__()\n self.stack = []\n\n def get_default(self):\n return self.stack[-1] if len(self.stack) >= 1 else None\n\n def reset(self):\n self.stack = []\n\n @contextlib.contextmanager\n def get_controller(self, default):\n \"\"\"A context manager for manipulating a default stack.\"\"\"\n try:\n self.stack.append(default)\n yield default\n finally:\n assert self.stack[-1] is default\n self.stack.pop()\n\n\n_default_session_stack = _DefaultStack()\n\n\ndef default_session(session):\n \"\"\"Python \"with\" handler for defining a default session.\n\n This function provides a means of registering a session for handling\n Tensor.eval() and Operation.run() calls. It is primarily intended for use\n by session.Session, but can be used with any object that implements\n the Session.run() interface.\n\n Use with the \"with\" keyword to specify that Tensor.eval() and Operation.run()\n invocations within the scope of a block should be executed by a particular\n session.\n\n The default session applies to the current thread only, so it is always\n possible to inspect the call stack and determine the scope of a default\n session. If you create a new thread, and wish to use the default session\n in that thread, you must explicitly add a \"with ops.default_session(sess):\"\n block in that thread's function.\n\n Example:\n The following code examples are equivalent:\n\n # 1. Using the Session object directly:\n sess = ...\n c = tf.constant(5.0)\n sess.run(c)\n\n # 2. Using default_session():\n sess = ...\n with ops.default_session(sess):\n c = tf.constant(5.0)\n result = c.eval()\n\n # 3. Overriding default_session():\n sess = ...\n with ops.default_session(sess):\n c = tf.constant(5.0)\n with ops.default_session(...):\n c.eval(session=sess)\n\n Args:\n session: The session to be installed as the default session.\n\n Returns:\n A context manager for the default session.\n \"\"\"\n return _default_session_stack.get_controller(weakref.ref(session))\n\n\ndef get_default_session():\n \"\"\"Returns the default session for the current thread.\n\n The returned `Session` will be the innermost session on which a\n `Session` or `Session.as_default()` context has been entered.\n\n NOTE: The default session is a property of the current thread. 
If you\n create a new thread, and wish to use the default session in that\n thread, you must explicitly add a `with sess.as_default():` in that\n thread's function.\n\n Returns:\n The default `Session` being used in the current thread.\n \"\"\"\n ref = _default_session_stack.get_default()\n if ref is None:\n # No default session has been registered.\n return None\n else:\n # De-reference ref.\n ret = ref()\n if ret is None:\n # This should never happen with the current session implementations.\n raise RuntimeError(\"Default session has been garbage collected.\")\n return ret\n\n\ndef _eval_using_default_session(tensors, feed_dict, graph, session=None):\n \"\"\"Uses the default session to evaluate one or more tensors.\n\n Args:\n tensors: A single Tensor, or a list of Tensor objects.\n feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,\n numpy ndarrays, TensorProtos, or strings.\n graph: The graph in which the tensors are defined.\n session: (Optional) A different session to use to evaluate \"tensors\".\n\n Returns:\n Either a single numpy ndarray if \"tensors\" is a single tensor; or a list\n of numpy ndarrays that each correspond to the respective element in\n \"tensors\".\n\n Raises:\n ValueError: If no default session is available; the default session\n does not have \"graph\" as its graph; or if \"session\" is specified,\n and it does not have \"graph\" as its graph.\n \"\"\"\n if session is None:\n session = get_default_session()\n if session is None:\n raise ValueError(\"Cannot evaluate tensor using eval(): No default \"\n \"session is registered. Use `with \"\n \"sess.as_default()` or pass an explicit session to \"\n \"eval(session=sess)\")\n if session.graph is not graph:\n raise ValueError(\"Cannot use the default session to evaluate tensor: \"\n \"the tensor's graph is different from the session's \"\n \"graph. Pass an explicit session to \"\n \"eval(session=sess).\")\n else:\n if session.graph is not graph:\n raise ValueError(\"Cannot use the given session to evaluate tensor: \"\n \"the tensor's graph is different from the session's \"\n \"graph.\")\n return session.run(tensors, feed_dict)\n\n\ndef _run_using_default_session(operation, feed_dict, graph, session=None):\n \"\"\"Uses the default session to run \"operation\".\n\n Args:\n operation: The Operation to be run.\n feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,\n numpy ndarrays, TensorProtos, or strings.\n graph: The graph in which \"operation\" is defined.\n session: (Optional) A different session to use to run \"operation\".\n\n Raises:\n ValueError: If no default session is available; the default session\n does not have \"graph\" as its graph; or if \"session\" is specified,\n and it does not have \"graph\" as its graph.\n \"\"\"\n if session is None:\n session = get_default_session()\n if session is None:\n raise ValueError(\"Cannot execute operation using Run(): No default \"\n \"session is registered. Use 'with \"\n \"default_session(sess)' or pass an explicit session to \"\n \"Run(session=sess)\")\n if session.graph is not graph:\n raise ValueError(\"Cannot use the default session to execute operation: \"\n \"the operation's graph is different from the \"\n \"session's graph. 
Pass an explicit session to \"\n \"Run(session=sess).\")\n else:\n if session.graph is not graph:\n raise ValueError(\"Cannot use the given session to execute operation: \"\n \"the operation's graph is different from the session's \"\n \"graph.\")\n session.run(operation, feed_dict)\n\n\nclass _DefaultGraphStack(_DefaultStack):\n \"\"\"A thread-local stack of objects for providing an implicit default graph.\"\"\"\n\n def __init__(self):\n super(_DefaultGraphStack, self).__init__()\n self._global_default_graph = None\n\n def get_default(self):\n \"\"\"Override that returns a global default if the stack is empty.\"\"\"\n ret = super(_DefaultGraphStack, self).get_default()\n if ret is None:\n ret = self._GetGlobalDefaultGraph()\n return ret\n\n def _GetGlobalDefaultGraph(self):\n if self._global_default_graph is None:\n # TODO(mrry): Perhaps log that the default graph is being used, or set\n # provide some other feedback to prevent confusion when a mixture of\n # the global default graph and an explicit graph are combined in the\n # same process.\n self._global_default_graph = Graph()\n return self._global_default_graph\n\n def reset(self):\n super(_DefaultGraphStack, self).reset()\n self._global_default_graph = None\n\n_default_graph_stack = _DefaultGraphStack()\n\n\ndef reset_default_graph():\n \"\"\"Clears the default graph stack and resets the global default graph.\n\n NOTE: The default graph is a property of the current thread. This\n function applies only to the current thread. Calling this function while\n a `tf.Session` or `tf.InteractiveSession` is active will result in undefined\n behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects\n after calling this function will result in undefined behavior.\n \"\"\"\n _default_graph_stack.reset()\n\n\ndef get_default_graph():\n \"\"\"Returns the default graph for the current thread.\n\n The returned graph will be the innermost graph on which a\n `Graph.as_default()` context has been entered, or a global default\n graph if none has been explicitly created.\n\n NOTE: The default graph is a property of the current thread. If you\n create a new thread, and wish to use the default graph in that\n thread, you must explicitly add a `with g.as_default():` in that\n thread's function.\n\n Returns:\n The default `Graph` being used in the current thread.\n \"\"\"\n return _default_graph_stack.get_default()\n\n\ndef _assert_same_graph(original_item, item):\n \"\"\"Fail if the 2 items are from different graphs.\n\n Args:\n original_item: Original item to check against.\n item: Item to check.\n\n Raises:\n ValueError: if graphs do not match.\n \"\"\"\n if original_item.graph is not item.graph:\n raise ValueError(\n \"%s must be from the same graph as %s.\" % (item, original_item))\n\n\ndef _get_graph_from_inputs(op_input_list, graph=None):\n \"\"\"Returns the appropriate graph to use for the given inputs.\n\n This library method provides a consistent algorithm for choosing the graph\n in which an Operation should be constructed:\n\n 1. If the \"graph\" is specified explicitly, we validate that all of the inputs\n in \"op_input_list\" are compatible with that graph.\n 2. Otherwise, we attempt to select a graph from the first Operation-\n or Tensor-valued input in \"op_input_list\", and validate that all other\n such inputs are in the same graph.\n 3. 
If the graph was not specified and it could not be inferred from\n \"op_input_list\", we attempt to use the default graph.\n\n Args:\n op_input_list: A list of inputs to an operation, which may include `Tensor`,\n `Operation`, and other objects that may be converted to a graph element.\n graph: (Optional) The explicit graph to use.\n\n Raises:\n TypeError: If op_input_list is not a list or tuple, or if graph is not a\n Graph.\n ValueError: If a graph is explicitly passed and not all inputs are from it,\n or if the inputs are from multiple graphs, or we could not find a graph\n and there was no default graph.\n\n Returns:\n The appropriate graph to use for the given inputs.\n \"\"\"\n op_input_list = tuple(op_input_list) # Handle generators correctly\n if graph and not isinstance(graph, Graph):\n raise TypeError(\"Input graph needs to be a Graph: %s\" % graph)\n\n # 1. We validate that all of the inputs are from the same graph. This is\n # either the supplied graph parameter, or the first one selected from one\n # the graph-element-valued inputs. In the latter case, we hold onto\n # that input in original_graph_element so we can provide a more\n # informative error if a mismatch is found.\n original_graph_element = None\n for op_input in op_input_list:\n # Determine if this is a valid graph_element.\n graph_element = None\n if isinstance(op_input, (Operation, Tensor, SparseTensor, IndexedSlices)):\n graph_element = op_input\n else:\n graph_element = _as_graph_element(op_input)\n\n if graph_element:\n if not graph:\n original_graph_element = graph_element\n graph = graph_element.graph\n elif original_graph_element:\n _assert_same_graph(original_graph_element, graph_element)\n elif graph_element.graph is not graph:\n raise ValueError(\n \"%s is not from the passed-in graph.\" % graph_element)\n\n # 2. If all else fails, we use the default graph, which is always there.\n return graph or get_default_graph()\n\n\nclass GraphKeys(object):\n \"\"\"Standard names to use for graph collections.\n\n The standard library uses various well-known names to collect and\n retrieve values associated with a graph. For example, the\n `tf.Optimizer` subclasses default to optimizing the variables\n collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is\n specified, but it is also possible to pass an explicit list of\n variables.\n\n The following standard keys are defined:\n\n * `VARIABLES`: the `Variable` objects that comprise a model, and\n must be saved and restored together. See\n [`tf.all_variables()`](../../api_docs/python/state_ops.md#all_variables)\n for more details.\n * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will\n be trained by an optimizer. See\n [`tf.trainable_variables()`](../../api_docs/python/state_ops.md#trainable_variables)\n for more details.\n * `SUMMARIES`: the summary `Tensor` objects that have been created in the\n graph. See\n [`tf.merge_all_summaries()`](../../api_docs/python/train.md#merge_all_summaries)\n for more details.\n * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to\n produce input for a computation. See\n [`tf.start_queue_runners()`](../../api_docs/python/train.md#start_queue_runners)\n for more details.\n * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also\n keep moving averages. 
See\n [`tf.moving_average_variables()`](../../api_docs/python/state_ops.md#moving_average_variables)\n for more details.\n * `REGULARIZATION_LOSSES`: regularization losses collected during graph\n construction.\n \"\"\"\n\n # Key to collect Variable objects that must be saved and restored\n # by the model.\n VARIABLES = \"variables\"\n # Key to collect Variable objects that will be trained by the\n # optimizers.\n TRAINABLE_VARIABLES = \"trainable_variables\"\n # Key to collect summaries.\n SUMMARIES = \"summaries\"\n # Key to collect QueueRunners.\n QUEUE_RUNNERS = \"queue_runners\"\n # Key to collect table initializers.\n TABLE_INITIALIZERS = \"table_initializer\"\n # Key to collect asset filepaths. An asset represents an external resource\n # like a vocabulary file.\n ASSET_FILEPATHS = \"asset_filepaths\"\n # Key to collect Variable objects that keep moving averages.\n MOVING_AVERAGE_VARIABLES = \"moving_average_variables\"\n # Key to collected regularization losses at graph construction.\n REGULARIZATION_LOSSES = \"regularization_losses\"\n\n\ndef add_to_collection(name, value):\n \"\"\"Wrapper for `Graph.add_to_collection()` using the default graph.\n\n See [`Graph.add_to_collection()`](../../api_docs/python/framework.md#Graph.add_to_collection)\n for more details.\n\n Args:\n name: The key for the collection. For example, the `GraphKeys` class\n contains many standard names for collections.\n value: The value to add to the collection.\n \"\"\"\n get_default_graph().add_to_collection(name, value)\n\n\ndef get_collection(key, scope=None):\n \"\"\"Wrapper for `Graph.get_collection()` using the default graph.\n\n See [`Graph.get_collection()`](../../api_docs/python/framework.md#Graph.get_collection)\n for more details.\n\n Args:\n key: The key for the collection. For example, the `GraphKeys` class\n contains many standard names for collections.\n scope: (Optional.) If supplied, the resulting list is filtered to include\n only items whose name begins with this string.\n\n Returns:\n The list of values in the collection with the given `name`, or\n an empty list if no value has been added to that collection. The\n list contains the values in the order under which they were\n collected.\n \"\"\"\n return get_default_graph().get_collection(key, scope)\n\n\n# pylint: disable=g-doc-return-or-yield\[email protected]\ndef op_scope(values, name, default_name=None):\n \"\"\"Returns a context manager for use when defining a Python op.\n\n This context manager validates that the given `values` are from the\n same graph, ensures that that graph is the default graph, and pushes a\n name scope.\n\n For example, to define a new Python op called `my_op`:\n\n ```python\n def my_op(a, b, c, name=None):\n with tf.op_scope([a, b, c], name, \"MyOp\") as scope:\n a = tf.convert_to_tensor(a, name=\"a\")\n b = tf.convert_to_tensor(b, name=\"b\")\n c = tf.convert_to_tensor(c, name=\"c\")\n # Define some computation that uses `a`, `b`, and `c`.\n return foo_op(..., name=scope)\n ```\n\n Args:\n values: The list of `Tensor` arguments that are passed to the op function.\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n\n Returns:\n A context manager for use in defining Python ops. 
Yields the name scope.\n\n Raises:\n ValueError: if neither `name` nor `default_name` is provided.\n \"\"\"\n g = _get_graph_from_inputs(values)\n n = default_name if name is None else name\n if n is None:\n raise ValueError(\n \"At least one of name (%s) and default_name (%s) must be provided.\" % (\n name, default_name))\n with g.as_default(), g.name_scope(n) as scope:\n yield scope\n# pylint: enable=g-doc-return-or-yield\n"
]
| [
[
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.core.framework.graph_pb2.NodeDef",
"tensorflow.core.framework.versions_pb2.VersionDef",
"tensorflow.python.framework.registry.Registry",
"tensorflow.core.framework.graph_pb2.GraphDef",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.util.compat.as_str"
]
]
|
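
The ops.py excerpt above implements a thread-local default-graph stack: `Graph.as_default()` pushes onto the stack, and `get_default_graph()` falls back to a lazily created global graph when the stack is empty. A minimal, self-contained sketch of that stack pattern, with illustrative names rather than TensorFlow's real API:

    import threading
    from contextlib import contextmanager

    class DefaultStack(threading.local):
        """Thread-local stack of defaults; each thread sees only its own stack."""
        def __init__(self):
            super().__init__()
            self.stack = []

        def get_default(self):
            return self.stack[-1] if self.stack else None

        @contextmanager
        def get_controller(self, default):
            # Push on entry, pop on exit -- the behaviour behind `with g.as_default():`.
            self.stack.append(default)
            try:
                yield default
            finally:
                self.stack.remove(default)

    class Graph:
        def as_default(self):
            return _graph_stack.get_controller(self)

    _graph_stack = DefaultStack()

    g = Graph()
    assert _graph_stack.get_default() is None   # nothing pushed yet
    with g.as_default():
        assert _graph_stack.get_default() is g  # innermost default inside the context
    assert _graph_stack.get_default() is None   # popped again on exit
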
TianhongDai/esil-hindsight | [
"b7c22da087095610018f281245fd4f622ef190ed"
]
| [
"plot_curves.py"
]
| [
"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport matplotlib\n\n\"\"\"\nexample code for generate the curves\n\"\"\"\n\nsns.set(rc={\"figure.figsize\": (8, 7)})\nsns.set_context(rc={\"lines.linewidth\": 2})\n\n\"\"\"\nsame smooth function from openai baselines to make sure consistent\n\nhttps://github.com/openai/baselines/blob/master/baselines/her/experiment/plot.py\n\n\"\"\"\nfont_size = 25\n\ndef smooth_reward_curve(x, y):\n halfwidth = int(np.ceil(len(x) / 60)) # Halfwidth of our smoothing convolution\n k = halfwidth\n xsmoo = x\n ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / np.convolve(np.ones_like(y), np.ones(2 * k + 1),\n mode='same')\n return xsmoo, ysmoo\n\ndef pad(xs, value=np.nan):\n maxlen = np.max([len(x) for x in xs])\n padded_xs = []\n for x in xs:\n if x.shape[0] >= maxlen:\n padded_xs.append(x)\n padding = np.ones((maxlen - x.shape[0],) + x.shape[1:]) * value\n x_padded = np.concatenate([x, padding], axis=0)\n assert x_padded.shape[1:] == x.shape[1:]\n assert x_padded.shape[0] == maxlen\n padded_xs.append(x_padded)\n return np.array(padded_xs)\n\n# extract info from hsl logs\ndef extract_info_esil(path): \n file_content = open(path)\n reward_temp, episode_temp = [], []\n # start to process now...\n while True:\n content = file_content.readline()\n if len(content) == 0:\n break\n if content.find('Update:') != -1:\n split_sentence = content.split()\n index = split_sentence.index('Update:')\n get_str = split_sentence[index + 1]\n episode_temp.append(int(get_str))\n # collect the reward information...\n index = split_sentence.index('Success:')\n get_str = split_sentence[index + 1]\n reward_temp.append(float(get_str[:-1]))\n episode = np.array(episode_temp) + 1\n reward = np.array(reward_temp)\n return episode, reward\n\n# process data\ndef process_data(logs_path, seeds_number, data_len):\n episode_total = []\n reward_total = []\n for idx in range(seeds_number):\n # extract information\n ep, rewards = extract_info_esil('{}/seed_{}.log'.format(logs_path, idx+1))\n # smooth the curve\n ep, rewards = smooth_reward_curve(ep[:data_len], rewards[:data_len])\n # store the data into...\n episode_total.append(ep)\n reward_total.append(rewards)\n episode_total = np.array(episode_total)\n reward_total = np.array(reward_total)\n reward_median = np.median(reward_total, axis=0)\n return episode_total[0], reward_median, reward_total\n\ndef plot_results(task_name, title, seeds_number=5, data_len=1000):\n esil_logs_path = 'example_logs/esil-logs/' + task_name\n ep, reward_median_esil, reward_total_esil = process_data(esil_logs_path, seeds_number, data_len)\n # after load data\n plt.figure()\n _, ax = plt.subplots(1, 1)\n # plot the hsl\n plt.plot(ep, reward_median_esil, label='PPO + ESIL (Ours)')\n plt.fill_between(ep, np.nanpercentile(reward_total_esil, 25, axis=0), np.nanpercentile(reward_total_esil, 75, axis=0), alpha=0.25)\n # some format\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n ax.xaxis.get_offset_text().set_fontsize(font_size)\n plt.xticks(fontsize=font_size)\n plt.yticks(fontsize=font_size)\n plt.xlim([0, 1000])\n plt.ylim([0, 1.05])\n plt.xlabel('Epochs', fontsize=font_size)\n plt.ylabel('Success Rate', fontsize=font_size)\n plt.title(title, fontsize=font_size)\n plt.tight_layout()\n plt.legend(loc='center right', prop={'size': font_size})\n plt.savefig('{}_baseline.pdf'.format(task_name))\n \nif __name__ == '__main__':\n plot_results(task_name='push', title='FetchPush-v1')"
]
| [
[
"numpy.ones_like",
"matplotlib.pyplot.xlim",
"numpy.median",
"matplotlib.pyplot.xticks",
"numpy.concatenate",
"numpy.nanpercentile",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"numpy.array",
"matplotlib.pyplot.title",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ticklabel_format",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.ones",
"matplotlib.pyplot.ylabel"
]
]
|
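
The `smooth_reward_curve` function in the record above uses a standard trick: dividing a box-filter convolution of the signal by the same convolution of an all-ones array renormalizes the average near the array edges, where the window hangs off the data. The trick in isolation, on toy data with an assumed fixed halfwidth `k`:

    import numpy as np

    def box_smooth(y, k):
        # Window of width 2k+1; the denominator counts how many real samples
        # fall under the window at each position, so edge averages stay unbiased.
        win = np.ones(2 * k + 1)
        return np.convolve(y, win, mode='same') / np.convolve(np.ones_like(y), win, mode='same')

    y = np.array([0., 0., 1., 1., 1.])
    print(box_smooth(y, 1))  # [0.  0.333  0.667  1.  1.]
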
aavanlee/fyp | [
"48bfc24cc17c86c635a4fe35e4e563c444cfaabc"
]
| [
"cocoapi/PythonAPI/pycocotools/coco.py"
]
| [
"__author__ = 'tylin'\n__version__ = '2.0'\n# Interface for accessing the Microsoft COCO dataset.\n\n# Microsoft COCO is a large image dataset designed for object detection,\n# segmentation, and caption generation. pycocotools is a Python API that\n# assists in loading, parsing and visualizing the annotations in COCO.\n# Please visit http://mscoco.org/ for more information on COCO, including\n# for the data, paper, and tutorials. The exact format of the annotations\n# is also described on the COCO website. For example usage of the pycocotools\n# please see pycocotools_demo.ipynb. In addition to this API, please download both\n# the COCO images and annotations in order to run the demo.\n\n# An alternative to using the API is to load the annotations directly\n# into Python dictionary\n# Using the API provides additional utility functions. Note that this API\n# supports both *instance* and *caption* annotations. In the case of\n# captions not all functions are defined (e.g. categories are undefined).\n\n# The following API functions are defined:\n# COCO - COCO api class that loads COCO annotation file and prepare data structures.\n# decodeMask - Decode binary mask M encoded via run-length encoding.\n# encodeMask - Encode binary mask M using run-length encoding.\n# getAnnIds - Get ann ids that satisfy given filter conditions.\n# getCatIds - Get cat ids that satisfy given filter conditions.\n# getImgIds - Get img ids that satisfy given filter conditions.\n# loadAnns - Load anns with the specified ids.\n# loadCats - Load cats with the specified ids.\n# loadImgs - Load imgs with the specified ids.\n# annToMask - Convert segmentation in an annotation to binary mask.\n# showAnns - Display the specified annotations.\n# loadRes - Load algorithm results and create API for accessing them.\n# download - Download COCO images from mscoco.org server.\n# Throughout the API \"ann\"=annotation, \"cat\"=category, and \"img\"=image.\n# Help on each functions can be accessed by: \"help COCO>function\".\n\n# See also COCO>decodeMask,\n# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,\n# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,\n# COCO>loadImgs, COCO>annToMask, COCO>showAnns\n\n# Microsoft COCO Toolbox. version 2.0\n# Data, paper, and tutorials available at: http://mscoco.org/\n# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.\n# Licensed under the Simplified BSD License [see bsd.txt]\n\nimport json\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Polygon\nimport numpy as np\nimport copy\nimport itertools\nfrom . 
import mask as maskUtils\nimport os\nfrom collections import defaultdict\nimport sys\nPYTHON_VERSION = sys.version_info[0]\nif PYTHON_VERSION == 2:\n from urllib import urlretrieve\nelif PYTHON_VERSION == 3:\n from urllib.request import urlretrieve\n\n\ndef _isArrayLike(obj):\n return hasattr(obj, '__iter__') and hasattr(obj, '__len__')\n\n\nclass COCO:\n def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location of annotation file\n :param image_folder (str): location to the folder that hosts images.\n :return:\n \"\"\"\n # load dataset\n self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()\n self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n if not annotation_file == None:\n print('loading annotations into memory...')\n tic = time.time()\n dataset = json.load(open(annotation_file, 'r', encoding='utf8'))\n assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n print('Done (t={:0.2f}s)'.format(time.time()- tic))\n self.dataset = dataset\n self.createIndex()\n\n def createIndex(self):\n # create index\n print('creating index...')\n anns, cats, imgs = {}, {}, {}\n imgToAnns,catToImgs = defaultdict(list),defaultdict(list)\n if 'annotations' in self.dataset:\n for ann in self.dataset['annotations']:\n imgToAnns[ann['image_id']].append(ann)\n anns[ann['id']] = ann\n\n if 'images' in self.dataset:\n for img in self.dataset['images']:\n imgs[img['id']] = img\n\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n cats[cat['id']] = cat\n\n if 'annotations' in self.dataset and 'categories' in self.dataset:\n for ann in self.dataset['annotations']:\n catToImgs[ann['category_id']].append(ann['image_id'])\n\n print('index created!')\n\n # create class members\n self.anns = anns\n self.imgToAnns = imgToAnns\n self.catToImgs = catToImgs\n self.imgs = imgs\n self.cats = cats\n\n def info(self):\n \"\"\"\n Print information about the annotation file.\n :return:\n \"\"\"\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))\n\n def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):\n \"\"\"\n Get ann ids that satisfy given filter conditions. default skips that filter\n :param imgIds (int array) : get anns for given imgs\n catIds (int array) : get anns for given cats\n areaRng (float array) : get anns for given area range (e.g. [0 inf])\n iscrowd (boolean) : get anns for given crowd label (False or True)\n :return: ids (int array) : integer array of ann ids\n \"\"\"\n imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(imgIds) == len(catIds) == len(areaRng) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(imgIds) == 0:\n lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.dataset['annotations']\n anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]\n if not iscrowd == None:\n ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n else:\n ids = [ann['id'] for ann in anns]\n return ids\n\n def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n \"\"\"\n filtering parameters. 
default skips that filter.\n :param catNms (str array) : get cats for given cat names\n :param supNms (str array) : get cats for given supercategory names\n :param catIds (int array) : get cats for given cat ids\n :return: ids (int array) : integer array of cat ids\n \"\"\"\n catNms = catNms if _isArrayLike(catNms) else [catNms]\n supNms = supNms if _isArrayLike(supNms) else [supNms]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(catNms) == len(supNms) == len(catIds) == 0:\n cats = self.dataset['categories']\n else:\n cats = self.dataset['categories']\n cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]\n cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]\n cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]\n ids = [cat['id'] for cat in cats]\n return ids\n\n def getImgIds(self, imgIds=[], catIds=[]):\n '''\n Get img ids that satisfy given filter conditions.\n :param imgIds (int array) : get imgs for given ids\n :param catIds (int array) : get imgs with all given cats\n :return: ids (int array) : integer array of img ids\n '''\n imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]\n catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n if len(imgIds) == len(catIds) == 0:\n ids = self.imgs.keys()\n else:\n ids = set(imgIds)\n for i, catId in enumerate(catIds):\n if i == 0 and len(ids) == 0:\n ids = set(self.catToImgs[catId])\n else:\n ids &= set(self.catToImgs[catId])\n return list(ids)\n\n def loadAnns(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying anns\n :return: anns (object array) : loaded ann objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.anns[id] for id in ids]\n elif type(ids) == int:\n return [self.anns[ids]]\n\n def loadCats(self, ids=[]):\n \"\"\"\n Load cats with the specified ids.\n :param ids (int array) : integer ids specifying cats\n :return: cats (object array) : loaded cat objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.cats[id] for id in ids]\n elif type(ids) == int:\n return [self.cats[ids]]\n\n def loadImgs(self, ids=[]):\n \"\"\"\n Load anns with the specified ids.\n :param ids (int array) : integer ids specifying img\n :return: imgs (object array) : loaded img objects\n \"\"\"\n if _isArrayLike(ids):\n return [self.imgs[id] for id in ids]\n elif type(ids) == int:\n return [self.imgs[ids]]\n\n def showAnns(self, anns):\n \"\"\"\n Display the specified annotations.\n :param anns (array of object): annotations to display\n :return: None\n \"\"\"\n if len(anns) == 0:\n return 0\n if 'segmentation' in anns[0] or 'keypoints' in anns[0]:\n datasetType = 'instances'\n elif 'caption' in anns[0]:\n datasetType = 'captions'\n else:\n raise Exception('datasetType not supported')\n if datasetType == 'instances':\n ax = plt.gca()\n ax.set_autoscale_on(False)\n polygons = []\n color = []\n for ann in anns:\n c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]\n if 'segmentation' in ann:\n if type(ann['segmentation']) == list:\n # polygon\n for seg in ann['segmentation']:\n poly = np.array(seg).reshape((int(len(seg)/2), 2))\n polygons.append(Polygon(poly))\n color.append(c)\n else:\n # mask\n t = self.imgs[ann['image_id']]\n if type(ann['segmentation']['counts']) == list:\n rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])\n else:\n rle = [ann['segmentation']]\n m = maskUtils.decode(rle)\n img = np.ones( (m.shape[0], m.shape[1], 3) )\n if ann['iscrowd'] 
== 1:\n color_mask = np.array([2.0,166.0,101.0])/255\n if ann['iscrowd'] == 0:\n color_mask = np.random.random((1, 3)).tolist()[0]\n for i in range(3):\n img[:,:,i] = color_mask[i]\n ax.imshow(np.dstack( (img, m*0.5) ))\n if 'keypoints' in ann and type(ann['keypoints']) == list:\n # turn skeleton into zero-based index\n sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1\n kp = np.array(ann['keypoints'])\n x = kp[0::3]\n y = kp[1::3]\n v = kp[2::3]\n for sk in sks:\n if np.all(v[sk]>0):\n plt.plot(x[sk],y[sk], linewidth=3, color=c)\n plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)\n plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)\n p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)\n ax.add_collection(p)\n p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)\n ax.add_collection(p)\n elif datasetType == 'captions':\n for ann in anns:\n print(ann['caption'])\n\n def loadRes(self, resFile):\n \"\"\"\n Load result file and return a result api object.\n :param resFile (str) : file name of result file\n :return: res (obj) : result api object\n \"\"\"\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str or type(resFile) == unicode:\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsImgIds = [ann['image_id'] for ann in anns]\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n if 'caption' in anns[0]:\n imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n for id, ann in enumerate(anns):\n ann['id'] = id+1\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n if not 'segmentation' in ann:\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n ann['area'] = bb[2]*bb[3]\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'segmentation' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segmentation'])\n if not 'bbox' in ann:\n ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'keypoints' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n s = ann['keypoints']\n x = s[0::3]\n y = s[1::3]\n x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)\n ann['area'] = (x1-x0)*(y1-y0)\n ann['id'] = id + 1\n ann['bbox'] = [x0,y0,x1-x0,y1-y0]\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res\n\n def download(self, tarDir = None, imgIds = [] ):\n '''\n Download COCO images from mscoco.org server.\n :param tarDir (str): COCO results directory name\n imgIds (list): images to be downloaded\n :return:\n '''\n if tarDir is None:\n 
print('Please specify target directory')\n return -1\n if len(imgIds) == 0:\n imgs = self.imgs.values()\n else:\n imgs = self.loadImgs(imgIds)\n N = len(imgs)\n if not os.path.exists(tarDir):\n os.makedirs(tarDir)\n for i, img in enumerate(imgs):\n tic = time.time()\n fname = os.path.join(tarDir, img['file_name'])\n if not os.path.exists(fname):\n urlretrieve(img['coco_url'], fname)\n print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))\n\n def loadNumpyAnnotations(self, data):\n \"\"\"\n Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}\n :param data (numpy.ndarray)\n :return: annotations (python nested list)\n \"\"\"\n print('Converting ndarray to lists...')\n assert(type(data) == np.ndarray)\n print(data.shape)\n assert(data.shape[1] == 7)\n N = data.shape[0]\n ann = []\n for i in range(N):\n if i % 1000000 == 0:\n print('{}/{}'.format(i,N))\n ann += [{\n 'image_id' : int(data[i, 0]),\n 'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],\n 'score' : data[i, 5],\n 'category_id': int(data[i, 6]),\n }]\n return ann\n\n def annToRLE(self, ann):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE to RLE.\n :return: binary mask (numpy 2D array)\n \"\"\"\n t = self.imgs[ann['image_id']]\n h, w = t['height'], t['width']\n segm = ann['segmentation']\n if type(segm) == list:\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(segm, h, w)\n rle = maskUtils.merge(rles)\n elif type(segm['counts']) == list:\n # uncompressed RLE\n rle = maskUtils.frPyObjects(segm, h, w)\n else:\n # rle\n rle = ann['segmentation']\n return rle\n\n def annToMask(self, ann):\n \"\"\"\n Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n :return: binary mask (numpy 2D array)\n \"\"\"\n rle = self.annToRLE(ann)\n m = maskUtils.decode(rle)\n return m\n"
]
| [
[
"numpy.max",
"numpy.array",
"numpy.dstack",
"matplotlib.collections.PatchCollection",
"matplotlib.pyplot.plot",
"numpy.ones",
"matplotlib.patches.Polygon",
"numpy.min",
"numpy.all",
"numpy.random.random",
"matplotlib.pyplot.gca"
]
]
|
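
`createIndex` and `getAnnIds` in the COCO class above boil down to building inverted indices with `defaultdict(list)` and then filtering lists of annotation dicts. The same idea on a hand-written toy dataset (the ids, categories, and areas here are made up):

    from collections import defaultdict

    anns = [
        {'id': 1, 'image_id': 10, 'category_id': 3, 'area': 50.0},
        {'id': 2, 'image_id': 10, 'category_id': 7, 'area': 12.0},
        {'id': 3, 'image_id': 11, 'category_id': 3, 'area': 80.0},
    ]

    # createIndex: map image id -> annotations, category id -> image ids
    img_to_anns = defaultdict(list)
    cat_to_imgs = defaultdict(list)
    for ann in anns:
        img_to_anns[ann['image_id']].append(ann)
        cat_to_imgs[ann['category_id']].append(ann['image_id'])

    # getAnnIds-style query: anns on image 10, category 3, area in (20, 100)
    ids = [a['id'] for a in img_to_anns[10]
           if a['category_id'] == 3 and 20 < a['area'] < 100]
    print(ids)  # [1]
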
lujiarui/bioimage | [
"af22aedcc4ae1556c507cc359f0ad18719031b2b"
]
| [
"train.py"
]
| [
"import argparse\nimport os\nimport logging\nimport random\nimport time\n\nimport torch\nimport torchvision\nimport torch.nn as nn\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.metrics import confusion_matrix\n\nfrom datahelper import *\nfrom model import *\nfrom util import plot\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n# Device configuration\nprint(\"[INFO] Utilized device as [{}]\".format(device))\n\n\n\ndef train_model(args, model):\n \"\"\"The model training subroutine, including epoch-wise eval\n \"\"\"\n # deploy the model to device if avail\n model = model.to(device)\n criterion = nn.BCELoss()\n optimizer = torch.optim.Adam(model.parameters(),lr=args['lr'])\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10,20,30,40,50], gamma=0.1)\n \n path_to_trainset = args['dataset_dir'] + 'train/'\n label_map = load_label(args['dataset_dir'] + 'train.csv')\n dataset_loader = load_dataset(path_to_trainset)\n # generate a dataset(str_dirname)\n train_set, dev_set = next(dataset_loader)\n \n tot_loss = []\n tot_devacc = []\n\n for epoch in range(1, args['epoches'] + 1): #args.num_epoches\n ts = train_set[:]\n random.shuffle(ts)\n ds = dev_set[:]\n random.shuffle(ds)\n start_time = time.time()\n flag = 1\n counter = 1\n epoch_loss = 0\n while flag: # exploit training set by 'bags' ==> to generate samples with diverse classes\n images,labels,tot = [],[],[]\n # fetch 20 bags of samples and \"shuffle\" them \n # Then feed to the NN\n for i in range(args['bag_size']):\n if not ts:\n flag = 0\n break\n dirname = ts.pop()\n tmp_images, tmp_labels = unpack_directory(dirname, path_to_trainset,label_map)\n images.extend(tmp_images)\n labels.extend(tmp_labels)\n tot = list(zip(images,labels))\n random.shuffle(tot)\n if tot == []:\n break\n images[:], labels[:] = zip(*tot)\n \n # Batch training, based on the index partition\n # partition: batch (len=32(default)) starting-index of images ABOVE\n partition = []\n for i in range(0, len(images), args['batch_size']):\n partition.append(i) \n step = 0 # current 'bags'\n for pt in range(len(partition)):\n #print('[INFO] Now do training .. 
Epoch{} | Bag{} | miniBatch{}'\n # .format(epoch, counter, step))\n # A batch train\n if pt == len(partition) - 1:\n image_batch, label_batch = torch.cat(images[partition[pt]: ], dim=0), torch.cat(labels[partition[pt]: ],dim=0)\n else: \n image_batch, label_batch = torch.cat(images[partition[pt]:partition[pt+1]], dim=0), torch.cat(labels[partition[pt]:partition[pt+1] ],dim=0)\n \n image_batch = image_batch.to(device)\n label_batch = label_batch.to(device)\n out = model(image_batch)\n\n # To obtain the Gold label(multi-class)\n v_length = len(label_batch)\n\n #print('[DEBUG]out-shape:{},label-shape:{}'.format(out.shape,label_batch.shape))\n loss = criterion(out.squeeze(), label_batch.squeeze())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n step += 1\n epoch_loss += loss.item()\n \n # Eval step-wise use batch-size in train set\n samples_ = random.sample(range(len(images)), args['batch_size']) # random sample explored\n sample_img = [images[idx] for idx in samples_]\n sample_img = torch.cat(sample_img, dim=0).to(device)\n sample_label = [labels[idx] for idx in samples_]\n sample_label = torch.cat(sample_label, dim=0)\n s_out = model(sample_img).detach()\n s_out = s_out.cpu()\n\n thresholds = (s_out.max(dim=0).values + s_out.min(dim=0).values) / 2\n hard_label = np.array([[1 if score > thresholds[i] else 0 for i, score in enumerate(j)] for j in s_out])\n\n _tmp = abs(sample_label - hard_label)\n acc = 0\n for row in _tmp:\n _f = 1\n for element in row:\n if element > 0.0001:\n _f = 0\n if _f:\n acc += 1 \n acc = float(acc) / args['batch_size']\n \n current_time = time.time()\n print('[LOGGER] Epoch[{}/{}], Step[{}]| Acc: {:.3f} | Time elapsed: {:.2f}/sec'\n .format(epoch, args['epoches'], counter, acc, current_time - start_time)) # args.num_epoches\n\n counter += 1\n tot_loss.append(epoch_loss)\n print('[INFO] Epoch[{}/{}] Ended| Loss {:.4f} | Time elapsed: {:.2f}/sec\\nStarting Eval step...'\n .format(epoch, args['epoches'], epoch_loss, current_time - start_time))\n \n # save model\n if epoch % args['steps_save_ckpt'] == 0:\n torch.save(model, args['output_dir'] + 'epoch-{}.ckpt'.format(epoch))\n\n # ==== Evaluate this epoch result using dev set ====\n ts = train_set[:]\n devacc = eval(args, model, ts, ds)\n tot_devacc.append(devacc)\n scheduler.step()\n\n plt.plot(tot_loss)\n plt.ylabel('Moving Loss each training epoches')\n plt.xlabel('Epoches')\n plt.savefig(args['output_dir'] + 'loss.png')\n plt.close()\n\n plt.plot(tot_devacc)\n plt.ylabel('Moving Acc each training epoches')\n plt.xlabel('Epoches')\n plt.savefig(args['output_dir'] + 'acc.png')\n plt.close()\n\n\ndef eval(args, model, trainset, devset):\n images,scores,labels,xtrues = [],[],[],[]\n path_to_trainset = args['dataset_dir'] + 'train/'\n label_map = load_label(args['dataset_dir'] + 'train.csv')\n ds = devset[:]\n ts = trainset[:]\n \n # train the svm\n while(ts):\n # traverse each dir in dev set\n dirname = ts.pop()\n images, labels = unpack_directory(dirname, path_to_trainset, label_map)\n random.shuffle(images)\n x_gold = labels[0]\n dir_score = []\n # Predicted score\n partition = []\n for i in range(0, len(images), args['batch_size']):\n partition.append(i) \n # minibatch training\n for pt in range(len(partition)):\n # A batch train\n if pt == len(partition) - 1:\n image_batch = torch.cat(images[partition[pt]: ], dim=0)\n else: \n image_batch= torch.cat(images[partition[pt]:partition[pt+1]], dim=0)\n image_batch = image_batch.to(device)\n out = model(image_batch).detach()\n out = out.cpu()\n 
dir_score.append(out) # consider a bag at a time \n dir_score = torch.cat(dir_score, dim=0)\n dir_score = dir_score.mean(dim=0)\n scores.append(dir_score)\n xtrues.append(x_gold)\n x_score = torch.stack(scores,dim=0)\n x_true = torch.cat(xtrues,dim=0)\n svm = svm_decision(x_score, x_true)\n \n images,scores,labels,ytrues = [],[],[],[]\n while(ds):\n # traverse each dir in dev set\n dirname = ds.pop()\n images, labels = unpack_directory(dirname, path_to_trainset, label_map)\n random.shuffle(images)\n y_gold = labels[0]\n dir_score = []\n # Predicted score\n partition = []\n for i in range(0, len(images), args['batch_size']):\n partition.append(i) \n # minibatch training\n for pt in range(len(partition)):\n # A batch train\n if pt == len(partition) - 1:\n image_batch = torch.cat(images[partition[pt]: ], dim=0)\n else: \n image_batch= torch.cat(images[partition[pt]:partition[pt+1]], dim=0)\n image_batch = image_batch.to(device)\n out = model(image_batch).detach()\n out = out.cpu()\n dir_score.append(out) # consider a bag at a time \n dir_score = torch.cat(dir_score, dim=0)\n dir_score = dir_score.mean(dim=0)\n scores.append(dir_score)\n ytrues.append(y_gold)\n # concat\n y_score = torch.stack(scores,dim=0)\n y_true = torch.cat(ytrues,dim=0)\n # use MID value to represent thresh for each label\n thresholds = (y_score.max(dim=0).values + y_score.min(dim=0).values) / 2\n # To obtain the Gold label(multi-class)\n #y_pred = torch.FloatTensor([[1 if score > thresholds[i] else 0 for i, score in enumerate(j)] for j in y_score])\n y_pred = svm.predict(y_score)\n # Acc record\n diff = y_pred - y_true.numpy()\n devacc = 0\n for row in diff:\n _f = 1\n for element in row:\n if abs(element.item()) > 0.0001:\n _f = 0\n if _f:\n devacc += 1 \n devacc = float(devacc) / len(y_true)\n # plot roc curve\n plot(y_score, y_true, args['output_dir'])\n # macro F1 Eval\n f1_macro = metrics.f1_score(y_true, y_pred, average='macro')\n # micro F1 Eval\n f1_micro = metrics.f1_score(y_true, y_pred, average='micro')\n # AUC Eval\n try:\n roc_auc = metrics.roc_auc_score(y_true, y_score, average='macro')\n except ValueError:\n print('[WARNING] Current dev set has not all of the labels')\n roc_auc = -1 \n print('[INFO] Eval result:\\n|ACC:{}|\\n|AUC:{}|\\n|F1 Macro:{}|\\n|F1 Micro:{}|'.format(\n devacc, roc_auc, f1_macro, f1_micro))\n return devacc # for train subroutine\n \n\ndef predict(args, model, trainset):\n model = model.to(device)\n \n path_to_testset = args['dataset_dir'] + 'test/'\n test_sets = listdir(path_to_testset)\n \n path_to_trainset = args['dataset_dir'] + 'train/'\n label_map = load_label(args['dataset_dir'] + 'train.csv')\n \n # ===== train the svm =====\n images,scores,labels,xtrues = [],[],[],[]\n while trainset:\n # traverse each dir in dev set\n dirname = trainset.pop()\n images, labels = unpack_directory(dirname, path_to_trainset, label_map)\n random.shuffle(images)\n x_gold = labels[0] # directory share the label\n # Predicted score\n dir_score = []\n partition = [] # minibatch training\n for i in range(0, len(images), args['batch_size']):\n partition.append(i) \n # minibatch training\n for pt in range(len(partition)):\n # A batch train\n if pt == len(partition) - 1:\n image_batch = torch.cat(images[partition[pt]: ], dim=0)\n else: \n image_batch= torch.cat(images[partition[pt]:partition[pt+1]], dim=0)\n image_batch = image_batch.to(device)\n out = model(image_batch).detach()\n out = out.cpu()\n dir_score.append(out) # consider a bag at a time \n dir_score = torch.cat(dir_score, dim=0)\n dir_score = 
dir_score.mean(dim=0)\n scores.append(dir_score)\n xtrues.append(x_gold)\n x_score = torch.stack(scores, dim=0).numpy()\n x_true = torch.cat(xtrues, dim=0).numpy()\n print('Training set for SVM: ', x_score.shape)\n svm = svm_decision(x_score, x_true)\n \n # ===== predict the score =====\n y_score = []\n for dirname in test_sets:\n # predict for each file\n images = unpack_directory(dirname, path_to_testset)\n dir_score = []\n partition = []\n for i in range(0, len(images), args['batch_size']):\n partition.append(i) \n for pt in range(len(partition)):\n # minibatch train\n if pt == len(partition) - 1:\n image_batch = torch.cat(images[partition[pt]: ], dim=0)\n else: \n image_batch = torch.cat(images[partition[pt]:partition[pt+1]], dim=0)\n image_batch = image_batch.to(device)\n out = model(image_batch).detach()\n out = out.cpu()\n dir_score.append(out)\n \n dir_scores = torch.cat(dir_score, dim=0)\n if len(images) != dir_scores.shape[0]:\n print('[WARNING] The read and write are not matched.')\n dir_scores = dir_scores.mean(dim=0) # reduce dim=0 (shape=10)\n y_score.append(dir_scores)\n # row represents each dir\n # column represents each label\n y_score = torch.stack(y_score, dim=0)\n y_prob = y_score.numpy().round(4) # output, round=4\n \n #thresholds = (y_scores.max(dim=0).values + y_scores.min(dim=0).values) / 2\n #str_label = [[str(i) for i, score in enumerate(_scores) if score > thresholds[i]] for _scores in y_scores]\n\n y_pred = svm.predict(y_score)\n str_label = [[str(i) for i, pred_label in enumerate(row) if pred_label >= 0.99] for row in y_pred] # >=0.99 ~ ==1\n str_prob = [[str(p) for p in list(_prob)] for _prob in y_prob]\n\n # split using ;\n print_score = [[dirname, ';'.join(_prob)] for dirname, _prob in zip(test_sets, str_prob)]\n print_label = [[dirname, ';'.join(_label)] for dirname, _label in zip(test_sets, str_label)]\n\n csv_record(args['output_dir'] + 'test_pred.csv', print_score)\n csv_record(args['output_dir'] + 'test.csv', print_label)\n print('[INFO] Predict done.')\n\n\ndef svm_decision(y_score, y_true):\n \"\"\"\n Args:\n y_score: [batch x 10] score of each label for batch samples\n y_true: [batch x 10] labels\n Return: \n clf(svm classifier)\n \"\"\"\n # Due to the imbalance of dataset\n clf = OneVsRestClassifier(SVC(class_weight='balanced'))\n clf.fit(y_score, y_true) \n y_pred = clf.predict(y_score)\n\n return clf\n\n\nif __name__ == \"__main__\":\n #model = ResNet(BasicBlock, [2,2,2,2], 512, 512)\n # model = ResNet(Bottleneck, [3, 4, 6, 3], 512, 512)\n #model = AlexNet(10)\n #predict(model)\n #eval(model)\n args = {\n 'epoches': 50,\n 'batch_size': 32,\n 'lr': 1e-3,\n 'bag_size': 20,\n 'do_train': True,\n 'do_eval': True,\n 'do_predict': False,\n 'steps_save_ckpt': 5,\n 'dataset_dir': '/chpc/home/stu-jrlu-a/ml_dataset/',\n 'init_checkpoint': 'resnet18-init.ckpt',\n 'output_dir': '/chpc/home/stu-jrlu-a/ml_dataset/'\n }\n #train_model(args, model)\n\n path_to_trainset = args['dataset_dir'] + 'train/'\n label_map = load_label(args['dataset_dir'] + 'train.csv')\n dataset_loader = load_dataset(path_to_trainset)\n # generate a dataset(str_dirname)\n train_set, dev_set = next(dataset_loader)\n train_set.extend(dev_set) # feed all the data to train svm\n model = torch.load(args['dataset_dir'] + args['init_checkpoint'])\n #eval(args, model, train_set, dev_set)\n predict(args, model, train_set)\n"
]
| [
[
"torch.cat",
"torch.stack",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"torch.optim.lr_scheduler.MultiStepLR",
"sklearn.svm.SVC",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"torch.nn.BCELoss",
"torch.load",
"sklearn.metrics.f1_score",
"sklearn.metrics.roc_auc_score"
]
]
|
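
`svm_decision` in train.py above fits a one-vs-rest SVM on the network's per-label scores against multi-hot targets, which is sklearn's standard multilabel setup. A self-contained sketch with synthetic stand-ins for `x_score` and `x_true` (in the script those come from the trained network and the label map):

    import numpy as np
    from sklearn.multiclass import OneVsRestClassifier
    from sklearn.svm import SVC

    rng = np.random.default_rng(0)
    x_true = rng.integers(0, 2, size=(200, 10))               # multi-hot labels
    x_score = x_true + 0.3 * rng.standard_normal((200, 10))   # noisy per-label scores

    # One binary SVM per label column; 'balanced' reweights rare labels.
    clf = OneVsRestClassifier(SVC(class_weight='balanced'))
    clf.fit(x_score, x_true)
    y_pred = clf.predict(x_score)
    print(y_pred.shape, (y_pred == x_true).mean())  # (200, 10) and a high agreement rate
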
Srinivas200599/PrePred | [
"10733b5871c5b24351d5548c5b65788aa335d4bc"
]
| [
"Personality Prediction/finetune_models/utils/linguistic_features_utils.py"
]
| [
"import numpy as np\nimport pandas as pd\nimport re\nimport preprocessor as p\nfrom scipy.io import arff\n\ndef read_and_process(path):\n arff = open(path, 'r')\n attributes = []\n values = []\n is_attr = True\n arff.readline()\n arff.readline()\n while is_attr:\n line = arff.readline()\n if len(line.split()) == 0:\n is_attr = False\n continue\n type = line.split()[0]\n attr = ' '.join(line.split()[1:])\n if type == \"@attribute\":\n attributes.append(attr)\n else:\n is_attr = False\n for line in arff.readlines():\n if len(line.split(\",\")) < 10:\n continue\n else:\n components = line.split(\",\")\n values.append(components)\n name = components[0].replace(\"\\'\", \"\").split(\"\\\\\\\\\")[-1]\n values[-1][0] = name\n df = pd.DataFrame(columns=attributes, data=values)\n df['idx'] = [int(re.sub('id_', '', i)) for i in df[df.columns[0]]]\n df = df.drop(df.columns[0], axis=1)\n df = df.set_index(['idx'])\n df = df.apply(pd.to_numeric, errors='coerce')\n df = df.sort_index()\n return df\n\n\ndef sentence_preprocess(sentence):\n sentence = p.clean(sentence)\n # Remove hyperlinks\n sentence = re.sub(r'http\\S+', ' ', sentence)\n # Remove punctuations and numbers\n # sentence = re.sub('[^a-zA-Z]', ' ', sentence)\n sentence = re.sub('[^a-zA-Z.?!,]', ' ', sentence)\n # Single character removal (except I)\n sentence = re.sub(r\"\\s+[a-zA-HJ-Z]\\s+\", ' ', sentence)\n # Removing multiple spaces\n sentence = re.sub(r'\\s+', ' ', sentence)\n return sentence\n\n\ndef load_features(dir, dataset):\n idx = 'id'\n if dataset == 'kaggle':\n drop_cols = ['BROWN-FREQ numeric', 'K-F-FREQ numeric', 'K-F-NCATS numeric', 'K-F-NSAMP numeric',\n 'T-L-FREQ numeric', 'Extraversion numeric'\n , '\\'Emotional stability\\' numeric', 'Agreeableness numeric', 'Conscientiousness numeric',\n '\\'Openness to experience\\' numeric']\n mairesse = read_and_process(dir + dataset + '_mairesse_labeled.arff')\n mairesse = mairesse.drop(drop_cols, axis=1)\n elif dataset == 'essays':\n idx = '#AUTHID'\n mairesse = pd.read_csv(dir + dataset + '_mairesse_labeled.csv')\n mairesse = mairesse.set_index(mairesse.columns[0])\n nrc = pd.read_csv(dir + dataset + '_nrc.csv').set_index([idx])\n # nrc = nrc.sort_values(by=['id'])\n # nrc = nrc.drop(['id'], axis=1)\n nrc_vad = pd.read_csv(dir + dataset + '_nrc_vad.csv').set_index([idx])\n # nrc_vad = nrc_vad.sort_values(by=['id'])\n # nrc_vad = nrc_vad.drop(['id'], axis=1)\n # affectivespace = pd.read_csv(dir + 'essays_affectivespace.csv').set_index(['#AUTHID'])\n # hourglass = pd.read_csv(dir + dataset + '_hourglass.csv').set_index([idx])\n readability = pd.read_csv(dir + dataset + '_readability.csv').set_index([idx])\n\n #return [nrc, nrc_vad, readability, mairesse]\n return [nrc, nrc_vad, readability]\n\n\ndef get_psycholinguist_data(dump_data, dataset, feature_flags):\n features = load_features('../data/' + dataset + '/psycholinguist_features/', dataset)\n\n first = 1\n for feature, feature_flag in zip(features, feature_flags):\n if feature_flag:\n if first:\n df = feature\n first = 0\n else:\n df = pd.merge(df, feature, left_index=True, right_index=True)\n if dataset == 'essays':\n labels = dump_data[['user', 'EXT', 'NEU', 'AGR', 'CON', 'OPN']]\n if dataset == 'kaggle':\n labels = dump_data[['user', 'E', 'N', 'F', 'J']]\n labels = labels.set_index('user')\n if dataset == 'kaggle':\n labels.index = pd.to_numeric(labels.index, errors='coerce')\n df.index = pd.to_numeric(df.index, errors='coerce')\n merged = pd.merge(df, labels, left_index=True, right_index=True).fillna(0)\n label_size = 
labels.shape[1]\n data = merged[merged.columns[:(-1*label_size)]].values\n full_targets = merged[merged.columns[(-1*label_size):]].values\n feature_names = merged.columns\n return data, full_targets, feature_names, merged\n"
]
| [
[
"pandas.DataFrame",
"pandas.read_csv",
"pandas.to_numeric",
"pandas.merge"
]
]
|
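
`get_psycholinguist_data` above assembles its design matrix by repeatedly index-merging feature tables, appending the label columns, and slicing the last `label_size` columns back off as targets. A tiny sketch of that merge-then-slice pattern with made-up feature and label frames:

    import pandas as pd

    nrc = pd.DataFrame({'anger': [0.1, 0.4]}, index=[1, 2])
    readability = pd.DataFrame({'flesch': [70.0, 55.0]}, index=[1, 2])
    labels = pd.DataFrame({'EXT': [1, 0], 'NEU': [0, 1]}, index=[1, 2])

    df = pd.merge(nrc, readability, left_index=True, right_index=True)
    merged = pd.merge(df, labels, left_index=True, right_index=True).fillna(0)

    label_size = labels.shape[1]
    data = merged[merged.columns[:-label_size]].values          # feature columns
    full_targets = merged[merged.columns[-label_size:]].values  # label columns
    print(data.shape, full_targets.shape)  # (2, 2) (2, 2)
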
NauticalMile64/PyFR | [
"dcb16f01f7a68098efc9d043e77befd076b3eab5"
]
| [
"pyfr/backends/opencl/base.py"
]
| [
"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom pyfr.backends.base import BaseBackend\nfrom pyfr.mpiutil import get_local_rank\n\n\nclass OpenCLBackend(BaseBackend):\n name = 'opencl'\n\n def __init__(self, cfg):\n super().__init__(cfg)\n\n import pyopencl as cl\n\n # Get the platform/device info from the config file\n platid = cfg.get('backend-opencl', 'platform-id', '0').lower()\n devid = cfg.get('backend-opencl', 'device-id', 'local-rank').lower()\n devtype = cfg.get('backend-opencl', 'device-type', 'all').upper()\n\n # Handle the local-rank case\n if devid == 'local-rank':\n devid = str(get_local_rank())\n\n # Map the device type to the corresponding PyOpenCL constant\n devtype = getattr(cl.device_type, devtype)\n\n # Determine the OpenCL platform to use\n for i, platform in enumerate(cl.get_platforms()):\n if platid == str(i) or platid == platform.name.lower():\n break\n else:\n raise ValueError('No suitable OpenCL platform found')\n\n # Determine the OpenCL device to use\n for i, device in enumerate(platform.get_devices(devtype)):\n if devid == str(i) or devid == device.name.lower():\n break\n else:\n raise ValueError('No suitable OpenCL device found')\n\n # Determine if the device supports double precision arithmetic\n if self.fpdtype == np.float64 and not device.double_fp_config:\n raise ValueError('Device does not support double precision')\n\n # Create a OpenCL context on this device\n self.ctx = cl.Context([device])\n\n # Create a queue for initialisation-type operations\n self.qdflt = cl.CommandQueue(self.ctx)\n\n # Compute the alignment requirement for the context\n self.alignb = device.mem_base_addr_align // 8\n\n # Compute the SoA size\n self.soasz = 2*self.alignb // np.dtype(self.fpdtype).itemsize\n\n from pyfr.backends.opencl import (blasext, clblas, gimmik, packing,\n provider, types)\n\n # Register our data types\n self.base_matrix_cls = types.OpenCLMatrixBase\n self.const_matrix_cls = types.OpenCLConstMatrix\n self.matrix_cls = types.OpenCLMatrix\n self.matrix_bank_cls = types.OpenCLMatrixBank\n self.matrix_rslice_cls = types.OpenCLMatrixRSlice\n self.queue_cls = types.OpenCLQueue\n self.view_cls = types.OpenCLView\n self.xchg_matrix_cls = types.OpenCLXchgMatrix\n self.xchg_view_cls = types.OpenCLXchgView\n\n # Instantiate the base kernel providers\n kprovs = [provider.OpenCLPointwiseKernelProvider,\n blasext.OpenCLBlasExtKernels,\n packing.OpenCLPackingKernels,\n gimmik.OpenCLGiMMiKKernels,\n clblas.OpenCLClBLASKernels]\n self._providers = [k(self) for k in kprovs]\n\n # Pointwise kernels\n self.pointwise = self._providers[0]\n\n def _malloc_impl(self, nbytes):\n import pyopencl as cl\n\n # Allocate the device buffer\n buf = cl.Buffer(self.ctx, cl.mem_flags.READ_WRITE, nbytes)\n\n # Zero the buffer\n cl.enqueue_copy(self.qdflt, buf, np.zeros(nbytes, dtype=np.uint8))\n\n return buf\n"
]
| [
[
"numpy.dtype",
"numpy.zeros"
]
]
|
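
The OpenCL backend above computes its buffer alignment from the device's `mem_base_addr_align` attribute (reported in bits) and sizes its structure-of-arrays packing from the active float dtype. The arithmetic in isolation, assuming an illustrative device report of 4096 bits:

    import numpy as np

    mem_base_addr_align = 4096              # bits; an assumed device value
    fpdtype = np.float64

    alignb = mem_base_addr_align // 8       # alignment requirement in bytes -> 512
    soasz = 2 * alignb // np.dtype(fpdtype).itemsize
    print(alignb, soasz)                    # 512 128
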
u-square2/AcadVault | [
"571612c928a1357c849cf9a06b47bab15dc5afa3"
]
| [
"CT/111_introduction_to_communication_theory/yash_vasavda/2019/projects/201801052/test.py"
]
| [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib as mpl\nimport scipy.io\nmat = scipy.io.loadmat('Hmatrix2.mat')\n\ndef CopySubMatrix(Matrix,SubMatrix,i,j,rows,columns):\n\tn=rows*rows\n\tk=columns*columns\n\tfor x in range(0,rows):\n\t\tfor y in range(0,columns):\n\t\t\tMatrix[((i + x) , (j + y))] = SubMatrix[(x , y)]\n\n\n\n\ndef GeneratorMatrix(k):\n\tn=int(k + (2 * np.sqrt(k)) + 1)\n\trootn = int(np.sqrt(n))\n\trootk = int(np.sqrt(k))\n\tGenerator=np.zeros((int (n),int(k)))\n\tMatrix =np.zeros((int(rootn),int(rootk)))\n\tfor i in range(0,rootk):\n\t\tMatrix [i,i]=1\n\t\tMatrix [(rootn-1) , i]=1\n\tfor i in range(0,rootk):\n\t\tCopySubMatrix (Generator, Matrix, i*rootn, i*rootk, rootn, rootk)\n\t\tCopySubMatrix (Generator, Matrix, n - rootn, i*rootk, rootn, rootk) \n\treturn Generator \n\n\n\ndef Encoder(Message):\n\tK = len(Message)\n\n\tGenerator = GeneratorMatrix(K)\n\tCodedMessage = (np.dot(Generator, Message))%2\n\n\treturn CodedMessage\n\n\ndef bec(encodedM,probability):\n ErrorM=np.zeros((len(encodedM),), dtype=int)\n NoiseM=encodedM\n \n for i in range(0,len(ErrorM)):\n P=np.around(np.random.rand(1),decimals=3)\n \n if P[0]<probability:\n ErrorM[i]=1\n for i in range(0,len(encodedM)):\n if ErrorM[i]==1:\n NoiseM[i]=-1\n return NoiseM \n\ndef bsc(encodedM,probability):\n ErrorM=np.zeros((len(encodedM),), dtype=int)\n NoiseM=encodedM\n P=np.around(np.random.rand(1),decimals=4)\n for i in range(0,len(encodedM)):\n P=np.around(np.random.rand(1),decimals=4)\n \n if P[0]<probability:\n ErrorM[i]=1\n for i in range(0,len(encodedM)):\n if ErrorM[i]==1:\n if NoiseM[i]==1:\n NoiseM[i]=0\n else:\n NoiseM[i]=1\n return NoiseM \n\ndef ParityCheckMatrix(k) :\n\tsqrtk = int(np.sqrt(k))\n\tsqrtn = int(sqrtk + 1)\n\tn = int(k + 2 * sqrtk + 1)\n\tOfSet = 0\n\n\tParityCheckMatrix = np.zeros((n - k, n))\n\n\tfor i in range(0,sqrtk):\n\t\tfor j in range(OfSet,sqrtk + OfSet + 1):\n\t\t\tParityCheckMatrix[i][j] = 1\n\n\t\tOfSet = OfSet + sqrtk + 1\n\n\tfor i in range(sqrtk,2 * sqrtk):\n\t\tfor j in range(i - sqrtk,n,sqrtn): \n\t\t\tParityCheckMatrix[i][j] = 1\n\n\tfor i in range(0,n):\n\t\tParityCheckMatrix[n-k-1][i] = 1\n\n\treturn ParityCheckMatrix\n\n\ndef decoder(RecievedM,ParityCheckMatrix):\n alpha=RecievedM\n n=len(RecievedM)\n syndrome=np.dot(ParityCheckMatrix,RecievedM)\n\n for z in range(50):\n tempalpha=alpha\n for i in range(0,n):\n beta=np.zeros(n)\n SyndromeCount=0\n for j in range(0,np.size(ParityCheckMatrix,0)):\n beta[0]=alpha[i]\n\n if ParityCheckMatrix[j][i]==1:\n SyndromeCount=SyndromeCount+1\n sum=0\n for k in range(0,n):\n if ParityCheckMatrix[j][k]==1 and k!=i:\n sum+=alpha[k] \n sum=sum%2 \n beta[SyndromeCount]=sum\n\n Bincount=np.zeros(2) \n for t in range(0,SyndromeCount+1):\n if beta[t]==0:\n Bincount[0]+=1\n else:\n Bincount[1]+=1\n\n if (Bincount[0]==Bincount[1]):\n if (beta[0]==1):\n tempalpha[i]=0\n else:\n tempalpha[i]=1\n\n elif Bincount[0]>Bincount[1]:\n tempalpha[i]=0\n\n elif Bincount[0]<Bincount[1]:\n tempalpha[i]=1\n\n alpha=tempalpha\n syndrome=(np.dot(ParityCheckMatrix,alpha))%2\n IsSyndromZero=np.count_nonzero(syndrome)\n \n if IsSyndromZero==0:\n break \n\n return alpha \n\n\ndef decoderBEC(RecievedM,ParityCheckMatrix):\n alpha=RecievedM\n n=len(RecievedM)\n syndrome=np.dot(ParityCheckMatrix,RecievedM)\n\n for z in range(50):\n tempalpha=alpha\n for i in range(0,n):\n if alpha[i]==-1:\n beta=np.zeros(n)\n SyndromeCount=0\n for j in range(0,np.size(ParityCheckMatrix,0)):\n beta[0]=alpha[i]\n\n if ParityCheckMatrix[j][i]==1:\n 
SyndromeCount=SyndromeCount+1\n sum=0\n for k in range(0,n):\n if ParityCheckMatrix[j][k]==1 and k!=i:\n sum+=alpha[k] \n sum=sum%2 \n beta[SyndromeCount]=sum\n\n Bincount=np.zeros(2) \n for t in range(0,SyndromeCount+1):\n if beta[t]==0:\n Bincount[0]+=1\n else:\n Bincount[1]+=1\n\n if (Bincount[0]==Bincount[1]):\n if (beta[0]==1):\n tempalpha[i]=0\n else:\n tempalpha[i]=0\n\n elif Bincount[0]>Bincount[1]:\n tempalpha[i]=0\n\n elif Bincount[0]<Bincount[1]:\n tempalpha[i]=1\n\n alpha=tempalpha\n syndrome=(np.dot(ParityCheckMatrix,alpha))%2\n IsSyndromZero=np.count_nonzero(syndrome)\n \n if IsSyndromZero==0:\n break \n\n return alpha \n\ndef DecoderBSC(RecievedMessage,RelationMatrix):\n n=len(RecievedMessage)\n k=int((np.sqrt(n)-1)*(np.sqrt(n)-1))\n alpha=np.array(RecievedMessage)\n\n for Cycle in range(50):\n tempalpha=np.array(alpha)\n MessageCheckNode=np.zeros((n,n-k+1))\n for i in range(n):\n MessageCheckNode[i][0]=1\n MessageCheckNode[i][1]=alpha[i]\n \n Syndrome=np.zeros(n-k)\n\n for i in range(n-k):\n indexMatrix=np.zeros(n)\n z=0\n for j in range(n):\n if (RelationMatrix[i][j]==1):\n Syndrome[i]+=alpha[j]\n indexMatrix[z]=j\n z+=1\n\n Syndrome%=2\n for l in range(z):\n index1=int(indexMatrix[l])\n index2=int(MessageCheckNode[index1][0])\n MessageCheckNode[index1][index2]=(alpha[index1]+Syndrome[i])%2\n MessageCheckNode[index1][0]+=1\n \n for j in range(n):\n Ones=0\n for l in range(int(MessageCheckNode[j][0])):\n if (MessageCheckNode[j][l+1]):\n Ones+=1\n\n if (Ones>(MessageCheckNode[j][0]/2)):\n alpha[j]=1\n else:\n alpha[j]=0\n\n CheckZero=np.dot(RelationMatrix,alpha)\n IsSyndromZero=np.count_nonzero(CheckZero)%2\n \n if IsSyndromZero==0:\n break\n\n CheckEqual=tempalpha-alpha\n if (np.count_nonzero(CheckEqual)==0):\n break\n\n return alpha\n\nH2=mat['H']\n\ndef LDPCBsc(EncodedM,p):\n \n\n\n \n \n a=np.zeros(5000)\n \n prob=p\n \n NoiseMBsc=bsc(a,prob)\n\n \n \n \n DecodedBsc=DecoderBSC(NoiseMBsc,H2)\n \n print(DecodedBsc)\n return DecodedBsc\n\nP=np.zeros(100)\nS=np.zeros(100)\n\na=[0,0,0,0,0,0,0,0,0]\n#for x in range(1):\n # b=np.zeros(5000)\n #P[x]=x/100\n \n #success=0\n #z=LDPCBsc(b,x/100)\n #print(z)\n #if np.count_nonzero(z)==0:\n # success+=1\n #S[x]=success \n #print(x)\nb=np.zeros(5000)\nsuccess=0\nz=LDPCBsc(b,.1)\nprint(z)\nif np.count_nonzero(z)==0:\n success+=1\nS[0]=success \nP[0]=.1\n\n\n\n\n\nprint(P)\nprint(S)\n\nplt.plot(P,S)\nplt.ylabel(\"%age of Succesful decoding\")\nplt.xlabel(\"Probability of Error\")\nplt.title(\"Performance of BEC Product Code Decoder for (9,4)Product code \")\nplt.show()\n\n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
]
| [
[
"numpy.array",
"numpy.dot",
"numpy.count_nonzero",
"numpy.random.rand",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"numpy.size",
"matplotlib.pyplot.show"
]
]
|
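
The `GeneratorMatrix`/`ParityCheckMatrix` pair above builds a single-parity product code: for k = 4, the message bits form a 2x2 block, each row and column gets a parity bit, and one overall parity completes the 3x3 (n = 9) codeword. A direct numpy sketch of that encoder (the bit ordering here is row-major, which may differ from the record's column-stacked generator):

    import numpy as np

    msg = np.array([[1, 0],
                    [1, 1]])                  # 4 message bits as a 2x2 block
    rows = msg.sum(axis=1) % 2                # one parity bit per row
    cols = msg.sum(axis=0) % 2                # one parity bit per column
    overall = msg.sum() % 2                   # parity of all message bits
    code = np.block([[msg, rows[:, None]],
                     [np.r_[cols, overall][None, :]]])

    # Every row and column of a valid codeword has even parity.
    assert not (code.sum(axis=0) % 2).any() and not (code.sum(axis=1) % 2).any()
    print(code)
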
shahdloo/pycortex | [
"08693a048cc66d77ada1126129349c7c1ad46d92"
]
| [
"cortex/brainctm.py"
]
| [
"'''Generates the OpenCTM file that holds the brain mesh for the webgl viewer. \nThis ctm file contains the following information:\n\npts, polys\n Forms the base of the brain mesh, in the fiducial space\n\nmorphTarget%d\n Holds additional surfaces (inflated, etc) as morphTargets for three.js.\n Morphtargets are normalized to the same extent as the fiducial, for better\n morphing effect.\n\nuv\n Actually stores the raw flatmap coordinates, unnormalized. Normalization is handled\n in javascript, in the load function\n'''\nimport os\nimport sys\nimport json\nimport tempfile\nimport six\nimport numpy as np\nfrom scipy.spatial import cKDTree\n\nfrom .database import db\nfrom .utils import get_cortical_mask, get_mapper, get_dropout\nfrom . import polyutils\nfrom .openctm import CTMfile\n\nclass BrainCTM(object):\n def __init__(self, subject, decimate=False):\n self.subject = subject\n self.types = []\n\n left, right = db.get_surf(subject, \"fiducial\")\n try:\n fleft, fright = db.get_surf(subject, \"flat\", nudge=True, merge=False)\n except IOError:\n fleft = None\n\n if decimate:\n try:\n pleft, pright = db.get_surf(subject, \"pia\")\n self.left = DecimatedHemi(left[0], left[1], fleft[1], pia=pleft[0])\n self.right = DecimatedHemi(right[0], right[1], fright[1], pia=pright[0])\n self.addSurf(\"wm\", addtype=False, renorm=False)\n except IOError:\n self.left = DecimatedHemi(left[0], left[1], fleft[1])\n self.right = DecimatedHemi(right[0], right[1], fright[1])\n else:\n try:\n pleft, pright = db.get_surf(subject, \"pia\")\n wleft, wright = db.get_surf(subject, \"wm\")\n self.left = Hemi(pleft[0], left[1])\n self.right = Hemi(pright[0], right[1])\n self.addSurf(\"wm\", addtype=False, renorm=False)\n except IOError:\n self.left = Hemi(left[0], left[1])\n self.right = Hemi(right[0], right[1])\n\n if fleft is not None:\n #set medial wall\n for hemi, ptpoly in ([self.left, fleft], [self.right, fright]):\n # fidpolys = set(tuple(f) for f in polyutils.sort_polys(hemi.polys))\n # flatpolys = set(tuple(f) for f in polyutils.sort_polys(ptpoly[1]))\n # medial_verts = set(np.ravel(list(fidpolys - flatpolys)))\n medial_verts = set(hemi.polys.ravel()) - set(ptpoly[1].ravel())\n hemi.aux[list(medial_verts), 0] = 1\n\n connected = [set() for _ in range(len(ptpoly[0]))]\n for p1, p2, p3 in hemi.polys:\n if p1 not in medial_verts:\n connected[p2].add(p1)\n connected[p3].add(p1)\n if p2 not in medial_verts:\n connected[p1].add(p2)\n connected[p3].add(p2)\n if p3 not in medial_verts:\n connected[p1].add(p3)\n connected[p2].add(p3)\n\n #move the medial wall vertices out of the flatmap\n for vert in medial_verts:\n candidates = connected[vert]\n if len(candidates) > 0:\n ptpoly[0][vert] = ptpoly[0][candidates.pop()]\n else:\n ptpoly[0][vert] = 0\n\n #Find the flatmap limits\n if fleft is not None:\n flatmerge = np.vstack([fleft[0][:,:2], fright[0][:,:2]])\n fmin, fmax = flatmerge.min(0), flatmerge.max(0)\n self.flatlims = [float(x) for x in -fmin], [float(x) for x in fmax-fmin]\n\n self.left.setFlat(fleft[0])\n self.right.setFlat(fright[0])\n else:\n self.flatlims = None\n\n def addSurf(self, typename, addtype=True, **kwargs):\n left, right = db.get_surf(self.subject, typename, nudge=False, merge=False)\n self.left.addSurf(left[0], typename, **kwargs)\n self.right.addSurf(right[0], typename, **kwargs)\n if addtype:\n self.types.append(typename)\n\n def addCurvature(self, **kwargs):\n npz = db.get_surfinfo(self.subject, type='curvature', **kwargs)\n try:\n self.left.aux[:,1] = npz.left[self.left.mask]\n 
self.right.aux[:,1] = npz.right[self.right.mask]\n except AttributeError:\n self.left.aux[:,1] = npz.left\n self.right.aux[:,1] = npz.right\n\n def save(self, path, method='mg2', **kwargs):\n \"\"\"Save CTM file for static html display. \n\n Parameters\n ----------\n path : string\n File path for cached ctm file to save\n method : idkwtf\n \"\"\"\n ctmname = path + \".ctm\"\n svgname = path + \".svg\"\n jsname = path + \".json\"\n mapname = path + \".npz\"\n\n # Save CTM concatenation\n (lpts, _, _), lbin = self.left.save(method=method, **kwargs)\n (rpts, _, _), rbin = self.right.save(method=method, **kwargs)\n\n offsets = [0]\n with open(path+'.ctm', 'wb') as fp:\n fp.write(lbin)\n offsets.append(fp.tell())\n fp.write(rbin)\n\n # Save the JSON descriptor | Need to add to this for extra_disp?\n jsdict = dict(rois=os.path.split(svgname)[1],\n data=os.path.split(ctmname)[1],\n names=self.types, \n materials=[],\n offsets=offsets)\n if self.flatlims is not None:\n jsdict['flatlims'] = self.flatlims\n json.dump(jsdict, open(jsname, 'w'))\n\n # Compute and save the index map\n if method != 'raw':\n ptmap, inverse = [], []\n for hemi, pts in zip([self.left, self.right], [lpts, rpts]):\n kdt = cKDTree(hemi.pts)\n diff, idx = kdt.query(pts)\n ptmap.append(idx)\n inverse.append(idx.argsort())\n else:\n ptmap = inverse = np.arange(len(self.left.ctm)), np.arange(len(self.right.ctm))\n\n np.savez(mapname, \n index=np.hstack([ptmap[0], ptmap[1]+len(ptmap[0])]), \n inverse=np.hstack([inverse[0], inverse[1]+len(inverse[0])]))\n\n # Save the SVG with remapped indices (map 2D flatmap locations to vertices)\n if self.left.flat is not None:\n flatpts = np.vstack([self.left.flat, self.right.flat])\n svg = db.get_overlay(self.subject, pts=flatpts) # PROBLEM HERE\n \n # assign coordinates in left hemisphere negative values\n with open(svgname, \"wb\") as fp:\n for element in svg.svg.findall(\".//{http://www.w3.org/2000/svg}text\"):\n if 'data-ptidx' in element.attrib:\n idx = int(element.attrib[\"data-ptidx\"])\n if idx < len(inverse[0]):\n idx = inverse[0][idx]\n else:\n idx -= len(inverse[0])\n idx = inverse[1][idx] + len(inverse[0])\n element.attrib[\"data-ptidx\"] = str(idx)\n fp.write(svg.toxml())\n return ptmap\n\nclass Hemi(object):\n def __init__(self, pts, polys, norms=None):\n self.tf = tempfile.NamedTemporaryFile()\n if six.PY3:\n self.tf.name = bytes(self.tf.name, 'ascii')\n self.ctm = CTMfile(self.tf.name, \"w\")\n\n self.ctm.setMesh(pts.astype(np.float32),\n polys.astype(np.uint32),\n norms=norms)\n\n self.pts = pts\n self.polys = polys\n self.flat = None\n self.surfs = {}\n self.aux = np.zeros((len(self.ctm), 4))\n\n def addSurf(self, pts, name, renorm=True):\n '''Scales the in-between surfaces to be same scale as fiducial'''\n if renorm:\n norm = (pts - pts.min(0)) / (pts.max(0) - pts.min(0))\n rnorm = norm * (self.pts.max(0) - self.pts.min(0)) + self.pts.min(0)\n else:\n rnorm = pts\n\n attrib = np.hstack([rnorm, np.zeros((len(rnorm),1))])\n self.surfs[name] = attrib\n self.ctm.addAttrib(attrib, name)\n print(name)\n\n def setFlat(self, pts):\n self.ctm.addUV(pts[:,:2].astype(float), 'uv')\n self.flat = pts[:,:2]\n\n def save(self, **kwargs):\n self.ctm.addAttrib(self.aux, 'auxdat')\n self.ctm.save(**kwargs)\n ctm = CTMfile(self.tf.name)\n return ctm.getMesh(), self.tf.read()\n\nclass DecimatedHemi(Hemi):\n def __init__(self, pts, polys, fpolys, pia=None):\n print(\"Decimating...\")\n kdt = cKDTree(pts)\n mask = np.zeros((len(pts),), dtype=bool)\n\n fidset = set([tuple(p) for p in 
polyutils.sort_polys(polys)])\n flatset = set([tuple(p) for p in polyutils.sort_polys(fpolys)])\n mwall = np.array(list(fidset - flatset))\n\n dpts, dpolys = polyutils.decimate(pts, fpolys)\n dist, didx = kdt.query(dpts)\n mask[didx] = True\n\n mwpts, mwpolys = polyutils.decimate(pts, mwall)\n dist, mwidx = kdt.query(mwpts)\n mask[mwidx] = True\n\n allpolys = np.vstack([didx[dpolys], mwidx[mwpolys]])\n idxmap = np.zeros((len(pts),), dtype=np.uint32)\n idxmap[mask] = np.arange(mask.sum()).astype(np.uint32)\n #norms = polyutils.Surface(pts, polys).normals[mask]\n basepts = pts[mask] if pia is None else pia[mask]\n super(DecimatedHemi, self).__init__(basepts, idxmap[allpolys])\n self.aux[idxmap[mwidx], 0] = 1\n self.mask = mask\n self.idxmap = idxmap\n\n def setFlat(self, pts):\n super(DecimatedHemi, self).setFlat(pts[self.mask])\n\n def addSurf(self, pts, **kwargs):\n super(DecimatedHemi, self).addSurf(pts[self.mask], **kwargs)\n\ndef make_pack(outfile, subj, types=(\"inflated\",), method='raw', level=0,\n decimate=False, disp_layers=['rois'], extra_disp=None):\n \"\"\"Generates a cached CTM file\"\"\"\n\n ctm = BrainCTM(subj, decimate=decimate)\n ctm.addCurvature()\n for name in types:\n ctm.addSurf(name)\n\n if not os.path.exists(os.path.split(outfile)[0]):\n os.makedirs(os.path.split(outfile)[0])\n return ctm.save(os.path.splitext(outfile)[0],\n method=method,\n level=level)\n\ndef read_pack(ctmfile):\n fname = os.path.splitext(ctmfile)[0]\n jsfile = json.load(open(fname + \".json\"))\n offset = jsfile['offsets']\n\n meshes = []\n\n with open(ctmfile, 'rb') as ctmfp:\n ctmfp.seek(0, 2)\n offset.append(ctmfp.tell())\n\n for start, end in zip(offset[:-1], offset[1:]):\n ctmfp.seek(start)\n tf = tempfile.NamedTemporaryFile()\n tf.write(ctmfp.read(end-start))\n tf.seek(0)\n ctm = CTMfile(tf.name, \"r\")\n pts, polys, norms = ctm.getMesh()\n meshes.append((pts, polys))\n\n return meshes\n"
]
| [
[
"scipy.spatial.cKDTree",
"numpy.vstack"
]
]
|
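The `save()` method in the entry above recovers vertex order after CTM compression by nearest-neighbour lookup: a `cKDTree` built on the original points maps each saved point back to its source index, and `argsort` of that map yields the inverse permutation. A minimal sketch of that remapping with synthetic data (all names here are illustrative, not from the repo):

```python
import numpy as np
from scipy.spatial import cKDTree

# Original mesh vertices and a permuted copy, standing in for the
# reordering a lossy CTM save can introduce.
rng = np.random.default_rng(0)
pts = rng.random((100, 3))
perm = rng.permutation(len(pts))
saved_pts = pts[perm]

# Map each saved vertex back to its nearest original vertex, as save() does.
kdt = cKDTree(pts)
_, ptmap = kdt.query(saved_pts)   # ptmap[i] = original index of saved vertex i
inverse = ptmap.argsort()         # inverse[j] = saved index of original vertex j

assert np.array_equal(ptmap, perm)
assert np.array_equal(ptmap[inverse], np.arange(len(pts)))
```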
oluwayetty/dagan-camera | [
"b8d10b32b480f454e26b7290f137907d3f3afa0a"
]
| [
"dagan_architectures.py"
]
| [
"import tensorflow as tf\nfrom tensorflow.contrib.layers import batch_norm, layer_norm\nfrom tensorflow.python.ops.image_ops_impl import ResizeMethod\nfrom tensorflow.python.ops.nn_ops import leaky_relu\nfrom utils.network_summary import count_parameters\n\n\ndef remove_duplicates(input_features):\n \"\"\"\n Remove duplicate entries from layer list.\n :param input_features: A list of layers\n :return: Returns a list of unique feature tensors (i.e. no duplication).\n \"\"\"\n feature_name_set = set()\n non_duplicate_feature_set = []\n for feature in input_features:\n if feature.name not in feature_name_set:\n non_duplicate_feature_set.append(feature)\n feature_name_set.add(feature.name)\n return non_duplicate_feature_set\n\n\nclass UResNetGenerator:\n def __init__(self, layer_sizes, layer_padding, batch_size, num_channels=1,\n inner_layers=0, name=\"g\"):\n \"\"\"\n Initialize a UResNet generator.\n :param layer_sizes: A list with the filter sizes for each MultiLayer e.g. [64, 64, 128, 128]\n :param layer_padding: A list with the padding type for each layer e.g. [\"SAME\", \"SAME\", \"SAME\", \"SAME\"]\n :param batch_size: An integer indicating the batch size\n :param num_channels: An integer indicating the number of input channels\n :param inner_layers: An integer indicating the number of inner layers per MultiLayer\n \"\"\"\n self.reuse = False\n self.batch_size = batch_size\n self.num_channels = num_channels\n self.layer_sizes = layer_sizes\n self.layer_padding = layer_padding\n self.inner_layers = inner_layers\n self.conv_layer_num = 0\n self.build = True\n self.name = name\n\n def upscale(self, x, h_size, w_size):\n \"\"\"\n Upscales an image using nearest neighbour\n :param x: Input image\n :param h_size: Image height size\n :param w_size: Image width size\n :return: Upscaled image\n \"\"\"\n [b, h, w, c] = [int(dim) for dim in x.get_shape()]\n\n return tf.image.resize_nearest_neighbor(x, (h_size, w_size))\n\n def conv_layer(self, inputs, num_filters, filter_size, strides, activation=None,\n transpose=False, w_size=None, h_size=None):\n \"\"\"\n Add a convolutional layer to the network.\n :param inputs: Inputs to the conv layer.\n :param num_filters: Num of filters for conv layer.\n :param filter_size: Size of filter.\n :param strides: Stride size.\n :param activation: Conv layer activation.\n :param transpose: Whether to apply upscale before convolution.\n :param w_size: Used only for upscale, w_size to scale to.\n :param h_size: Used only for upscale, h_size to scale to.\n :return: Convolution features\n \"\"\"\n self.conv_layer_num += 1\n if transpose:\n outputs = self.upscale(inputs, h_size=h_size, w_size=w_size)\n outputs = tf.layers.conv2d_transpose(outputs, num_filters, filter_size,\n strides=strides,\n padding=\"SAME\", activation=activation)\n elif not transpose:\n outputs = tf.layers.conv2d(inputs, num_filters, filter_size, strides=strides,\n padding=\"SAME\", activation=activation)\n return outputs\n\n def resize_batch(self, batch_images, size):\n\n \"\"\"\n Resize image batch using nearest neighbour\n :param batch_images: Image batch\n :param size: Size to upscale to\n :return: Resized image batch.\n \"\"\"\n images = tf.image.resize_images(batch_images, size=size, method=ResizeMethod.NEAREST_NEIGHBOR)\n\n return images\n\n def add_encoder_layer(self, input, name, training, dropout_rate, layer_to_skip_connect, local_inner_layers,\n num_features, dim_reduce=False):\n\n \"\"\"\n Adds a resnet encoder layer.\n :param input: The input to the encoder layer\n :param training: 
Flag for training or validation\n :param dropout_rate: A float or a placeholder for the dropout rate\n :param layer_to_skip_connect: Layer to skip-connect this layer to\n :param local_inner_layers: A list with the inner layers of the current Multi-Layer\n :param num_features: Number of feature maps for the convolutions\n :param dim_reduce: Boolean value indicating if this is a dimensionality reducing layer or not\n :return: The output of the encoder layer\n \"\"\"\n [b1, h1, w1, d1] = input.get_shape().as_list()\n\n if len(layer_to_skip_connect) >= 2:\n layer_to_skip_connect = layer_to_skip_connect[-2]\n else:\n layer_to_skip_connect = None\n\n if layer_to_skip_connect is not None:\n [b0, h0, w0, d0] = layer_to_skip_connect.get_shape().as_list()\n if h0 > h1:\n skip_connect_layer = self.conv_layer(layer_to_skip_connect, int(layer_to_skip_connect.get_shape()[3]),\n [3, 3], strides=(2, 2))\n else:\n skip_connect_layer = layer_to_skip_connect\n current_layers = [input, skip_connect_layer]\n else:\n current_layers = [input]\n\n current_layers.extend(local_inner_layers)\n current_layers = remove_duplicates(current_layers)\n outputs = tf.concat(current_layers, axis=3)\n\n if dim_reduce:\n outputs = self.conv_layer(outputs, num_features, [3, 3], strides=(2, 2))\n outputs = leaky_relu(outputs)\n outputs = batch_norm(outputs, decay=0.99, scale=True,\n center=True, is_training=training,\n renorm=True)\n outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=training)\n else:\n outputs = self.conv_layer(outputs, num_features, [3, 3], strides=(1, 1))\n outputs = leaky_relu(features=outputs)\n outputs = batch_norm(outputs, decay=0.99, scale=True,\n center=True, is_training=training,\n renorm=True)\n\n return outputs\n\n def add_decoder_layer(self, input, name, training, dropout_rate, layer_to_skip_connect, local_inner_layers,\n num_features, dim_upscale=False, h_size=None, w_size=None):\n\n \"\"\"\n Adds a resnet decoder layer.\n :param input: Input features\n :param name: Layer Name\n :param training: Training placeholder or boolean flag\n :param dropout_rate: Float placeholder or float indicating the dropout rate\n :param layer_to_skip_connect: Layer to skip connect to.\n :param local_inner_layers: A list with the inner layers of the current MultiLayer\n :param num_features: Num feature maps for convolution\n :param dim_upscale: Dimensionality upscale\n :param h_size: Height to upscale to\n :param w_size: Width to upscale to\n :return: The output of the decoder layer\n \"\"\"\n [b1, h1, w1, d1] = input.get_shape().as_list()\n if len(layer_to_skip_connect) >= 2:\n layer_to_skip_connect = layer_to_skip_connect[-2]\n else:\n layer_to_skip_connect = None\n\n if layer_to_skip_connect is not None:\n [b0, h0, w0, d0] = layer_to_skip_connect.get_shape().as_list()\n\n if h0 < h1:\n skip_connect_layer = self.conv_layer(layer_to_skip_connect,\n int(layer_to_skip_connect.get_shape()[3]),\n [3, 3], strides=(1, 1),\n transpose=True,\n h_size=h_size,\n w_size=w_size)\n else:\n skip_connect_layer = layer_to_skip_connect\n current_layers = [input, skip_connect_layer]\n else:\n current_layers = [input]\n\n current_layers.extend(local_inner_layers)\n current_layers = remove_duplicates(current_layers)\n outputs = tf.concat(current_layers, axis=3)\n\n if dim_upscale:\n outputs = self.conv_layer(outputs, num_features, [3, 3], strides=(1, 1),\n transpose=True, w_size=w_size, h_size=h_size)\n outputs = leaky_relu(features=outputs)\n outputs = batch_norm(outputs,\n decay=0.99, scale=True,\n center=True, 
is_training=training,\n renorm=True)\n outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=training)\n else:\n outputs = self.conv_layer(outputs, num_features, [3, 3], strides=(1, 1),\n transpose=False)\n outputs = leaky_relu(features=outputs)\n outputs = batch_norm(outputs, decay=0.99, scale=True,\n center=True, is_training=training,\n renorm=True)\n\n return outputs\n\n def __call__(self, z_inputs, conditional_input, training=False, dropout_rate=0.0):\n \"\"\"\n Apply network on data.\n :param z_inputs: Random noise to inject [batch_size, z_dim]\n :param conditional_input: A batch of images to use as conditionals [batch_size, height, width, channels]\n :param training: Training placeholder or boolean\n :param dropout_rate: Dropout rate placeholder or float\n :return: Returns x_g (generated images), encoder_layers(encoder features), decoder_layers(decoder features)\n \"\"\"\n conditional_input = tf.convert_to_tensor(conditional_input)\n with tf.variable_scope(self.name, reuse=self.reuse):\n # reshape from inputs\n outputs = conditional_input\n encoder_layers = []\n current_layers = [outputs]\n with tf.variable_scope('conv_layers'):\n\n for i, layer_size in enumerate(self.layer_sizes):\n encoder_inner_layers = [outputs]\n with tf.variable_scope('g_conv{}'.format(i)):\n if i==0: #first layer is a single conv layer instead of MultiLayer for best results\n outputs = self.conv_layer(outputs, num_filters=64,\n filter_size=(3, 3), strides=(2, 2))\n outputs = leaky_relu(features=outputs)\n outputs = batch_norm(outputs, decay=0.99, scale=True,\n center=True, is_training=training,\n renorm=True)\n current_layers.append(outputs)\n encoder_inner_layers.append(outputs)\n else:\n for j in range(self.inner_layers[i]): #Build the inner Layers of the MultiLayer\n outputs = self.add_encoder_layer(input=outputs,\n training=training,\n name=\"encoder_layer_{}_{}\".format(i, j),\n layer_to_skip_connect=current_layers,\n num_features=self.layer_sizes[i],\n dim_reduce=False,\n local_inner_layers=encoder_inner_layers,\n dropout_rate=dropout_rate)\n encoder_inner_layers.append(outputs)\n current_layers.append(outputs)\n #add final dim reducing conv layer for this MultiLayer\n outputs = self.add_encoder_layer(input=outputs, name=\"encoder_layer_{}\".format(i),\n training=training, layer_to_skip_connect=current_layers,\n local_inner_layers=encoder_inner_layers,\n num_features=self.layer_sizes[i],\n dim_reduce=True, dropout_rate=dropout_rate)\n current_layers.append(outputs)\n encoder_layers.append(outputs)\n\n g_conv_encoder = outputs\n\n with tf.variable_scope(\"vector_expansion\"): # Used for expanding the z injected noise to match the\n # dimensionality of the various decoder MultiLayers, injecting\n # noise into multiple decoder layers in a skip-connection way\n # improves quality of results. 
We inject in the first 3 decode\n # multi layers\n num_filters = 8\n z_layers = []\n concat_shape = [layer_shape.get_shape().as_list() for layer_shape in encoder_layers]\n\n for i in range(len(self.inner_layers)):\n h = concat_shape[len(encoder_layers) - 1 - i][1]\n w = concat_shape[len(encoder_layers) - 1 - i][1]\n z_dense = tf.layers.dense(z_inputs, h * w * num_filters)\n\n\n # z_reshape_noise = tf.reshape(z_dense, [self.batch_size, h, w, num_filters])\n z_reshape_noise = tf.reshape(z_dense, [self.batch_size, h, w, num_filters])\n num_filters /= 2\n num_filters = int(num_filters)\n print(z_reshape_noise)\n z_layers.append(z_reshape_noise)\n\n outputs = g_conv_encoder\n decoder_layers = []\n current_layers = [outputs]\n with tf.variable_scope('g_deconv_layers'):\n for i in range(len(self.layer_sizes)+1):\n if i<3: #Pass the injected noise to the first 3 decoder layers for sharper result\n outputs = tf.concat([z_layers[i], outputs], axis=3)\n current_layers[-1] = outputs\n idx = len(self.layer_sizes) - 1 - i\n num_features = self.layer_sizes[idx]\n inner_layers = self.inner_layers[idx]\n upscale_shape = encoder_layers[idx].get_shape().as_list()\n if idx<0:\n num_features = self.layer_sizes[0]\n inner_layers = self.inner_layers[0]\n outputs = tf.concat([outputs, conditional_input], axis=3)\n upscale_shape = conditional_input.get_shape().as_list()\n\n with tf.variable_scope('g_deconv{}'.format(i)):\n decoder_inner_layers = [outputs]\n for j in range(inner_layers):\n if i==0 and j==0:\n outputs = self.add_decoder_layer(input=outputs,\n name=\"decoder_inner_conv_{}_{}\"\n .format(i, j),\n training=training,\n layer_to_skip_connect=current_layers,\n num_features=num_features,\n dim_upscale=False,\n local_inner_layers=decoder_inner_layers,\n dropout_rate=dropout_rate)\n decoder_inner_layers.append(outputs)\n else:\n outputs = self.add_decoder_layer(input=outputs,\n name=\"decoder_inner_conv_{}_{}\"\n .format(i, j), training=training,\n layer_to_skip_connect=current_layers,\n num_features=num_features,\n dim_upscale=False,\n local_inner_layers=decoder_inner_layers,\n w_size=upscale_shape[1],\n h_size=upscale_shape[2],\n dropout_rate=dropout_rate)\n decoder_inner_layers.append(outputs)\n current_layers.append(outputs)\n decoder_layers.append(outputs)\n\n if idx>=0:\n upscale_shape = encoder_layers[idx - 1].get_shape().as_list()\n if idx == 0:\n upscale_shape = conditional_input.get_shape().as_list()\n outputs = self.add_decoder_layer(\n input=outputs,\n name=\"decoder_outer_conv_{}\".format(i),\n training=training,\n layer_to_skip_connect=current_layers,\n num_features=num_features,\n dim_upscale=True, local_inner_layers=decoder_inner_layers, w_size=upscale_shape[1],\n h_size=upscale_shape[2], dropout_rate=dropout_rate)\n current_layers.append(outputs)\n if (idx-1)>=0:\n outputs = tf.concat([outputs, encoder_layers[idx-1]], axis=3)\n current_layers[-1] = outputs\n\n high_res_layers = []\n\n for p in range(2):\n outputs = self.conv_layer(outputs, self.layer_sizes[0], [3, 3], strides=(1, 1),\n transpose=False)\n outputs = leaky_relu(features=outputs)\n\n outputs = batch_norm(outputs,\n decay=0.99, scale=True,\n center=True, is_training=training,\n renorm=True)\n high_res_layers.append(outputs)\n outputs = self.conv_layer(outputs, self.num_channels, [3, 3], strides=(1, 1),\n transpose=False)\n # output images\n with tf.variable_scope('g_tanh'):\n gan_decoder = tf.tanh(outputs, name='outputs')\n\n self.reuse = True\n self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 
scope=self.name)\n\n if self.build:\n print(\"generator_total_layers\", self.conv_layer_num)\n count_parameters(self.variables, name=\"generator_parameter_num\")\n self.build = False\n return gan_decoder, encoder_layers, decoder_layers\n\n\nclass Discriminator:\n def __init__(self, batch_size, layer_sizes, inner_layers, use_wide_connections=False, name=\"d\"):\n \"\"\"\n Initialize a discriminator network.\n :param batch_size: Batch size for discriminator.\n :param layer_sizes: A list with the feature maps for each MultiLayer.\n :param inner_layers: An integer indicating the number of inner layers.\n \"\"\"\n self.reuse = False\n self.batch_size = batch_size\n self.layer_sizes = layer_sizes\n self.inner_layers = inner_layers\n self.conv_layer_num = 0\n self.use_wide_connections = use_wide_connections\n self.build = True\n self.name = name\n\n def upscale(self, x, scale):\n \"\"\"\n Upscales an image using nearest neighbour\n :param x: Input image\n :param h_size: Image height size\n :param w_size: Image width size\n :return: Upscaled image\n \"\"\"\n [b, h, w, c] = [int(dim) for dim in x.get_shape()]\n\n return tf.image.resize_nearest_neighbor(x, (h * scale, w * scale))\n\n def conv_layer(self, inputs, num_filters, filter_size, strides, activation=None, transpose=False):\n \"\"\"\n Add a convolutional layer to the network.\n :param inputs: Inputs to the conv layer.\n :param num_filters: Num of filters for conv layer.\n :param filter_size: Size of filter.\n :param strides: Stride size.\n :param activation: Conv layer activation.\n :param transpose: Whether to apply upscale before convolution.\n :return: Convolution features\n \"\"\"\n self.conv_layer_num += 1\n if transpose:\n outputs = tf.layers.conv2d_transpose(inputs, num_filters, filter_size, strides=strides,\n padding=\"SAME\", activation=activation)\n elif not transpose:\n outputs = tf.layers.conv2d(inputs, num_filters, filter_size, strides=strides,\n padding=\"SAME\", activation=activation)\n return outputs\n\n def add_encoder_layer(self, input, name, training, layer_to_skip_connect, local_inner_layers, num_features,\n dim_reduce=False, dropout_rate=0.0):\n\n \"\"\"\n Adds a resnet encoder layer.\n :param input: The input to the encoder layer\n :param training: Flag for training or validation\n :param dropout_rate: A float or a placeholder for the dropout rate\n :param layer_to_skip_connect: Layer to skip-connect this layer to\n :param local_inner_layers: A list with the inner layers of the current Multi-Layer\n :param num_features: Number of feature maps for the convolutions\n :param dim_reduce: Boolean value indicating if this is a dimensionality reducing layer or not\n :return: The output of the encoder layer\n :return:\n \"\"\"\n [b1, h1, w1, d1] = input.get_shape().as_list()\n if layer_to_skip_connect is not None:\n [b0, h0, w0, d0] = layer_to_skip_connect.get_shape().as_list()\n\n if h0 > h1:\n skip_connect_layer = self.conv_layer(layer_to_skip_connect, int(layer_to_skip_connect.get_shape()[3]),\n [3, 3], strides=(2, 2))\n else:\n skip_connect_layer = layer_to_skip_connect\n else:\n skip_connect_layer = layer_to_skip_connect\n current_layers = [input, skip_connect_layer]\n current_layers.extend(local_inner_layers)\n current_layers = remove_duplicates(current_layers)\n outputs = tf.concat(current_layers, axis=3)\n if dim_reduce:\n outputs = self.conv_layer(outputs, num_features, [3, 3], strides=(2, 2))\n outputs = leaky_relu(features=outputs)\n outputs = layer_norm(inputs=outputs, center=True, scale=True)\n outputs = 
tf.layers.dropout(outputs, rate=dropout_rate, training=training)\n else:\n outputs = self.conv_layer(outputs, num_features, [3, 3], strides=(1, 1))\n outputs = leaky_relu(features=outputs)\n outputs = layer_norm(inputs=outputs, center=True, scale=True)\n\n return outputs\n\n\n def __call__(self, conditional_input, generated_input, training=False, dropout_rate=0.0):\n \"\"\"\n :param conditional_input: A batch of conditional inputs (x_i) of size [batch_size, height, width, channel]\n :param generated_input: A batch of generated inputs (x_g) of size [batch_size, height, width, channel]\n :param training: Placeholder for training or a boolean indicating training or validation\n :param dropout_rate: A float placeholder for dropout rate or a float indicating the dropout rate\n :param name: Network name\n :return:\n \"\"\"\n conditional_input = tf.convert_to_tensor(conditional_input)\n generated_input = tf.convert_to_tensor(generated_input)\n with tf.variable_scope(self.name, reuse=self.reuse):\n concat_images = tf.concat([conditional_input, generated_input], axis=3)\n outputs = concat_images\n encoder_layers = []\n current_layers = [outputs]\n with tf.variable_scope('conv_layers'):\n for i, layer_size in enumerate(self.layer_sizes):\n encoder_inner_layers = [outputs]\n with tf.variable_scope('g_conv{}'.format(i)):\n if i == 0:\n outputs = self.conv_layer(outputs, num_filters=64,\n filter_size=(3, 3), strides=(2, 2))\n outputs = leaky_relu(features=outputs)\n outputs = layer_norm(inputs=outputs, center=True, scale=True)\n current_layers.append(outputs)\n else:\n for j in range(self.inner_layers[i]):\n outputs = self.add_encoder_layer(input=outputs,\n name=\"encoder_inner_conv_{}_{}\"\n .format(i, j), training=training,\n layer_to_skip_connect=current_layers[-2],\n num_features=self.layer_sizes[i],\n dropout_rate=dropout_rate,\n dim_reduce=False,\n local_inner_layers=encoder_inner_layers)\n current_layers.append(outputs)\n encoder_inner_layers.append(outputs)\n outputs = self.add_encoder_layer(input=outputs,\n name=\"encoder_outer_conv_{}\"\n .format(i),\n training=training,\n layer_to_skip_connect=\n current_layers[-2],\n local_inner_layers=\n encoder_inner_layers,\n num_features=self.layer_sizes[i],\n dropout_rate=dropout_rate,\n dim_reduce=True)\n current_layers.append(outputs)\n encoder_layers.append(outputs)\n\n\n with tf.variable_scope('discriminator_dense_block'):\n if self.use_wide_connections:\n mean_encoder_layers = []\n concat_encoder_layers = []\n for layer in encoder_layers:\n mean_encoder_layers.append(tf.reduce_mean(layer, axis=[1, 2]))\n concat_encoder_layers.append(tf.layers.flatten(layer))\n feature_level_flatten = tf.concat(mean_encoder_layers, axis=1)\n location_level_flatten = tf.concat(concat_encoder_layers, axis=1)\n else:\n feature_level_flatten = tf.reduce_mean(encoder_layers[-1], axis=[1, 2])\n location_level_flatten = tf.layers.flatten(encoder_layers[-1])\n\n feature_level_dense = tf.layers.dense(feature_level_flatten, units=1024, activation=leaky_relu)\n combo_level_flatten = tf.concat([feature_level_dense, location_level_flatten], axis=1)\n with tf.variable_scope('discriminator_out_block'):\n outputs = tf.layers.dense(combo_level_flatten, 1, name='outputs')\n\n self.reuse = True\n self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)\n #view_names_of_variables(self.variables)\n if self.build:\n print(\"discr layers\", self.conv_layer_num)\n count_parameters(self.variables, name=\"discriminator_parameter_num\")\n self.build = False\n 
return outputs, current_layers\n"
]
| [
[
"tensorflow.layers.dropout",
"tensorflow.contrib.layers.batch_norm",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.layers.flatten",
"tensorflow.python.ops.nn_ops.leaky_relu",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.reshape",
"tensorflow.variable_scope",
"tensorflow.layers.conv2d",
"tensorflow.layers.dense",
"tensorflow.layers.conv2d_transpose",
"tensorflow.tanh",
"tensorflow.image.resize_images",
"tensorflow.get_collection",
"tensorflow.reduce_mean"
]
]
|
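`UResNetGenerator.conv_layer` in the entry above upsamples by resizing with nearest-neighbour interpolation before convolving, rather than relying on a strided transposed convolution alone. A simplified sketch of that resize-then-convolve pattern, written against the TF2 Keras API as an assumption (the repo itself uses the TF1-era `tf.layers` and `tf.contrib` calls, and pairs the resize with `conv2d_transpose`):

```python
import tensorflow as tf

# Nearest-neighbour upscale followed by a stride-1 convolution: the
# upsampling pattern used by the generator's transpose=True branch.
def upscale_conv(x, num_filters, h_size, w_size):
    x = tf.image.resize(x, (h_size, w_size), method='nearest')
    return tf.keras.layers.Conv2D(num_filters, 3, strides=1, padding='same')(x)

x = tf.random.normal((4, 16, 16, 64))   # [batch, height, width, channels]
y = upscale_conv(x, num_filters=32, h_size=32, w_size=32)
print(y.shape)                          # (4, 32, 32, 32)
```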
frgfm/pair-assignment | [
"2a40fbead16f7cf9ea0adcfa10fda0d2d2998d37"
]
| [
"main.py"
]
| [
"#!/usr/bin/env python\n\n'''\nThese functions were developped to provide optimisation solutions for assignment problems\n'''\n\n__author__ = 'François-Guillaume Fernandez'\n__license__ = 'MIT License'\n__version__ = '0.1'\n__maintainer__ = 'François-Guillaume Fernandez'\n__status__ = 'Development'\n\nfrom assignment import bf_assign, approx_assign\nimport numpy as np\nfrom datetime import datetime\n\n\ndef main():\n # Generate random cost matrix\n nb_agents = 13\n cost_matrix = np.random.rand(nb_agents, nb_agents)\n for row_idx in range(cost_matrix.shape[0]):\n for col_idx in range(row_idx, cost_matrix.shape[1]):\n if row_idx == col_idx:\n cost_matrix[row_idx, col_idx] = 0\n else:\n cost_matrix[col_idx, row_idx] = cost_matrix[row_idx, col_idx]\n\n # Brute Force\n start_time = datetime.now()\n bf_assignment, bf_score = bf_assign(cost_matrix)\n print('Brute force score: %s (computed in %s)' % (np.sum([cost_matrix[couple] for couple in bf_assignment]), datetime.now() - start_time))\n\n # Approx method\n start_time = datetime.now()\n ap_assignment = approx_assign(cost_matrix)\n print('Approx method score: %s (computed in %s)' % (np.sum([cost_matrix[couple] for couple in ap_assignment]), datetime.now() - start_time))\n\n\nif __name__ == \"__main__\":\n main()\n"
]
| [
[
"numpy.sum",
"numpy.random.rand"
]
]
|
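For comparison with the repo's `bf_assign`/`approx_assign` helpers (defined in its `assignment` module, which is not shown here), the same symmetric cost-matrix setup can be solved exactly in polynomial time with SciPy's Hungarian-algorithm implementation. This sketch is an illustrative baseline, not code from the repo:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Same cost-matrix setup as main() above: symmetric with zero diagonal.
nb_agents = 13
cost_matrix = np.random.rand(nb_agents, nb_agents)
cost_matrix = np.triu(cost_matrix, k=1)   # keep the strict upper triangle
cost_matrix = cost_matrix + cost_matrix.T
np.fill_diagonal(cost_matrix, 1e9)        # large penalty forbids self-assignment

row_ind, col_ind = linear_sum_assignment(cost_matrix)
print('Optimal score:', cost_matrix[row_ind, col_ind].sum())
```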
jemorrison/jwst_reffiles | [
"9fca3901b059c72f14d05272e314f816bee55e99"
]
| [
"jwst_reffiles/bad_pixel_mask/bad_pixel_mask.py"
]
| [
"#! /usr/bin/env python\n\n\"\"\"Wrapper script that calls badpix_from_flats.py and badpix_from_darks.py.\nThe results are combined to create a single bad pixel reference file.\n\"\"\"\nimport copy\nimport datetime\nimport os\n\nfrom astropy.io import fits\nfrom jwst.datamodels import MaskModel, util\nimport numpy as np\n\nfrom jwst_reffiles.bad_pixel_mask import badpix_from_flats\nfrom jwst_reffiles.dark_current import badpix_from_darks\n\n# Flat field-related header keywords\ndead_search_kw = 'BPFDEAD'\nlow_qe_search_kw = 'BPFLOWQE'\ndead_search_type_kw = 'BPFSCHTP'\nmean_sig_threshold_kw = 'BPFSIGMA'\nnorm_method_kw = 'BPFNORM'\nsmooth_box_width_kw = 'BPFSMOTH'\nsmoothing_type_kw = 'BPFSMTYP'\ndead_sig_thresh_kw = 'BPFDEDSG'\ndead_zero_sig_frac_kw = 'BPFZEROF'\ndead_flux_check_kw = 'BPFFXCHK'\n#dead_flux_file_kw = 'BPFFXFIL'\nmax_dead_sig_kw = 'BPFMXDED'\nmanual_flag_kw = 'BPFMANFL'\nflat_do_not_use_kw = 'BPFDONOT'\nmax_low_qe_kw = 'BPFMXLQE'\nmax_open_adj_kw = 'BPFMXOAD'\n\n# Dark current-related header keywords\nbad_from_dark_kw = 'BPDSERCH'\ndark_clip_sigma_kw = 'BPDCLPSG'\ndark_clip_iters_kw = 'BPDCLPIT'\ndark_noisy_thresh_kw = 'BPDNSETH'\nmax_sat_frac_kw = 'BPDMXSAT'\njump_limit_kw = 'BPDMXJMP'\njump_ratio_thresh_kw = 'BPDJPRAT'\ncutoff_frac_kw = 'BPDCUTFC'\npedestal_sig_thresh_kw = 'BPDPEDTH'\nrc_frac_thresh_kw = 'BPDRCTH'\nlow_ped_frac_kw = 'BPDLOPFC'\nhigh_cr_frac_kw = 'BPDCRFC'\ndark_do_not_use_kw = 'BPDDONOT'\nflag_mapping_kw = 'BPDMAPS'\n\n\ndef bad_pixels(flat_slope_files=None, dead_search=True, low_qe_and_open_search=True,\n dead_search_type='sigma_rate', flat_mean_sigma_threshold=3, flat_mean_normalization_method='smoothed',\n smoothing_box_width=15, smoothing_type='Box2D', dead_sigma_threshold=5., max_dead_norm_signal=None,\n run_dead_flux_check=False, dead_flux_check_files=None, flux_check=45000, max_low_qe_norm_signal=0.5,\n max_open_adj_norm_signal=1.05, manual_flag_file='default', flat_do_not_use=[],\n dark_slope_files=None, dark_uncal_files=None, dark_jump_files=None, dark_fitopt_files=None,\n dark_stdev_clipping_sigma=5., dark_max_clipping_iters=5,\n dark_noisy_threshold=5, max_saturated_fraction=0.5, max_jump_limit=10, jump_ratio_threshold=5,\n early_cutoff_fraction=0.25, pedestal_sigma_threshold=5, rc_fraction_threshold=0.8, low_pedestal_fraction=0.8,\n high_cr_fraction=0.8,\n flag_values={'hot': ['HOT'], 'rc': ['RC'], 'low_pedestal': ['OTHER_BAD_PIXEL'], 'high_cr': [\"TELEGRAPH\"]},\n dark_do_not_use=['hot', 'rc', 'low_pedestal', 'high_cr'], plot=False,\n output_file=None, author='jwst_reffiles', description='A bad pix mask',\n pedigree='GROUND', useafter='2019-04-01 00:00:00', history='', quality_check=False):\n \"\"\"\n Wrapper that calls the two modules for finding bad pixels from input flat\n field files, and bad pixels from dark current files.\n\n Parameters\n ----------\n flat_slope_files : list\n List of flat field slope files to be used for the dead pixel search.\n If None, the search is skipped.\n\n dead_search : bool\n Whether or not to search for DEAD pixels using the flat field files\n\n low_qe_and_open_search : bool\n Whether or not to search for LOW_QE, OPEN, and ADJ_OPEN pixels using\n the flat field files\n\n dead_search_type : str\n Type of search to use when looking for dead pixels. 
Options are:\n ``sigma_rate``: Using a normalized signal rate image, dead pixels\n are defined as those with a rate smaller than\n ``dead_sigma_threshold`` standard deviations below\n the mean.\n ``absolute_rate``: Using a normalized signal rate image, dead pixels\n are defined as those with a rate less than\n ``max_dead_norm_signal``.\n\n flat_mean_sigma_threshold : float\n Number of standard deviations to use when sigma-clipping to\n calculate the mean slope image or the mean across the detector\n\n flat_mean_normalization_method : str\n Specify how the mean image is normalized prior to searching for\n bad pixels. Options are:\n 'smoothed': Mean image will be smoothed using a\n ``smoothing_box_width`` x ``smoothing_box_width``\n box kernel. The mean image is then normalized by\n this smoothed image.\n 'none': No normalization is done. Mean slope image is used as is\n 'mean': Mean image is normalized by its sigma-clipped mean\n 'fit2d': Mean image will be normalized by a fit of 2-D surface to\n mean image. The degree of the fit is controlled by the\n ``fit_degree`` parameters\n\n smoothing_box_width : float\n Width in pixels of the box kernel to use to compute the smoothed\n mean image\n\n smoothing_typ : string\n Type of smoothing to do ``Box2D `` or ``median`` filtering\n\n smoothing_sigma : float\n Number of standard deviations to use when smoothing in a box defined\n by smoothing_box_width.\n\n dead_sigma_threshold : float\n Number of standard deviations below the mean at which a pixel is\n considered dead.\n\n max_dead_norm_signal : float\n Maximum normalized signal rate of a pixel that is considered dead\n\n run_dead_flux_check : bool\n Whether or not to check for dead pixels using an absolute flux value\n\n dead_flux_check_files : list\n List of ramp (uncalibrated) files to use to check the flux of average\n of last 4 groups. If None then the uncalibration files are not read in\n and no flux_check is done.\n\n flux_check: float\n Tolerance on average signal in last 4 groups. If dead_flux_check_files is\n a list of uncalibrated files, then the average of the last four groups\n for all the integrations is determined. If this average > flux_check\n then this pixel is not a dead pixel.\n\n max_low_qe_norm_signal: float\n The maximum normalized signal a pixel can have and be considered\n low QE.\n\n max_open_adj_norm_signal : float\n The maximum normalized signal a pixel adjacent to a low QE pixel can have\n in order for the low QE pixel to be reclassified as OPEN\n\n manual_flag_file : str\n Name of the ascii file containing a list of pixels to be added manually\n to the output bad pixel mask file. Default is 'default', in which case\n the file contained in the ``bad_pixel_mask`` directory of the repo will\n be used.\n\n flat_do_not_use : list\n List of bad pixel types where the DO_NOT_USE flag should also be\n applied (e.g. ['DEAD', 'LOW_QE'])\n\n dark_slope_files : list\n List of dark current slope files to be used for the noisy pixel search.\n If None, the search is skipped.\n\n dark_uncal_files : list\n List of uncalibrated dark current ramp files. These should correspond\n 1-to-1 with the files listed in ``dark_slope_files``. 
If None,\n the code assumes the files are in the same location as the slope\n files and have names ending in uncal.fits\n\n dark_jump_files : list\n List of dark current ramp files output from the jump step of the pipeline.\n These should correspond 1-to-1 with the files listed in ``dark_slope_files``.\n If None, the code assumes the files are in the same location as the slope\n files and have names ending in jump.fits\n\n dark_fitopt_files : list\n List of optional output files produced by the ramp-fitting step of the\n pipeline. These should correspond 1-to-1 with the files listed in\n ``dark_slope_files``. If None, the code assumes the files are in the\n same location as the slope files and have names ending in fitopt.fits\n\n dark_stdev_clipping_sigma : int\n Number of sigma to use when sigma-clipping the 2D array of\n standard deviation values. The sigma-clipped mean and standard\n deviation are used to locate noisy pixels.\n\n dark_max_clipping_iters : int\n Maximum number of iterations to use when sigma clipping to find\n the mean and standard deviation values that are used when\n locating noisy pixels.\n\n dark_noisy_threshold : int\n Number of sigma above the mean noise (associated with the slope)\n to use as a threshold for identifying noisy pixels.\n\n max_saturated_fraction : float\n When identifying pixels that are fully saturated (in all groups\n of an integration), this is the fraction of integrations within\n which a pixel must be fully saturated before flagging it as HOT\n\n max_jump_limit : int\n The maximum number of jumps a pixel can have in an integration\n before it is flagged as a ``high jump`` pixel (which may be\n flagged as noisy later)\n\n jump_ratio_threshold : int\n Cutoff for the ratio of jumps early in the ramp to jumps later in\n the ramp. Pixels with a ratio greater than this value (and which\n also have a high total number of jumps) will be flagged as\n potential (I)RC pixels.\n\n early_cutoff_fraction : float\n Fraction of the integration to use when comparing the jump rate\n early in the integration to that across the entire integration.\n Must be <= 0.5\n\n pedestal_sigma_threshold : int\n Used when searching for RC pixels via the pedestal image. Pixels\n with pedestal values more than ``pedestal_sigma_threshold`` above\n the mean are flagged as potential RC pixels\n\n rc_fraction_threshold : float\n Used when searching for RC pixels. This is the fraction of input\n files within which the pixel must be identified as an RC pixel\n before it will be flagged as a permanent RC pixel\n\n low_pedestal_fraction : float\n This is the fraction of input files within which a pixel must be\n identified as a low pedestal pixel before it will be flagged as\n a permanent low pedestal pixel\n\n high_cr_fraction : float\n This is the fraction of input files within which a pixel must be\n flagged as having a high number of jumps before it will be flagged\n as permanently noisy\n\n flag_values : dict\n This dictionary maps the types of bad pixels searched for to the\n flag mnemonics to use when creating the bad pixel file. Keys are\n the types of bad pixels searched for, and values are lists that\n include mnemonics recognized by the jwst calibration pipeline\n e.g. {'hot': ['HOT'], 'rc': ['RC'], 'low_pedestal': ['OTHER_BAD_PIXEL'], 'high_cr': [\"TELEGRAPH\"]}\n\n dark_do_not_use : list\n List of bad pixel types to be flagged as DO_NOT_USE\n e.g. 
['hot', 'rc', 'low_pedestal', 'high_cr']\n\n plot : bool\n If True, produce and save intermediate results from noisy pixel search\n\n output_file : str\n Name of the CRDS-formatted bad pixel reference file to save the final\n bad pixel map into\n\n author : str\n CRDS-required name of the reference file author, to be placed in the\n referece file header\n\n description : str\n CRDS-required description of the reference file, to be placed in the\n reference file header\n\n pedigree : str\n CRDS-required pedigree of the data used to create the reference file\n\n useafter : str\n CRDS-required date of earliest data with which this referece file\n should be used. (e.g. '2019-04-01 00:00:00')\n\n history : str\n Text to be added to the HISOTRY section of the output bad pixel file\n\n quality_check : bool\n If True, the pipeline is run using the output reference file to be\n sure the pipeline doens't crash\n \"\"\"\n instrument = None\n detector = None\n all_files = []\n history = [history]\n hdu = fits.PrimaryHDU()\n\n if flat_slope_files is not None:\n all_files = copy.deepcopy(flat_slope_files)\n instrument, detector = instrument_info(flat_slope_files[0])\n\n # Get output filenames\n if output_file is None:\n output_file = create_output_filename(instrument, detector)\n\n flat_output_file = output_file.replace('.fits', '_from_flats.fits')\n\n # Get bad pixels from the flats\n flatmask = badpix_from_flats.find_bad_pix(flat_slope_files, dead_search=dead_search,\n low_qe_and_open_search=low_qe_and_open_search,\n dead_search_type=dead_search_type,\n sigma_threshold=flat_mean_sigma_threshold,\n normalization_method=flat_mean_normalization_method,\n smoothing_type=smoothing_type,\n smoothing_box_width=smoothing_box_width,\n dead_sigma_threshold=dead_sigma_threshold,\n #dead_zero_signal_fraction=dead_zero_signal_fraction,\n run_dead_flux_check=run_dead_flux_check,\n dead_flux_check_files=dead_flux_check_files,\n max_dead_norm_signal=max_dead_norm_signal,\n manual_flag_file=manual_flag_file,\n max_low_qe_norm_signal=max_low_qe_norm_signal,\n max_open_adj_norm_signal=max_open_adj_norm_signal,\n do_not_use=flat_do_not_use,\n output_file=flat_output_file,\n author=author,\n description=description,\n pedigree=pedigree,\n useafter=useafter,\n history=history[0],\n quality_check=quality_check)\n\n # Convert the do not use list to a string to add to the header\n if len(flat_do_not_use) > 0:\n flat_do_not_use_string = ', '.join(flat_do_not_use)\n else:\n flat_do_not_use_string = 'None'\n flat_do_not_use_string = '{} {}'.format('Bad pixel types from flat to which DO_NOT_USE is applied: ', flat_do_not_use_string)\n\n # Add the do not use string to the list of history entries to add,\n # since it may end up being longer than 8 characters\n history.append(flat_do_not_use_string)\n\n # Define the non-standard fits header keywords by placing them in a\n # fits HDU List\n hdu.header[dead_search_kw] = dead_search\n hdu.header[low_qe_search_kw] = low_qe_and_open_search\n hdu.header[dead_search_type_kw] = dead_search_type\n hdu.header[mean_sig_threshold_kw] = flat_mean_sigma_threshold\n hdu.header[norm_method_kw] = flat_mean_normalization_method\n hdu.header[smooth_box_width_kw] = smoothing_box_width\n hdu.header[dead_sig_thresh_kw] = dead_sigma_threshold\n #hdu.header[dead_zero_sig_frac_kw] = dead_zero_signal_fraction\n hdu.header[dead_flux_check_kw] = run_dead_flux_check\n #hdu.header[dead_flux_file_kw] = dead_flux_check_files\n hdu.header[max_dead_sig_kw] = max_dead_norm_signal\n hdu.header[manual_flag_kw] = 
manual_flag_file\n hdu.header[max_low_qe_kw] = max_low_qe_norm_signal\n hdu.header[max_open_adj_kw] = max_open_adj_norm_signal\n\n else:\n flatmask = 0\n hdu.header[dead_search_kw] = False\n hdu.header[low_qe_search_kw] = False\n\n if dark_slope_files is not None:\n if len(all_files) == 0:\n all_files = copy.deepcopy(dark_slope_files)\n instrument, detector = instrument_info(dark_slope_files[0])\n else:\n all_files = all_files + dark_slope_files\n\n # Get output filenames\n if output_file is None:\n output_file = create_output_filename(instrument, detector)\n\n dark_output_file = output_file.replace('.fits', '_from_darks.fits')\n\n # Get bad pixels from the darks\n darkmask = badpix_from_darks.find_bad_pix(dark_slope_files, uncal_filenames=dark_uncal_files,\n jump_filenames=dark_jump_files,\n fitopt_filenames=dark_fitopt_files,\n clipping_sigma=dark_stdev_clipping_sigma,\n max_clipping_iters=dark_max_clipping_iters,\n noisy_threshold=dark_noisy_threshold,\n max_saturated_fraction=max_saturated_fraction,\n max_jump_limit=max_jump_limit,\n jump_ratio_threshold=jump_ratio_threshold,\n early_cutoff_fraction=early_cutoff_fraction,\n pedestal_sigma_threshold=pedestal_sigma_threshold,\n rc_fraction_threshold=rc_fraction_threshold,\n low_pedestal_fraction=low_pedestal_fraction,\n high_cr_fraction=high_cr_fraction,\n flag_values=flag_values,\n do_not_use=dark_do_not_use,\n outfile=dark_output_file, plot=False)\n\n # Convert the do not use list to a string to add to the header\n if len(dark_do_not_use) > 0:\n dark_do_not_use_string = ', '.join(dark_do_not_use)\n else:\n dark_do_not_use_string = 'None'\n dark_do_not_use_string = '{} {}'.format('Bad pixel types from dark to which DO_NOT_USE is applied: ', dark_do_not_use_string)\n\n # Add the do not use string to the list of history entries to add,\n # since it may end up being longer than 8 characters\n history.append(dark_do_not_use_string)\n\n # Convert the bad pixel type mapping into a string so it can be\n # added to the output header\n if len(flag_values) > 0:\n mapping_str = ''\n for key in flag_values:\n substr = '{}: {}, '.format(key, flag_values[key])\n mapping_str = mapping_str + substr\n else:\n mapping_str = 'None'\n mapping_str = '{} {}'.format('Mapping of jwst_reffiles bad pixel types to jwst cal bad pixel flags: ', mapping_str)\n\n # Add the do not use string to the list of history entries to add,\n # since it may end up being longer than 8 characters\n history.append(mapping_str)\n\n # Define the non-standard fits header keywords by placing them in a\n # fits HDU List\n hdu.header[bad_from_dark_kw] = True\n hdu.header[dark_clip_sigma_kw] = dark_stdev_clipping_sigma\n hdu.header[dark_clip_iters_kw] = dark_max_clipping_iters\n hdu.header[dark_noisy_thresh_kw] = dark_noisy_threshold\n hdu.header[max_sat_frac_kw] = max_saturated_fraction\n hdu.header[jump_limit_kw] = max_jump_limit\n hdu.header[jump_ratio_thresh_kw] = jump_ratio_threshold\n hdu.header[cutoff_frac_kw] = early_cutoff_fraction\n hdu.header[pedestal_sig_thresh_kw ] = pedestal_sigma_threshold\n hdu.header[rc_frac_thresh_kw ] = rc_fraction_threshold\n hdu.header[low_ped_frac_kw] = low_pedestal_fraction\n hdu.header[high_cr_frac_kw] = high_cr_fraction\n\n else:\n darkmask = 0.\n hdu.header[bad_from_dark_kw] = False\n\n # Combine the two masks\n final_mask = np.bitwise_or(flatmask, darkmask)\n\n # Some pixels that are saturated in all groups may be flagged as hot and dead,\n # because the slope in the flat ramp appears to be zero, while the more in-depth\n # checking of the darks 
shows that the pixel is saturated the entire time. For any\n # pixels flagged as both, keep only the hot flag and throw out the dead flag.\n hot_and_or_dead = (final_mask & dqflags.pixel['HOT']) + (final_mask & dqflags.pixel['DEAD'])\n hot_and_dead = hot_and_or_dead == (dqflags.pixel['HOT'] + dqflags.pixel['DEAD'])\n final_mask[hot_and_dead] = np.bitwise_xor(final_mask[hot_and_dead], dqflags.pixel['DEAD'])\n\n # Save mask in reference file\n hdu_list = fits.HDUList([hdu])\n save_final_map(final_mask, instrument.upper(), detector.upper(), hdu_list, all_files, author, description,\n pedigree, useafter, history, output_file)\n\n\ndef create_output_filename(inst_name, det_name):\n \"\"\"Create a default output filename for the bad pixel mask given\n instrument and detector names\n\n Parameters\n ----------\n inst_name : str\n Instrument name\n\n det_name : str\n Detector name\n\n Returns\n -------\n outfile : str\n Default bad pixel mask filename\n \"\"\"\n # Add in timestamp as a way to prevent overwriting past runs\n current_time = datetime.datetime.now()\n\n # Use the current working directory\n outfile = '{}_{}_{}_badpix_mask.fits'.format(inst_name, det_name, current_time)\n outfile = os.path.join(os.getcwd(), outfile)\n return outfile\n\n\ndef instrument_info(filename):\n \"\"\"Get the instrument and detector name from the header of the\n input file\n\n Parameters\n ----------\n filename : str\n Name of fits file\n\n Returns\n -------\n inst : str\n Instrument name\n\n det : str\n Detector name\n \"\"\"\n with fits.open(filename) as hdulist:\n try:\n inst = hdulist[0].header['INSTRUME'].lower()\n except KeyError:\n raise KeyError(\"ERROR: expecting instrument name in main header of {}\".format(filename))\n\n try:\n det = hdulist[0].header['DETECTOR'].lower()\n except KeyError:\n raise KeyError(\"ERROR: expecting detector name in main header of {}\".format(filename))\n return inst, det\n\n\ndef save_final_map(bad_pix_map, instrument, detector, hdulist, files, author, description, pedigree, useafter,\n history_text, outfile):\n \"\"\"Save a bad pixel map into a CRDS-formatted reference file\n\n Parameters\n ----------\n bad_pix_map : numpy.ndarray\n 2D bad pixel array\n\n instrument : str\n Name of instrument associated with the bad pixel array\n\n detector : str\n Name of detector associated with the bad pixel array\n\n hdulist : astropy.fits.HDUList\n HDUList containing \"extra\" fits keywords\n\n files : list\n List of files used to create ``bad_pix_map``\n\n author : str\n Author of the bad pixel mask reference file\n\n description : str\n CRDS description to use in the final bad pixel file\n\n pedigree : str\n CRDS pedigree to use in the final bad pixel file\n\n useafter : str\n CRDS useafter string for the bad pixel file\n\n history_text : list\n List of strings to add as HISTORY entries to the bad pixel file\n\n outfile : str\n Name of the output bad pixel file\n \"\"\"\n yd, xd = bad_pix_map.shape\n\n # Initialize the MaskModel using the hdu_list, so the new keywords will\n # be populated\n model = MaskModel(hdulist)\n model.dq = bad_pix_map\n\n # Create dq_def data\n dq_def = badpix_from_flats.create_dqdef()\n model.dq_def = dq_def\n model.meta.reftype = 'MASK'\n model.meta.subarray.name = 'FULL'\n model.meta.subarray.xstart = 1\n model.meta.subarray.xsize = xd\n model.meta.subarray.ystart = 1\n model.meta.subarray.ysize = yd\n model.meta.instrument.name = instrument.upper()\n model.meta.instrument.detector = detector\n\n # Get the fast and slow axis directions from one of the 
input files\n fastaxis, slowaxis = badpix_from_flats.get_fastaxis(files[0])\n model.meta.subarray.fastaxis = fastaxis\n model.meta.subarray.slowaxis = slowaxis\n\n model.meta.author = author\n model.meta.description = description\n model.meta.pedigree = pedigree\n model.meta.useafter = useafter\n\n # Add information about parameters used\n # Parameters from badpix_from_flats\n package_note = ('This file was created using the bad_pixel_mask.py module within the '\n 'jwst_reffiles package.')\n\n software_dict = {'name': 'jwst_reffiles.bad_pixel_mask.bad_pixel_mask.py', 'author': 'STScI',\n 'homepage': 'https://github.com/spacetelescope/jwst_reffiles',\n 'version': '0.0.0'}\n entry = util.create_history_entry(package_note, software=software_dict)\n model.history.append(entry)\n\n model.history.append(util.create_history_entry('Parameter values and descriptions:'))\n dead_search_descrip = ('dead_search: Boolean, whether or not to run the dead pixel search '\n 'using flat field files. The value is stored in the {} keyword.'.format(dead_search_kw))\n model.history.append(util.create_history_entry(dead_search_descrip))\n\n low_qe_search_descrip = ('low_qe_and_open_search: Boolean, whether or not to run the low QE '\n 'and open pixel search using flat field files. The value is stored in the {} '\n 'keyword.'.format(low_qe_search_kw))\n model.history.append(util.create_history_entry(low_qe_search_descrip))\n\n dead_type_descrip = ('dead_search_type: Method used to identify dead pixels. The value is stored in the '\n '{} keyword.'.format(dead_search_type_kw))\n model.history.append(util.create_history_entry(dead_type_descrip))\n\n sigma_descrip = ('flat_mean_sigma_threshold: Number of standard deviations to use when sigma-clipping to '\n 'calculate the mean slope image or the mean across the detector. The value '\n 'used is stored in the {} keyword.'.format(mean_sig_threshold_kw))\n model.history.append(util.create_history_entry(sigma_descrip))\n\n norm_descrip = ('flat_mean_normalization_method: Specify how the mean image is normalized prior to searching '\n 'for bad pixels. The value used is stored in the {} keyword.'.format(norm_method_kw))\n model.history.append(util.create_history_entry(norm_descrip))\n\n smooth_descrip = ('smoothing_box_width: Width in pixels of the box kernel to use to compute the '\n 'smoothed mean image. The value used is stored in the {} keyword.'.format(smooth_box_width_kw))\n model.history.append(util.create_history_entry(smooth_descrip))\n\n smooth_type_descrip = ('smoothing_type: Type of smoothing to do: Box2D or median filtering. The value used '\n 'is stored in the {} keyword.'.format(smoothing_type_kw))\n model.history.append(util.create_history_entry(smooth_type_descrip))\n\n dead_sig_descrip = ('Number of standard deviations below the mean at which a pixel is considered dead. '\n 'The value used is stored in the {} keyword.'.format(dead_sig_thresh_kw))\n model.history.append(util.create_history_entry(dead_sig_descrip))\n\n max_dead_descrip = ('Maximum normalized signal rate of a pixel that is considered dead. The value '\n 'used is stored in the {} keyword.'.format(max_dead_sig_kw))\n model.history.append(util.create_history_entry(max_dead_descrip))\n\n run_dead_flux_descrip = ('run_dead_flux_check: Boolean, if True, search for pixels erroneously flagged '\n 'as dead because they are saturated in all groups. 
The value used is stored '\n 'in the {} keyword.'.format(dead_flux_check_kw))\n model.history.append(util.create_history_entry(run_dead_flux_descrip))\n\n dead_flux_limit_descrip = ('Signal limit in raw data above which the pixel is considered not dead. The '\n 'value used is stored in the {} keyword.'.format(max_dead_sig_kw))\n model.history.append(util.create_history_entry(dead_flux_limit_descrip))\n\n max_low_qe_descrip = ('The maximum normalized signal a pixel can have and be considered low QE. The '\n 'value used is stored in the {} keyword.'.format(max_low_qe_kw))\n model.history.append(util.create_history_entry(max_low_qe_descrip))\n\n max_open_adj_descrip = ('The maximum normalized signal a pixel adjacent to a low QE pixel can have '\n 'in order for the low QE pixel to be reclassified as OPEN. The value used '\n 'is stored in the {} keyword.'.format(max_open_adj_kw))\n model.history.append(util.create_history_entry(max_open_adj_descrip))\n\n flat_do_not_use_descrip = ('List of bad pixel types (from flats) where the DO_NOT_USE flag is also applied. '\n 'The values used are stored in the {} keyword.'.format(flat_do_not_use_kw))\n model.history.append(util.create_history_entry(flat_do_not_use_descrip))\n\n manual_file_descrip = ('Name of the ascii file containing a list of pixels to be added manually. The '\n 'value used is stored in the {} keyword.'.format(manual_flag_kw))\n model.history.append(util.create_history_entry(manual_file_descrip))\n\n # Parameters from badpix_from_darks\n bad_from_dark_descrip = ('badpix_from_dark: Boolean, whether or not the bad pixel from dark search '\n 'has been run. The value is stored in the {} keyword.'.format(bad_from_dark_kw))\n model.history.append(util.create_history_entry(bad_from_dark_descrip))\n\n dark_clip_sig_descrip = ('Number of sigma to use when sigma-clipping 2D stdev image. The value used '\n 'is stored in the {} keyword.'.format(dark_clip_sigma_kw))\n model.history.append(util.create_history_entry(dark_clip_sig_descrip))\n\n dark_clip_iter_descrip = ('Max number of iterations to use when sigma clipping mean and stdev values. '\n 'The value used is stored in the {} keyword.'.format(dark_clip_iters_kw))\n model.history.append(util.create_history_entry(dark_clip_iter_descrip))\n\n dark_noisy_thresh_descrip = ('Number of sigma above mean noise for noisy pix threshold. The value '\n 'used is stored in the {} keyword.'.format(dark_noisy_thresh_kw))\n model.history.append(util.create_history_entry(dark_noisy_thresh_descrip))\n\n max_sat_frac_descrip = ('Fraction of integrations within which a pixel must be fully saturated before '\n 'flagging it as HOT. The value used is stored in the {} keyword.'.format(max_sat_frac_kw))\n model.history.append(util.create_history_entry(max_sat_frac_descrip))\n\n jump_limit_descrip = ('Maximum number of jumps a pixel can have in an integration before it is flagged as a '\n '\"high jump\" pixel. The value used is stored in the {} keyword.'.format(jump_limit_kw))\n model.history.append(util.create_history_entry(jump_limit_descrip))\n\n jump_ratio_descrip = ('Cutoff for the ratio of jumps early in the ramp to jumps later in the ramp when '\n 'looking for RC pixels. The value used is stored in the {} keyword.'.format(jump_ratio_thresh_kw))\n model.history.append(util.create_history_entry(jump_ratio_descrip))\n\n cutoff_frac_descrip = ('Fraction of the integration to use when comparing the jump rate early in the integration to '\n 'that across the entire integration. 
The value used is stored in the {} keyword.'.format(cutoff_frac_kw))\n model.history.append(util.create_history_entry(cutoff_frac_descrip))\n\n ped_sigma_descrip = ('Pixels with pedestal values more than this limit above the mean are flagged as RC. '\n 'The value used is stored in the {} keyword.'.format(pedestal_sig_thresh_kw))\n model.history.append(util.create_history_entry(ped_sigma_descrip))\n\n rc_thresh_descrip = ('Fraction of input files within which a pixel must be identified as an RC pixel before '\n 'it will be flagged as a permanent RC pixel. The value used is stored in the {} '\n 'keyword.'.format(rc_frac_thresh_kw))\n model.history.append(util.create_history_entry(rc_thresh_descrip))\n\n low_ped_descrip = ('Fraction of input files within which a pixel must be identified as a low pedestal '\n 'pixel before it will be flagged as a permanent low pedestal pixel. The value used '\n 'is stored in the {} keyword.'.format(low_ped_frac_kw))\n model.history.append(util.create_history_entry(low_ped_descrip))\n\n high_cr_descrip = ('Fraction of input files within which a pixel must be flagged as having a high number '\n 'of jumps before it will be flagged as permanently noisy. The value used '\n 'is stored in the {} keyword.'.format(high_cr_frac_kw))\n dark_do_not_use_descrip = ('List of bad pixel types (from darks) where the DO_NOT_USE flag is also applied. '\n 'The values used are stored in the {} keyword.'.format(dark_do_not_use_kw))\n model.history.append(util.create_history_entry(dark_do_not_use_descrip))\n\n # Add the list of input files used to create the map\n model.history.append('DATA USED:')\n for file in files:\n totlen = len(file)\n div = np.arange(0, totlen, 60)\n for val in div:\n if totlen > (val+60):\n model.history.append(util.create_history_entry(file[val:val+60]))\n else:\n model.history.append(util.create_history_entry(file[val:]))\n\n # Add the do not use lists, pixel flag mappings, and user-provided\n # history text\n for history_entry in history_text:\n if history_entry != '':\n model.history.append(util.create_history_entry(history_entry))\n\n model.save(outfile, overwrite=True)\n print('Final bad pixel mask reference file save to: {}'.format(outfile))\n"
]
| [
[
"numpy.bitwise_xor",
"numpy.bitwise_or",
"numpy.arange"
]
]
|
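The mask-combination step in `bad_pixels()` above is plain bitmask arithmetic: OR the flat and dark masks together, then XOR away the DEAD bit wherever a pixel carries both HOT and DEAD. A self-contained sketch of that logic (the flag values below are illustrative stand-ins for jwst's `dqflags.pixel`, which the file above references without importing):

```python
import numpy as np

# Illustrative stand-ins for dqflags.pixel['HOT'] and dqflags.pixel['DEAD'].
HOT, DEAD = 2048, 1024

flat_mask = np.array([[0, DEAD], [DEAD, 0]], dtype=np.uint32)
dark_mask = np.array([[0, HOT], [0, HOT]], dtype=np.uint32)

# Combine the two searches, then keep only HOT where both flags are set.
final_mask = np.bitwise_or(flat_mask, dark_mask)
hot_and_dead = (final_mask & HOT > 0) & (final_mask & DEAD > 0)
final_mask[hot_and_dead] = np.bitwise_xor(final_mask[hot_and_dead], DEAD)

print(final_mask)   # [[   0 2048]
                    #  [1024 2048]]
```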
Nanovel-Ltd/keras-retinanet | [
"8aa02c3c32f8dd545ad20f9e977aee0d6a56dbd2"
]
| [
"keras_retinanet/bin/predict_on_images.py"
]
| [
"\nimport sys\nimport os\nimport argparse\n\n# Allow relative imports when being executed as script.\nif __name__ == \"__main__\" and __package__ is None:\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))\n import keras_retinanet.bin # noqa: F401\n __package__ = \"keras_retinanet.bin\"\n \n\n\ndef load_model_from_path(model_path):\n model = models.load_model(model_path, backbone_name='resnet50')\n try:\n model = models.convert_model(model)\n except:\n print(\"Model is likely already an inference model\")\n return model\n\n\ndef parse_args(args):\n \"\"\" Parse the arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description='Simple prediction script for evaluating a RetinaNet network.')\n \n\n parser.add_argument('--labels-json', help='Path to json file conatining new labels for replacing pascal-voc deafults', type=str, default=None,required=True)\n parser.add_argument('--weights-path', help='path to model weights snapshot', type = str,default= None,required=True)\n parser.add_argument('--min-confidence-cutoff', help=\"minimum confidence cutoff value, default is 0.5\", type=float, default=0.5)\n parser.add_argument(\"-d\", action='store_true')\n parser.add_argument(\"-o\",\"--output\",help=\"Output dor path for saving result. the result name will be current time\",type=str,required=True)\n parser.add_argument(\"--file-ext\",help=\"imges file type\",type=str,default=\"jpg\")\n parser.add_argument(\"--samples-num\",help=\"number of images to sample from input directory\",default=20,type=int,required=False)\n parser.add_argument(\"input\",help=\"input data path (file/dir)\")\n\n\n return parser.parse_args(args)\n\ndef predict_for_single_image(label_names, model,image_path,confidence_cutoff):\n image = np.asarray(Image.open(image_path).convert('RGB'))\n image = image[:, :, ::-1].copy()\n\n # copy to draw on\n draw = image.copy()\n draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)\n # Image formatting specific to Retinanet\n image = preprocess_image(image)\n image, scale = resize_image(image)\n\n\n boxes, scores, labels = model.predict_on_batch(np.expand_dims(image.copy(), axis=0))\n boxes /= scale\n for box, score, label in zip(boxes[0], scores[0], labels[0]):\n # scores are sorted so we can break\n if score < confidence_cutoff:\n break\n #Add boxes and captions\n color = (255, 255, 255)\n thickness = 2\n b = np.array(box).astype(int)\n cv2.rectangle(draw, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)\n\n if(label > len(label_names)):\n print(\"WARNING: Got unknown label, using 'detection' instead\")\n caption = \"Detection {:.3f}\".format(score)\n else:\n caption = \"{} {:.3f}\".format(label_names[label], score)\n\n cv2.putText(draw, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)\n cv2.putText(draw, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)\n\n return draw\n\ndef args_check(args):\n \n if os.path.isdir (args.labels_json) or args.labels_json.split(\".\")[-1] != \"json\":\n raise Exception(\"input labels file must be a json file\")\n if args.d:\n if not os.path.isdir(args.output):\n raise Exception(\"the specified output path is a single file, though the input is a directory\")\n else:\n if os.path.isdir(args.output):\n raise Exception(\"not specified \\\"-d\\\" for directory input\")\n\ndef load_labels_from_json(json_path):\n import json\n with open(json_path,\"r+\") as json_file:\n labels = json.load(json_file)\n for key in list(labels.keys()):\n labels[int(key)] = labels[key]\n del labels[key]\n 
return labels\n \ndef main(args=None):\n \n # parse arguments\n import glob \n import os\n import random\n import shutil \n if args is None:\n args = sys.argv[1:]\n args = parse_args(args)\n \n args_check(args) \n \n\n\n\n if args.d:\n images_list = glob.glob(os.path.join(args.input, f\"*.{args.file_ext}\"))\n print(\"samples num is \" + str(args.samples_num))\n # sample without replacement, capped at the number of available images\n images_list = random.sample(images_list, k=min(args.samples_num, len(images_list)))\n output_path = args.output\n print(f\"output path is {output_path}\")\n new_output_dir_name = os.path.join(args.output, f\"{time.time()}\")\n print(f\"creating output folder with name: \\n{new_output_dir_name}\\n _______________________________________\")\n os.makedirs(new_output_dir_name)\n print(\"output folder created successfully!\")\n\n print(\"loading model....\\n\\n\\n_________________________________\")\n model = load_model_from_path(args.weights_path)\n print(\"**************Model loaded successfully!! ****************\\n\\n\\n\")\n print(\"start batch prediction\")\n labels = load_labels_from_json(args.labels_json)\n counter = 0\n for image_path in images_list:\n output_original_name = os.path.join(new_output_dir_name, f\"origin_{counter}.{args.file_ext}\")\n output_predicted_name = os.path.join(new_output_dir_name, f\"predicted_{counter}.{args.file_ext}\")\n shutil.copyfile(image_path, output_original_name)\n image_with_boxes = predict_for_single_image(model=model,\n label_names=labels,\n image_path=image_path,\n confidence_cutoff=args.min_confidence_cutoff)\n\n cv2.imwrite(output_predicted_name, cv2.cvtColor(image_with_boxes, cv2.COLOR_RGB2BGR))\n print(f\"predict for image: {image_path}\")\n counter += 1\n else:\n print(\"loading model....\\n\\n\\n_________________________________\")\n model = load_model_from_path(args.weights_path)\n print(\"**************Model loaded successfully!! ****************\\n\\n\\n\")\n print(\"prediction for single file\")\n labels = load_labels_from_json(args.labels_json)\n image_with_boxes = predict_for_single_image(model=model,\n label_names=labels,\n image_path=args.input,\n confidence_cutoff=args.min_confidence_cutoff)\n\n cv2.imwrite(args.output, cv2.cvtColor(image_with_boxes, cv2.COLOR_RGB2BGR))\n print(f\"predict for image: {args.input}\")\n \n\nif __name__ == \"__main__\":\n args = sys.argv[1:]\n args = parse_args(args)\n args_check(args)\n # imports\n import keras\n import time\n from .. import models\n from ..utils.image import preprocess_image, read_image_bgr, resize_image\n import cv2\n import os\n import numpy as np\n from PIL import Image\n import tensorflow as tf\n main()"
]
| [
[
"numpy.array"
]
]
|
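The prediction script in the row above reads its --labels-json file and converts the stringified keys back to int class indices in load_labels_from_json. A minimal sketch of a compatible labels file and invocation; the class names, file names, and the predict.py script name are illustrative, not taken from the source repo:

import json

# Hypothetical two-class map; keys are stringified indices, as the script expects.
labels = {"0": "person", "1": "car"}
with open("labels.json", "w") as f:
    json.dump(labels, f)

# Single-file run (no -d flag, so --output must be a file path):
#   python predict.py --labels-json labels.json --weights-path snapshot.h5 --output result.jpg photo.jpg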
TTornblom/CMSIS_5 | [
"d2039b239cb6385d5a0e865966b2bb0c57f1f21f"
]
| [
"CMSIS/NN/Tests/UnitTest/generate_test_data.py"
]
| [
"#!/usr/bin/env python3\n#\n# Copyright (C) 2010-2021 Arm Limited or its affiliates.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the License); you may\n# not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an AS IS BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nimport sys\nimport json\nimport math\nimport argparse\nimport subprocess\nimport numpy as np\n\nfrom packaging import version\nfrom abc import ABC, abstractmethod\nfrom tensorflow.lite.python.interpreter import Interpreter\nfrom tensorflow.lite.python.interpreter import OpResolverType\n\n\ntry:\n import tensorflow as tf\nexcept Exception as e:\n print(e)\n sys.exit(1)\n\nREQUIRED_MINIMUM_TENSORFLOW_VERSION = version.parse(\"2.5\")\nALL_TESTDATA_SETS = {}\nCLANG_FORMAT = 'clang-format-9 -i'\nINT32_MAX = 2147483647\nINT32_MIN = -2147483648\nINT16_MAX = 32767\nINT16_MIN = -32768\nINT8_MAX = 127\nINT8_MIN = -128\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Generate input and refererence output data for unittests.\"\n \" It can regenerate all data, load all stored data or a combination of it.\")\n parser.add_argument('--dataset', type=str, default=None, help=\"Name of generated test set.\")\n parser.add_argument('--regenerate-weights', action='store_true', help=\"Regenerate and store new weights.\")\n parser.add_argument('--regenerate-input', action='store_true', help=\"Regenerate and store new input.\")\n parser.add_argument('--regenerate-biases', action='store_true', help=\"Regenerate and store new biases.\")\n parser.add_argument('-a', '--regenerate-all', action='store_true', help=\"Regenerate and store all data.\")\n parser.add_argument('-t', '--testtype', type=str, default=None, choices=['conv', 'depthwise_conv', 'avgpool',\n 'maxpool', 'fully_connected', 'softmax',\n 'svdf'],\n help='Type of test.')\n parser.add_argument('--run-all-testsets', action='store_true', help=\"Run the script for all existing test \"\n \"sets. Regenerate all, partially all or no input data (output may still change, depending on\"\n \" changes in script) depending on regenerate flags. If used together with the -t flag, only\"\n \" tests of that type will be run.\")\n parser.add_argument('--schema-file', type=str, help=\"Path to schema file. This may be needed for some tests.\")\n\n args = parser.parse_args()\n return args\n\n\nclass TestSettings(ABC):\n\n # This is the generated test data used by the test cases.\n OUTDIR = 'TestCases/TestData/'\n\n # This is input to the data generation. 
If everything or something is regenerated then it is overwritten.\n # So it always has the same data as the OUTDIR.\n # The purpose of the pregenerated data is primarily debugging: it makes it possible to change a single parameter and see how\n # the output changes (or does not change), without regenerating all input data.\n # It is also convenient when testing changes in the script, to be able to run all test sets again.\n PREGEN = 'PregeneratedData/'\n\n def __init__(self, dataset, testtype, args, in_ch, out_ch, x_in, y_in, w_x, w_y, stride_x, stride_y, pad,\n randmin=INT8_MIN, randmax=INT8_MAX, batches=1, generate_bias=True, relu6=False,\n out_activation_min=None, out_activation_max=None, int16xint8=False, bias_min=None, bias_max=None,\n dilation_x=1, dilation_y=1):\n\n self.tensor_flow_reference_version = (\"// Generated by {} using TFL version {} as reference.\\n\".\n format(os.path.basename(__file__), tf.__version__))\n\n # Randomization interval\n self.mins = randmin\n self.maxs = randmax\n\n self.bias_mins = bias_min\n self.bias_maxs = bias_max\n\n self.input_ch = in_ch\n self.output_ch = out_ch\n self.x_input = x_in\n self.y_input = y_in\n self.filter_x = w_x\n self.filter_y = w_y\n self.stride_x = stride_x\n self.stride_y = stride_y\n self.dilation_x = dilation_x\n self.dilation_y = dilation_y\n self.batches = batches\n self.test_type = testtype\n self.has_padding = pad\n\n self.is_int16xint8 = int16xint8\n\n if relu6:\n self.out_activation_max = 6\n self.out_activation_min = 0\n else:\n if out_activation_min is not None:\n self.out_activation_min = out_activation_min\n else:\n self.out_activation_min = INT16_MIN if self.is_int16xint8 else INT8_MIN\n if out_activation_max is not None:\n self.out_activation_max = out_activation_max\n else:\n self.out_activation_max = INT16_MAX if self.is_int16xint8 else INT8_MAX\n\n # Bias is optional.\n self.generate_bias = generate_bias\n\n self.generated_header_files = []\n self.pregenerated_data_dir = self.PREGEN\n\n self.config_data = \"config_data.h\"\n\n self.testdataset = dataset\n\n self.kernel_table_file = self.pregenerated_data_dir + self.testdataset + '/' + 'kernel.txt'\n self.inputs_table_file = self.pregenerated_data_dir + self.testdataset + '/' + 'input.txt'\n self.bias_table_file = self.pregenerated_data_dir + self.testdataset + '/' + 'bias.txt'\n self.parameters_file = self.pregenerated_data_dir + self.testdataset + '/' + 'params.txt'\n\n if self.has_padding:\n self.padding = 'SAME'\n else:\n self.padding = 'VALID'\n\n self.regenerate_new_weights = args.regenerate_weights\n self.regenerate_new_input = args.regenerate_input\n self.regenerate_new_bias = args.regenerate_biases\n if not os.path.exists(self.parameters_file) or args.regenerate_all:\n self.regenerate_new_bias = True\n self.regenerate_new_weights = True\n self.regenerate_new_input = True\n\n self.headers_dir = self.OUTDIR + self.testdataset + '/'\n self.model_path = \"{}model_{}\".format(self.headers_dir, self.testdataset)\n self.model_path_tflite = self.model_path + '.tflite'\n\n def save_multiple_dim_array_in_txt(self, file, data):\n header = ','.join(map(str, data.shape))\n np.savetxt(file, data.reshape(-1, data.shape[-1]), header=header,\n delimiter=',')\n\n def load_multiple_dim_array_from_txt(self, file):\n with open(file) as f:\n shape = list(map(int, next(f)[1:].split(',')))\n data = np.genfromtxt(f, delimiter=',').reshape(shape)\n return data.astype(np.float32)\n\n def convert_tensor_np(self, tensor_in, converter, *qminmax):\n w = tensor_in.numpy()\n shape = w.shape\n w = w.ravel()\n 
if len(qminmax) == 2:\n fw = converter(w, qminmax[0], qminmax[1])\n else:\n fw = converter(w)\n fw.shape = shape\n return tf.convert_to_tensor(fw)\n\n def convert_tensor(self, tensor_in, converter, *qminmax):\n w = tensor_in.numpy()\n shape = w.shape\n w = w.ravel()\n normal = np.array(w)\n float_normal = []\n\n for i in normal:\n if len(qminmax) == 2:\n float_normal.append(converter(i, qminmax[0], qminmax[1]))\n else:\n float_normal.append(converter(i))\n\n np_float_array = np.asarray(float_normal)\n np_float_array.shape = shape\n\n return tf.convert_to_tensor(np_float_array)\n\n def get_randomized_data(self, dims, npfile, regenerate, decimals=0, minrange=None, maxrange=None):\n if not minrange:\n minrange = self.mins\n if not maxrange:\n maxrange = self.maxs\n if not os.path.exists(npfile) or regenerate:\n regendir = os.path.dirname(npfile)\n os.makedirs(regendir, exist_ok=True)\n if decimals == 0:\n data = tf.Variable(tf.random.uniform(dims, minval=minrange, maxval=maxrange, dtype=tf.dtypes.int64))\n data = tf.cast(data, dtype=tf.float32)\n else:\n data = tf.Variable(tf.random.uniform(dims, minval=minrange, maxval=maxrange, dtype=tf.dtypes.float32))\n data = np.around(data.numpy(), decimals)\n data = tf.convert_to_tensor(data)\n\n print(\"Saving data to {}\".format(npfile))\n self.save_multiple_dim_array_in_txt(npfile, data.numpy())\n else:\n print(\"Loading data from {}\".format(npfile))\n data = tf.convert_to_tensor(self.load_multiple_dim_array_from_txt(npfile))\n return data\n\n def get_randomized_input_data(self, input_data, input_shape=None):\n # Generate or load saved input data unless hardcoded data provided\n if input_shape is None:\n input_shape = [self.batches, self.y_input, self.x_input, self.input_ch]\n if input_data is not None:\n input_data = tf.reshape(input_data, input_shape)\n else:\n input_data = self.get_randomized_data(input_shape,\n self.inputs_table_file,\n regenerate=self.regenerate_new_input)\n return input_data\n\n def get_randomized_bias_data(self, biases):\n # Generate or load saved bias data unless hardcoded data provided\n if not self.generate_bias:\n biases = tf.reshape(np.full([self.output_ch], 0), [self.output_ch])\n elif biases is not None:\n biases = tf.reshape(biases, [self.output_ch])\n else:\n biases = self.get_randomized_data([self.output_ch],\n self.bias_table_file,\n regenerate=self.regenerate_new_bias,\n minrange=self.bias_mins,\n maxrange=self.bias_maxs)\n return biases\n\n def format_output_file(self, file):\n command_list = CLANG_FORMAT.split(' ')\n command_list.append(file)\n process = subprocess.run(command_list)\n if process.returncode != 0:\n print(\"ERROR: {} failed\".format(command_list))\n sys.exit(1)\n\n def write_c_header_wrapper(self):\n filename = \"test_data.h\"\n filepath = self.headers_dir + filename\n\n print(\"Generating C header wrapper {}...\".format(filepath))\n with open(filepath, 'w+') as f:\n f.write(self.tensor_flow_reference_version)\n while len(self.generated_header_files) > 0:\n f.write('#include \"{}\"\\n'.format(self.generated_header_files.pop()))\n self.format_output_file(filepath)\n\n def write_common_config(self, f, prefix):\n \"\"\"\n Shared by conv/depthwise_conv and pooling\n \"\"\"\n f.write(\"#define {}_FILTER_X {}\\n\".format(prefix, self.filter_x))\n f.write(\"#define {}_FILTER_Y {}\\n\".format(prefix, self.filter_y))\n f.write(\"#define {}_STRIDE_X {}\\n\".format(prefix, self.stride_x))\n f.write(\"#define {}_STRIDE_Y {}\\n\".format(prefix, self.stride_y))\n f.write(\"#define {}_PAD_X 
{}\\n\".format(prefix, self.pad_x))\n f.write(\"#define {}_PAD_Y {}\\n\".format(prefix, self.pad_y))\n f.write(\"#define {}_OUTPUT_W {}\\n\".format(prefix, self.x_output))\n f.write(\"#define {}_OUTPUT_H {}\\n\".format(prefix, self.y_output))\n\n def write_c_common_header(self, f):\n f.write(self.tensor_flow_reference_version)\n f.write(\"#pragma once\\n\")\n\n def write_c_config_header(self, write_common_parameters=True):\n filename = self.config_data\n\n self.generated_header_files.append(filename)\n filepath = self.headers_dir + filename\n\n prefix = self.testdataset.upper()\n\n print(\"Writing C header with config data {}...\".format(filepath))\n with open(filepath, \"w+\") as f:\n self.write_c_common_header(f)\n if (write_common_parameters):\n f.write(\"#define {}_OUT_CH {}\\n\".format(prefix, self.output_ch))\n f.write(\"#define {}_IN_CH {}\\n\".format(prefix, self.input_ch))\n f.write(\"#define {}_INPUT_W {}\\n\".format(prefix, self.x_input))\n f.write(\"#define {}_INPUT_H {}\\n\".format(prefix, self.y_input))\n f.write(\"#define {}_DST_SIZE {}\\n\".format(prefix, self.x_output * self.y_output * self.output_ch\n * self.batches))\n f.write(\"#define {}_INPUT_SIZE {}\\n\".format(prefix, self.x_input * self.y_input * self.input_ch))\n f.write(\"#define {}_OUT_ACTIVATION_MIN {}\\n\".format(prefix, self.out_activation_min))\n f.write(\"#define {}_OUT_ACTIVATION_MAX {}\\n\".format(prefix, self.out_activation_max))\n f.write(\"#define {}_INPUT_BATCHES {}\\n\".format(prefix, self.batches))\n self.format_output_file(filepath)\n\n def generate_c_array(self, name, array, datatype=\"q7_t\", const=\"const \"):\n os.makedirs(self.headers_dir, exist_ok=True)\n\n w = None\n if type(array) is list:\n w = array\n size = len(array)\n elif type(array) is np.ndarray:\n w = array\n w = w.ravel()\n size = w.size\n else:\n w = array.numpy()\n w = w.ravel()\n size = tf.size(array)\n filename = name + \"_data.h\"\n filepath = self.headers_dir + filename\n\n self.generated_header_files.append(filename)\n\n print(\"Generating C header {}...\".format(filepath))\n with open(filepath, \"w+\") as f:\n self.write_c_common_header(f)\n f.write(\"#include <stdint.h>\\n\\n\")\n f.write(const + datatype + \" \" + self.testdataset + '_' + name + \"[%d] =\\n{\\n\" % size)\n for i in range(size - 1):\n f.write(\" %d,\\n\" % w[i])\n f.write(\" %d\\n\" % w[size - 1])\n f.write(\"};\\n\")\n self.format_output_file(filepath)\n\n def set_output_dims_and_padding(self, output_x, output_y):\n self.x_output = output_x\n self.y_output = output_y\n if self.has_padding:\n # Take dilation into account.\n filter_x = (self.filter_x - 1) * self.dilation_x + 1\n filter_y = (self.filter_y - 1) * self.dilation_y + 1\n\n pad_along_width = max((self.x_output - 1) * self.stride_x + filter_x - self.x_input, 0)\n pad_along_height = max((self.y_output - 1) * self.stride_y + filter_y - self.y_input, 0)\n pad_top = pad_along_height // 2\n pad_left = pad_along_width // 2\n self.pad_x = pad_left\n self.pad_y = pad_top\n else:\n self.pad_x = 0\n self.pad_y = 0\n\n @abstractmethod\n def generate_data(self, input_data=None, weights=None, biases=None):\n ''' Must be overriden '''\n\n def quantize_scale(self, scale):\n significand, shift = math.frexp(scale)\n significand_q31 = round(significand * (1 << 31))\n return significand_q31, shift\n\n def get_convolving_calib_data_func(self):\n def representative_data_gen():\n # testset = np.random.rand(self.batches, self.y_input, self.x_input, self.input_ch).astype('float32')\n testset = np.ones((self.batches, 
self.y_input, self.x_input, self.input_ch), dtype=np.float32)\n yield [testset]\n return representative_data_gen\n\n def convert_and_interpret(self, model, input_data, inttype):\n \"\"\"\n Compile and convert a model to Tflite format, run interpreter and allocate tensors.\n \"\"\"\n model.compile(loss=tf.keras.losses.categorical_crossentropy,\n optimizer=tf.keras.optimizers.Adam(),\n metrics=['accuracy'])\n\n converter = tf.lite.TFLiteConverter.from_keras_model(model)\n converter.optimizations = [tf.lite.Optimize.DEFAULT]\n converter.representative_dataset = self.get_convolving_calib_data_func()\n if self.is_int16xint8:\n converter.target_spec.supported_ops = [\n tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8]\n else:\n converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]\n converter.inference_input_type = inttype\n converter.inference_output_type = inttype\n tflite_model = converter.convert()\n\n os.makedirs(os.path.dirname(self.model_path_tflite), exist_ok=True)\n with open(self.model_path_tflite, \"wb\") as model:\n model.write(tflite_model)\n\n interpreter = Interpreter(\n model_path=str(self.model_path_tflite), experimental_op_resolver_type=OpResolverType.BUILTIN_REF)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n (self.input_scale, self.input_zero_point) = input_details[0]['quantization']\n (self.output_scale, self.output_zero_point) = output_details[0]['quantization']\n\n # Set input tensors\n interpreter.set_tensor(input_details[0][\"index\"], tf.cast(input_data, inttype))\n\n return interpreter\n\n\nclass ConvSettings(TestSettings):\n\n def __init__(self, dataset, testtype, args, in_ch=1, out_ch=1, x_in=7, y_in=7, w_x=3, w_y=3, stride_x=2, stride_y=2,\n pad=True, randmin=INT8_MIN, randmax=INT8_MAX, batches=1, generate_bias=True, relu6=False,\n out_activation_min=None, out_activation_max=None, int16xint8=False, bias_min=None,\n bias_max=None):\n super().__init__(dataset, testtype, args, in_ch, out_ch, x_in, y_in, w_x, w_y, stride_x, stride_y, pad,\n randmin, randmax, batches, generate_bias=generate_bias, relu6=relu6,\n out_activation_min=out_activation_min, out_activation_max=out_activation_max,\n int16xint8=int16xint8, bias_min=bias_min, bias_max=bias_max)\n\n self.scaling_factors = []\n\n if self.test_type == 'conv':\n self.quantized_dimension = 0\n elif self.test_type == 'depthwise_conv':\n self.quantized_dimension = 3\n self.channel_multiplier = self.output_ch // self.input_ch\n if self.output_ch % self.input_ch != 0:\n raise RuntimeError(\"out channel ({}) is not multiple of in channel ({})\".format(out_ch, in_ch))\n else:\n raise RuntimeError(\"Invalid test type {}\".format(self.test_type))\n\n def write_c_config_header(self):\n super().write_c_config_header()\n\n filename = self.config_data\n filepath = self.headers_dir + filename\n prefix = self.testdataset.upper()\n\n with open(filepath, \"a\") as f:\n self.write_common_config(f, prefix)\n if self.test_type == 'depthwise_conv':\n f.write(\"#define {}_CH_MULT {}\\n\".format(prefix, self.channel_multiplier))\n f.write(\"#define {}_INPUT_OFFSET {}\\n\".format(prefix, -self.input_zero_point))\n f.write(\"#define {}_OUTPUT_OFFSET {}\\n\".format(prefix, self.output_zero_point))\n\n def generate_quantize_per_channel_multiplier(self):\n num_channels = self.output_ch\n per_channel_multiplier = []\n per_channel_shift = []\n\n if len(self.scaling_factors) != num_channels:\n raise 
RuntimeError(\"Missing scaling factors\")\n\n for i in range(num_channels):\n effective_output_scale = self.input_scale * self.scaling_factors[i] / self.output_scale\n (quantized_multiplier, shift) = self.quantize_scale(effective_output_scale)\n\n per_channel_multiplier.append(quantized_multiplier)\n per_channel_shift.append(shift)\n\n self.generate_c_array(\"output_mult\", per_channel_multiplier, datatype='int32_t')\n self.generate_c_array(\"output_shift\", per_channel_shift, datatype='int32_t')\n\n def generate_data(self, input_data=None, weights=None, biases=None):\n if self.is_int16xint8:\n inttype = tf.int16\n datatype = \"q15_t\"\n bias_datatype = \"int64_t\"\n else:\n inttype = tf.int8\n datatype = \"q7_t\"\n bias_datatype = \"int32_t\"\n\n input_data = self.get_randomized_input_data(input_data)\n\n if self.test_type == 'conv':\n out_channel = self.output_ch\n elif self.test_type == 'depthwise_conv':\n out_channel = self.channel_multiplier\n\n if weights is not None:\n weights = tf.reshape(weights, [self.filter_y, self.filter_x, self.input_ch, out_channel])\n else:\n weights = self.get_randomized_data([self.filter_y, self.filter_x, self.input_ch, out_channel],\n self.kernel_table_file,\n regenerate=self.regenerate_new_weights)\n\n biases = self.get_randomized_bias_data(biases)\n\n # Create a one layer Keras model.\n model = tf.keras.models.Sequential()\n input_shape = (self.batches, self.y_input, self.x_input, self.input_ch)\n model.add(tf.keras.layers.InputLayer(\n input_shape=input_shape[1:], batch_size=self.batches))\n if self.test_type == 'conv':\n conv_layer = tf.keras.layers.Conv2D(self.output_ch, kernel_size=(self.filter_y, self.filter_x),\n strides=(self.stride_y, self.stride_x),\n padding=self.padding, input_shape=input_shape[1:])\n model.add(conv_layer)\n conv_layer.set_weights([weights, biases])\n elif self.test_type == 'depthwise_conv':\n depthwise_layer = tf.keras.layers.DepthwiseConv2D(\n kernel_size=(self.filter_y, self.filter_x),\n strides=(self.stride_y, self.stride_x),\n padding=self.padding, depth_multiplier=self.channel_multiplier,\n input_shape=input_shape[1:])\n model.add(depthwise_layer)\n depthwise_layer.set_weights([weights, biases])\n\n interpreter = self.convert_and_interpret(model, input_data, inttype)\n\n all_layers_details = interpreter.get_tensor_details()\n filter_layer = all_layers_details[1]\n bias_layer = all_layers_details[2]\n if weights.numpy().size != interpreter.get_tensor(filter_layer['index']).size or \\\n (self.generate_bias and biases.numpy().size != interpreter.get_tensor(bias_layer['index']).size):\n raise RuntimeError(\"Dimension mismatch\")\n\n output_details = interpreter.get_output_details()\n self.set_output_dims_and_padding(output_details[0]['shape'][2], output_details[0]['shape'][1])\n\n self.generate_c_array(\"input\", input_data, datatype=datatype)\n self.generate_c_array(\"weights\", interpreter.get_tensor(filter_layer['index']))\n\n self.scaling_factors = filter_layer['quantization_parameters']['scales']\n self.generate_quantize_per_channel_multiplier()\n\n self.generate_c_array(\"biases\", interpreter.get_tensor(bias_layer['index']), bias_datatype)\n\n # Generate reference\n interpreter.invoke()\n output_data = interpreter.get_tensor(output_details[0][\"index\"])\n self.generate_c_array(\"output_ref\", np.clip(output_data, self.out_activation_min, self.out_activation_max),\n datatype=datatype)\n\n self.write_c_config_header()\n self.write_c_header_wrapper()\n\n\nclass PoolingSettings(TestSettings):\n\n def __init__(self, 
dataset, testtype, args, channels=8, x_in=4, y_in=4, w_x=4, w_y=4, stride_x=1, stride_y=1,\n batches=1, pad=False, relu6=False):\n super().__init__(dataset, testtype, args, channels, channels, x_in, y_in, w_x, w_y, stride_x, stride_y, pad,\n relu6=relu6)\n\n def generate_data(self, input_data=None):\n input_data = self.get_randomized_input_data(input_data)\n self.generate_c_array(\"input\", input_data, datatype=\"int8_t\")\n\n input_data = tf.cast(input_data, tf.float32)\n\n # Create a one-layer Keras model\n model = tf.keras.models.Sequential()\n input_shape = (self.batches, self.y_input, self.x_input, self.input_ch)\n model.add(tf.keras.layers.InputLayer(\n input_shape=input_shape[1:], batch_size=self.batches))\n if self.test_type == 'avgpool':\n model.add(tf.keras.layers.AveragePooling2D(pool_size=(self.filter_y, self.filter_x),\n strides=(self.stride_y, self.stride_x),\n padding=self.padding, input_shape=input_shape[1:]))\n elif self.test_type == 'maxpool':\n model.add(tf.keras.layers.MaxPooling2D(pool_size=(self.filter_y, self.filter_x),\n strides=(self.stride_y, self.stride_x),\n padding=self.padding, input_shape=input_shape[1:]))\n else:\n raise RuntimeError(\"Wrong test type\")\n\n interpreter = self.convert_and_interpret(model, input_data, tf.int8)\n\n output_details = interpreter.get_output_details()\n self.set_output_dims_and_padding(output_details[0]['shape'][2], output_details[0]['shape'][1])\n\n # Generate reference\n interpreter.invoke()\n output_data = interpreter.get_tensor(output_details[0][\"index\"])\n self.generate_c_array(\"output_ref\", np.clip(output_data, self.out_activation_min, self.out_activation_max),\n datatype=\"int8_t\")\n\n self.write_c_config_header()\n self.write_c_header_wrapper()\n\n def write_c_config_header(self):\n super().write_c_config_header()\n\n filename = self.config_data\n filepath = self.headers_dir + filename\n prefix = self.testdataset.upper()\n\n with open(filepath, \"a\") as f:\n self.write_common_config(f, prefix)\n\n\nclass FullyConnectedSettings(TestSettings):\n\n def __init__(self, dataset, testtype, args, in_ch=1, out_ch=1, x_in=1, y_in=1, w_x=1, w_y=1, stride_x=1, stride_y=1,\n pad=False, randmin=INT8_MIN, randmax=INT8_MAX, batches=1, generate_bias=True, out_activation_min=None,\n out_activation_max=None, int16xint8=False, bias_min=None, bias_max=None):\n super().__init__(dataset, testtype, args, in_ch, out_ch, x_in, y_in, x_in, y_in, stride_x, stride_y, pad,\n randmin, randmax, batches, generate_bias=generate_bias, out_activation_min=out_activation_min,\n out_activation_max=out_activation_max, int16xint8=int16xint8, bias_min=bias_min,\n bias_max=bias_max)\n\n if not self.test_type == 'fully_connected':\n raise RuntimeError(\"Invalid test type {}\".format(self.test_type))\n\n def write_c_config_header(self):\n super().write_c_config_header()\n\n filename = self.config_data\n filepath = self.headers_dir + filename\n prefix = self.testdataset.upper()\n\n with open(filepath, \"a\") as f:\n f.write(\"#define {}_OUTPUT_MULTIPLIER {}\\n\".format(prefix, self.quantized_multiplier))\n f.write(\"#define {}_OUTPUT_SHIFT {}\\n\".format(prefix, self.quantized_shift))\n f.write(\"#define {}_ACCUMULATION_DEPTH {}\\n\".format(prefix, self.input_ch*self.x_input*self.y_input))\n f.write(\"#define {}_INPUT_OFFSET {}\\n\".format(prefix, -self.input_zero_point))\n f.write(\"#define {}_OUTPUT_OFFSET {}\\n\".format(prefix, self.output_zero_point))\n\n def quantize_multiplier(self):\n input_product_scale = self.input_scale * self.weights_scale\n if 
input_product_scale < 0:\n raise RuntimeError(\"negative input product scale\")\n real_multiplier = input_product_scale / self.output_scale\n (self.quantized_multiplier, self.quantized_shift) = self.quantize_scale(real_multiplier)\n\n def generate_data(self, input_data=None, weights=None, biases=None):\n input_data = self.get_randomized_input_data(input_data,\n [self.batches, self.input_ch * self.x_input * self.y_input])\n\n if self.is_int16xint8:\n inttype = tf.int16\n datatype = \"q15_t\"\n bias_datatype = \"int64_t\"\n else:\n inttype = tf.int8\n datatype = \"q7_t\"\n bias_datatype = \"int32_t\"\n\n fc_weights_format = [self.input_ch * self.y_input * self.x_input, self.output_ch]\n\n if weights is not None:\n weights = tf.reshape(weights, fc_weights_format)\n else:\n weights = self.get_randomized_data(fc_weights_format,\n self.kernel_table_file,\n regenerate=self.regenerate_new_weights)\n\n biases = self.get_randomized_bias_data(biases)\n\n # Create model with one fully_connected layer.\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.InputLayer(\n input_shape=(self.y_input * self.x_input * self.input_ch,), batch_size=self.batches))\n fully_connected_layer = tf.keras.layers.Dense(self.output_ch, activation=None)\n model.add(fully_connected_layer)\n fully_connected_layer.set_weights([weights, biases])\n\n interpreter = self.convert_and_interpret(model, input_data, inttype)\n\n all_layers_details = interpreter.get_tensor_details()\n if self.is_int16xint8:\n filter_layer = all_layers_details[2]\n bias_layer = all_layers_details[1]\n else:\n filter_layer = all_layers_details[1]\n bias_layer = all_layers_details[2]\n if weights.numpy().size != interpreter.get_tensor(filter_layer['index']).size or \\\n (self.generate_bias and biases.numpy().size != interpreter.get_tensor(bias_layer['index']).size):\n raise RuntimeError(\"Dimension mismatch\")\n\n # The generic destination size calculation for these tests is: self.x_output * self.y_output * self.output_ch\n # * self.batches.\n self.x_output = 1\n self.y_output = 1\n output_details = interpreter.get_output_details()\n if self.output_ch != output_details[0]['shape'][1] or self.batches != output_details[0]['shape'][0]:\n raise RuntimeError(\"Fully connected out dimension mismatch\")\n\n self.weights_scale = filter_layer['quantization_parameters']['scales'][0]\n self.quantize_multiplier()\n\n self.generate_c_array(\"input\", input_data, datatype=datatype)\n self.generate_c_array(\"weights\", interpreter.get_tensor(filter_layer['index']))\n\n if self.generate_bias:\n self.generate_c_array(\"biases\", interpreter.get_tensor(bias_layer['index']), bias_datatype)\n else:\n self.generate_c_array(\"biases\", biases, bias_datatype)\n\n # Generate reference\n interpreter.invoke()\n output_data = interpreter.get_tensor(output_details[0][\"index\"])\n self.generate_c_array(\"output_ref\", np.clip(output_data, self.out_activation_min, self.out_activation_max),\n datatype=datatype)\n\n self.write_c_config_header()\n self.write_c_header_wrapper()\n\n\nclass SoftmaxSettings(TestSettings):\n softmax_input_integer_bits = 5\n\n def __init__(self, dataset, testtype, args, x_in=5, y_in=1, randmin=INT8_MIN, randmax=INT8_MAX):\n super().__init__(dataset, testtype, args, 1, 1, x_in, y_in, 1, 1, 1, 1, False, randmin,\n randmax)\n self.x_input = self.x_output = x_in\n self.y_input = self.y_output = y_in\n\n def calc_softmax_params(self):\n input_real_multiplier = min(self.input_scale * (1 << (31 - self.softmax_input_integer_bits)),\n (1 << 31) - 1)\n 
(self.input_multiplier, self.input_left_shift) = self.quantize_scale(input_real_multiplier)\n\n self.diff_min = ((1 << self.softmax_input_integer_bits) - 1) * \\\n (1 << (31 - self.softmax_input_integer_bits)) / \\\n (1 << self.input_left_shift)\n self.diff_min = math.floor(self.diff_min)\n\n def write_c_config_header(self):\n super().write_c_config_header(write_common_parameters=False)\n\n filename = self.config_data\n filepath = self.headers_dir + filename\n prefix = self.testdataset.upper()\n\n with open(filepath, \"a\") as f:\n f.write(\"#define {}_NUM_ROWS {}\\n\".format(prefix, self.y_input))\n f.write(\"#define {}_ROW_SIZE {}\\n\".format(prefix, self.x_input))\n f.write(\"#define {}_INPUT_MULT {}\\n\".format(prefix, self.input_multiplier))\n f.write(\"#define {}_INPUT_LEFT_SHIFT {}\\n\".format(prefix, self.input_left_shift))\n f.write(\"#define {}_DIFF_MIN {}\\n\".format(prefix, -self.diff_min))\n f.write(\"#define {}_DST_SIZE {}\\n\".format(prefix, self.x_output * self.y_output))\n\n def get_softmax_randomized_input_data(self, input_data, input_shape):\n # Generate or load saved input data unless hardcoded data provided.\n if input_data is not None:\n input_data = tf.reshape(input_data, input_shape)\n else:\n input_data = self.get_randomized_data(input_shape,\n self.inputs_table_file,\n regenerate=self.regenerate_new_input)\n return input_data\n\n def generate_data(self, input_data=None, weights=None, biases=None):\n input_data = self.get_softmax_randomized_input_data(input_data, [self.y_input, self.x_input])\n self.generate_c_array(\"input\", input_data, datatype=\"int8_t\")\n\n # Create a one-layer Keras model.\n model = tf.keras.models.Sequential()\n input_shape = (self.y_input, self.x_input)\n model.add(tf.keras.layers.Softmax(input_shape=input_shape[1:]))\n\n interpreter = self.convert_and_interpret(model, input_data, tf.int8)\n\n self.calc_softmax_params()\n\n # Generate reference\n output_details = interpreter.get_output_details()\n interpreter.invoke()\n output_data = interpreter.get_tensor(output_details[0][\"index\"])\n self.generate_c_array(\"output_ref\", output_data)\n\n self.write_c_config_header()\n self.write_c_header_wrapper()\n\n\nclass SVDFSettings(TestSettings):\n\n def __init__(self, dataset, testtype, args, batches=2, number_inputs=2, rank=8, memory_size=10, randmin=INT8_MIN,\n randmax=INT8_MAX, input_size=3, number_units=4, generate_bias=True, input_scale=0.1, input_zp=0,\n w_1_scale=0.005, w_1_zp=0, w_2_scale=0.005, w_2_zp=0, bias_scale=0.000001, bias_zp=0,\n state_scale=0.005, state_zp=0, output_scale=0.1, output_zp=0):\n super().__init__(dataset, testtype, args, 1, 1, 1, 1, 1, 1, 1, 1, False, randmin,\n randmax, generate_bias=generate_bias)\n self.batches = batches\n self.number_units = number_units\n self.input_size = input_size\n self.memory_size = memory_size\n self.rank = rank\n self.number_filters = self.number_units * self.rank\n self.time_table_file = self.pregenerated_data_dir + self.testdataset + '/' + 'time_data.txt'\n\n self.number_inputs = number_inputs\n self.input_sequence_length = self.number_inputs * self.input_size * self.batches\n\n self.in_activation_max = INT16_MAX\n self.in_activation_min = INT16_MIN\n\n self.json_template = \"TestCases/Common/svdf_template.json\"\n self.json_replacements = {\"memory_sizeXnumber_filters\": self.memory_size * self.number_filters,\n \"batches\": self.batches,\n \"input_size\": self.input_size,\n \"number_filters\": self.number_filters,\n \"memory_size\": self.memory_size,\n \"number_units\": 
self.number_units,\n \"rank_value\": self.rank,\n \"input_scale\": input_scale,\n \"input_zp\": input_zp,\n \"w_1_scale\": w_1_scale,\n \"w_1_zp\": w_1_zp,\n \"w_2_scale\": w_2_scale,\n \"w_2_zp\": w_2_zp,\n \"bias_scale\": bias_scale,\n \"bias_zp\": bias_zp,\n \"state_scale\": state_scale,\n \"state_zp\": state_zp,\n \"output_scale\": output_scale,\n \"output_zp\": output_zp}\n\n def calc_multipliers_and_shifts(self, input_scale, weights_1_scale, weights_2_scale, state_scale, output_scale):\n effective_scale_1 = weights_1_scale * input_scale / state_scale\n effective_scale_2 = state_scale * weights_2_scale / output_scale\n (self.multiplier_in, self.shift_1) = self.quantize_scale(effective_scale_1)\n (self.multiplier_out, self.shift_2) = self.quantize_scale(effective_scale_2)\n\n def write_c_config_header(self):\n super().write_c_config_header(write_common_parameters=False)\n\n filename = self.config_data\n filepath = self.headers_dir + filename\n prefix = self.testdataset.upper()\n\n with open(filepath, \"a\") as f:\n f.write(\"#define {}_MULTIPLIER_IN {}\\n\".format(prefix, self.multiplier_in))\n f.write(\"#define {}_MULTIPLIER_OUT {}\\n\".format(prefix, self.multiplier_out))\n f.write(\"#define {}_SHIFT_1 {}\\n\".format(prefix, self.shift_1))\n f.write(\"#define {}_SHIFT_2 {}\\n\".format(prefix, self.shift_2))\n f.write(\"#define {}_IN_ACTIVATION_MIN {}\\n\".format(prefix, self.in_activation_min))\n f.write(\"#define {}_IN_ACTIVATION_MAX {}\\n\".format(prefix, self.in_activation_max))\n f.write(\"#define {}_RANK {}\\n\".format(prefix, self.rank))\n f.write(\"#define {}_FEATURE_BATCHES {}\\n\".format(prefix, self.number_filters))\n f.write(\"#define {}_TIME_BATCHES {}\\n\".format(prefix, self.memory_size))\n f.write(\"#define {}_INPUT_SIZE {}\\n\".format(prefix, self.input_size))\n f.write(\"#define {}_DST_SIZE {}\\n\".format(prefix, self.number_units * self.batches))\n f.write(\"#define {}_OUT_ACTIVATION_MIN {}\\n\".format(prefix, self.out_activation_min))\n f.write(\"#define {}_OUT_ACTIVATION_MAX {}\\n\".format(prefix, self.out_activation_max))\n f.write(\"#define {}_INPUT_BATCHES {}\\n\".format(prefix, self.batches))\n f.write(\"#define {}_INPUT_OFFSET {}\\n\".format(prefix, self.input_zero_point))\n f.write(\"#define {}_OUTPUT_OFFSET {}\\n\".format(prefix, self.output_zero_point))\n\n def generate_data(self, input_data=None, weights=None, biases=None, time_data=None, state_data=None):\n if input_data is not None:\n input_data = tf.reshape(input_data, [self.input_sequence_length])\n else:\n input_data = self.get_randomized_data([self.input_sequence_length],\n self.inputs_table_file,\n regenerate=self.regenerate_new_input)\n self.generate_c_array(\"input_sequence\", input_data)\n\n if weights is not None:\n weights_feature_data = tf.reshape(weights, [self.number_filters, self.input_size])\n else:\n weights_feature_data = self.get_randomized_data([self.number_filters, self.input_size],\n self.kernel_table_file,\n regenerate=self.regenerate_new_weights)\n\n if time_data is not None:\n weights_time_data = tf.reshape(time_data, [self.number_filters, self.memory_size])\n else:\n weights_time_data = self.get_randomized_data([self.number_filters, self.memory_size],\n self.time_table_file,\n regenerate=self.regenerate_new_weights)\n\n if not self.generate_bias:\n biases = [0] * self.number_units\n if biases is not None:\n biases = tf.reshape(biases, [self.number_units])\n else:\n biases = self.get_randomized_data([self.number_units],\n self.bias_table_file,\n 
regenerate=self.regenerate_new_weights)\n\n # Generate tflite model\n generated_json = self.generate_json_from_template(weights_feature_data, weights_time_data, biases)\n self.flatc_generate_tflite(generated_json, args.schema_file)\n\n # Run TFL interpreter\n interpreter = Interpreter(\n model_path=str(self.model_path_tflite), experimental_op_resolver_type=OpResolverType.BUILTIN_REF)\n interpreter.allocate_tensors()\n\n # Read back scales and zero points from tflite model\n all_layers_details = interpreter.get_tensor_details()\n input_layer = all_layers_details[0]\n weights_1_layer = all_layers_details[1]\n weights_2_layer = all_layers_details[2]\n bias_layer = all_layers_details[3]\n state_layer = all_layers_details[4]\n output_layer = all_layers_details[5]\n (input_scale, self.input_zero_point) = self.get_scale_and_zp(input_layer)\n (weights_1_scale, zero_point) = self.get_scale_and_zp(weights_1_layer)\n (weights_2_scale, zero_point) = self.get_scale_and_zp(weights_2_layer)\n (bias_scale, zero_point) = self.get_scale_and_zp(bias_layer)\n (state_scale, zero_point) = self.get_scale_and_zp(state_layer)\n (output_scale, self.output_zero_point) = self.get_scale_and_zp(output_layer)\n\n self.calc_multipliers_and_shifts(input_scale, weights_1_scale, weights_2_scale, state_scale, output_scale)\n\n # Generate unit test C headers\n self.generate_c_array(\"weights_feature\", interpreter.get_tensor(weights_1_layer['index']))\n self.generate_c_array(\"weights_time\", interpreter.get_tensor(weights_2_layer['index']), datatype='q15_t')\n self.generate_c_array(\"biases\", interpreter.get_tensor(bias_layer['index']), \"int32_t\")\n self.generate_c_array(\"state\", interpreter.get_tensor(state_layer['index']), \"q15_t\")\n\n # Generate reference output\n svdf_ref = None\n for i in range(self.number_inputs):\n start = i * self.input_size * self.batches\n end = i * self.input_size * self.batches + self.input_size * self.batches\n input_sequence = input_data[start:end]\n input_sequence = tf.reshape(input_sequence, [self.batches, self.input_size])\n interpreter.set_tensor(input_layer[\"index\"], tf.cast(input_sequence, tf.int8))\n interpreter.invoke()\n svdf_ref = interpreter.get_tensor(output_layer[\"index\"])\n self.generate_c_array(\"output_ref\", svdf_ref)\n\n self.write_c_config_header()\n self.write_c_header_wrapper()\n\n def flatc_generate_tflite(self, json_input, schema):\n flatc = 'flatc'\n if schema is None:\n raise RuntimeError(\"A schema file is required.\")\n command = \"{} -o {} -c -b {} {}\".format(flatc, self.headers_dir, schema, json_input)\n command_list = command.split(' ')\n process = subprocess.run(command_list)\n if process.returncode != 0:\n raise RuntimeError(\"The following command failed: {}. 
Did you install flatc?\".format(command))\n\n def get_scale_and_zp(self, layer):\n return (layer['quantization_parameters']['scales'][0], layer['quantization_parameters']['zero_points'][0])\n\n def to_bytes(self, tensor_data, type_size):\n result_bytes = []\n\n if type_size == 1:\n tensor_type = np.uint8\n elif type_size == 2:\n tensor_type = np.uint16\n elif type_size == 4:\n tensor_type = np.uint32\n else:\n raise RuntimeError(\"Size not supported: {}\".format(type_size))\n\n for val in tensor_data:\n for byte in int(tensor_type(val)).to_bytes(type_size, 'little'):\n result_bytes.append(byte)\n\n return result_bytes\n\n def generate_json_from_template(self, weights_feature_data, weights_time_data, bias_data):\n \"\"\"\n Takes a json template and parameters as input and creates a new json file.\n \"\"\"\n w_1_buffer_index = 1\n w_2_buffer_index = 2\n bias_buffer_index = 3\n generated_json_file = self.model_path + '.json'\n\n with open(self.json_template, 'r') as in_file, open(generated_json_file, 'w') as out_file:\n # Update shapes, scales and zero points\n data = in_file.read()\n for item, to_replace in self.json_replacements.items():\n data = data.replace(item, str(to_replace))\n\n # Update weights and bias data\n data = json.loads(data)\n data[\"buffers\"][w_1_buffer_index][\"data\"] = self.to_bytes(weights_feature_data.numpy().ravel(), 1)\n data[\"buffers\"][w_2_buffer_index][\"data\"] = self.to_bytes(weights_time_data.numpy().ravel(), 2)\n data[\"buffers\"][bias_buffer_index][\"data\"] = self.to_bytes(bias_data.numpy().ravel(), 4)\n json.dump(data, out_file, indent=2)\n\n return generated_json_file\n\n\ndef load_all_testdatasets():\n \"\"\"\n Add all new testdata sets here\n \"\"\"\n\n type_of_test = 'conv'\n dataset = 'basic'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=1, out_ch=1, x_in=5,\n y_in=8, w_x=2, w_y=4, stride_x=1, stride_y=1, pad=False)\n dataset = 'stride2pad1'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=1, out_ch=1, x_in=7,\n y_in=7, w_x=3, w_y=3, stride_x=2, stride_y=2, pad=True)\n dataset = 'kernel1x1'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=4, out_ch=17, x_in=15,\n y_in=15, w_x=1, w_y=1, stride_x=1, stride_y=1, pad=False,\n out_activation_min=-126, out_activation_max=127)\n dataset = 'conv_3'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=1, x_in=10, y_in=49, w_x=4,\n w_y=10, stride_x=1, stride_y=2, pad=True,\n out_activation_min=-127, out_activation_max=127)\n dataset = 'conv_1_x_n_1'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=3, x_in=5, y_in=5, w_x=2,\n w_y=1, stride_x=2, stride_y=1, pad=False, out_activation_min=-127,\n out_activation_max=127, batches=2)\n dataset = 'conv_1_x_n_2'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=1, x_in=11, y_in=11, w_x=11,\n w_y=1, stride_x=1, stride_y=1, pad=True,\n out_activation_min=-111, out_activation_max=127)\n dataset = 'conv_1_x_n_3'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=1, out_ch=3, x_in=11, y_in=11, w_x=1,\n w_y=11, stride_x=1, stride_y=1, pad=True,\n out_activation_min=-88, out_activation_max=127)\n dataset = 'conv_2'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=4, x_in=6, y_in=3, w_x=3,\n w_y=3, stride_x=1, stride_y=1, pad=True, out_activation_min=-101,\n out_activation_max=127)\n dataset = 'conv_4' # 
batches > 2\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=3, x_in=5, y_in=5, w_x=2,\n w_y=3, stride_x=2, stride_y=2, pad=False,\n out_activation_min=-109, out_activation_max=127, batches=3)\n dataset = 'conv_out_activation'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=3, y_in=3, w_x=3,\n w_y=3, stride_x=1, stride_y=1, pad=True, out_activation_min=-61,\n out_activation_max=107)\n dataset = 'int16xint8'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=4, x_in=7,\n y_in=8, w_x=2, w_y=4, stride_x=2, stride_y=3, pad=True,\n randmin=INT16_MIN, randmax=INT16_MAX, out_activation_min=-13335,\n out_activation_max=32767, int16xint8=True)\n dataset = 'requantize_s64'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=3,\n y_in=2, w_x=2, w_y=2, stride_x=1, stride_y=1, pad=False,\n out_activation_min=INT16_MIN, out_activation_max=INT16_MAX,\n int16xint8=True, bias_min=-0x300, bias_max=0x9fff)\n\n type_of_test = 'depthwise_conv'\n dataset = 'depthwise_2'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=9, x_in=6, y_in=5, w_x=3,\n w_y=4, stride_x=2, stride_y=2, pad=True,\n out_activation_min=-73, out_activation_max=127)\n dataset = 'depthwise_kernel_3x3'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=5, out_ch=5, x_in=4, y_in=5, w_x=3,\n w_y=3, stride_x=2, stride_y=2, pad=True,\n out_activation_min=-104, out_activation_max=127)\n dataset = 'depthwise_eq_in_out_ch'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=6, out_ch=6, x_in=4, y_in=5, w_x=2,\n w_y=3, stride_x=1, stride_y=1, pad=True,\n out_activation_min=-86, out_activation_max=127)\n dataset = 'depthwise_out_activation'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=3, x_in=6, y_in=5, w_x=3,\n w_y=4, pad=False, out_activation_min=-45,\n out_activation_max=103)\n dataset = 'depthwise_mult_batches'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=3, out_ch=3, x_in=3, y_in=5, w_x=2,\n w_y=4, stride_x=2, stride_y=2, pad=True,\n batches=2)\n dataset = 'depthwise_null_bias_0'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=2, x_in=4, y_in=5, w_x=2,\n w_y=2, stride_x=1, stride_y=1, pad=True, generate_bias=False,\n batches=1)\n dataset = 'depthwise_null_bias_1'\n ALL_TESTDATA_SETS[dataset] = ConvSettings(dataset, type_of_test, args, in_ch=2, out_ch=8, x_in=4, y_in=5, w_x=2,\n w_y=2, stride_x=1, stride_y=1, pad=True, generate_bias=False,\n batches=1) \n type_of_test = 'fully_connected'\n dataset = 'fully_connected'\n ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=10, out_ch=6, x_in=2, y_in=1,\n batches=3)\n dataset = 'fully_connected_mve_0'\n ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=16, out_ch=9, x_in=1, y_in=1,\n batches=1)\n dataset = 'fully_connected_mve_1'\n ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=20, out_ch=4, x_in=1, y_in=1,\n batches=1)\n dataset = 'fully_connected_null_bias_0'\n ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=33, out_ch=5,\n batches=2, generate_bias=False)\n dataset = 'fully_connected_out_activation'\n ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, 
args, in_ch=10, out_ch=4,\n out_activation_min=-70, out_activation_max=100)\n dataset = 'fully_connected_int16'\n ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=7, out_ch=11, x_in=3, y_in=3,\n batches=2, randmin=INT16_MIN, randmax=INT16_MAX,\n out_activation_min=-9999, out_activation_max=32767,\n int16xint8=True)\n dataset = 'fully_connected_int16_big'\n ALL_TESTDATA_SETS[dataset] = FullyConnectedSettings(dataset, type_of_test, args, in_ch=7, out_ch=11, x_in=10,\n y_in=10, batches=3, out_activation_min=-1444,\n out_activation_max=32767, int16xint8=True)\n\n type_of_test = 'avgpool'\n dataset = 'avgpooling'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=8, x_in=22, y_in=12, stride_x=9,\n stride_y=5, w_x=6, w_y=5, pad=True)\n dataset = 'avgpooling_1'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=3, x_in=9, y_in=5, stride_x=1,\n stride_y=2, w_x=9, w_y=5, pad=False)\n dataset = 'avgpooling_2'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=5, x_in=12, y_in=1, stride_x=1,\n stride_y=2, w_x=3, w_y=1, pad=True)\n dataset = 'avgpooling_3'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=9, y_in=1, stride_x=2,\n stride_y=1, w_x=1, w_y=1, pad=False)\n dataset = 'avgpooling_4'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=1, y_in=20, stride_x=1,\n stride_y=3, w_x=1, w_y=3, pad=True)\n dataset = 'avgpooling_5'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=1, x_in=3, y_in=3,\n stride_x=1, stride_y=1, w_x=1, w_y=3, pad=True, relu6=True)\n\n type_of_test = 'maxpool'\n dataset = 'maxpooling'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=8, x_in=22, y_in=12, stride_x=9,\n stride_y=5, w_x=6, w_y=5, pad=True)\n dataset = 'maxpooling_1'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=3, x_in=9, y_in=5, stride_x=1,\n stride_y=2, w_x=9, w_y=5, pad=False)\n dataset = 'maxpooling_2'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=5, x_in=12, y_in=1, stride_x=1,\n stride_y=2, w_x=3, w_y=1, pad=True)\n dataset = 'maxpooling_3'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=9, y_in=1, stride_x=2,\n stride_y=1, w_x=1, w_y=1, pad=False)\n dataset = 'maxpooling_4'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=2, x_in=1, y_in=20, stride_x=1,\n stride_y=3, w_x=1, w_y=3, pad=True)\n dataset = 'maxpooling_5'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=20, x_in=1, y_in=1, stride_x=1,\n stride_y=1, w_x=1, w_y=1, pad=True)\n dataset = 'maxpooling_6'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=17, x_in=1, y_in=5, stride_x=1,\n stride_y=3, w_x=3, w_y=4, pad=True)\n dataset = 'maxpooling_7'\n ALL_TESTDATA_SETS[dataset] = PoolingSettings(dataset, type_of_test, args, channels=1, x_in=4, y_in=2, stride_x=2,\n stride_y=2, w_x=2, w_y=2, pad=False, relu6=True)\n type_of_test = 'softmax'\n dataset = 'softmax'\n ALL_TESTDATA_SETS[dataset] = SoftmaxSettings(dataset, type_of_test, args, x_in=5, y_in=1)\n\n type_of_test = 'svdf'\n dataset = 'svdf'\n ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=2, number_inputs=2, rank=8,\n memory_size=8, input_size=3, 
number_units=3)\n type_of_test = 'svdf'\n dataset = 'svdf_1'\n ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=3, number_inputs=2, rank=1,\n memory_size=2, input_size=7, number_units=5)\n\n type_of_test = 'svdf'\n dataset = 'svdf_2'\n ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=3, number_inputs=2, rank=2,\n memory_size=2, input_size=7, number_units=5, generate_bias=False)\n\n type_of_test = 'svdf'\n dataset = 'svdf_3'\n ALL_TESTDATA_SETS[dataset] = SVDFSettings(dataset, type_of_test, args, batches=1, number_inputs=2, rank=1,\n memory_size=2, input_size=20, number_units=12, generate_bias=False)\n\n\nif __name__ == '__main__':\n if version.parse(tf.__version__) < REQUIRED_MINIMUM_TENSORFLOW_VERSION:\n print(\"Unsupported tensorflow version, \", version.parse(tf.__version__))\n sys.exit(0)\n\n args = parse_args()\n testdataset = args.dataset\n test_type = args.testtype\n\n load_all_testdatasets()\n\n if (args.run_all_testsets):\n for testset_name, testset_generator in ALL_TESTDATA_SETS.items():\n if test_type and testset_generator.test_type != test_type:\n continue\n print(\"Generating testset {}..\".format(testset_name))\n testset_generator.generate_data()\n print()\n\n # Check that all testsets have been loaded.\n found_test_data_sets = []\n directory = 'TestCases/TestData'\n for dir in next(os.walk(directory))[1]:\n found_test_data_sets.append(dir)\n for testset_name in found_test_data_sets:\n if testset_name not in ALL_TESTDATA_SETS:\n print(\"WARNING: Testset {} in {} was not loaded\".format(testset_name, directory))\n else:\n try:\n if not testdataset:\n raise RuntimeError(\"Please select testdataset or use --run_all_testsets\")\n generator = ALL_TESTDATA_SETS[testdataset]\n except KeyError:\n print(\"WARNING: testset {} not in testset list\".format(testdataset))\n if args.testtype == 'conv' or args.testtype == 'depthwise_conv':\n generator = ConvSettings(testdataset, test_type, args)\n elif args.testtype == 'fully_connected':\n generator = FullyConnectedSettings(testdataset, test_type, args)\n elif args.testtype == 'avgpool' or args.testtype == 'maxpool':\n generator = PoolingSettings(testdataset, test_type, args)\n generator.generate_data()\n"
]
| [
[
"tensorflow.keras.layers.InputLayer",
"tensorflow.keras.layers.AveragePooling2D",
"numpy.genfromtxt",
"tensorflow.reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.Sequential",
"tensorflow.cast",
"numpy.full",
"tensorflow.keras.layers.DepthwiseConv2D",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"tensorflow.random.uniform",
"numpy.clip",
"tensorflow.convert_to_tensor",
"tensorflow.size",
"numpy.asarray",
"numpy.ones",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.lite.TFLiteConverter.from_keras_model",
"tensorflow.keras.layers.Softmax"
]
]
|
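TestSettings.quantize_scale in the row above decomposes a floating-point scale into a Q31 significand and a power-of-two shift with math.frexp. A standalone round-trip check of that decomposition; the sample scale value is arbitrary:

import math

def quantize_scale(scale):
    # Same decomposition as TestSettings.quantize_scale: scale ~= (q31 / 2**31) * 2**shift
    significand, shift = math.frexp(scale)
    significand_q31 = round(significand * (1 << 31))
    return significand_q31, shift

mult, shift = quantize_scale(0.0037)
reconstructed = mult / (1 << 31) * 2.0 ** shift
assert abs(reconstructed - 0.0037) < 1e-9  # quantization error stays tiny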
benblack769/audiosearch | [
"6e260e589e7fc9dd8b32d8b2428b33c66c8a639e"
]
| [
"learner_cli/create_json_dataset.py"
]
| [
"import pandas\nimport base64\nimport numpy as np\nimport json\n\ntrack_data = pandas.read_csv(\"data/fma_metadata/track_info.csv\")\nlast_vector_num = int(open(\"data/fma_outs/epoc_num.txt\").read())\nvectors = np.load(\"data/final_word_vecs.npy\")#np.load(f\"data/fma_outs/vector_at_{last_vector_num}.npy\")\n\nall_fnames = open(\"data/fma_outs/music_list.txt\").readlines()\nall_fnames = [fname.strip()[2:][:-4] for fname in all_fnames]\n\nfma_small = pandas.DataFrame({\n \"id\": all_fnames,\n \"url\": [\"http://datasets-mirror.s3.amazonaws.com/fma_small/\"+fname for fname in all_fnames]\n})\n\nnew_dataframe = pandas.merge(fma_small,track_data,how=\"inner\",on=\"id\")\nassert len(new_dataframe.index) == len(track_data.index)\nprint(new_dataframe['url'])\ndatas = json.loads(new_dataframe.to_json(orient=\"records\"))\nnew_datas = []\nfor vec, data in zip(vectors, datas):\n bytes = base64.b64encode(vec.astype(\"float64\").tobytes()).decode(\"utf-8\")\n new_entry = {\n \"key\": bytes,\n \"value\": data,\n }\n new_datas.append(new_entry)\njson.dump(new_datas,open(\"data/learned_dataset.json\",'w'))\n"
]
| [
[
"pandas.DataFrame",
"pandas.read_csv",
"numpy.load",
"pandas.merge"
]
]
|
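The script in the row above serialises each float64 vector with tobytes() and base64-encodes it into the "key" field of the JSON records. A minimal round-trip sketch for reading such a key back; the sample vector is arbitrary:

import base64
import numpy as np

vec = np.arange(4, dtype="float64")
key = base64.b64encode(vec.astype("float64").tobytes()).decode("utf-8")
# Decoding reverses both steps; frombuffer needs the matching dtype.
decoded = np.frombuffer(base64.b64decode(key), dtype="float64")
assert np.array_equal(vec, decoded)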
sgjholt/mlmags | [
"e0a5f418440aa3206f24c62c4ccffbf6bd8b8063"
]
| [
"src/data/clean_data_tables.py"
]
| [
"import numpy as np\nimport pandas as pd\nfrom typing import Tuple \n\n\ndef col_to_dt(df: pd.DataFrame, name: str):\n \"\"\"[summary]\n\n Args:\n df (pd.DataFrame): [description]\n name (str): [description]\n \"\"\"\n df[name] = pd.to_datetime(df[name].values, unit='ns', utc=True)\n\n\ndef clean_mag_table(df: pd.DataFrame, min_nsta: int, max_std_err: float) -> pd.DataFrame:\n \"\"\"[summary]\n\n Args:\n df (pd.DataFrame): [description]\n\n Returns:\n pd.DataFrame: [description]\n \"\"\"\n conds = (df[\"Mw-Nobs\"] >= min_nsta) & (df[\"Mw-std-err\"] <= max_std_err)\n mag_table = df[conds].copy(deep=True).rename(columns={\"UTC\": \"otime\"})\n col_to_dt(mag_table, 'otime')\n return mag_table\n\n\ndef clean_fit_table(df: pd.DataFrame, min_dep: float, max_fc: float) -> pd.DataFrame:\n \"\"\"[summary]\n\n Args:\n df (pd.DataFrame): [description]\n\n Returns:\n pd.DataFrame: [description]\n \"\"\"\n conds = (df[\"dep\"] >= min_dep) & (df[\"fc\"] <= max_fc)\n fit_table = df[conds].copy(deep=True)\n col_to_dt(fit_table, 'otime')\n return fit_table"
]
| [
[
"pandas.to_datetime"
]
]
|
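A usage sketch for clean_mag_table from the row above, assuming the module is importable as clean_data_tables (hypothetical import path); the toy thresholds and values are illustrative only:

import pandas as pd
from clean_data_tables import clean_mag_table  # hypothetical import path

toy = pd.DataFrame({
    "UTC": [1_600_000_000_000_000_000, 1_600_000_100_000_000_000],  # ns epochs
    "Mw-Nobs": [3, 12],
    "Mw-std-err": [0.5, 0.1],
})
clean = clean_mag_table(toy, min_nsta=5, max_std_err=0.2)
# Only the second event survives; "UTC" is renamed to a tz-aware "otime".
assert list(clean["otime"].dt.year) == [2020]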
Gaurav1401/Awesome_Python_Scripts | [
"e98044cc42a975e81d880b27546fadcdead17a42"
]
| [
"GUIScripts/Postal Code Validator/postal_code_validator.py"
]
| [
"\n# Postal Code Validator\n\n# imported necessary library\nimport tkinter\nfrom tkinter import *\nimport tkinter as tk\nimport tkinter.messagebox as mbox\nfrom PIL import Image, ImageTk\nimport pandas as pd\nimport re\n\n\n# created main window\nwindow = Tk()\nwindow.geometry(\"1000x700\")\nwindow.title(\"Postal Code Validator\")\n\n# read the csv data\ndata = pd.read_csv(\"pincode_data.csv\")\nPincode = data['Pincode'].tolist()\nDistrict = data['District'].tolist()\nStateName = data['StateName'].tolist()\n\n# function for showing validity rules\ndef valid_fun():\n mbox.showinfo(\"VALID POSTAL CODE RULES\",\"A valid postal code have to fullfil both below requirements :\\n\\n1.) Must be a number in the range from 100000 to 999999 inclusive.\\n2.) Must not contain more than one alternating repetitive digit pair.\")\n\n# function to check validity of Postal code\ndef validate_fun():\n global valid\n entered_code = code_entry.get()\n valid = len(re.findall(r'(?=(\\d)\\d\\1)',entered_code)) < 2 and bool(re.match(r'^[1-9][0-9]{5}$',entered_code))\n if(valid):\n mbox.showinfo(\"Validity Status\",\"The Postal Code \" + entered_code + \" is VALID.\")\n else:\n mbox.showerror(\"Validity Error\", \"THe Postal Code \" + entered_code + \" is INVALID.\")\n\n# function for locating the Valid Postal Code using csv data\ndef locate_fun():\n entered_code = code_entry.get()\n if(valid):\n for i in range(0,len(Pincode)):\n if(str(Pincode[i])==str(entered_code)):\n mbox.showinfo(\"Locate Postal Code\", \"Postal Code : \" + entered_code + \"\\n\\nDistrict Name : \" + District[i] + \"\\n\\nState Name : \" + StateName[i])\n break\n else:\n mbox.showerror(\"Locate Error\", \"The Postal Code in INVALID. So can't locate.\")\n\n# top label\nstart1 = tk.Label(text = \"POSTAL CODE VALIDATOR\", font=(\"Arial\", 40), fg=\"magenta\") # same way bg\nstart1.place(x = 150, y = 10)\n\n# image on the main window\npath = \"Images/postal_code1.jpg\"\nimg1 = ImageTk.PhotoImage(Image.open(path))\n# The Label widget is a standard Tkinter widget used to display a text or image on the screen.\npanel = tk.Label(window, image = img1)\npanel.place(x = 135, y = 80)\n\n# Button for rules\nvpcrb = Button(window, text=\"VALID POSTAL CODE RULES\",command=valid_fun,font=(\"Arial\", 25), bg = \"light green\", fg = \"blue\", borderwidth=3, relief=\"raised\")\nvpcrb.place(x =238 , y =425 )\n\n# label for Entering Postal Code ---------------------------------------------------------------------------------\nsel_label = tk.Label(text = \"ENTER POSTAL CODE : \", font=(\"Arial\", 35), fg=\"brown\") # same way bg\nsel_label.place(x = 50, y = 515)\n\n# Created Entry Box\ncode_entry = Entry(window, font=(\"Arial\", 30), fg='orange', bg=\"light yellow\", borderwidth=3, width=14)\ncode_entry.place(x=600, y=520)\n\n# created Locate Button\nlocateb = Button(window, text=\"LOCATE\",command=locate_fun,font=(\"Arial\", 25), bg = \"light green\", fg = \"blue\", borderwidth=3, relief=\"raised\")\nlocateb.place(x =100 , y =600 )\n\n# created Validate button\nvalidateb = Button(window, text=\"VALIDATE\",command=validate_fun,font=(\"Arial\", 25), bg = \"light green\", fg = \"blue\", borderwidth=3, relief=\"raised\")\nvalidateb.place(x =320 , y =600 )\n\n# function for clearing the entry box\ndef clear_entry():\n code_entry.set(0,END)\n\n# created clear button\nclearb = Button(window, text=\"CLEAR\",command=clear_entry,font=(\"Arial\", 25), bg = \"orange\", fg = \"blue\", borderwidth=3, relief=\"raised\")\nclearb.place(x =570 , y =600 )\n\n# function for 
exiting\ndef exit_win():\n if mbox.askokcancel(\"Exit\", \"Do you want to exit?\"):\n window.destroy()\n\n# created exit button\nexitb = Button(window, text=\"EXIT\",command=exit_win,font=(\"Arial\", 25), bg = \"red\", fg = \"blue\", borderwidth=3, relief=\"raised\")\nexitb.place(x =780 , y =600 )\n\n\nwindow.protocol(\"WM_DELETE_WINDOW\", exit_win)\nwindow.mainloop()"
]
| [
[
"pandas.read_csv"
]
]
|
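A minimal, Tkinter-free sketch of the validation rule embedded in the row above (the function name and sample codes are illustrative, not from the original file):

```python
import re

def is_valid_postal_code(code: str) -> bool:
    # Rule 1: a six-digit number from 100000 to 999999 (no leading zero).
    in_range = bool(re.match(r'^[1-9][0-9]{5}$', code))
    # Rule 2: at most one alternating repetitive digit pair, i.e. a digit
    # repeated two positions later; the lookahead counts overlapping matches.
    pairs = len(re.findall(r'(?=(\d)\d\1)', code))
    return in_range and pairs < 2

assert is_valid_postal_code("121426")      # one alternating pair is allowed
assert not is_valid_postal_code("552523")  # two alternating pairs
assert not is_valid_postal_code("013254")  # leading zero fails the range rule
```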
ucgmsim/Pre-processing | [
"c4b9ae20a9e5e4f96f930bde29aa15176d9c8b64"
]
| [
"srf_generation/source_parameter_generation/uncertainties/versions/cs_20_4.py"
]
| [
"\"\"\"A basic perturbator as an example and starting point\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom typing import Any, Dict\n\nfrom qcore.nhm import NHMFault\n\nfrom srf_generation.Fault import fault_factory, Type4\nfrom srf_generation.source_parameter_generation.uncertainties.common import (\n verify_realisation_params,\n get_seed,\n)\nfrom srf_generation.source_parameter_generation.uncertainties.distributions import (\n rand_shyp,\n truncated_normal,\n truncated_weibull,\n)\nfrom srf_generation.source_parameter_generation.uncertainties.mag_scaling import (\n lw_to_mw_sigma_scaling_relation,\n)\n\nTYPE = 4\n\n\ndef generate_source_params(\n source_data: NHMFault,\n additional_source_parameters: Dict[str, Any],\n vel_mod_1d: pd.DataFrame,\n vs30_data: pd.DataFrame = None,\n **kwargs\n) -> Dict[str, Any]:\n \"\"\"source_data should have the following parameters available via . notation:\n - source_data.pid: name of the event\n - source_data.lat: latitude\n - source_data.lon: longitude\n - source_data.depth\n - source_data.mag: magnitude\n - source_data.strike\n - source_data.dip\n - source_data.rake\n \"\"\"\n\n fault: Type4 = fault_factory(TYPE)(source_data)\n\n fault.shypo = (fault.length / 2) * rand_shyp()\n fault.dhypo = fault.width * truncated_weibull(1)\n\n fault.rake = truncated_normal(fault.rake, 15, 4)\n mag, sigma = lw_to_mw_sigma_scaling_relation(\n fault.length, fault.width, fault.mwsr, fault.rake\n )\n perturbated_magnitude = truncated_normal(mag, sigma, 1)\n\n params = fault.to_dict()\n params.update({\"dt\": 0.005, \"seed\": get_seed(), \"genslip_version\": \"5.4.2\"})\n\n params[\"sdrop\"] = 50 * np.sqrt(10 ** (perturbated_magnitude - mag))\n params[\"magnitude\"] = perturbated_magnitude\n\n realisation = kwargs\n\n realisation[\"params\"] = params\n realisation[\"params\"].update(additional_source_parameters)\n\n if vs30_data is not None:\n realisation[\"vs30\"] = vs30_data\n realisation[\"vs30\"][\"vs30\"] = vs30_data[\"median\"]\n\n verify_realisation_params(realisation[\"params\"])\n return realisation\n"
]
| [
[
"numpy.sqrt"
]
]
|
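The `truncated_normal` helper used above is project-internal; a plausible reading of `truncated_normal(mean, sd, k)` is rejection sampling clipped at k standard deviations. A sketch under that assumption, not the project's actual implementation:

```python
import numpy as np

def truncated_normal(mean, std_dev, n_std_devs, rng=None):
    # Redraw until the sample falls within n_std_devs standard deviations.
    rng = rng or np.random.default_rng()
    while True:
        value = rng.normal(mean, std_dev)
        if abs(value - mean) <= n_std_devs * std_dev:
            return value

# e.g. perturb a rake of 110 degrees with sigma 15, clipped at 4 sigma
print(truncated_normal(110.0, 15.0, 4))
```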
amyrebecca/aggregation-for-caesar | [
"5f0d884932312010f9caeb8ebfcfe358f490e41f"
]
| [
"panoptes_aggregation/reducers/shape_reducer_dbscan.py"
]
| [
"'''\nShape Reducer DBSCAN\n--------------------\nThis module provides functions to cluster shapes extracted with\n:mod:`panoptes_aggregation.extractors.shape_extractor`.\n'''\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\nfrom collections import OrderedDict\nfrom .reducer_wrapper import reducer_wrapper\nfrom .subtask_reducer_wrapper import subtask_wrapper\nfrom ..shape_tools import SHAPE_LUT\nfrom .shape_process_data import process_data, DEFAULTS_PROCESS\nfrom .shape_metric import get_shape_metric_and_avg\n\n\nDEFAULTS = {\n 'eps': {'default': 5.0, 'type': float},\n 'min_samples': {'default': 3, 'type': int},\n 'algorithm': {'default': 'auto', 'type': str},\n 'leaf_size': {'default': 30, 'type': int},\n 'p': {'default': None, 'type': float}\n}\n\n\n@reducer_wrapper(process_data=process_data, defaults_data=DEFAULTS, defaults_process=DEFAULTS_PROCESS)\n@subtask_wrapper\ndef shape_reducer_dbscan(data_by_tool, **kwargs):\n '''Cluster a shape by tool using DBSCAN\n\n Parameters\n ----------\n data_by_tool : dict\n A dictionary returned by :meth:`process_data`\n kwrgs :\n `See DBSCAN <http://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html>`_\n\n Returns\n -------\n reduction : dict\n A dictinary with the following keys for each frame\n\n * `tool*_<shape>_<param>` : A list of **all** `param` for the `sahpe` drawn with `tool*`\n * `tool*_cluster_labels` : A list of cluster labels for **all** shapes drawn with `tool*`\n * `tool*_clusters_count` : The number of points in each **cluster** found\n * `tool*_clusters_<param>` : The `param` value for each **cluster** found\n '''\n shape = data_by_tool.pop('shape')\n shape_params = SHAPE_LUT[shape]\n symmetric = data_by_tool.pop('symmetric')\n metric, avg = get_shape_metric_and_avg(shape, symmetric=symmetric)\n kwargs['metric'] = metric\n clusters = OrderedDict()\n for frame, frame_data in data_by_tool.items():\n clusters[frame] = OrderedDict()\n for tool, loc_list in frame_data.items():\n loc = np.array(loc_list)\n if len(shape_params) == 1:\n loc = loc.reshape(-1, 1)\n # orignal data points in order used by cluster code\n for pdx, param in enumerate(shape_params):\n clusters[frame]['{0}_{1}_{2}'.format(tool, shape, param)] = loc[:, pdx].tolist()\n # default each point in no cluster\n clusters[frame]['{0}_cluster_labels'.format(tool)] = [-1] * loc.shape[0]\n if loc.shape[0] >= kwargs['min_samples']:\n db = DBSCAN(**kwargs).fit(loc)\n # what cluster each point belongs to\n clusters[frame]['{0}_cluster_labels'.format(tool)] = db.labels_.tolist()\n for k in set(db.labels_):\n if k > -1:\n idx = db.labels_ == k\n # number of points in the cluster\n clusters[frame].setdefault('{0}_clusters_count'.format(tool), []).append(int(idx.sum()))\n # mean of the cluster\n k_loc = avg(loc[idx])\n for pdx, param in enumerate(shape_params):\n clusters[frame].setdefault('{0}_clusters_{1}'.format(tool, param), []).append(float(k_loc[pdx]))\n return clusters\n"
]
| [
[
"numpy.array",
"sklearn.cluster.DBSCAN"
]
]
|
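Stripped of the Zooniverse plumbing, the core clustering pattern in the reducer above is plain scikit-learn DBSCAN followed by a per-cluster summary (toy points chosen for illustration):

```python
import numpy as np
from sklearn.cluster import DBSCAN

points = np.array([[0, 0], [1, 0], [0, 1],
                   [10, 10], [11, 10], [10, 11],
                   [50, 50]])
db = DBSCAN(eps=5.0, min_samples=3).fit(points)
print(db.labels_.tolist())  # [0, 0, 0, 1, 1, 1, -1]; -1 marks noise

# per-cluster count and mean, as the reducer records them
for k in sorted(set(db.labels_)):
    if k > -1:
        idx = db.labels_ == k
        print(k, int(idx.sum()), points[idx].mean(axis=0))
```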
guodashun/self_supervised_ggcnn | [
"3656d7b017dffefda6c57d1616f0ae61811f01bb"
]
| [
"utils/dataset_processing/grasp.py"
]
| [
"import numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom skimage.draw import polygon\nfrom skimage.feature import peak_local_max\n\n\ndef _gr_text_to_no(l, offset=(0, 0)):\n \"\"\"\n Transform a single point from a Cornell file line to a pair of ints.\n :param l: Line from Cornell grasp file (str)\n :param offset: Offset to apply to point positions\n :return: Point [y, x]\n \"\"\"\n x, y = l.split()\n return [int(round(float(y))) - offset[0], int(round(float(x))) - offset[1]]\n\n\nclass GraspRectangles:\n \"\"\"\n Convenience class for loading and operating on sets of Grasp Rectangles.\n \"\"\"\n def __init__(self, grs=None):\n if grs:\n self.grs = grs\n else:\n self.grs = []\n\n def __getitem__(self, item):\n return self.grs[item]\n\n def __iter__(self):\n return self.grs.__iter__()\n\n def __getattr__(self, attr):\n \"\"\"\n Test if GraspRectangle has the desired attr as a function and call it.\n \"\"\"\n # Fuck yeah python.\n if hasattr(GraspRectangle, attr) and callable(getattr(GraspRectangle, attr)):\n return lambda *args, **kwargs: list(map(lambda gr: getattr(gr, attr)(*args, **kwargs), self.grs))\n else:\n raise AttributeError(\"Couldn't find function %s in BoundingBoxes or BoundingBox\" % attr)\n\n @classmethod\n def load_from_array(cls, arr):\n \"\"\"\n Load grasp rectangles from numpy array.\n :param arr: Nx4x2 array, where each 4x2 array is the 4 corner pixels of a grasp rectangle.\n :return: GraspRectangles()\n \"\"\"\n grs = []\n for i in range(arr.shape[0]):\n grp = arr[i, :, :].squeeze()\n if grp.max() == 0:\n break\n else:\n grs.append(GraspRectangle(grp))\n return cls(grs)\n\n @classmethod\n def load_from_cornell_file(cls, fname):\n \"\"\"\n Load grasp rectangles from a Cornell dataset grasp file.\n :param fname: Path to text file.\n :return: GraspRectangles()\n \"\"\"\n grs = []\n with open(fname) as f:\n while True:\n # Load 4 lines at a time, corners of bounding box.\n p0 = f.readline()\n if not p0:\n break # EOF\n p1, p2, p3 = f.readline(), f.readline(), f.readline()\n try:\n gr = np.array([\n _gr_text_to_no(p0),\n _gr_text_to_no(p1),\n _gr_text_to_no(p2),\n _gr_text_to_no(p3)\n ])\n\n grs.append(GraspRectangle(gr))\n\n except ValueError:\n # Some files contain weird values.\n continue\n return cls(grs)\n\n @classmethod\n def load_from_jacquard_file(cls, fname, scale=1.0):\n \"\"\"\n Load grasp rectangles from a Jacquard dataset file.\n :param fname: Path to file.\n :param scale: Scale to apply (e.g. 
if resizing images)\n :return: GraspRectangles()\n \"\"\"\n grs = []\n with open(fname) as f:\n for l in f:\n x, y, theta, w, h = [float(v) for v in l[:-1].split(';')]\n # index based on row, column (y,x), and the Jacquard dataset's angles are flipped around an axis.\n grs.append(Grasp(np.array([y, x]), -theta/180.0*np.pi, w, h).as_gr)\n grs = cls(grs)\n grs.scale(scale)\n return grs\n\n @classmethod\n def load_from_pybullet_gym(cls, idx, data, scale=1.0):\n grs = []\n data = data[idx][4]\n x, y, theta, w, h = data\n grs.append(Grasp(np.array([y, x]), -theta/180.0*np.pi, w, h).as_gr)\n grs = cls(grs)\n grs.scale(scale)\n return grs\n\n\n def append(self, gr):\n \"\"\"\n Add a grasp rectangle to this GraspRectangles object\n :param gr: GraspRectangle\n \"\"\"\n self.grs.append(gr)\n\n def copy(self):\n \"\"\"\n :return: A deep copy of this object and all of its GraspRectangles.\n \"\"\"\n new_grs = GraspRectangles()\n for gr in self.grs:\n new_grs.append(gr.copy())\n return new_grs\n\n def show(self, ax=None, shape=None):\n \"\"\"\n Draw all GraspRectangles on a matplotlib plot.\n :param ax: (optional) existing axis\n :param shape: (optional) Plot shape if no existing axis\n \"\"\"\n if ax is None:\n f = plt.figure()\n ax = f.add_subplot(1, 1, 1)\n ax.imshow(np.zeros(shape))\n ax.axis([0, shape[1], shape[0], 0])\n self.plot(ax)\n plt.show()\n else:\n self.plot(ax)\n\n def draw(self, shape, position=True, angle=True, width=True):\n \"\"\"\n Plot all GraspRectangles as solid rectangles in a numpy array, e.g. as network training data.\n :param shape: output shape\n :param position: If True, Q output will be produced\n :param angle: If True, Angle output will be produced\n :param width: If True, Width output will be produced\n :return: Q, Angle, Width outputs (or None)\n \"\"\"\n if position:\n pos_out = np.zeros(shape)\n else:\n pos_out = None\n if angle:\n ang_out = np.zeros(shape)\n else:\n ang_out = None\n if width:\n width_out = np.zeros(shape)\n else:\n width_out = None\n\n for gr in self.grs:\n rr, cc = gr.compact_polygon_coords(shape)\n if position:\n pos_out[rr, cc] = 1.0\n if angle:\n ang_out[rr, cc] = gr.angle\n if width:\n width_out[rr, cc] = gr.length\n\n return pos_out, ang_out, width_out\n\n def to_array(self, pad_to=0):\n \"\"\"\n Convert all GraspRectangles to a single array.\n :param pad_to: Length to 0-pad the array along the first dimension\n :return: Nx4x2 numpy array\n \"\"\"\n a = np.stack([gr.points for gr in self.grs])\n if pad_to:\n if pad_to > len(self.grs):\n a = np.concatenate((a, np.zeros((pad_to - len(self.grs), 4, 2))))\n return a.astype(np.int)\n\n @property\n def center(self):\n \"\"\"\n Compute mean center of all GraspRectangles\n :return: float, mean centre of all GraspRectangles\n \"\"\"\n points = [gr.points for gr in self.grs]\n return np.mean(np.vstack(points), axis=0).astype(np.int)\n\n\nclass GraspRectangle:\n \"\"\"\n Representation of a grasp in the common \"Grasp Rectangle\" format.\n \"\"\"\n def __init__(self, points):\n self.points = points\n\n def __str__(self):\n return str(self.points)\n\n @property\n def angle(self):\n \"\"\"\n :return: Angle of the grasp to the horizontal.\n \"\"\"\n dx = self.points[1, 1] - self.points[0, 1]\n dy = self.points[1, 0] - self.points[0, 0]\n return (np.arctan2(-dy, dx) + np.pi/2) % np.pi - np.pi/2\n\n @property\n def as_grasp(self):\n \"\"\"\n :return: GraspRectangle converted to a Grasp\n \"\"\"\n return Grasp(self.center, self.angle, self.length, self.width)\n\n @property\n def center(self):\n \"\"\"\n 
:return: Rectangle center point\n        \"\"\"\n        return self.points.mean(axis=0).astype(np.int)\n\n    @property\n    def length(self):\n        \"\"\"\n        :return: Rectangle length (i.e. along the axis of the grasp)\n        \"\"\"\n        dx = self.points[1, 1] - self.points[0, 1]\n        dy = self.points[1, 0] - self.points[0, 0]\n        return np.sqrt(dx ** 2 + dy ** 2)\n\n    @property\n    def width(self):\n        \"\"\"\n        :return: Rectangle width (i.e. perpendicular to the axis of the grasp)\n        \"\"\"\n        dy = self.points[2, 1] - self.points[1, 1]\n        dx = self.points[2, 0] - self.points[1, 0]\n        return np.sqrt(dx ** 2 + dy ** 2)\n\n    def polygon_coords(self, shape=None):\n        \"\"\"\n        :param shape: Output Shape\n        :return: Indices of pixels within the grasp rectangle polygon.\n        \"\"\"\n        return polygon(self.points[:, 0], self.points[:, 1], shape)\n\n    def compact_polygon_coords(self, shape=None):\n        \"\"\"\n        :param shape: Output shape\n        :return: Indices of pixels within the centre third of the grasp rectangle.\n        \"\"\"\n        return Grasp(self.center, self.angle, self.length/3, self.width).as_gr.polygon_coords(shape)\n\n    def iou(self, gr, angle_threshold=np.pi/6):\n        \"\"\"\n        Compute IoU with another grasping rectangle\n        :param gr: GraspRectangle to compare\n        :param angle_threshold: Maximum angle difference between GraspRectangles\n        :return: IoU between Grasp Rectangles\n        \"\"\"\n        if abs((self.angle - gr.angle + np.pi/2) % np.pi - np.pi/2) > angle_threshold:\n            return 0\n\n        rr1, cc1 = self.polygon_coords()\n        rr2, cc2 = polygon(gr.points[:, 0], gr.points[:, 1])\n        try:\n            r_max = max(rr1.max(), rr2.max()) + 1\n            c_max = max(cc1.max(), cc2.max()) + 1\n        except ValueError:\n            # one of the rectangles rasterised to zero pixels\n            return 0\n        canvas = np.zeros((r_max, c_max))\n        canvas[rr1, cc1] += 1\n        canvas[rr2, cc2] += 1\n        union = np.sum(canvas > 0)\n        if union == 0:\n            return 0\n        intersection = np.sum(canvas == 2)\n        return intersection/union\n\n    def copy(self):\n        \"\"\"\n        :return: Copy of self.\n        \"\"\"\n        return GraspRectangle(self.points.copy())\n\n    def offset(self, offset):\n        \"\"\"\n        Offset grasp rectangle\n        :param offset: array [y, x] distance to offset\n        \"\"\"\n        self.points += np.array(offset).reshape((1, 2))\n\n    def rotate(self, angle, center):\n        \"\"\"\n        Rotate grasp rectangle\n        :param angle: Angle to rotate (in radians)\n        :param center: Point to rotate around (e.g. image center)\n        \"\"\"\n        R = np.array(\n            [\n                [np.cos(-angle), np.sin(-angle)],\n                [-1 * np.sin(-angle), np.cos(-angle)],\n            ]\n        )\n        c = np.array(center).reshape((1, 2))\n        self.points = ((np.dot(R, (self.points - c).T)).T + c).astype(np.int)\n\n    def scale(self, factor):\n        \"\"\"\n        :param factor: Scale grasp rectangle by factor\n        \"\"\"\n        if factor == 1.0:\n            return\n        self.points *= factor\n\n    def plot(self, ax, color=None):\n        \"\"\"\n        Plot grasping rectangle.\n        :param ax: Existing matplotlib axis\n        :param color: matplotlib color code (optional)\n        \"\"\"\n        points = np.vstack((self.points, self.points[0]))\n        ax.plot(points[:, 1], points[:, 0], color=color)\n\n    def zoom(self, factor, center):\n        \"\"\"\n        Zoom grasp rectangle by given factor.\n        :param factor: Zoom factor\n        :param center: Zoom center (focus point, e.g. 
image center)\n \"\"\"\n T = np.array(\n [\n [1/factor, 0],\n [0, 1/factor]\n ]\n )\n c = np.array(center).reshape((1, 2))\n self.points = ((np.dot(T, (self.points - c).T)).T + c).astype(np.int)\n\n\nclass Grasp:\n \"\"\"\n A Grasp represented by a center pixel, rotation angle and gripper width (length)\n \"\"\"\n def __init__(self, center, angle, length=60, width=30):\n self.center = center\n self.angle = angle # Positive angle means rotate anti-clockwise from horizontal.\n self.length = length\n self.width = width\n\n @property\n def as_gr(self):\n \"\"\"\n Convert to GraspRectangle\n :return: GraspRectangle representation of grasp.\n \"\"\"\n xo = np.cos(self.angle)\n yo = np.sin(self.angle)\n\n y1 = self.center[0] + self.length / 2 * yo\n x1 = self.center[1] - self.length / 2 * xo\n y2 = self.center[0] - self.length / 2 * yo\n x2 = self.center[1] + self.length / 2 * xo\n\n return GraspRectangle(np.array(\n [\n [y1 - self.width/2 * xo, x1 - self.width/2 * yo],\n [y2 - self.width/2 * xo, x2 - self.width/2 * yo],\n [y2 + self.width/2 * xo, x2 + self.width/2 * yo],\n [y1 + self.width/2 * xo, x1 + self.width/2 * yo],\n ]\n ).astype(np.float))\n\n def max_iou(self, grs):\n \"\"\"\n Return maximum IoU between self and a list of GraspRectangles\n :param grs: List of GraspRectangles\n :return: Maximum IoU with any of the GraspRectangles\n \"\"\"\n self_gr = self.as_gr\n max_iou = 0\n for gr in grs:\n iou = self_gr.iou(gr)\n max_iou = max(max_iou, iou)\n return max_iou\n\n def plot(self, ax, color=None):\n \"\"\"\n Plot Grasp\n :param ax: Existing matplotlib axis\n :param color: (optional) color\n \"\"\"\n self.as_gr.plot(ax, color)\n\n def to_jacquard(self, scale=1):\n \"\"\"\n Output grasp in \"Jacquard Dataset Format\" (https://jacquard.liris.cnrs.fr/database.php)\n :param scale: (optional) scale to apply to grasp\n :return: string in Jacquard format\n \"\"\"\n # Output in jacquard format.\n return '%0.2f;%0.2f;%0.2f;%0.2f;%0.2f' % (self.center[1]*scale, self.center[0]*scale, -1*self.angle*180/np.pi, self.length*scale, self.width*scale)\n\n\ndef detect_grasps(q_img, ang_img, width_img=None, no_grasps=1):\n \"\"\"\n Detect grasps in a GG-CNN output.\n :param q_img: Q image network output\n :param ang_img: Angle image network output\n :param width_img: (optional) Width image network output\n :param no_grasps: Max number of grasps to return\n :return: list of Grasps\n \"\"\"\n local_max = peak_local_max(q_img, min_distance=20, threshold_abs=0.2, num_peaks=no_grasps)\n\n grasps = []\n for grasp_point_array in local_max:\n grasp_point = tuple(grasp_point_array)\n\n grasp_angle = ang_img[grasp_point]\n\n g = Grasp(grasp_point, grasp_angle)\n if width_img is not None:\n g.length = width_img[grasp_point]\n g.width = g.length/2\n\n grasps.append(g)\n\n return grasps\n"
]
| [
[
"numpy.array",
"numpy.sin",
"numpy.dot",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.stack",
"numpy.arctan2",
"numpy.sqrt",
"numpy.cos",
"matplotlib.pyplot.show",
"numpy.vstack"
]
]
|
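One technique worth isolating from the row above is the rasterised IoU in `GraspRectangle.iou`: both rectangles are drawn onto a shared integer canvas and the overlap counted. A self-contained version (the original's angle pre-check is omitted for brevity):

```python
import numpy as np
from skimage.draw import polygon

def polygon_iou(points_a, points_b):
    # Rasterise both polygons onto one canvas; cells equal to 2 are overlap.
    rr1, cc1 = polygon(points_a[:, 0], points_a[:, 1])
    rr2, cc2 = polygon(points_b[:, 0], points_b[:, 1])
    r_max = max(rr1.max(), rr2.max()) + 1
    c_max = max(cc1.max(), cc2.max()) + 1
    canvas = np.zeros((r_max, c_max))
    canvas[rr1, cc1] += 1
    canvas[rr2, cc2] += 1
    union = np.sum(canvas > 0)
    return np.sum(canvas == 2) / union if union else 0.0

a = np.array([[0, 0], [0, 10], [10, 10], [10, 0]])
b = np.array([[5, 0], [5, 10], [15, 10], [15, 0]])
print(polygon_iou(a, b))  # roughly 1/3 for two half-overlapping squares
```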
HarlockOfficial/dva336_labs | [
"0fc354fe02eba38d123fec959433a53b2a99a7c7"
]
| [
"Project2/plot_graph.py"
]
| [
"#!/usr/bin/python3\nimport math\nfrom typing import Tuple\n\nimport matplotlib.pyplot as plt\n\ndivision_factor_for_reference_values = math.pow(10, 4)/1.5\nk = 4 # thread count\n\n\ndef main():\n x, y_seq, y_par, res_seq, res_par = get_data(\"test_code_output.csv\")\n\n x_fixed = list()\n x_div_k = list()\n pi = list()\n for elem in x:\n x_fixed.append(elem / division_factor_for_reference_values)\n x_div_k.append(elem / k / division_factor_for_reference_values)\n pi.append(math.pi)\n\n # plot time\n plt.figure(1)\n plt.plot(x, y_seq, label=\"Sequential Time\")\n plt.plot(x, y_par, label=\"Parallel Time\")\n plt.plot(x, x_fixed, label=\"N\")\n plt.plot(x, x_div_k, label=\"N/k\")\n plt.xlabel(\"Value of N\")\n plt.ylabel(\"ms to calculate approximate value of PI\")\n plt.legend()\n plt.gca().set_ylim([0, 12000])\n\n # plot results\n plt.figure(2)\n plt.plot(x, res_seq, label=\"Sequential Results\")\n plt.plot(x, pi, label=\"Expected Result\")\n plt.plot(x, res_par, label=\"Parallel Results\")\n plt.xlabel(\"Value of N\")\n plt.ylabel(\"calculated value of PI\")\n plt.legend()\n plt.gca().set_ylim([3.141585, 3.14166])\n\n # show both plots\n plt.show()\n\n\ndef get_data(file_name: str) -> Tuple[list, list, list, list, list]:\n x = list()\n y_seq = list()\n y_par = list()\n res_seq = list()\n res_par = list()\n with open(file_name, \"r\") as input_file:\n for line in input_file:\n data = line.split(\",\")\n x.append(int(data[0]))\n y_seq.append(int(data[1]))\n y_par.append(int(data[2]))\n res_seq.append(float(data[3]))\n res_par.append(float(data[4]))\n return x, y_seq, y_par, res_seq, res_par\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca"
]
]
|
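The script above keeps two plots alive until a single `plt.show()` by addressing them through matplotlib's numbered-figure API. The pattern in isolation (data invented; `k = 4` mirrors the script's thread count):

```python
import matplotlib.pyplot as plt

k = 4
x = [1, 2, 3, 4, 5]

plt.figure(1)                                   # timing figure
plt.plot(x, [2 * v for v in x], label="Sequential Time")
plt.plot(x, [2 * v / k for v in x], label="Parallel Time")
plt.legend()

plt.figure(2)                                   # results figure
plt.plot(x, [3.14159] * len(x), label="Expected Result")
plt.legend()

plt.show()                                      # shows both figures together
```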
AceZhan/pyserini | [
"188060178c012181e1c32791694930bced186534"
]
| [
"scripts/ltr_msmarco/ltr_inference.py"
]
| [
"#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys\n\n# We're going to explicitly use a local installation of Pyserini (as opposed to a pip-installed one).\n# Comment these lines out to use a pip-installed one instead.\nsys.path.insert(0, './')\n\nimport argparse\nimport json\nimport multiprocessing\nimport os\nimport pickle\nimport time\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom pyserini.ltr.search_msmarco._search_msmarco import MsmarcoLtrSearcher\nfrom pyserini.ltr import *\n\n\"\"\"\nRunning prediction on candidates\n\"\"\"\ndef dev_data_loader(file, format, data, top=100):\n if format == 'tsv':\n dev = pd.read_csv(file, sep=\"\\t\",\n names=['qid', 'pid', 'rank'],\n dtype={'qid': 'S','pid': 'S', 'rank':'i',})\n elif format == 'trec':\n dev = pd.read_csv(file, sep=\"\\s+\",\n names=['qid', 'q0', 'pid', 'rank', 'score', 'tag'],\n usecols=['qid', 'pid', 'rank'],\n dtype={'qid': 'S','pid': 'S', 'rank':'i',})\n else:\n raise Exception('unknown parameters')\n assert dev['qid'].dtype == np.object\n assert dev['pid'].dtype == np.object\n assert dev['rank'].dtype == np.int32\n dev = dev[dev['rank']<=top]\n if data == 'passage':\n dev_qrel = pd.read_csv('tools/topics-and-qrels/qrels.msmarco-passage.dev-subset.txt', sep=\" \",\n names=[\"qid\", \"q0\", \"pid\", \"rel\"], usecols=['qid', 'pid', 'rel'],\n dtype={'qid': 'S','pid': 'S', 'rel':'i'})\n elif data == 'document':\n dev_qrel = pd.read_csv('tools/topics-and-qrels/qrels.msmarco-doc.dev.txt', sep=\"\\t\",\n names=[\"qid\", \"q0\", \"pid\", \"rel\"], usecols=['qid', 'pid', 'rel'],\n dtype={'qid': 'S','pid': 'S', 'rel':'i'})\n assert dev['qid'].dtype == np.object\n assert dev['pid'].dtype == np.object\n assert dev['rank'].dtype == np.int32\n dev = dev.merge(dev_qrel, left_on=['qid', 'pid'], right_on=['qid', 'pid'], how='left')\n dev['rel'] = dev['rel'].fillna(0).astype(np.int32)\n dev = dev.sort_values(['qid', 'pid']).set_index(['qid', 'pid'])\n\n print(dev.shape)\n print(dev.index.get_level_values('qid').drop_duplicates().shape)\n print(dev.groupby('qid').count().mean())\n print(dev.head(10))\n print(dev.info())\n\n dev_rel_num = dev_qrel[dev_qrel['rel'] > 0].groupby('qid').count()['rel']\n\n recall_point = [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]\n recall_curve = {k: [] for k in recall_point}\n for qid, group in tqdm(dev.groupby('qid')):\n group = group.reset_index()\n assert len(group['pid'].tolist()) == len(set(group['pid'].tolist()))\n total_rel = dev_rel_num.loc[qid]\n query_recall = [0 for k in recall_point]\n for t in group.sort_values('rank').itertuples():\n if t.rel > 0:\n for i, p in enumerate(recall_point):\n if t.rank <= p:\n query_recall[i] += 1\n for i, p in enumerate(recall_point):\n if total_rel > 0:\n recall_curve[p].append(query_recall[i] / total_rel)\n else:\n recall_curve[p].append(0.)\n\n for k, v in recall_curve.items():\n avg = np.mean(v)\n print(f'recall@{k}:{avg}')\n\n return dev, 
dev_qrel\n\n\ndef query_loader():\n queries = {}\n with open(f'{args.queries}/queries.dev.small.json') as f:\n for line in f:\n query = json.loads(line)\n qid = query.pop('id')\n query['analyzed'] = query['analyzed'].split(\" \")\n query['text'] = query['text_unlemm'].split(\" \")\n query['text_unlemm'] = query['text_unlemm'].split(\" \")\n query['text_bert_tok'] = query['text_bert_tok'].split(\" \")\n queries[qid] = query\n return queries\n\n\ndef eval_mrr(dev_data):\n score_tie_counter = 0\n score_tie_query = set()\n MRR = []\n for qid, group in tqdm(dev_data.groupby('qid')):\n group = group.reset_index()\n rank = 0\n prev_score = None\n assert len(group['pid'].tolist()) == len(set(group['pid'].tolist()))\n # stable sort is also used in LightGBM\n\n for t in group.sort_values('score', ascending=False, kind='mergesort').itertuples():\n if prev_score is not None and abs(t.score - prev_score) < 1e-8:\n score_tie_counter += 1\n score_tie_query.add(qid)\n prev_score = t.score\n rank += 1\n if t.rel > 0:\n MRR.append(1.0 / rank)\n break\n elif rank == 10 or rank == len(group):\n MRR.append(0.)\n break\n\n score_tie = f'score_tie occurs {score_tie_counter} times in {len(score_tie_query)} queries'\n print(score_tie)\n mrr_10 = np.mean(MRR).item()\n print(f'MRR@10:{mrr_10} with {len(MRR)} queries')\n return {'score_tie': score_tie, 'mrr_10': mrr_10}\n\n\ndef eval_recall(dev_qrel, dev_data):\n dev_rel_num = dev_qrel[dev_qrel['rel'] > 0].groupby('qid').count()['rel']\n\n score_tie_counter = 0\n score_tie_query = set()\n\n recall_point = [10,20,50,100,200,250,300,333,400,500,1000]\n recall_curve = {k: [] for k in recall_point}\n for qid, group in tqdm(dev_data.groupby('qid')):\n group = group.reset_index()\n rank = 0\n prev_score = None\n assert len(group['pid'].tolist()) == len(set(group['pid'].tolist()))\n # stable sort is also used in LightGBM\n total_rel = dev_rel_num.loc[qid]\n query_recall = [0 for k in recall_point]\n for t in group.sort_values('score', ascending=False, kind='mergesort').itertuples():\n if prev_score is not None and abs(t.score - prev_score) < 1e-8:\n score_tie_counter += 1\n score_tie_query.add(qid)\n prev_score = t.score\n rank += 1\n if t.rel > 0:\n for i, p in enumerate(recall_point):\n if rank <= p:\n query_recall[i] += 1\n for i, p in enumerate(recall_point):\n if total_rel > 0:\n recall_curve[p].append(query_recall[i] / total_rel)\n else:\n recall_curve[p].append(0.)\n\n score_tie = f'score_tie occurs {score_tie_counter} times in {len(score_tie_query)} queries'\n print(score_tie)\n res = {'score_tie': score_tie}\n\n for k, v in recall_curve.items():\n avg = np.mean(v)\n print(f'recall@{k}:{avg}')\n res[f'recall@{k}'] = avg\n\n return res\n\n\ndef output(file, dev_data, format):\n score_tie_counter = 0\n score_tie_query = set()\n output_file = open(file,'w')\n\n for qid, group in tqdm(dev_data.groupby('qid')):\n group = group.reset_index()\n rank = 0\n prev_score = None\n assert len(group['pid'].tolist()) == len(set(group['pid'].tolist()))\n # stable sort is also used in LightGBM\n\n for t in group.sort_values('score', ascending=False, kind='mergesort').itertuples():\n if prev_score is not None and abs(t.score - prev_score) < 1e-8:\n score_tie_counter += 1\n score_tie_query.add(qid)\n prev_score = t.score\n rank += 1\n if (format == 'tsv'):\n output_file.write(f\"{qid}\\t{t.pid}\\t{rank}\\n\")\n else:\n output_file.write(f\"{qid}\\tQ0\\t{t.pid}\\t{rank}\\t{t.score}\\tltr\\n\")\n\n score_tie = f'score_tie occurs {score_tie_counter} times in {len(score_tie_query)} 
queries'\n print(score_tie)\n\nif __name__ == \"__main__\":\n os.environ[\"ANSERINI_CLASSPATH\"] = \"./pyserini/resources/jars\"\n parser = argparse.ArgumentParser(description='Learning to rank reranking')\n parser.add_argument('--input', required=True)\n parser.add_argument('--reranking-top', type=int, default=1000)\n parser.add_argument('--input-format', required=True)\n parser.add_argument('--model', required=True)\n parser.add_argument('--index', required=True)\n parser.add_argument('--output', required=True)\n parser.add_argument('--ibm-model', required=True)\n parser.add_argument('--queries', required=True)\n parser.add_argument('--data', required=True)\n parser.add_argument('--output-format',default='trec')\n\n args = parser.parse_args()\n searcher = MsmarcoLtrSearcher(args.model, args.ibm_model, args.index, args.data)\n searcher.add_fe()\n print(\"load dev\")\n dev, dev_qrel = dev_data_loader(args.input, args.input_format, args.data, args.reranking_top)\n print(\"load queries\")\n queries = query_loader()\n\n batch_info = searcher.search(dev, queries)\n del dev, queries\n\n eval_res = eval_mrr(batch_info)\n eval_recall(dev_qrel, batch_info)\n output(args.output, batch_info,args.output_format)\n print('Done!')\n\n\n"
]
| [
[
"pandas.read_csv",
"numpy.mean"
]
]
|
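The `eval_mrr` function above walks each query's candidates in descending score order, using pandas' stable mergesort so tied scores keep a deterministic order. Its core loop on a toy frame (values invented for illustration):

```python
import pandas as pd

df = pd.DataFrame({
    'qid': ['q1', 'q1', 'q1', 'q2', 'q2'],
    'pid': ['p1', 'p2', 'p3', 'p4', 'p5'],
    'score': [0.9, 0.8, 0.7, 0.6, 0.5],
    'rel': [0, 1, 0, 0, 0],       # rel > 0 marks a relevant passage
})

mrr = []
for qid, group in df.groupby('qid'):
    rank = 0
    # kind='mergesort' keeps ties in a stable order, as in the script
    for t in group.sort_values('score', ascending=False, kind='mergesort').itertuples():
        rank += 1
        if t.rel > 0:
            mrr.append(1.0 / rank)
            break
        if rank == 10 or rank == len(group):
            mrr.append(0.0)
            break

print(sum(mrr) / len(mrr))  # 0.25: reciprocal rank 1/2 for q1, 0 for q2
```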
tikisi/donkeycar | [
"688204ca074886321e0d58e75d81d89f04f7a2b6"
]
| [
"donkeycar/parts/graph.py"
]
| [
"import numpy as np\nimport cv2\n\n\nclass Graph(object):\n '''\n Take input values and plot them on an image.\n Takes a list of (x, y) (b, g, r) pairs and\n plots the color at the given coordinate.\n When the x value exceeds the width, the graph is erased\n and begins with an offset to x values such that drawing\n begins again at the left edge.\n This assumes x is monotonically increasing, like a time value.\n '''\n def __init__(self, res=(200, 200, 3)):\n self.img = np.zeros(res)\n self.prev = 0\n\n def clamp(self, val, lo, hi):\n if val < lo:\n val = lo\n elif val > hi:\n val = hi\n return int(val)\n\n def run(self, values):\n if values is None:\n return self.img\n\n for coord, col in values:\n x = coord[0] % self.img.shape[1]\n y = self.clamp(coord[1], 0, self.img.shape[0] - 1)\n self.img[y, x] = col\n\n if abs(self.prev - x) > self.img.shape[1] / 2:\n self.img = np.zeros_like(self.img)\n\n self.prev = x\n \n return self.img\n\n def shutdown(self):\n pass\n"
]
| [
[
"numpy.zeros_like",
"numpy.zeros"
]
]
|
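The `Graph` part above scrolls by wrapping x with a modulo and erasing the canvas whenever the wrapped x jumps by more than half the width. The idea in a few lines (here the erase happens before the draw, so the first point after a wrap survives):

```python
import numpy as np

img = np.zeros((200, 200, 3))
prev = 0
for t in range(0, 500, 50):              # x grows monotonically, like time
    x = t % img.shape[1]                 # wrap back to the left edge
    if abs(prev - x) > img.shape[1] / 2:
        img = np.zeros_like(img)         # large jump means we wrapped: erase
    img[100, x] = (0, 0, 255)            # plot the value as a (b, g, r) pixel
    prev = x
```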
globophobe/fastapi-quant-candles | [
"0bc95f6bb32071aa32a4951ca0a15521f67f7f97"
]
| [
"cryptofeed_werks/exchanges/bitfinex/base.py"
]
| [
"from datetime import datetime\nfrom decimal import Decimal\nfrom typing import Optional\n\nimport numpy as np\nfrom pandas import DataFrame\n\nfrom cryptofeed_werks.lib import candles_to_data_frame, timestamp_to_inclusive\n\nfrom .api import format_bitfinex_api_timestamp, get_bitfinex_api_timestamp\nfrom .candles import get_candles\nfrom .trades import get_trades\n\n\nclass BitfinexMixin:\n \"\"\"\n Details: https://docs.bitfinex.com/reference#rest-public-trades\n\n ID\tint\tID of the trade\n MTS\tint\tmillisecond time stamp\n ±AMOUNT\tfloat\tHow much was bought (positive) or sold (negative).\n PRICE\tfloat\tPrice at which the trade was executed (trading tickers only)\n RATE\tfloat\tRate at which funding transaction occurred (funding tickers only)\n PERIOD\tint\tAmount of time the funding transaction was for (funding tickers only)\n \"\"\"\n\n def get_pagination_id(self, timestamp_from: datetime) -> Optional[int]:\n \"\"\"Get pagination_id.\"\"\"\n return format_bitfinex_api_timestamp(timestamp_from)\n\n def iter_api(self, timestamp_from: datetime, pagination_id: int) -> list:\n \"\"\"Iterate Bitfinex API.\"\"\"\n return get_trades(\n self.symbol.api_symbol,\n timestamp_from,\n pagination_id,\n log_format=self.log_format,\n )\n\n def get_uid(self, trade: dict) -> str:\n \"\"\"Get uid.\"\"\"\n return str(trade[0])\n\n def get_timestamp(self, trade: dict) -> datetime:\n \"\"\"Get timestamp.\"\"\"\n return get_bitfinex_api_timestamp(trade)\n\n def get_nanoseconds(self, trade: dict) -> int:\n \"\"\"Get nanoseconds.\"\"\"\n return self.get_timestamp(trade).nanosecond\n\n def get_price(self, trade: dict) -> Decimal:\n \"\"\"Get price.\"\"\"\n return Decimal(trade[3])\n\n def get_volume(self, trade: dict) -> Decimal:\n \"\"\"Get volume.\"\"\"\n return self.get_price(trade) * self.get_notional(trade)\n\n def get_notional(self, trade: dict) -> Decimal:\n \"\"\"Get notional.\"\"\"\n return abs(Decimal(trade[2]))\n\n def get_tick_rule(self, trade: dict) -> int:\n \"\"\"Get tick rule.\n\n Buy side indicates a down-tick because the maker was a buy order and\n their order was removed. Conversely, sell side indicates an up-tick.\n \"\"\"\n return np.sign(trade[2])\n\n def get_index(self, trade: dict) -> int:\n \"\"\"Get index.\"\"\"\n return trade[0]\n\n def get_data_frame(self, trades: list) -> list:\n \"\"\"Get data_frame.\n\n Websocket sends trades in order, by incrementing non sequential integer\n REST API returns results unsorted\n Sort by uid, reversed\n \"\"\"\n trades.sort(key=lambda x: x[\"index\"], reverse=True)\n return super().get_data_frame(trades)\n\n def get_candles(\n self, timestamp_from: datetime, timestamp_to: datetime\n ) -> DataFrame:\n \"\"\"Get candles from Exchange API.\"\"\"\n ts_to = timestamp_to_inclusive(timestamp_from, timestamp_to, value=\"1t\")\n candles = get_candles(\n self.symbol.api_symbol,\n timestamp_from,\n ts_to,\n time_frame=\"1m\",\n log_format=f\"{self.log_format} validating\",\n )\n return candles_to_data_frame(timestamp_from, timestamp_to, candles)\n"
]
| [
[
"numpy.sign"
]
]
|
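In the Bitfinex row above, a trade's side comes from the sign of its amount and the notional from its magnitude. The same arithmetic on one sample trade (Decimal is built from str here to avoid binary-float artifacts, a small deviation from the row's `Decimal(trade[2])`):

```python
import numpy as np
from decimal import Decimal

trade = [123456, 1629000000000, -0.25, 45000.0]  # [ID, MTS, +/-AMOUNT, PRICE]
tick_rule = np.sign(trade[2])                    # -1.0: the amount was sold
notional = abs(Decimal(str(trade[2])))           # 0.25
volume = Decimal(str(trade[3])) * notional       # price * notional
print(tick_rule, notional, volume)               # -1.0 0.25 11250.000
```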
MaharshiPedu/Medical-Transformer-with-DWT | [
"a2193b74cbe8f6e9855e0e35b988131a0b77a7cd"
]
| [
"disWT/discrete_wt.py"
]
| [
"from numpy.core.fromnumeric import size\nimport pywt\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimage\nimport glob\nimport os\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport torch\ndef DWT(x_batch):\n x_batch_np = x_batch.detach().cpu().numpy()\n LL_list, LH_list, HL_list, HH_list = [], [], [], []\n for _ in range(len(x_batch_np)): # len(x_batch_np) finds out the number of images in the dataset\n \n imgs = [mpimage.imread(file) for file in x_batch] #Reading all the images in a particular subdirectory, say, 1 in T1fusion\n tensor_imgs = torch.tensor(imgs)\n for img in tensor_imgs:\n \n coeffs2 = pywt.dwt2(img, 'db3')\n LL, (LH, HL, HH) = coeffs2\n LL_list.append(LL)\n LH_list.append(LH)\n HL_list.append(HL)\n HH_list.append(HH)\n\n LL_list = np.array(LL_list)\n LH_list = np.array(LH_list)\n HL_list = np.array(HL_list)\n HH_list = np.array(HH_list)\n \n return [LL_list, LH_list, HL_list, HH_list]\n\ndef IDWT(output_LL, output_LH, output_HL, output_HH):\n output_LL_np = output_LL.detach().cpu().numpy()\n output_LH_np = output_LH.detach().cpu().numpy()\n output_HL_np = output_HL.detach().cpu().numpy()\n output_HH_np = output_HH.detach().cpu().numpy()\n\n coeff2 = output_LL_np, (output_LH_np, output_HL_np, output_HH_np)\n idwt_fig = pywt.idwt2(coeff2, 'db3')\n return idwt_fig\n \n# new_path = os.path.join(parent_dir, img_folder, sub_directory)\n# os.makedirs(new_path)\n# for j, a in enumerate([LL, LH, HL, HH]):\n# plt.figure(figsize=(1, 1), dpi=256)\n \n# plt.imshow(a, interpolation='nearest', cmap=plt.cm.gray)\n# plt.axis('off')\n# plt.tight_layout(pad=0)\n# plt.savefig(new_path +'{}_{}_{}'.format(k+1, i+1, j+1) +'.png')\n \n# plt.show()"
]
| [
[
"numpy.array",
"matplotlib.image.imread",
"torch.tensor"
]
]
|
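The DWT/IDWT pair above is a thin wrapper over pywt's 2D transform. A round trip on a random image shows the coefficient layout and that `idwt2` inverts `dwt2` (the 64x64 size is chosen so the reconstruction matches exactly):

```python
import numpy as np
import pywt

img = np.random.rand(64, 64)
LL, (LH, HL, HH) = pywt.dwt2(img, 'db3')     # approximation + 3 detail bands
rec = pywt.idwt2((LL, (LH, HL, HH)), 'db3')  # inverse transform
print(LL.shape, rec.shape)                   # (34, 34) (64, 64)
print(np.allclose(img, rec))                 # True
```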
fangedward/pylot | [
"a742b3789ee8e7fa2d692ae22bda1e2960ed9345"
]
| [
"scripts/sign_data_gatherer.py"
]
| [
"from absl import app\nfrom absl import flags\nimport carla\nimport json\nimport numpy as np\nimport PIL.Image as Image\nimport time\nimport re\n\nfrom pylot.drivers.sensor_setup import DepthCameraSetup, RGBCameraSetup, \\\n SegmentedCameraSetup\nfrom pylot.perception.camera_frame import CameraFrame\nfrom pylot.perception.depth_frame import DepthFrame\nfrom pylot.perception.detection.speed_limit_sign import SpeedLimitSign\nfrom pylot.perception.detection.stop_sign import StopSign\nfrom pylot.perception.detection.traffic_light import TrafficLight\nfrom pylot.perception.messages import DepthFrameMessage\nfrom pylot.perception.segmentation.segmented_frame import SegmentedFrame\nfrom pylot.simulation.utils import get_world\nimport pylot.simulation.utils\nimport pylot.utils\n\nFLAGS = flags.FLAGS\nCARLA_IMAGE = None\nDEPTH_FRAME = None\nSEGMENTED_FRAME = None\n\nflags.DEFINE_string('data_path', 'data/', 'Path where data will be saved')\nflags.DEFINE_integer('frame_width', 1920, 'Camera frame width')\nflags.DEFINE_integer('frame_height', 1080, 'Camera frame height')\nflags.DEFINE_bool('visualize_bboxes', False, 'True to enable bbox visualizer')\nflags.DEFINE_bool('log_bbox_images', False,\n 'True to enable logging of bbox annodated images')\nflags.DEFINE_integer('camera_fov', 45, 'Camera fov')\n\n\ndef on_camera_msg(image):\n global CARLA_IMAGE\n CARLA_IMAGE = image\n\n\ndef on_depth_msg(carla_image):\n global DEPTH_FRAME\n transform = pylot.utils.Transform.from_carla_transform(\n carla_image.transform)\n camera_setup = DepthCameraSetup(\"depth_camera\", FLAGS.frame_width,\n FLAGS.camera_height, transform,\n FLAGS.camera_fov)\n\n DEPTH_FRAME = DepthFrameMessage(\n int(carla_image.timestamp * 1000),\n DepthFrame.from_carla_frame(carla_image, camera_setup))\n\n\ndef on_segmented_msg(carla_image):\n global SEGMENTED_FRAME\n transform = pylot.utils.Transform.from_carla_transform(\n carla_image.transform)\n camera_setup = SegmentedCameraSetup(\"segmented_camera\", FLAGS.frame_width,\n FLAGS.camera_height, transform,\n FLAGS.camera_fov)\n SEGMENTED_FRAME = SegmentedFrame(carla_image, 'carla', camera_setup)\n\n\ndef add_camera(world, transform, callback):\n camera_blueprint = world.get_blueprint_library().find('sensor.camera.rgb')\n camera_blueprint.set_attribute('image_size_x', str(FLAGS.frame_width))\n camera_blueprint.set_attribute('image_size_y', str(FLAGS.frame_height))\n camera_blueprint.set_attribute('fov', str(FLAGS.camera_fov))\n camera = world.spawn_actor(camera_blueprint, transform)\n # Register callback to be invoked when a new frame is received.\n camera.listen(callback)\n return camera\n\n\ndef add_depth_camera(world, transform, callback):\n depth_blueprint = world.get_blueprint_library().find('sensor.camera.depth')\n depth_blueprint.set_attribute('image_size_x', str(FLAGS.frame_width))\n depth_blueprint.set_attribute('image_size_y', str(FLAGS.frame_height))\n depth_blueprint.set_attribute('fov', str(FLAGS.camera_fov))\n depth_camera = world.spawn_actor(depth_blueprint, transform)\n # Register callback to be invoked when a new frame is received.\n depth_camera.listen(callback)\n return depth_camera\n\n\ndef add_segmented_camera(world, transform, callback):\n segmented_blueprint = world.get_blueprint_library().find(\n 'sensor.camera.semantic_segmentation')\n segmented_blueprint.set_attribute('image_size_x', str(FLAGS.frame_width))\n segmented_blueprint.set_attribute('image_size_y', str(FLAGS.frame_height))\n segmented_blueprint.set_attribute('fov', str(FLAGS.camera_fov))\n segmented_camera 
= world.spawn_actor(segmented_blueprint, transform)\n    segmented_camera.listen(callback)\n    return segmented_camera\n\n\ndef setup_world():\n    # Connect to the Carla simulator.\n    client, world = get_world()\n    settings = world.get_settings()\n    settings.synchronous_mode = True\n    settings.fixed_delta_seconds = 0.02\n    world.apply_settings(settings)\n    return world\n\n\ndef wait_for_data(world):\n    world.tick()\n    global CARLA_IMAGE\n    global DEPTH_FRAME\n    global SEGMENTED_FRAME\n    while (CARLA_IMAGE is None or DEPTH_FRAME is None\n           or SEGMENTED_FRAME is None):\n        time.sleep(0.1)\n\n\ndef reset_frames():\n    global DEPTH_FRAME\n    global SEGMENTED_FRAME\n    global CARLA_IMAGE\n    # Reset frames.\n    DEPTH_FRAME = None\n    SEGMENTED_FRAME = None\n    CARLA_IMAGE = None\n\n\ndef get_traffic_light_obstacles(traffic_lights, depth_frame, segmented_frame,\n                                color, town_name):\n    det_obstacles = pylot.simulation.utils.get_traffic_lights_obstacles(\n        traffic_lights, depth_frame, segmented_frame, town_name)\n    # Overwrite traffic light color because we control it without refreshing\n    # the agents.\n    if color == carla.TrafficLightState.Yellow:\n        label = 'yellow'\n    elif color == carla.TrafficLightState.Green:\n        label = 'green'\n    elif color == carla.TrafficLightState.Red:\n        label = 'red'\n    elif color == carla.TrafficLightState.Off:\n        label = 'off'\n    else:\n        raise ValueError('Unknown traffic light color')\n    label += ' traffic light'\n\n    for obstacle in det_obstacles:\n        obstacle.label = label\n    return det_obstacles\n\n\ndef log_bounding_boxes(carla_image, depth_msg, segmented_frame, traffic_lights,\n                       tl_color, speed_signs, stop_signs, weather, town):\n    game_time = int(carla_image.timestamp * 1000)\n    print(\"Processing game time {} in {} with weather {}\".format(\n        game_time, town, weather))\n    transform = pylot.utils.Transform.from_carla_transform(\n        carla_image.transform)\n    camera_setup = RGBCameraSetup(\"rgb_camera\", FLAGS.frame_width,\n                                  FLAGS.frame_height, transform,\n                                  FLAGS.camera_fov)\n    frame = CameraFrame.from_carla_frame(carla_image, camera_setup)\n    _, world = get_world()\n    town_name = world.get_map().name\n\n    speed_limit_det_obstacles = []\n    if speed_signs:\n        speed_limit_det_obstacles = pylot.simulation.utils.get_detected_speed_limits(\n            speed_signs, depth_msg.frame, segmented_frame)\n\n    traffic_stop_det_obstacles = []\n    if stop_signs:\n        traffic_stop_det_obstacles = pylot.simulation.utils.get_detected_traffic_stops(\n            stop_signs, depth_msg.frame)\n\n    traffic_light_det_obstacles = []\n    if traffic_lights:\n        traffic_light_det_obstacles = get_traffic_light_obstacles(\n            traffic_lights, depth_msg.frame, segmented_frame, tl_color,\n            town_name)\n\n    det_obstacles = (speed_limit_det_obstacles + traffic_stop_det_obstacles +\n                     traffic_light_det_obstacles)\n    # Log the frame.\n    file_name = '{}signs-{}_{}_{}.png'.format(FLAGS.data_path, game_time,\n                                              weather, town)\n    rgb_img = Image.fromarray(frame.as_rgb_numpy_array())\n    rgb_img.save(file_name)\n\n    if FLAGS.log_bbox_images:\n        frame.annotate_with_bounding_boxes(game_time, det_obstacles)\n        file_name = '{}annotated-signs-{}_{}_{}.png'.format(\n            FLAGS.data_path, game_time, weather, town)\n        rgb_img = Image.fromarray(frame.as_rgb_numpy_array())\n        rgb_img.save(file_name)\n\n    # Log the bounding boxes.\n    bboxes = [obstacle.get_bbox_label() for obstacle in det_obstacles]\n    file_name = '{}bboxes-{}_{}_{}.json'.format(FLAGS.data_path, game_time,\n                                                weather, town)\n    with open(file_name, 'w') as outfile:\n        json.dump(bboxes, outfile)\n\n    if FLAGS.visualize_bboxes:\n        
frame.annotate_with_bounding_boxes(game_time, det_obstacles)\n frame.visualize('bboxes')\n\n\ndef change_traffic_light_colors(world, color):\n actor_list = world.get_actors()\n tl_actors = actor_list.filter('traffic.traffic_light*')\n for tl in tl_actors:\n tl.set_state(color)\n tl.freeze(True)\n if color == carla.TrafficLightState.Green:\n tl.set_green_time(99999999999999999999999999999999999999)\n tl.set_yellow_time(0)\n tl.set_red_time(0)\n elif color == carla.TrafficLightState.Yellow:\n tl.set_green_time(0)\n tl.set_yellow_time(99999999999999999999999999999999999999)\n tl.set_red_time(0)\n else:\n tl.set_green_time(0)\n tl.set_yellow_time(0)\n tl.set_red_time(99999999999999999999999999999999999999)\n world.tick()\n time.sleep(1)\n\n # Make sure that everything is frozen and in the correct state.\n tl_actors = actor_list.filter('traffic.traffic_light*')\n for tl in tl_actors:\n assert tl.is_frozen() and tl.state == color, \\\n \"Traffic Light {}: Frozen: {}, Color: {}, Expected: {}\".format(\n tl.id, tl.is_frozen(), tl.state, color)\n\n\ndef test_traffic_light_colors(world, color):\n actor_list = world.get_actors()\n tl_actors = actor_list.filter('traffic.traffic_light*')\n for tl in tl_actors:\n if tl.state != color:\n raise ValueError(\"The expected color was {}, got {}\".format(\n color, tl.state))\n\n\ndef get_actors(world):\n actor_list = world.get_actors()\n tl_actors = actor_list.filter('traffic.traffic_light*')\n traffic_lights = [\n TrafficLight.from_carla_actor(tl_actor) for tl_actor in tl_actors\n ]\n traffic_stop_actors = actor_list.filter('traffic.stop')\n traffic_stops = [\n StopSign.from_carla_actor(ts_actor) for ts_actor in traffic_stop_actors\n ]\n speed_limit_actors = actor_list.filter('traffic.speed_limit*')\n speed_signs = [\n SpeedLimitSign.from_carla_actor(ts_actor)\n for ts_actor in speed_limit_actors\n ]\n return (tl_actors, traffic_lights, traffic_stops, speed_signs)\n\n\ndef log_obstacles(world, transforms_of_interest, traffic_lights, tl_color,\n speed_signs, traffic_stops, weather, town):\n for transform in transforms_of_interest:\n camera = add_camera(world, transform, on_camera_msg)\n depth_camera = add_depth_camera(world, transform, on_depth_msg)\n segmented_camera = add_segmented_camera(world, transform,\n on_segmented_msg)\n wait_for_data(world)\n global CARLA_IMAGE\n global DEPTH_FRAME\n global SEGMENTED_FRAME\n\n # Ensure that the traffic lights are of the given color.\n test_traffic_light_colors(world, tl_color)\n\n log_bounding_boxes(CARLA_IMAGE, DEPTH_FRAME, SEGMENTED_FRAME,\n traffic_lights, tl_color, speed_signs,\n traffic_stops, weather, town)\n reset_frames()\n segmented_camera.destroy()\n depth_camera.destroy()\n camera.destroy()\n\n\ndef check_lights_opposite(light_a, light_b):\n \"\"\" Checks if the two given lights are opposite to each other or not. 
\"\"\"\n def get_forward_vector(light):\n light_vector = light.get_transform().get_forward_vector()\n return [light_vector.x, light_vector.y, light_vector.z]\n\n light_a_vector = get_forward_vector(light_a)\n light_b_vector = get_forward_vector(light_b)\n return np.dot(light_a_vector, light_b_vector) < -0.98\n\n\ndef log_traffic_lights(world):\n world_map = world.get_map()\n (traffic_lights, _, _, _) = get_actors(world)\n tl_colors = [\n carla.TrafficLightState.Yellow, carla.TrafficLightState.Green,\n carla.TrafficLightState.Red\n ]\n transforms_of_interest = []\n for light in traffic_lights:\n print(\"Working for traffic light {}\".format(light.id))\n # For every traffic light, get the neighbouring lights except the one\n # directly opposite.\n for offset in range(10, 40, 5):\n # Traffic lights have different coordinate systems, hence\n # we need to offset y, instead of x and add that to the trigger\n # volume location.\n offset_loc = pylot.utils.Location(\n x=light.trigger_volume.location.x,\n y=light.trigger_volume.location.y + offset,\n z=light.trigger_volume.location.z)\n offset_trans = pylot.utils.Transform(offset_loc,\n pylot.utils.Rotation())\n\n # Transform the offset relative to the traffic light.\n transform = pylot.utils.Transform.from_carla_transform(\n light.get_transform()) * offset_trans\n location = transform.location.as_carla_location()\n\n # Get the waypoint nearest to the transform.\n w = world_map.get_waypoint(location,\n project_to_road=True,\n lane_type=carla.LaneType.Driving)\n w_rotation = w.transform.rotation\n camera_transform = pylot.utils.Transform.from_carla_transform(\n w.transform)\n camera_transform.location.z += 2.0\n transform = camera_transform.as_carla_transform()\n transforms_of_interest.append(transform)\n\n # Get the right lanes.\n wp_right = w.get_right_lane()\n while wp_right and wp_right.lane_type == carla.LaneType.Driving \\\n and w_rotation == wp_right.transform.rotation:\n camera_transform = pylot.utils.Transform.from_carla_transform(\n wp_right.transform)\n camera_transform.location.z += 2.0\n transform = camera_transform.as_carla_transform()\n transforms_of_interest.append(transform)\n wp_right = wp_right.get_right_lane()\n\n # Get the left lanes.\n wp_left = w.get_left_lane()\n while wp_left and wp_left.lane_type == carla.LaneType.Driving and \\\n w_rotation == wp_left.transform.rotation:\n camera_transform = pylot.utils.Transform.from_carla_transform(\n wp_left.transform)\n camera_transform.location.z += 2.0\n transform = camera_transform.as_carla_transform()\n transforms_of_interest.append(transform)\n wp_left = wp_left.get_left_lane()\n\n print(\"The total number of transforms were: {}\".format(\n len(transforms_of_interest)))\n\n traffic_lights = [\n TrafficLight.from_carla_actor(light) for light in traffic_lights\n ]\n for weather in find_weather_presets():\n change_weather(world, weather)\n time.sleep(1)\n for tl_color in tl_colors:\n change_traffic_light_colors(world, tl_color)\n world.tick()\n time.sleep(1)\n log_obstacles(world, transforms_of_interest, traffic_lights,\n tl_color, None, None, weather, world_map.name)\n\n\ndef log_speed_limits(world):\n world_map = world.get_map()\n (_, traffic_lights, traffic_stops, speed_signs) = get_actors(world)\n transforms_of_interest = []\n # Add transforms that are close to speed limit signs.\n for speed_sign in speed_signs:\n for offset in range(10, 25, 5):\n # Speed signs have different coordinate systems, hence\n # we need to offset y, instead of x.\n offset_loc = pylot.utils.Location(x=0, 
y=offset, z=0)\n offset_trans = pylot.utils.Transform(offset_loc,\n pylot.utils.Rotation())\n transform = speed_sign.transform * offset_trans\n location = transform.location.as_carla_location()\n w = world_map.get_waypoint(location,\n project_to_road=True,\n lane_type=carla.LaneType.Driving)\n camera_transform = pylot.utils.Transform.from_carla_transform(\n w.transform)\n camera_transform.location.z += 2.0\n transform = camera_transform.as_carla_transform()\n transforms_of_interest.append(transform)\n # Ensure all traffic lights are red.\n change_traffic_light_colors(world, carla.TrafficLightState.Red)\n world.tick()\n time.sleep(1)\n (_, traffic_lights, traffic_stops, speed_signs) = get_actors(world)\n for weather in find_weather_presets():\n change_weather(world, weather)\n log_obstacles(world, transforms_of_interest, traffic_lights,\n carla.TrafficLightState.Red, speed_signs, traffic_stops,\n weather, world_map.name)\n\n\ndef log_stop_signs(world):\n world_map = world.get_map()\n (_, traffic_lights, traffic_stops, speed_signs) = get_actors(world)\n transforms_of_interest = []\n # Add transforms that are close to stop signs.\n for stop_sign in traffic_stops:\n for offset in range(10, 25, 5):\n offset_loc = pylot.utils.Location(x=-offset, y=0, z=0)\n offset_trans = pylot.utils.Transform(offset_loc,\n pylot.utils.Rotation())\n transform = stop_sign.transform * offset_trans\n location = transform.location.as_carla_location()\n w = world_map.get_waypoint(location,\n project_to_road=True,\n lane_type=carla.LaneType.Driving)\n camera_transform = pylot.utils.Transform.from_carla_transform(\n w.transform)\n camera_transform.location.z += 2.0\n transform = camera_transform.as_carla_transform()\n transforms_of_interest.append(transform)\n # Ensure all traffic lights are red.\n change_traffic_light_colors(world, carla.TrafficLightState.Red)\n world.tick()\n time.sleep(1)\n (_, traffic_lights, traffic_stops, speed_signs) = get_actors(world)\n for weather in find_weather_presets():\n change_weather(world, weather)\n log_obstacles(world, transforms_of_interest, traffic_lights,\n carla.TrafficLightState.Red, speed_signs, traffic_stops,\n weather, world_map.name)\n\n\ndef change_weather(world, weather):\n world.set_weather(getattr(carla.WeatherParameters, weather))\n\n\ndef find_weather_presets():\n presets = [\n x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)\n ]\n return presets\n\n\ndef main(argv):\n world = setup_world()\n world.tick()\n # Sleep a bit to ensure the simulator actually ticks.\n time.sleep(1)\n log_traffic_lights(world)\n # log_speed_limits(world)\n # log_stop_signs(world)\n\n\nif __name__ == '__main__':\n app.run(main)\n"
]
| [
[
"numpy.dot"
]
]
|
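Buried in the row above is a compact geometric test: `check_lights_opposite` treats two traffic lights as facing each other when the dot product of their unit forward vectors falls below -0.98. Isolated:

```python
import numpy as np

def lights_opposite(forward_a, forward_b, threshold=-0.98):
    # Unit vectors pointing at each other have a dot product near -1.
    return np.dot(forward_a, forward_b) < threshold

print(lights_opposite([1.0, 0.0, 0.0], [-1.0, 0.0, 0.0]))  # True
print(lights_opposite([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]))   # False
```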
zhuyifan1993/Direct_RGB-D_SLAM | [
"2567177ae7e27479f298fd5a9369e3e89566c41c"
]
| [
"output_trajectory.py"
]
| [
"import numpy as np\nimport utils.conversions as conv\n\ntraj = np.load('traj_0.npy')\nnum = len(traj)\n\nabsolute_pose_path = 'kf_index_level4.txt'\nf = open(absolute_pose_path)\nline = f.readlines()\n\nfor i in range(num):\n trans = traj[i]\n quater = conv.trans_to_quater(trans)\n timestamp = line[i].split()[0]\n with open('pose_op_level4_with_auto_lc.txt', 'a') as file_handle:\n file_handle.write(timestamp)\n for ii in range(len(quater)):\n file_handle.write(' ' + str(quater[ii]))\n file_handle.write('\\n')\n"
]
| [
[
"numpy.load"
]
]
|
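The script above relies on a project helper, `conv.trans_to_quater`, to turn a pose into a timestamped quaternion line. A stand-in using SciPy (the 4x4-transform input and the TUM-style `timestamp tx ty tz qx qy qz qw` layout are assumptions based on the file's structure):

```python
import numpy as np
from scipy.spatial.transform import Rotation

def pose_line(timestamp, transform):
    # Translation from the last column, quaternion (x, y, z, w) from the
    # rotation block of a 4x4 homogeneous transform.
    t = transform[:3, 3]
    q = Rotation.from_matrix(transform[:3, :3]).as_quat()
    return ' '.join([timestamp] + [str(v) for v in np.concatenate([t, q])])

print(pose_line('1305031102.175304', np.eye(4)))
# 1305031102.175304 0.0 0.0 0.0 0.0 0.0 0.0 1.0
```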
bdshieh/interaction3 | [
"b44c390045cf3b594125e90d2f2f4f617bc2433b"
]
| [
"interaction3/arrays/matrix.py"
]
| [
"## interaction3 / arrays / matrix.py\n\nimport numpy as np\n\nfrom interaction3.abstract import *\n\n# default parameters\ndefaults = dict()\n\n# membrane properties\ndefaults['length'] = [40e-6, 40e-6]\ndefaults['electrode'] = [40e-6, 40e-6]\ndefaults['nnodes'] = [9, 9]\ndefaults['thickness'] = [2.2e-6,]\ndefaults['density'] = [2040,]\ndefaults['y_modulus'] = [110e9,]\ndefaults['p_ratio'] = [0.22,]\ndefaults['isolation'] = 200e-9\ndefaults['permittivity'] = 6.3\ndefaults['gap'] = 100e-9\ndefaults['att_mech'] = 0\ndefaults['ndiv'] = [2, 2]\ndefaults['k_matrix_comsol_file'] = None\n\n# array properties\ndefaults['mempitch'] = [50e-6, 50e-6]\ndefaults['nmem'] = [2, 2]\ndefaults['elempitch'] = [100e-6, 100e-6]\ndefaults['nelem'] = [7, 7]\n\n\ndef create(**kwargs):\n\n # set defaults if not in kwargs:\n for k, v in defaults.items():\n kwargs.setdefault(k, v)\n\n nmem_x, nmem_y = kwargs['nmem']\n mempitch_x, mempitch_y = kwargs['mempitch']\n length_x, length_y = kwargs['length']\n electrode_x, electrode_y = kwargs['electrode']\n nnodes_x, nnodes_y = kwargs['nnodes']\n ndiv_x, ndiv_y = kwargs['ndiv']\n nelem_x, nelem_y = kwargs['nelem']\n elempitch_x, elempitch_y = kwargs['elempitch']\n\n # membrane properties\n mem_properties = dict()\n mem_properties['length_x'] = length_x\n mem_properties['length_y'] = length_y\n mem_properties['electrode_x'] = electrode_x\n mem_properties['electrode_y'] = electrode_y\n mem_properties['y_modulus'] = kwargs['y_modulus']\n mem_properties['p_ratio'] = kwargs['p_ratio']\n mem_properties['isolation'] = kwargs['isolation']\n mem_properties['permittivity'] = kwargs['permittivity']\n mem_properties['gap'] = kwargs['gap']\n mem_properties['nnodes_x'] = nnodes_x\n mem_properties['nnodes_y'] = nnodes_y\n mem_properties['thickness'] = kwargs['thickness']\n mem_properties['density'] = kwargs['density']\n mem_properties['att_mech'] = kwargs['att_mech']\n mem_properties['ndiv_x'] = ndiv_x\n mem_properties['ndiv_y'] = ndiv_y\n mem_properties['k_matrix_comsol_file'] = kwargs['k_matrix_comsol_file']\n\n # calculate membrane positions\n xx, yy, zz = np.meshgrid(np.linspace(0, (nmem_x - 1) * mempitch_x, nmem_x),\n np.linspace(0, (nmem_y - 1) * mempitch_y, nmem_y),\n 0)\n mem_pos = np.c_[xx.ravel(), yy.ravel(), zz.ravel()] - [(nmem_x - 1) * mempitch_x / 2,\n (nmem_y - 1) * mempitch_y / 2,\n 0]\n\n # calculate element positions\n xx, yy, zz = np.meshgrid(np.linspace(0, (nelem_x - 1) * elempitch_x, nelem_x),\n np.linspace(0, (nelem_y - 1) * elempitch_y, nelem_y),\n 0)\n elem_pos = np.c_[xx.ravel(), yy.ravel(), zz.ravel()] - [(nelem_x - 1) * elempitch_x / 2,\n (nelem_y - 1) * elempitch_y / 2,\n 0]\n\n # construct channels\n channels = []\n\n for i, epos in enumerate(elem_pos):\n\n membranes = []\n elements = []\n\n for j, mpos in enumerate(mem_pos):\n\n # construct membrane\n m = SquareCmutMembrane(**mem_properties)\n m['id'] = i * len(mem_pos) + j\n m['position'] = (epos + mpos).tolist()\n membranes.append(m)\n # membranes.append(SquareCmutMembrane(id=(i * len(mem_pos) + j),\n # position=(epos + mpos).tolist(),\n # **mem_properties))\n\n # construct element\n elem = Element(id=i,\n position=epos.tolist(),\n membranes=membranes)\n element_position_from_membranes(elem)\n elements.append(elem)\n\n # construct channel\n chan = Channel(id=i,\n kind='both',\n position=epos.tolist(),\n elements=elements,\n dc_bias=5,\n active=True,\n delay=0)\n\n # channel_position_from_elements(chan)\n channels.append(chan)\n\n # construct array\n array = Array(id=0,\n channels=channels,\n position=[0, 
0, 0])\n\n    return array\n\n\n## COMMAND LINE INTERFACE ##\n\nif __name__ == '__main__':\n\n    import argparse\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-nmem', '--nmem', nargs=2, type=int)\n    parser.add_argument('-mempitch', '--mempitch', nargs=2, type=float)\n    parser.add_argument('-nelem', '--nelem', nargs=2, type=int)\n    parser.add_argument('-elempitch', '--elempitch', nargs=2, type=float)\n    parser.add_argument('-d', '--dump', nargs='?', default=None)\n    parser.set_defaults(**defaults)\n\n    args = vars(parser.parse_args())\n    filename = args.pop('dump')\n\n    spec = create(**args)\n    print(spec)\n\n    if filename is not None:\n        dump(spec, filename)"
]
| [
[
"numpy.linspace"
]
]
|
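The array builder above places membranes and elements with the same meshgrid-then-recentre idiom. In isolation, for a small 3x2 element grid:

```python
import numpy as np

nelem_x, nelem_y = 3, 2
pitch_x, pitch_y = 100e-6, 100e-6

xx, yy, zz = np.meshgrid(np.linspace(0, (nelem_x - 1) * pitch_x, nelem_x),
                         np.linspace(0, (nelem_y - 1) * pitch_y, nelem_y),
                         0)
# flatten to an (N, 3) array, then shift so the grid is centred on the origin
pos = np.c_[xx.ravel(), yy.ravel(), zz.ravel()] - [(nelem_x - 1) * pitch_x / 2,
                                                   (nelem_y - 1) * pitch_y / 2,
                                                   0]
print(pos.shape)         # (6, 3)
print(pos.mean(axis=0))  # ~[0, 0, 0]
```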
kvemani/ibis | [
"37616bd3df0599f33b28101ca1c19e0c0003cf4d"
]
| [
"ibis/backends/pandas/aggcontext.py"
]
| [
"\"\"\"Implements an object to describe the context of a window aggregation.\n\nFor any particular aggregation such as ``sum``, ``mean``, etc we need to decide\nbased on the presence or absence of other expressions like ``group_by`` and\n``order_by`` whether we should call a different method of aggregation.\n\nHere are the different aggregation contexts and the conditions under which they\nare used.\n\nNote that in the pandas backend, only trailing and cumulative windows are\nsupported right now.\n\nNo ``group_by`` or ``order_by``: ``context.Summarize()``\n--------------------------------------------------------\nThis is an aggregation on a column, repeated for every row in the table.\n\nSQL\n\n::\n\n SELECT SUM(value) OVER () AS sum_value FROM t\n\nPandas\n\n::\n >>> import pandas as pd\n >>> import numpy as np\n >>> df = pd.DataFrame({\n ... 'key': list('aabc'),\n ... 'value': np.random.randn(4),\n ... 'time': pd.date_range(periods=4, start='now')\n ... })\n >>> s = pd.Series(df.value.sum(), index=df.index, name='sum_value')\n >>> s # doctest: +SKIP\n\nIbis\n\n::\n\n >>> import ibis\n >>> schema = [\n ... ('time', 'timestamp'), ('key', 'string'), ('value', 'double')\n ... ]\n >>> t = ibis.table(schema, name='t')\n >>> t[t, t.value.sum().name('sum_value')].sum_value # doctest: +SKIP\n\n\n``group_by``, no ``order_by``: ``context.Transform()``\n------------------------------------------------------\n\nThis performs an aggregation per group and repeats it across every row in the\ngroup.\n\nSQL\n\n::\n\n SELECT SUM(value) OVER (PARTITION BY key) AS sum_value\n FROM t\n\nPandas\n\n::\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> df = pd.DataFrame({\n ... 'key': list('aabc'),\n ... 'value': np.random.randn(4),\n ... 'time': pd.date_range(periods=4, start='now')\n ... })\n >>> df.groupby('key').value.transform('sum') # doctest: +SKIP\n\nIbis\n\n::\n\n >>> import ibis\n >>> schema = [\n ... ('time', 'timestamp'), ('key', 'string'), ('value', 'double')\n ... ]\n >>> t = ibis.table(schema, name='t')\n >>> t.value.sum().over(ibis.window(group_by=t.key)) # doctest: +SKIP\n\n``order_by``, no ``group_by``: ``context.Cumulative()``/``context.Rolling()``\n-----------------------------------------------------------------------------\n\nCumulative and trailing window operations.\n\nCumulative\n~~~~~~~~~~\n\nAlso called expanding.\n\nSQL\n\n::\n\n SELECT SUM(value) OVER (\n ORDER BY time ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW\n ) AS sum_value\n FROM t\n\n\nPandas\n\n::\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> df = pd.DataFrame({\n ... 'key': list('aabc'),\n ... 'value': np.random.randn(4),\n ... 'time': pd.date_range(periods=4, start='now')\n ... })\n >>> df.sort_values('time').value.cumsum() # doctest: +SKIP\n\nIbis\n\n::\n\n >>> import ibis\n >>> schema = [\n ... ('time', 'timestamp'), ('key', 'string'), ('value', 'double')\n ... ]\n >>> t = ibis.table(schema, name='t')\n >>> window = ibis.cumulative_window(order_by=t.time)\n >>> t.value.sum().over(window) # doctest: +SKIP\n\nMoving\n~~~~~~\n\nAlso called referred to as \"rolling\" in other libraries such as pandas.\n\nSQL\n\n::\n\n SELECT SUM(value) OVER (\n ORDER BY time ROWS BETWEEN 3 PRECEDING AND CURRENT ROW\n ) AS sum_value\n FROM t\n\n\nPandas\n\n::\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> df = pd.DataFrame({\n ... 'key': list('aabc'),\n ... 'value': np.random.randn(4),\n ... 'time': pd.date_range(periods=4, start='now')\n ... 
})\n >>> df.sort_values('time').value.rolling(3).sum() # doctest: +SKIP\n\nIbis\n\n::\n\n >>> import ibis\n >>> schema = [\n ... ('time', 'timestamp'), ('key', 'string'), ('value', 'double')\n ... ]\n >>> t = ibis.table(schema, name='t')\n >>> window = ibis.trailing_window(3, order_by=t.time)\n >>> t.value.sum().over(window) # doctest: +SKIP\n\n\n``group_by`` and ``order_by``: ``context.Cumulative()``/``context.Rolling()``\n-----------------------------------------------------------------------------\n\nThis performs a cumulative or rolling operation within a group.\n\nSQL\n\n::\n\n SELECT SUM(value) OVER (\n PARTITION BY key ORDER BY time ROWS BETWEEN 4 PRECEDING AND CURRENT ROW\n ) AS sum_value\n FROM t\n\n\nPandas\n\n::\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> df = pd.DataFrame({\n ... 'key': list('aabc'),\n ... 'value': np.random.randn(4),\n ... 'time': pd.date_range(periods=4, start='now')\n ... })\n >>> sorter = lambda df: df.sort_values('time')\n >>> gb = df.groupby('key').apply(sorter).reset_index(\n ... drop=True\n ... ).groupby('key')\n >>> rolling = gb.value.rolling(2)\n >>> rolling.sum() # doctest: +SKIP\n\nIbis\n\n::\n\n >>> import ibis\n >>> schema = [\n ... ('time', 'timestamp'), ('key', 'string'), ('value', 'double')\n ... ]\n >>> t = ibis.table(schema, name='t')\n >>> window = ibis.trailing_window(2, order_by=t.time, group_by=t.key)\n >>> t.value.sum().over(window) # doctest: +SKIP\n\"\"\"\n\nimport abc\nimport functools\nimport itertools\nimport operator\nfrom typing import Any, Callable, Dict, Iterator, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.core.groupby import SeriesGroupBy\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.util\nfrom ibis.expr.timecontext import (\n construct_time_context_aware_series,\n get_time_col,\n)\n\n\nclass AggregationContext(abc.ABC):\n __slots__ = (\n 'parent',\n 'group_by',\n 'order_by',\n 'dtype',\n 'max_lookback',\n 'output_type',\n )\n\n def __init__(\n self,\n parent=None,\n group_by=None,\n order_by=None,\n max_lookback=None,\n output_type=None,\n ):\n self.parent = parent\n self.group_by = group_by\n self.order_by = order_by\n self.dtype = None if output_type is None else output_type.to_pandas()\n self.output_type = output_type\n self.max_lookback = max_lookback\n\n @abc.abstractmethod\n def agg(self, grouped_data, function, *args, **kwargs):\n pass\n\n\ndef wrap_for_apply(\n function: Callable,\n args: Optional[Tuple[Any, ...]] = None,\n kwargs: Optional[Dict[str, Any]] = None,\n) -> Callable:\n \"\"\"Wrap a function for use with Pandas `apply`.\n\n Parameters\n ----------\n function : Callable\n A function to be used with Pandas `apply`.\n args : Optional[Tuple[Any, ...]]\n args to be passed to function when it is called by Pandas `apply`\n kwargs : Optional[Dict[str, Any]]\n kwargs to be passed to function when it is called by Pandas `apply`\n\n \"\"\"\n assert callable(function), f'function {function} is not callable'\n\n new_args: Tuple[Any, ...] = ()\n if args is not None:\n new_args = args\n\n new_kwargs: Dict[str, Any] = {}\n if kwargs is not None:\n new_kwargs = kwargs\n\n @functools.wraps(function)\n def wrapped_func(\n data: Any,\n function: Callable = function,\n args: Tuple[Any, ...] 
= new_args,\n        kwargs: Dict[str, Any] = new_kwargs,\n    ) -> Callable:\n        return function(data, *args, **kwargs)\n\n    return wrapped_func\n\n\ndef wrap_for_agg(\n    function: Callable,\n    args: Tuple[Any, ...],\n    kwargs: Dict[str, Any],\n) -> Callable:\n    \"\"\"Wrap a function for use with Pandas `agg`.\n\n    This includes special logic that will force Pandas `agg` to always treat\n    the function as an aggregation function. Details:\n\n    When passed a function, Pandas `agg` will either:\n    1) Behave like Pandas `apply` and treat the function as an N->N mapping\n      function (i.e. calls the function once for every value in the Series\n      that `agg` is being called on), OR\n    2) Treat the function as an N->1 aggregation function (i.e. calls the\n      function once on the entire Series)\n    Pandas `agg` will use behavior #1 unless an error is raised when doing so.\n\n    We want to force Pandas `agg` to use behavior #2. To do this, we will wrap\n    the function with logic that checks that a Series is being passed in, and\n    raises a TypeError otherwise. When Pandas `agg` is attempting to use\n    behavior #1 but sees the TypeError, it will fall back to behavior #2.\n\n    Parameters\n    ----------\n    function : Callable\n        An aggregation function to be used with Pandas `agg`.\n    args : Tuple[Any, ...]\n        args to be passed to function when it is called by Pandas `agg`\n    kwargs : Dict[str, Any]\n        kwargs to be passed to function when it is called by Pandas `agg`\n\n    \"\"\"\n    assert callable(function), f'function {function} is not callable'\n\n    @functools.wraps(function)\n    def wrapped_func(\n        data: Any,\n        function: Callable = function,\n        args: Tuple[Any, ...] = args,\n        kwargs: Dict[str, Any] = kwargs,\n    ) -> Callable:\n        # `data` will be a scalar here if Pandas `agg` is trying to behave\n        # like Pandas `apply`.\n        if not isinstance(data, pd.Series):\n            # Force `agg` to NOT behave like `apply`. We want Pandas to use\n            # `function` as an aggregation function, not as a mapping function.\n            raise TypeError(\n                f'This function expects a Series, but saw an object of type '\n                f'{type(data)} instead.'\n            )\n        return function(data, *args, **kwargs)\n\n    return wrapped_func\n\n\nclass Summarize(AggregationContext):\n    __slots__ = ()\n\n    def agg(self, grouped_data, function, *args, **kwargs):\n        if isinstance(function, str):\n            return getattr(grouped_data, function)(*args, **kwargs)\n\n        if not callable(function):\n            raise TypeError(f'Object {function} is not callable or a string')\n\n        if isinstance(\n            grouped_data, pd.core.groupby.generic.SeriesGroupBy\n        ) and len(grouped_data):\n            # `SeriesGroupBy.agg` does not allow np.arrays to be returned\n            # from UDFs. To avoid `SeriesGroupBy.agg`, we will call the\n            # aggregation function manually on each group. 
(#2768)\n            aggs = {}\n            for k, v in grouped_data:\n                func_args = [d.get_group(k) for d in args]\n                aggs[k] = function(v, *func_args, **kwargs)\n                grouped_col_name = v.name\n            return (\n                pd.Series(aggs)\n                .rename(grouped_col_name)\n                .rename_axis(grouped_data.grouper.names)\n            )\n        else:\n            return grouped_data.agg(wrap_for_agg(function, args, kwargs))\n\n\nclass Transform(AggregationContext):\n    __slots__ = ()\n\n    def agg(self, grouped_data, function, *args, **kwargs):\n        # If this is a multi column UDF, then we cannot use\n        # \"transform\" here (Data must be 1-dimensional)\n        # Instead, we need to use \"apply\", which can return a non\n        # numeric type, e.g., a tuple of two doubles.\n        if isinstance(self.output_type, dt.Struct):\n            res = grouped_data.apply(function, *args, **kwargs)\n        else:\n            res = grouped_data.transform(function, *args, **kwargs)\n\n        # The result series uses the name of the input. We should\n        # unset it to avoid confusion, when result is not guaranteed\n        # to be the same series / have the same type after transform\n        res.name = None\n        return res\n\n\[email protected]\ndef compute_window_spec(dtype, obj):\n    raise com.IbisTypeError(\n        \"Unknown dtype type {} and object {} for compute_window_spec\".format(\n            dtype, obj\n        )\n    )\n\n\n@compute_window_spec.register(type(None))\ndef compute_window_spec_none(_, obj):\n    \"\"\"Helper method only used for row-based windows:\n\n    Window spec in ibis is an inclusive window bound. A bound of 0 indicates\n    the current row.\n    Window spec in Pandas indicates window size. Therefore, we must add 1\n    to the ibis window bound to get the expected behavior.\n    \"\"\"\n    return obj + 1\n\n\n@compute_window_spec.register(dt.Interval)\ndef compute_window_spec_interval(_, expr):\n    value = ibis.pandas.execute(expr)\n    return pd.tseries.frequencies.to_offset(value)\n\n\ndef window_agg_built_in(\n    frame: pd.DataFrame,\n    windowed: pd.core.window.Window,\n    function: str,\n    max_lookback: int,\n    *args: Tuple[Any],\n    **kwargs: Dict[str, Any],\n) -> pd.Series:\n    \"\"\"Apply window aggregation with built-in aggregators.\"\"\"\n    assert isinstance(function, str)\n    method = operator.methodcaller(function, *args, **kwargs)\n\n    if max_lookback is not None:\n        agg_method = method\n\n        def sliced_agg(s):\n            return agg_method(s.iloc[-max_lookback:])\n\n        method = operator.methodcaller('apply', sliced_agg, raw=False)\n\n    result = method(windowed)\n    index = result.index\n    result.index = pd.MultiIndex.from_arrays(\n        [frame.index]\n        + list(map(index.get_level_values, range(index.nlevels))),\n        names=[frame.index.name] + index.names,\n    )\n    return result\n\n\ndef create_window_input_iter(\n    grouped_data: Union[SeriesGroupBy, pd.Series],\n    masked_window_lower_indices: pd.Series,\n    masked_window_upper_indices: pd.Series,\n) -> Iterator[np.ndarray]:\n    # create a generator for each input series\n    # the generator will yield a slice of the\n    # input series for each valid window\n    data = getattr(grouped_data, 'obj', grouped_data).values\n    lower_indices_array = masked_window_lower_indices.values\n    upper_indices_array = masked_window_upper_indices.values\n    for i in range(len(lower_indices_array)):\n        lower_index = lower_indices_array[i]\n        upper_index = upper_indices_array[i]\n        yield data[lower_index:upper_index]\n\n\ndef window_agg_udf(\n    grouped_data: SeriesGroupBy,\n    function: Callable,\n    window_lower_indices: pd.Series,\n    window_upper_indices: pd.Series,\n    mask: pd.Series,\n    result_index: pd.Index,\n    dtype: np.dtype,\n    max_lookback: int,\n    *args: Tuple[Any],\n    **kwargs: Dict[str, Any],\n) -> pd.Series:\n    
\"\"\"Apply window aggregation with UDFs.\n\n Notes:\n Use custom logic to computing rolling window UDF instead of\n using pandas's rolling function.\n This is because pandas's rolling function doesn't support\n multi param UDFs.\n \"\"\"\n assert len(window_lower_indices) == len(window_upper_indices)\n assert len(window_lower_indices) == len(mask)\n\n # Reset index here so we don't need to deal with mismatching\n # indices\n window_lower_indices = window_lower_indices.reset_index(drop=True)\n window_upper_indices = window_upper_indices.reset_index(drop=True)\n mask = mask.reset_index(drop=True)\n\n # Compute window indices and manually roll\n # over the window.\n\n # If an window has only nan values, we output nan for\n # the window result. This follows pandas rolling apply\n # behavior.\n\n # The first input column is in grouped_data, but there may\n # be additional input columns in args.\n inputs = (grouped_data,) + args\n\n masked_window_lower_indices = window_lower_indices[mask].astype('i8')\n masked_window_upper_indices = window_upper_indices[mask].astype('i8')\n\n input_iters = [\n create_window_input_iter(\n arg, masked_window_lower_indices, masked_window_upper_indices\n )\n if isinstance(arg, (pd.Series, SeriesGroupBy))\n else itertools.repeat(arg)\n for arg in inputs\n ]\n\n valid_result = pd.Series(\n function(*(next(gen) for gen in input_iters))\n for i in range(len(masked_window_lower_indices))\n )\n\n valid_result = pd.Series(valid_result)\n valid_result.index = masked_window_lower_indices.index\n result = pd.Series(index=mask.index, dtype=dtype)\n result[mask] = valid_result\n result.index = result_index\n\n return result\n\n\nclass Window(AggregationContext):\n __slots__ = ('construct_window',)\n\n def __init__(self, kind, *args, **kwargs):\n super().__init__(\n parent=kwargs.pop('parent', None),\n group_by=kwargs.pop('group_by', None),\n order_by=kwargs.pop('order_by', None),\n output_type=kwargs.pop('output_type'),\n max_lookback=kwargs.pop('max_lookback', None),\n )\n self.construct_window = operator.methodcaller(kind, *args, **kwargs)\n\n def agg(\n self,\n grouped_data: Union[pd.Series, SeriesGroupBy],\n function: Union[str, Callable],\n *args: Any,\n **kwargs: Any,\n ) -> pd.Series:\n # avoid a pandas warning about numpy arrays being passed through\n # directly\n group_by = self.group_by\n order_by = self.order_by\n\n # if we don't have a grouping key, just call into pandas\n if not group_by and not order_by:\n # the result of calling .rolling(...) 
in pandas\n            windowed = self.construct_window(grouped_data)\n\n            # if we're a UD(A)F or a function that isn't a string (like the\n            # collect implementation) then call apply\n            if callable(function):\n                return windowed.apply(\n                    wrap_for_apply(function, args, kwargs), raw=True\n                )\n            else:\n                # otherwise we're a string and probably faster\n                assert isinstance(function, str)\n                method = getattr(windowed, function, None)\n                if method is not None:\n                    return method(*args, **kwargs)\n\n                # handle the case where we pulled out a name from an operation\n                # but it doesn't actually exist\n                return windowed.apply(\n                    wrap_for_apply(\n                        operator.methodcaller(function, *args, **kwargs)\n                    ),\n                    raw=True,\n                )\n        else:\n            # Get the DataFrame from which the operand originated\n            # (passed in when constructing this context object in\n            # execute_node(ops.WindowOp))\n            parent = self.parent\n            frame = getattr(parent, 'obj', parent)\n            obj = getattr(grouped_data, 'obj', grouped_data)\n            name = obj.name\n            if frame[name] is not obj or name in group_by or name in order_by:\n                name = f\"{name}_{ibis.util.guid()}\"\n                frame = frame.assign(**{name: obj})\n\n            # set the index to our order_by keys and append it to the existing\n            # index\n            # TODO: see if we can do this in the caller, when the context\n            # is constructed rather than pulling out the data\n            columns = group_by + order_by + [name]\n            # Create a new frame to avoid mutating the original one\n            indexed_by_ordering = frame[columns].copy()\n            # placeholder column to compute window_sizes below\n            indexed_by_ordering['_placeholder'] = 0\n            indexed_by_ordering = indexed_by_ordering.set_index(order_by)\n\n            # regroup if needed\n            if group_by:\n                grouped_frame = indexed_by_ordering.groupby(group_by)\n            else:\n                grouped_frame = indexed_by_ordering\n            grouped = grouped_frame[name]\n\n            if callable(function):\n                # To compute the window_size, we need to construct a\n                # RollingGroupby and compute count using construct_window.\n                # However, if the RollingGroupby is not numeric, e.g.,\n                # we are calling window UDF on a timestamp column, we\n                # cannot compute rolling count directly because:\n                # (1) windowed.count() will exclude NaN observations,\n                # which results in incorrect window sizes.\n                # (2) windowed.apply(len, raw=True) will include NaN\n                # observations, but doesn't work on non-numeric types.\n                # https://github.com/pandas-dev/pandas/issues/23002\n                # To deal with this, we create a _placeholder column\n\n                windowed_frame = self.construct_window(grouped_frame)\n                window_sizes = (\n                    windowed_frame['_placeholder']\n                    .count()\n                    .reset_index(drop=True)\n                )\n                mask = ~(window_sizes.isna())\n                window_upper_indices = pd.Series(range(len(window_sizes))) + 1\n                window_lower_indices = window_upper_indices - window_sizes\n                # The result Series of udf may need to be trimmed by\n                # timecontext. In order to do so, 'time' must be added\n                # as an index to the Series, if present. 
Here we extract\n                # the time column from the parent DataFrame `frame`.\n                if get_time_col() in frame:\n                    result_index = construct_time_context_aware_series(\n                        obj, frame\n                    ).index\n                else:\n                    result_index = obj.index\n                result = window_agg_udf(\n                    grouped_data,\n                    function,\n                    window_lower_indices,\n                    window_upper_indices,\n                    mask,\n                    result_index,\n                    self.dtype,\n                    self.max_lookback,\n                    *args,\n                    **kwargs,\n                )\n            else:\n                # perform the per-group rolling operation\n                windowed = self.construct_window(grouped)\n                result = window_agg_built_in(\n                    frame,\n                    windowed,\n                    function,\n                    self.max_lookback,\n                    *args,\n                    **kwargs,\n                )\n            try:\n                return result.astype(self.dtype, copy=False)\n            except (TypeError, ValueError):\n                return result\n\n\nclass Cumulative(Window):\n    __slots__ = ()\n\n    def __init__(self, *args, **kwargs):\n        super().__init__('expanding', *args, **kwargs)\n\n\nclass Moving(Window):\n    __slots__ = ()\n\n    def __init__(self, preceding, max_lookback, *args, **kwargs):\n        from .core import timedelta_types\n\n        ibis_dtype = getattr(preceding, 'type', lambda: None)()\n        preceding = compute_window_spec(ibis_dtype, preceding)\n        closed = (\n            None\n            if not isinstance(\n                preceding, timedelta_types + (pd.offsets.DateOffset,)\n            )\n            else 'both'\n        )\n        super().__init__(\n            'rolling',\n            preceding,\n            *args,\n            max_lookback=max_lookback,\n            closed=closed,\n            min_periods=1,\n            **kwargs,\n        )\n\n    def short_circuit_method(self, grouped_data, function):\n        raise AttributeError('No short circuit method for rolling operations')\n"
]
| [
[
"pandas.tseries.frequencies.to_offset",
"pandas.Series"
]
]
|
jizhi-zhang/Counterfactual_Reasoning_Model | [
"3c4eb3e022e66e8626facc6fc772141a0079b807"
]
| [
"Sentiment_task/roberta_base_CRM.py"
]
| [
"from transformers import AutoTokenizer, AutoModelForSequenceClassification\n# from transformers import AutoTokenizer, AutoModelForSequenceClassification\nimport torch\n#from pytorch_pretrained_bert import BertTokenizer, BertModel\nfrom transformers import BertModel,BertTokenizer, BertForSequenceClassification, AdamW, get_linear_schedule_with_warmup\nimport argparse\nimport numpy as np\nimport sys\nimport torch.optim as optim\nfrom torch import nn\n# import spacy\nimport pandas as pd\nimport numpy as np\n# from sklearn.metrics import accuracy_score\nfrom collections import namedtuple\nfrom tqdm import tqdm\nimport os\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nimport random\nimport copy\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--device\", type= int, default= 2)\nparser.add_argument(\"--run_seed\", type= int, default= 4)\nparser.add_argument(\"--train_file\", type =str, default= \"./dataset/sentiment/combined/train.tsv\")\nparser.add_argument(\"--val_file\", type =str, default= \"./dataset/sentiment/combined/dev.tsv\")\nparser.add_argument(\"--test_file\", type=str, default= \"./dataset/sentiment/combined/test.tsv\")\nparser.add_argument(\"--orig_train_file\", type =str, default= \"./dataset/sentiment/orig/train.tsv\")\nparser.add_argument(\"--orig_val_file\", type =str, default= \"./dataset/sentiment/orig/dev.tsv\")\nparser.add_argument(\"--orig_test_file\", type=str, default= \"./dataset/sentiment/orig/test.tsv\")\nparser.add_argument(\"--revised_train_file\", type =str, default= \"./dataset/sentiment/combined/paired/train_paired.tsv\")\nparser.add_argument(\"--revised_val_file\", type =str, default= \"./dataset/sentiment/combined/paired/dev_paired.tsv\")\nparser.add_argument(\"--revised_test_file\", type=str, default= \"./dataset/sentiment/combined/paired/test_paired.tsv\")\nparser.add_argument(\"--cf_model_folder\", type= str, default=\"./roberta_base_cf_train/\")\nparser.add_argument(\"--lr\", type=float, default= 1e-3)\nparser.add_argument(\"--batchsize\", type=int , default= 4)\nparser.add_argument(\"--warm_up_rate\", type=float, default=.1)\nparser.add_argument(\"--epochs\", type=int , default= 20)\nparser.add_argument(\"--save_folder\", type=str, default=\"./robert_base_CRM/\")\nparser.add_argument(\"--log_name\", type= str, default= \"cf_inference_out.log\")\nparser.add_argument(\"--plot_name\", type = str, default= \"result_plot2.jpg\")\nargs = parser.parse_args()\n\ndevice = torch.device(\"cuda:\"+str(args.device))\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.cuda.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ntokenizer = AutoTokenizer.from_pretrained(\"roberta-base\")\n\n\nclass cf_conv_linear_net (nn.Module):\n def __init__(self, hidde_channels):\n super().__init__()\n self.conv1 = nn.Conv2d(1, hidde_channels, (3,1))\n self.fc = nn.Linear(hidde_channels * 2, 2, bias= False)\n # self.fc = nn.Linear(3,3)\n def forward(self, x):\n # x = x[:,:, 0:1, :]\n out = torch.flatten(self.conv1(x), start_dim= 1)\n # out = x.view(len(x),3)\n return self.fc(out)\n\n\ndef get_label(text):\n if text == \"Positive\":\n return 1\n elif text == \"Negative\":\n return 0\n\ndef create_batch(train_data, batchsize):\n count = 0\n batch_list = []\n train_indexs = [i for i in range(len(train_data))] \n random.shuffle(train_indexs)\n for index in tqdm(train_indexs):\n if count == 0:\n label 
=torch.stack([torch.tensor(get_label(train_data[\"Sentiment\"][index]))])\n sent_list = [(train_data[\"Text\"][index])]\n count += 1\n else :\n label = torch.cat([label, torch.stack([torch.tensor(get_label(train_data[\"Sentiment\"][index]))])])\n sent_list.append((train_data[\"Text\"][index]))\n count += 1\n if count == batchsize:\n count = 0\n batch_list.append((label,sent_list))\n return batch_list\n\ndef calc_cf_sent_list(sent_list, model, tokenizer):\n model.eval()\n with torch.no_grad():\n real_out = model(**tokenizer(sent_list[0], padding=True, truncation=True, max_length=256, return_tensors='pt' ).to(device)).logits.detach()\n cf_out = model(**tokenizer(sent_list[1], padding=True, truncation=True, max_length=256, return_tensors='pt').to(device)).logits.detach()\n delta_embed = model.roberta(**tokenizer(sent_list[1], padding=True, truncation=True, max_length=256, return_tensors='pt').to(device)).last_hidden_state.detach()[:,:1,:]\\\n - model.roberta(**tokenizer(sent_list[0], padding=True, truncation=True, max_length=256, return_tensors='pt').to(device)).last_hidden_state.detach()[:,:1,:]\n return delta_embed, [cf_out, real_out]\n\n\ndef create_batch_with_delta_cf(orig_data, cf_data, batchsize, model, tokenizer):\n model.eval()\n with torch.no_grad():\n count = 0\n batch_list = []\n data_indexs = [i for i in range(len(orig_data))]\n random.shuffle(data_indexs)\n for index in tqdm(data_indexs):\n if count == 0:\n label =torch.stack([torch.tensor(get_label(cf_data[\"Sentiment\"][index * 2]))])\n sent_list = [cf_data[\"Text\"][index * 2]]\n sent_list.append(cf_data[\"Text\"][index * 2 + 1])\n delta_embed, output =calc_cf_sent_list(sent_list, model, tokenizer)\n delta_embed_list = [delta_embed]\n output = torch.cat(output)\n output_list = [output]\n count = count + 1\n else:\n # label =torch.stack([torch.tensor(get_label(orig_data[\"gold_label\"][index]))])\n sent_list = [cf_data[\"Text\"][index * 2]]\n sent_list.append(cf_data[\"Text\"][index * 2 + 1])\n delta_embed, output =calc_cf_sent_list(sent_list, model, tokenizer)\n delta_embed_list.append(delta_embed)\n output = torch.cat(output)\n output_list.append(output)\n label = torch.cat([label, torch.stack([torch.tensor(get_label(cf_data[\"Sentiment\"][index * 2]))])])\n count = count + 1 \n if count == batchsize:\n count = 0\n # embed_list = torch.stack([torch.stack([j]) for j in embed_list])\n batch_list.append((label, delta_embed_list, output_list))\n if count != 0:\n # embed_list = torch.stack([torch.stack([j]) for j in embed_list])\n batch_list.append((label, delta_embed_list, output_list))\n return batch_list\n\n\ndef isNan_2(a):\n return a != a\n\ndef mk_dir(path):\n try:\n os.mkdir(path)\n except:\n pass\n\ntrain_data = pd.read_csv(args.train_file, sep= \"\\t\")\nval_data = pd.read_csv(args.val_file, sep =\"\\t\")\ntest_data = pd.read_csv(args.test_file, sep = \"\\t\")\n\norig_train_data = pd.read_csv(args.orig_train_file, sep= \"\\t\")\norig_val_data = pd.read_csv(args.orig_val_file, sep =\"\\t\")\norig_test_data = pd.read_csv(args.orig_test_file, sep = \"\\t\")\n\nrevised_train_data = pd.read_csv(args.revised_train_file, sep= \"\\t\")\nrevised_val_data = pd.read_csv(args.revised_val_file, sep =\"\\t\")\nrevised_test_data = pd.read_csv(args.revised_test_file, sep = \"\\t\")\n\n\ndef model_test(batch_train, classifier, cf_net):\n cf_net = cf_net.eval()\n classifier = classifier.eval()\n correct=0\n total=0\n with torch.no_grad():\n for index in tqdm(range(len(batch_train))):\n label = batch_train[index][0].to(device)\n # encoder = 
tokenizer(batch_train[index][1], padding=True, truncation=True, max_length=512, return_tensors='pt' )\n out = classifier(torch.cat(batch_train[index][1])).view(len(label),2)\n # output = model(**tokenizer(batch_train[index][1], padding=True, truncation=True, max_length=256, return_tensors='pt' ))\n output = cf_net(torch.cat([torch.stack(batch_train[index][2]).view(len(label),2,2),out.view(len(label),1,2)], dim=1).view(len(label),1,3,2))\n # output = out_net(output)\n _,predict = torch.max(output,1)\n total+=label.size(0)\n correct += (predict == label).sum().item()\n return 100 * correct/total\n\n\ndef shuffle_from_bs_1(batch_train_bs_1, batchsize):\n batch_train_bs = copy.deepcopy(batch_train_bs_1)\n count = 0\n batch_list = []\n index_list = [i for i in range(len(batch_train_bs))]\n random.shuffle(index_list)\n for index in index_list:\n item = batch_train_bs[index]\n if count == 0:\n label_1 = item[0]\n delta_1 = item[1]\n out_1 = item[2]\n count += 1\n else:\n label_1 = torch.cat([label_1, item[0]])\n delta_1 += item[1]\n out_1 += item[2]\n count += 1\n if count >= batchsize:\n batch_list.append((label_1, delta_1, out_1))\n count = 0\n if count != 0:\n batch_list.append((label_1, delta_1, out_1))\n return batch_list\n\n\nseed = args.run_seed\nsetup_seed(seed)\nmodel = torch.load(args.cf_model_folder + str(seed) + \"/roberta-base.pt\", map_location = device)\nclassifier = copy.deepcopy(model.classifier).to(device)\ncf_net = cf_conv_linear_net(10).to(device)\noptimizer = optim.Adam([{\"params\":cf_net.parameters()},\n {\"params\":classifier.parameters()}])\nLoss = nn.CrossEntropyLoss()\nbatch_val = create_batch_with_delta_cf(orig_val_data, revised_val_data, args.batchsize, model, tokenizer)\nbatch_test = create_batch_with_delta_cf(orig_test_data, revised_test_data, args.batchsize, model, tokenizer)\nbatch_train_bs_1 = create_batch_with_delta_cf(orig_train_data, revised_train_data, 1, model, tokenizer)\nacc_train_list = []\nacc_val_list = []\nacc_test_list = []\nmax_val_acc = 0\nfinal_test_acc = 0 \nmk_dir(args.save_folder)\nsaving_folder = args.save_folder + \"/\" + str(seed) + \"/\"\nmk_dir(saving_folder)\nfor i in range(0, args.epochs): \n batch_train = shuffle_from_bs_1(batch_train_bs_1, args.batchsize)\n print(\"epoch:\" + str(i))\n loss_total = 0\n with open(saving_folder + \"/\" + args.log_name,\"a+\") as f:\n if i == 0:\n f.write(\"settings:\\n\")\n f.write(\"lr:\" + str(args.lr) + \"\\n\")\n f.write(\"net_struc:\" + \"\\n\")\n print(cf_net, file=f)\n print(classifier, file=f)\n acc1 = model_test(batch_train, classifier, cf_net)\n acc2 = model_test(batch_val, classifier, cf_net)\n acc3 = model_test(batch_test,classifier, cf_net)\n # f.write(\"before optim:\" + str(i) + \" train_acc:\" + str(acc1) + \" val_acc:\" + str(acc2) + \" test_acc:\" + str(acc3) + \"\\n\")\n for index in tqdm(range(len(batch_train))):\n label = batch_train[index][0].to(device)\n out = classifier(torch.cat(batch_train[index][1])).view(len(label),2)\n output = cf_net(torch.cat([torch.stack(batch_train[index][2]).view(len(label),2,2),out.view(len(label),1,2)], dim=1).view(len(label),1,3,2))\n loss = Loss(output, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_total += loss.item()\n print(loss_total/len(batch_train))\n acc1 = model_test(batch_train, classifier, cf_net)\n acc2 = model_test(batch_val, classifier, cf_net)\n acc3 = model_test(batch_test, classifier, cf_net)\n acc_train_list.append(acc1)\n acc_val_list.append(acc2)\n acc_test_list.append(acc3)\n print(acc1, acc2, acc3)\n 
if acc2 > max_val_acc:\n        max_val_acc = acc2\n        final_test_acc = acc3\n        torch.save(classifier, saving_folder + \"/max_classifier.pt\")\n        torch.save(cf_net, saving_folder + \"/max_cf_net.pt\")\n    with open(saving_folder + \"/\" + args.log_name,\"a+\") as f:\n        if i == 0:\n            f.write(\"settings:\\n\")\n            f.write(\"lr:\" + str(args.lr) + \"\\n\")\n            f.write(\"net_struc:\" + \"\\n\")\n            print(cf_net, file=f)\n        f.write(\"epoch:\" + str(i) + \" train_acc:\" + str(acc1) + \" val_acc:\" + str(acc2) + \" test_acc:\" + str(acc3) + \"\\n\")\n    mk_dir(saving_folder + str(i) + \"epoch\")\n    torch.save(classifier, saving_folder + str(i) + \"epoch\" + \"/classifier.pt\")\n    torch.save(cf_net, saving_folder + str(i) + \"epoch\" + \"/cf_net.pt\")\nx = [i for i in range(len(acc_train_list))]\np1 = plt.plot(x, acc_train_list, \"b\", marker = \"o\", label = \"train\")\np2 = plt.plot(x, acc_val_list, \"g\", marker = \"v\", label = \"val\")\np3 = plt.plot(x, acc_test_list, \"y\", marker = \"^\", label = \"test\")\nplt.xlabel(\"epochs\")\nplt.ylabel(\"acc\")\nplt.title(\"cf_net result\")\nplt.legend(labels = [\"train\", \"val\", \"test\"])\nplt.savefig(saving_folder + args.plot_name)\nplt.cla()\n# end\nwith open(args.save_folder + \"/final_acc\", \"a+\") as f:\n    f.write(\"random seed:\" + str(seed) + \" max_val_acc:\" + str(max_val_acc) + \" test_acc:\" + str(final_test_acc) + \"\\n\")\n\n\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.cuda.manual_seed",
"torch.stack",
"pandas.read_csv",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.savefig",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"torch.max",
"torch.save",
"matplotlib.pyplot.title",
"torch.nn.Conv2d",
"numpy.random.seed",
"matplotlib.pyplot.xlabel",
"torch.no_grad",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.ylabel"
]
]
|
sanger640/attMPTI | [
"a2784b784e0900f3603baa3779631da67bcd0562"
]
| [
"models/mpti.py"
]
| [
"\"\"\" Multi-prototype transductive inference\n\nAuthor: Zhao Na, 2020\n\"\"\"\nimport numpy as np\nimport faiss\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_cluster import fps\n\nfrom models.dgcnn import DGCNN\nfrom models.attention import SelfAttention\n\n\nclass BaseLearner(nn.Module):\n \"\"\"The class for inner loop.\"\"\"\n def __init__(self, in_channels, params):\n super(BaseLearner, self).__init__()\n\n self.num_convs = len(params)\n self.convs = nn.ModuleList()\n\n for i in range(self.num_convs):\n if i == 0:\n in_dim = in_channels\n else:\n in_dim = params[i-1]\n self.convs.append(nn.Sequential(\n nn.Conv1d(in_dim, params[i], 1),\n nn.BatchNorm1d(params[i])))\n\n def forward(self, x):\n for i in range(self.num_convs):\n x = self.convs[i](x)\n if i != self.num_convs-1:\n x = F.relu(x)\n return x\n\n\nclass MultiPrototypeTransductiveInference(nn.Module):\n def __init__(self, args):\n super(MultiPrototypeTransductiveInference, self).__init__()\n # self.gpu_id = args.gpu_id\n self.n_way = args.n_way\n self.k_shot = args.k_shot\n self.in_channels = args.pc_in_dim\n self.n_points = args.pc_npts\n self.use_attention = args.use_attention\n self.n_subprototypes = args.n_subprototypes\n self.k_connect = args.k_connect\n self.sigma = args.sigma\n\n self.n_classes = self.n_way+1\n\n self.encoder = DGCNN(args.edgeconv_widths, args.dgcnn_mlp_widths, args.pc_in_dim, k=args.dgcnn_k)\n self.base_learner = BaseLearner(args.dgcnn_mlp_widths[-1], args.base_widths)\n\n if self.use_attention:\n self.att_learner = SelfAttention(args.dgcnn_mlp_widths[-1], args.output_dim)\n else:\n self.linear_mapper = nn.Conv1d(args.dgcnn_mlp_widths[-1], args.output_dim, 1, bias=False)\n\n self.feat_dim = args.edgeconv_widths[0][-1] + args.output_dim + args.base_widths[-1]\n\n def forward(self, support_x, support_y, query_x, query_y):\n \"\"\"\n Args:\n support_x: support point clouds with shape (n_way, k_shot, in_channels, num_points)\n support_y: support masks (foreground) with shape (n_way, k_shot, num_points)\n query_x: query point clouds with shape (n_queries, in_channels, num_points)\n query_y: query labels with shape (n_queries, num_points), each point \\in {0,..., n_way}\n Return:\n query_pred: query point clouds predicted similarity, shape: (n_queries, n_way+1, num_points)\n \"\"\"\n support_x = support_x.view(self.n_way*self.k_shot, self.in_channels, self.n_points)\n support_feat = self.getFeatures(support_x)\n support_feat = support_feat.view(self.n_way, self.k_shot, self.feat_dim, self.n_points)\n query_feat = self.getFeatures(query_x) #(n_queries, feat_dim, num_points)\n query_feat = query_feat.transpose(1,2).contiguous().view(-1, self.feat_dim) #(n_queries*num_points, feat_dim)\n\n fg_mask = support_y\n bg_mask = torch.logical_not(support_y)\n\n fg_prototypes, fg_labels = self.getForegroundPrototypes(support_feat, fg_mask, k=self.n_subprototypes)\n bg_prototype, bg_labels = self.getBackgroundPrototypes(support_feat, bg_mask, k=self.n_subprototypes)\n\n # prototype learning\n if bg_prototype is not None and bg_labels is not None:\n prototypes = torch.cat((bg_prototype, fg_prototypes), dim=0) #(*, feat_dim)\n prototype_labels = torch.cat((bg_labels, fg_labels), dim=0) #(*,n_classes)\n else:\n prototypes = fg_prototypes\n prototype_labels = fg_labels\n self.num_prototypes = prototypes.shape[0]\n\n # construct label matrix Y, with Y_ij = 1 if x_i is from the support set and labeled as y_i = j, otherwise Y_ij = 0.\n self.num_nodes = self.num_prototypes + 
query_feat.shape[0] # number of node of partial observed graph\n Y = torch.zeros(self.num_nodes, self.n_classes).cuda()\n Y[:self.num_prototypes] = prototype_labels\n\n # construct feat matrix F\n node_feat = torch.cat((prototypes, query_feat), dim=0) #(num_nodes, feat_dim)\n\n # label propagation\n A = self.calculateLocalConstrainedAffinity(node_feat, k=self.k_connect)\n Z = self.label_propagate(A, Y) #(num_nodes, n_way+1)\n\n query_pred = Z[self.num_prototypes:, :] #(n_queries*num_points, n_way+1)\n query_pred = query_pred.view(-1, query_y.shape[1], self.n_classes).transpose(1,2) #(n_queries, n_way+1, num_points)\n loss = self.computeCrossEntropyLoss(query_pred, query_y)\n return query_pred, loss\n\n def getFeatures(self, x):\n \"\"\"\n Forward the input data to network and generate features\n :param x: input data with shape (B, C_in, L)\n :return: features with shape (B, C_out, L)\n \"\"\"\n if self.use_attention:\n feat_level1, feat_level2 = self.encoder(x)\n feat_level3 = self.base_learner(feat_level2)\n att_feat = self.att_learner(feat_level2)\n return torch.cat((feat_level1, att_feat, feat_level3), dim=1)\n else:\n # return self.base_learner(self.encoder(x))\n feat_level1, feat_level2 = self.encoder(x)\n feat_level3 = self.base_learner(feat_level2)\n map_feat = self.linear_mapper(feat_level2)\n return torch.cat((feat_level1, map_feat, feat_level3), dim=1)\n\n def getMutiplePrototypes(self, feat, k):\n \"\"\"\n Extract multiple prototypes by points separation and assembly\n\n Args:\n feat: input point features, shape:(n_points, feat_dim)\n Return:\n prototypes: output prototypes, shape: (n_prototypes, feat_dim)\n \"\"\"\n # sample k seeds as initial centers with Farthest Point Sampling (FPS)\n n = feat.shape[0]\n assert n > 0\n ratio = k / n\n if ratio < 1:\n fps_index = fps(feat, None, ratio=ratio, random_start=False).unique()\n num_prototypes = len(fps_index)\n farthest_seeds = feat[fps_index]\n\n # compute the point-to-seed distance\n distances = F.pairwise_distance(feat[..., None], farthest_seeds.transpose(0, 1)[None, ...],\n p=2) # (n_points, n_prototypes)\n\n # hard assignment for each point\n assignments = torch.argmin(distances, dim=1) # (n_points,)\n\n # aggregating each cluster to form prototype\n prototypes = torch.zeros((num_prototypes, self.feat_dim)).cuda()\n for i in range(num_prototypes):\n selected = torch.nonzero(assignments == i).squeeze(1)\n selected = feat[selected, :]\n prototypes[i] = selected.mean(0)\n return prototypes\n else:\n return feat\n\n def getForegroundPrototypes(self, feats, masks, k=100):\n \"\"\"\n Extract foreground prototypes for each class via clustering point features within that class\n\n Args:\n feats: input support features, shape: (n_way, k_shot, feat_dim, num_points)\n masks: foreground binary masks, shape: (n_way, k_shot, num_points)\n Return:\n prototypes: foreground prototypes, shape: (n_way*k, feat_dim)\n labels: foreground prototype labels (one-hot), shape: (n_way*k, n_way+1)\n \"\"\"\n prototypes = []\n labels = []\n for i in range(self.n_way):\n # extract point features belonging to current foreground class\n feat = feats[i, ...].transpose(1,2).contiguous().view(-1, self.feat_dim) #(k_shot*num_points, feat_dim)\n index = torch.nonzero(masks[i, ...].view(-1)).squeeze(1) #(k_shot*num_points,)\n feat = feat[index]\n class_prototypes = self.getMutiplePrototypes(feat, k)\n prototypes.append(class_prototypes)\n\n # construct label matrix\n class_labels = torch.zeros(class_prototypes.shape[0], self.n_classes)\n class_labels[:, i+1] = 1\n 
labels.append(class_labels)\n\n        prototypes = torch.cat(prototypes, dim=0)\n        labels = torch.cat(labels, dim=0)\n\n        return prototypes, labels\n\n    def getBackgroundPrototypes(self, feats, masks, k=100):\n        \"\"\"\n        Extract background prototypes via clustering point features within background class\n\n        Args:\n            feats: input support features, shape: (n_way, k_shot, feat_dim, num_points)\n            masks: background binary masks, shape: (n_way, k_shot, num_points)\n        Return:\n            prototypes: background prototypes, shape: (k, feat_dim)\n            labels: background prototype labels (one-hot), shape: (k, n_way+1)\n        \"\"\"\n        feats = feats.transpose(2,3).contiguous().view(-1, self.feat_dim)\n        index = torch.nonzero(masks.view(-1)).squeeze(1)\n        feat = feats[index]\n        # in case this support set does not contain background points...\n        if feat.shape[0] != 0:\n            prototypes = self.getMutiplePrototypes(feat, k)\n\n            labels = torch.zeros(prototypes.shape[0], self.n_classes)\n            labels[:, 0] = 1\n\n            return prototypes, labels\n        else:\n            return None, None\n\n    def calculateLocalConstrainedAffinity(self, node_feat, k=200, method='gaussian'):\n        \"\"\"\n        Calculate the Affinity matrix of the nearest neighbor graph constructed by prototypes and query points.\n        It is an efficient way when the number of nodes in the graph is large.\n\n        Args:\n            node_feat: input node features\n                  shape: (num_nodes, feat_dim)\n            k: the number of nearest neighbors for each node to compute the similarity\n            method: 'cosine' or 'gaussian', different similarity function\n        Return:\n            A: Affinity matrix with zero diagonal, shape: (num_nodes, num_nodes)\n        \"\"\"\n        # kNN search for the graph\n        X = node_feat.detach().cpu().numpy()\n        # build the index with cpu version\n        index = faiss.IndexFlatL2(self.feat_dim)\n        index.add(X)\n        _, I = index.search(X, k + 1)\n        I = torch.from_numpy(I[:, 1:]).cuda() #(num_nodes, k)\n\n        # create the affinity matrix\n        knn_idx = I.unsqueeze(2).expand(-1, -1, self.feat_dim).contiguous().view(-1, self.feat_dim)\n        knn_feat = torch.gather(node_feat, dim=0, index=knn_idx).contiguous().view(self.num_nodes, k, self.feat_dim)\n\n        if method == 'cosine':\n            knn_similarity = F.cosine_similarity(node_feat[:,None,:], knn_feat, dim=2)\n        elif method == 'gaussian':\n            dist = F.pairwise_distance(node_feat[:,:,None], knn_feat.transpose(1,2), p=2)\n            knn_similarity = torch.exp(-0.5*(dist/self.sigma)**2)\n        else:\n            raise NotImplementedError('Error! Distance computation method (%s) is unknown!' 
%method)\n\n        A = torch.zeros(self.num_nodes, self.num_nodes, dtype=torch.float).cuda()\n        A = A.scatter_(1, I, knn_similarity)\n        A = A + A.transpose(0,1)\n\n        identity_matrix = torch.eye(self.num_nodes, requires_grad=False).cuda()\n        A = A * (1 - identity_matrix)\n        return A\n\n\n    def label_propagate(self, A, Y, alpha=0.99):\n        \"\"\" Label Propagation, refer to \"Learning with Local and Global Consistency\" NeurIPS 2003\n        Args:\n            A: Affinity matrix with zero diagonal, shape: (num_nodes, num_nodes)\n            Y: initial label matrix, shape: (num_nodes, n_way+1)\n            alpha: a parameter to control the amount of propagated info.\n        Return:\n            Z: label predictions, shape: (num_nodes, n_way+1)\n        \"\"\"\n        # compute the symmetrically normalized matrix S\n        eps = np.finfo(float).eps\n        D = A.sum(1) #(num_nodes,)\n        D_sqrt_inv = torch.sqrt(1.0/(D+eps))\n        D_sqrt_inv = torch.diag_embed(D_sqrt_inv).cuda()\n        S = D_sqrt_inv @ A @ D_sqrt_inv\n\n        # closed-form solution\n        Z = torch.inverse(torch.eye(self.num_nodes).cuda() - alpha*S + eps) @ Y\n        return Z\n\n    def computeCrossEntropyLoss(self, query_logits, query_labels):\n        \"\"\" Calculate the cross-entropy loss for the query set\n        \"\"\"\n        return F.cross_entropy(query_logits, query_labels)\n"
]
| [
[
"torch.zeros",
"torch.diag_embed",
"torch.cat",
"torch.sqrt",
"torch.nonzero",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.gather",
"torch.argmin",
"torch.from_numpy",
"torch.logical_not",
"torch.nn.functional.cross_entropy",
"numpy.finfo",
"torch.eye",
"torch.nn.BatchNorm1d",
"torch.nn.functional.cosine_similarity",
"torch.nn.functional.relu",
"torch.exp"
]
]
|
caiostringari/BBC-JGR-Oceans | [
"4089081ce6e477a232039f6d2257ea43c97bd87b"
]
| [
"plot_profiles.py"
]
| [
"# ------------------------------------------------------------------------\n# ------------------------------------------------------------------------\n#\n# script : plot_profiles.py\n# pourpose : plot beach profiles\n# author : caio eadi stringari\n# email : [email protected]\n#\n# ------------------------------------------------------------------------\n# ------------------------------------------------------------------------\n\n# data I/O\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\n\n# linear regression\nfrom scipy import interpolate\nfrom scipy.stats import linregress\nfrom scipy.stats import gaussian_kde\n\nfrom pywavelearn.utils import ellapsedseconds, dffs\nfrom matplotlib.dates import date2num, num2date\n\nimport warnings\n\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nsns.set_context(\"paper\", font_scale=2.0, rc={\"lines.linewidth\": 2.0})\nsns.set_style(\"ticks\", {'axes.linewidth': 2,\n 'legend.frameon': True,\n 'axes.facecolor': \"#E9E9F1\",\n 'grid.color': \"w\"})\nmpl.rcParams['axes.linewidth'] = 2\n\nwarnings.filterwarnings(\"ignore\")\n\n\nif __name__ == '__main__':\n # Colors\n # ('Frazer Beach', '#1f77b4')\n # ('Moreton Island', '#ff7f0e')\n # ('Nobbys Beach', '#2ca02c')\n # ('One Mile Beach', '#d62728')\n # ('Seven Mile Beach', '#9467bd')\n # ('Werri Beach', '#8c564b')\n # ('Elizabeth Beach', '#e377c2')\n\n beaches = [\"Elizabeth Beach\",\n \"Werri Beach\",\n \"One Mile Beach\",\n \"Frazer Beach\",\n \"Moreton Island\",\n \"Seven Mile Beach\"]\n\n profiles = [\"Raw_Data/Profiles/13052019_Elizabeth_Beach.csv\",\n \"Raw_Data/Profiles/20140816_Werri_Beach.csv\",\n \"Raw_Data/Profiles/20140807_One_Mile_Beach.csv\",\n \"Raw_Data/Profiles/20180424_Frazer_Beach.csv\",\n \"Raw_Data/Profiles/20161220_Moreton_Island.csv\",\n \"Raw_Data/Profiles/20180616_Seven_Mile_Beach.csv\"]\n\n WL = [1., 0.8, 1.0, 0.3, 1.6, 1.2]\n\n # colors\n colors = [\"#e377c2\", \"#8c564b\", \"#d62728\", \"#1f77b4\",\n \"#ff7f0e\", \"#9467bd\"]\n\n gs = gridspec.GridSpec(4, 2)\n\n ax1 = plt.subplot(gs[0, 0]) # FB\n ax2 = plt.subplot(gs[0, 1]) # OMB\n\n ax3 = plt.subplot(gs[1, 0]) # WB\n ax4 = plt.subplot(gs[1, 1]) # EB\n\n ax5 = plt.subplot(gs[2, :]) # MI\n ax6 = plt.subplot(gs[3, :]) # SMB\n\n fig = plt.gcf()\n fig.set_size_inches(12, 10)\n axs = [ax1, ax2, ax3, ax4, ax5, ax6]\n\n # loop and plot\n k = 0\n for ax, prof, color, location, wl in zip(axs,\n profiles,\n colors,\n beaches,\n WL):\n\n # read profile\n df = pd.read_csv(prof)\n x = df[\"x\"].values\n dx = df[\"x\"].values.min()\n z = df[\"z\"].values\n x -= dx\n\n # fix water level\n z -= -wl\n\n # plot the profile\n ax.plot(x, z, \"-\", lw=3,\n color=colors[k], zorder=12,)\n\n # fill sand\n ax.fill_between(x, -10, z, interpolate=True,\n color='#ded6c4', zorder=10, )\n\n # fill water\n ax.fill_between(x, z, 0, interpolate=True,\n color=\"#c9e7ff\", zorder=5, alpha=0.5)\n\n ax.axhline(0, lw=3, color=\"navy\", zorder=6)\n\n # grids\n sns.despine(ax=ax)\n ax.grid(color=\"w\", ls=\"-\", lw=2, zorder=10)\n for _, spine in ax.spines.items():\n spine.set_zorder(300)\n\n # axis limits\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(ymin=-4)\n ax.set_ylabel(r\"Depth $[m]$\")\n ax.set_xlabel(r\"Cross-shore distance $[m]$\")\n\n ax.set_title(location)\n\n # aspect ratio\n ax.set_aspect(4)\n\n k += 1\n\n # letters\n bbox = dict(boxstyle=\"square\", ec=\"none\", fc=\"1\", lw=1, alpha=0.7)\n axs[0].text(0.025, 0.065, \"a)\", transform=axs[0].transAxes, ha=\"left\",\n 
va=\"bottom\", bbox=bbox, zorder=100)\n axs[1].text(0.025, 0.1, \"b)\", transform=axs[1].transAxes, ha=\"left\",\n va=\"bottom\", bbox=bbox, zorder=100)\n axs[2].text(0.025, 0.065, \"c)\", transform=axs[2].transAxes, ha=\"left\",\n va=\"bottom\", bbox=bbox, zorder=100)\n axs[3].text(0.035, 0.05, \"d)\", transform=axs[3].transAxes, ha=\"left\",\n va=\"bottom\", bbox=bbox, zorder=100)\n axs[4].text(0.015, 0.05, \"e)\", transform=axs[4].transAxes, ha=\"left\",\n va=\"bottom\", bbox=bbox, zorder=100)\n axs[5].text(0.015, 0.05, \"f)\", transform=axs[5].transAxes, ha=\"left\",\n va=\"bottom\", bbox=bbox, zorder=100)\n\n fig.tight_layout()\n plt.show()\n"
]
| [
[
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplot"
]
]
|
zhengkaitu/rdchiral | [
"eb9d53782a7b1f47cdc85cd19f6ec97c0d26bf10"
]
| [
"rdchiral/template_extractor.py"
]
| [
"import re\nfrom numpy.random import shuffle\nfrom copy import deepcopy\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem.rdchem import ChiralType\n\nVERBOSE = False\nUSE_STEREOCHEMISTRY = True\nMAXIMUM_NUMBER_UNMAPPED_PRODUCT_ATOMS = 5\nINCLUDE_ALL_UNMAPPED_REACTANT_ATOMS = True\n\ndef mols_from_smiles_list(all_smiles):\n '''Given a list of smiles strings, this function creates rdkit\n molecules'''\n mols = []\n for smiles in all_smiles:\n if not smiles: continue\n mols.append(Chem.MolFromSmiles(smiles))\n return mols\n\ndef replace_deuterated(smi):\n return re.sub('\\[2H\\]', r'[H]', smi)\n\ndef clear_mapnum(mol):\n [a.ClearProp('molAtomMapNumber') for a in mol.GetAtoms() if a.HasProp('molAtomMapNumber')]\n return mol\n\ndef get_tagged_atoms_from_mols(mols):\n '''Takes a list of RDKit molecules and returns total list of\n atoms and their tags'''\n atoms = []\n atom_tags = []\n for mol in mols:\n new_atoms, new_atom_tags = get_tagged_atoms_from_mol(mol)\n atoms += new_atoms \n atom_tags += new_atom_tags\n return atoms, atom_tags\n\ndef get_tagged_atoms_from_mol(mol):\n '''Takes an RDKit molecule and returns list of tagged atoms and their\n corresponding numbers'''\n atoms = []\n atom_tags = []\n for atom in mol.GetAtoms():\n if atom.HasProp('molAtomMapNumber'):\n atoms.append(atom)\n atom_tags.append(str(atom.GetProp('molAtomMapNumber')))\n return atoms, atom_tags\n\ndef atoms_are_different(atom1, atom2):\n '''Compares two RDKit atoms based on basic properties'''\n\n if atom1.GetAtomicNum() != atom2.GetAtomicNum(): return True # must be true for atom mapping\n if atom1.GetTotalNumHs() != atom2.GetTotalNumHs(): return True\n if atom1.GetFormalCharge() != atom2.GetFormalCharge(): return True\n if atom1.GetDegree() != atom2.GetDegree(): return True\n #if atom1.IsInRing() != atom2.IsInRing(): return True # do not want to check this!\n # e.g., in macrocycle formation, don't want the template to include the entire ring structure\n if atom1.GetNumRadicalElectrons() != atom2.GetNumRadicalElectrons(): return True\n if atom1.GetIsAromatic() != atom2.GetIsAromatic(): return True \n\n # Check bonds and nearest neighbor identity\n bonds1 = sorted([bond_to_label(bond) for bond in atom1.GetBonds()]) \n bonds2 = sorted([bond_to_label(bond) for bond in atom2.GetBonds()]) \n if bonds1 != bonds2: return True\n\n return False\n\ndef find_map_num(mol, mapnum):\n return [(a.GetIdx(), a) for a in mol.GetAtoms() if a.HasProp('molAtomMapNumber') \n and a.GetProp('molAtomMapNumber') == str(mapnum)][0]\n\ndef get_tetrahedral_atoms(reactants, products):\n tetrahedral_atoms = []\n for reactant in reactants:\n for ar in reactant.GetAtoms():\n if not ar.HasProp('molAtomMapNumber'):\n continue\n atom_tag = ar.GetProp('molAtomMapNumber')\n ir = ar.GetIdx()\n for product in products:\n try:\n (ip, ap) = find_map_num(product, atom_tag)\n if ar.GetChiralTag() != ChiralType.CHI_UNSPECIFIED or\\\n ap.GetChiralTag() != ChiralType.CHI_UNSPECIFIED:\n tetrahedral_atoms.append((atom_tag, ar, ap))\n except IndexError:\n pass\n return tetrahedral_atoms\n\ndef set_isotope_to_equal_mapnum(mol):\n for a in mol.GetAtoms():\n if a.HasProp('molAtomMapNumber'):\n a.SetIsotope(int(a.GetProp('molAtomMapNumber')))\n \ndef get_frag_around_tetrahedral_center(mol, idx):\n '''Builds a MolFragment using neighbors of a tetrahedral atom,\n where the molecule has already been updated to include isotopes'''\n ids_to_include = [idx]\n for neighbor in mol.GetAtomWithIdx(idx).GetNeighbors():\n 
ids_to_include.append(neighbor.GetIdx())\n    symbols = ['[{}{}]'.format(a.GetIsotope(), a.GetSymbol()) if a.GetIsotope() != 0\\\n               else '[#{}]'.format(a.GetAtomicNum()) for a in mol.GetAtoms()]\n    return Chem.MolFragmentToSmiles(mol, ids_to_include, isomericSmiles=True,\n                                   atomSymbols=symbols, allBondsExplicit=True,\n                                   allHsExplicit=True)\n    \ndef check_tetrahedral_centers_equivalent(atom1, atom2):\n    '''Checks to see if tetrahedral centers are equivalent in\n    chirality, ignoring the ChiralTag. Owning molecules of the\n    input atoms must have been Isotope-mapped'''\n    atom1_frag = get_frag_around_tetrahedral_center(atom1.GetOwningMol(), atom1.GetIdx())\n    atom1_neighborhood = Chem.MolFromSmiles(atom1_frag, sanitize=False)\n    for matched_ids in atom2.GetOwningMol().GetSubstructMatches(atom1_neighborhood, useChirality=True):\n        if atom2.GetIdx() in matched_ids:\n            return True\n    return False\n\ndef clear_isotope(mol):\n    [a.SetIsotope(0) for a in mol.GetAtoms()]\n\ndef get_changed_atoms(reactants, products):\n    '''Looks at mapped atoms in a reaction and determines which ones changed'''\n\n    err = 0\n    prod_atoms, prod_atom_tags = get_tagged_atoms_from_mols(products)\n\n    if VERBOSE: print('Products contain {} tagged atoms'.format(len(prod_atoms)))\n    if VERBOSE: print('Products contain {} unique atom numbers'.format(len(set(prod_atom_tags))))\n\n    reac_atoms, reac_atom_tags = get_tagged_atoms_from_mols(reactants)\n    if len(set(prod_atom_tags)) != len(set(reac_atom_tags)):\n        if VERBOSE: print('warning: different atom tags appear in reactants and products')\n        #err = 1 # okay for Reaxys, since Reaxys creates mass\n    if len(prod_atoms) != len(reac_atoms):\n        if VERBOSE: print('warning: total number of tagged atoms differ, stoichiometry != 1?')\n        #err = 1\n\n    # Find differences \n    changed_atoms = [] # actual reactant atom species\n    changed_atom_tags = [] # atom map numbers of those atoms\n\n    # Product atoms that are different from reactant atom equivalent\n    for i, prod_tag in enumerate(prod_atom_tags):\n\n        for j, reac_tag in enumerate(reac_atom_tags):\n            if reac_tag != prod_tag: continue\n            if reac_tag not in changed_atom_tags: # don't bother comparing if we know this atom changes\n                # If atom changed, add\n                if atoms_are_different(prod_atoms[i], reac_atoms[j]):\n                    changed_atoms.append(reac_atoms[j])\n                    changed_atom_tags.append(reac_tag)\n                    break\n                # If reac_tag appears multiple times, add (need for stoichiometry > 1)\n                if prod_atom_tags.count(reac_tag) > 1:\n                    changed_atoms.append(reac_atoms[j])\n                    changed_atom_tags.append(reac_tag)\n                    break\n\n    # Reactant atoms that do not appear in product (tagged leaving groups)\n    for j, reac_tag in enumerate(reac_atom_tags):\n        if reac_tag not in changed_atom_tags:\n            if reac_tag not in prod_atom_tags:\n                changed_atoms.append(reac_atoms[j])\n                changed_atom_tags.append(reac_tag)\n\n    # Atoms that change CHIRALITY (just tetrahedral for now...)\n    tetra_atoms = get_tetrahedral_atoms(reactants, products)\n    if VERBOSE:\n        print('Found {} atom-mapped tetrahedral atoms that have chirality specified at least partially'.format(len(tetra_atoms)))\n    [set_isotope_to_equal_mapnum(reactant) for reactant in reactants]\n    [set_isotope_to_equal_mapnum(product) for product in products]\n    for (atom_tag, ar, ap) in tetra_atoms:\n        if VERBOSE: \n            print('For atom tag {}'.format(atom_tag))\n            print('    reactant: {}'.format(ar.GetChiralTag()))\n            print('    product:  {}'.format(ap.GetChiralTag()))\n        if atom_tag in changed_atom_tags:\n            if VERBOSE:\n                print('-> atoms have changed (by more than just chirality!)')\n        else:\n            unchanged = 
check_tetrahedral_centers_equivalent(ar, ap) and \\\n                ChiralType.CHI_UNSPECIFIED not in [ar.GetChiralTag(), ap.GetChiralTag()]\n            if unchanged:\n                if VERBOSE: \n                    print('-> atoms confirmed to have same chirality, no change')\n            else:\n                if VERBOSE:\n                    print('-> atom changed chirality!!')\n                # Make sure chiral change is next to the reaction center and not\n                # a random specification (must be CONNECTED to a changed atom)\n                tetra_adj_to_rxn = False\n                for neighbor in ap.GetNeighbors():\n                    if neighbor.HasProp('molAtomMapNumber'):\n                        if neighbor.GetProp('molAtomMapNumber') in changed_atom_tags:\n                            tetra_adj_to_rxn = True\n                            break\n                if tetra_adj_to_rxn:\n                    if VERBOSE:\n                        print('-> atom adj to reaction center, now included')\n                    changed_atom_tags.append(atom_tag)\n                    changed_atoms.append(ar)\n                else:\n                    if VERBOSE:\n                        print('-> adj far from reaction center, not including')\n    [clear_isotope(reactant) for reactant in reactants]\n    [clear_isotope(product) for product in products]\n\n\n    if VERBOSE: \n        print('{} tagged atoms in reactants change 1-atom properties'.format(len(changed_atom_tags)))\n        for smarts in [atom.GetSmarts() for atom in changed_atoms]:\n            print('  {}'.format(smarts))\n\n    return changed_atoms, changed_atom_tags, err\n\ndef get_special_groups(mol):\n    '''Given an RDKit molecule, this function returns a list of tuples, where\n    each tuple contains the AtomIdx's for a special group of atoms which should \n    be included in a fragment all together. This should only be done for the \n    reactants, otherwise the products might end up with mapping mismatches\n\n    We draw a distinction between atoms in groups that trigger that whole\n    group to be included, and \"unimportant\" atoms in the groups that will not\n    be included if another atom matches.'''\n\n    # Define templates\n    group_templates = [ \n        (range(3), '[OH0,SH0]=C[O,Cl,I,Br,F]',), # carboxylic acid / halogen\n        (range(3), '[OH0,SH0]=CN',), # amide/sulfamide\n        (range(4), 'S(O)(O)[Cl]',), # sulfonyl chloride\n        (range(3), 'B(O)O',), # boronic acid/ester\n        ((0,), '[Si](C)(C)C'), # trialkyl silane\n        ((0,), '[Si](OC)(OC)(OC)'), # trialkoxy silane, default to methyl\n        (range(3), '[N;H0;$(N-[#6]);D2]-,=[N;D2]-,=[N;D1]',), # azide\n        (range(8), 'O=C1N([Br,I,F,Cl])C(=O)CC1',), # NBS brominating agent\n        (range(11), 'Cc1ccc(S(=O)(=O)O)cc1'), # Tosyl\n        ((7,), 'CC(C)(C)OC(=O)[N]'), # N(boc)\n        ((4,), '[CH3][CH0]([CH3])([CH3])O'), # \n        (range(2), '[C,N]=[C,N]',), # alkene/imine\n        (range(2), '[C,N]#[C,N]',), # alkyne/nitrile\n        ((2,), 'C=C-[*]',), # adj to alkene\n        ((2,), 'C#C-[*]',), # adj to alkyne\n        ((2,), 'O=C-[*]',), # adj to carbonyl\n        ((3,), 'O=C([CH3])-[*]'), # adj to methyl ketone\n        ((3,), 'O=C([O,N])-[*]',), # adj to carboxylic acid/amide/ester\n        (range(4), 'ClS(Cl)=O',), # thionyl chloride\n        (range(2), '[Mg,Li,Zn,Sn][Br,Cl,I,F]',), # Grignard/metal (non-dissociated)\n        (range(3), 'S(O)(O)',), # SO2 group\n        (range(2), 'N~N',), # diazo\n        ((1,), '[!#6;R]@[#6;R]',), # adjacency to heteroatom in ring\n        ((2,), '[a!c]:a:a',), # two-steps away from heteroatom in aromatic ring\n        #((1,), 'c(-,=[*]):c([Cl,I,Br,F])',), # ortho to halogen on ring - too specific?\n        #((1,), 'c(-,=[*]):c:c([Cl,I,Br,F])',), # meta to halogen on ring - too specific?\n        ((0,), '[B,C](F)(F)F'), # CF3, BF3 should have the F3 included\n    ]\n\n    # Stereo-specific ones (where we will need to include neighbors)\n    # Tetrahedral centers should already be okay...\n    group_templates += [\n        ((1,2,), '[*]/[CH]=[CH]/[*]'), # trans with two hydrogens\n        ((1,2,), '[*]/[CH]=[CH]\\[*]'), # cis with two hydrogens\n        ((1,2,), 
'[*]/[CH]=[CH0]([*])\\[*]'), # trans with one hydrogens\n ((1,2,), '[*]/[D3;H1]=[!D1]'), # specified on one end, can be N or C\n ]\n\n # Build list\n groups = []\n for (add_if_match, template) in group_templates:\n matches = mol.GetSubstructMatches(Chem.MolFromSmarts(template), useChirality=True)\n for match in matches:\n add_if = []\n for pattern_idx, atom_idx in enumerate(match):\n if pattern_idx in add_if_match:\n add_if.append(atom_idx)\n groups.append((add_if, match))\n return groups\n\ndef expand_atoms_to_use(mol, atoms_to_use, groups=[], symbol_replacements=[]):\n '''Given an RDKit molecule and a list of AtomIdX which should be included\n in the reaction, this function expands the list of AtomIdXs to include one \n nearest neighbor with special consideration of (a) unimportant neighbors and\n (b) important functional groupings'''\n\n # Copy\n new_atoms_to_use = atoms_to_use[:]\n\n # Look for all atoms in the current list of atoms to use\n for atom in mol.GetAtoms():\n if atom.GetIdx() not in atoms_to_use: continue\n # Ensure membership of changed atom is checked against group\n for group in groups:\n if int(atom.GetIdx()) in group[0]:\n if VERBOSE: \n print('adding group due to match')\n try:\n print('Match from molAtomMapNum {}'.format(\n atom.GetProp('molAtomMapNumber'),\n ))\n except KeyError:\n pass\n for idx in group[1]:\n if idx not in atoms_to_use:\n new_atoms_to_use.append(idx)\n symbol_replacements.append((idx, convert_atom_to_wildcard(mol.GetAtomWithIdx(idx))))\n # Look for all nearest neighbors of the currently-included atoms\n for neighbor in atom.GetNeighbors():\n # Evaluate nearest neighbor atom to determine what should be included\n new_atoms_to_use, symbol_replacements = \\\n expand_atoms_to_use_atom(mol, new_atoms_to_use, neighbor.GetIdx(), \n groups=groups, symbol_replacements=symbol_replacements)\n \n return new_atoms_to_use, symbol_replacements\n\ndef expand_atoms_to_use_atom(mol, atoms_to_use, atom_idx, groups=[], symbol_replacements=[]):\n '''Given an RDKit molecule and a list of AtomIdx which should be included\n in the reaction, this function extends the list of atoms_to_use by considering \n a candidate atom extension, atom_idx'''\n\n # See if this atom belongs to any special groups (highest priority)\n found_in_group = False\n for group in groups: # first index is atom IDs for match, second is what to include\n if int(atom_idx) in group[0]: # int correction\n if VERBOSE: \n print('adding group due to match')\n try:\n print('Match from molAtomMapNum {}'.format(\n mol.GetAtomWithIdx(atom_idx).GetProp('molAtomMapNumber'),\n ))\n except KeyError:\n pass\n # Add the whole list, redundancies don't matter \n # *but* still call convert_atom_to_wildcard!\n for idx in group[1]:\n if idx not in atoms_to_use:\n atoms_to_use.append(idx)\n symbol_replacements.append((idx, convert_atom_to_wildcard(mol.GetAtomWithIdx(idx))))\n found_in_group = True\n if found_in_group: \n return atoms_to_use, symbol_replacements\n \n # How do we add an atom that wasn't in an identified important functional group?\n # Develop generalized SMARTS symbol\n\n # Skip current candidate atom if it is already included\n if atom_idx in atoms_to_use:\n return atoms_to_use, symbol_replacements\n\n # Include this atom\n atoms_to_use.append(atom_idx)\n\n # Look for suitable SMARTS replacement\n symbol_replacements.append((atom_idx, convert_atom_to_wildcard(mol.GetAtomWithIdx(atom_idx))))\n\n return atoms_to_use, symbol_replacements\n\ndef convert_atom_to_wildcard(atom):\n '''This function takes an RDKit 
atom and turns it into a wildcard \n using heuristic generalization rules. This function should be used\n when candidate atoms are used to extend the reaction core for higher\n generalizability'''\n\n # Is this a terminal atom? We can tell if the degree is one\n if atom.GetDegree() == 1:\n symbol = '[' + atom.GetSymbol() + ';D1;H{}'.format(atom.GetTotalNumHs())\n if atom.GetFormalCharge() != 0:\n charges = re.search('([-+]+[1-9]?)', atom.GetSmarts())\n symbol = symbol.replace(';D1', ';{};D1'.format(charges.group()))\n \n else:\n # Initialize\n symbol = '['\n\n # Add atom primitive - atomic num and aromaticity (don't use COMPLETE wildcards)\n if atom.GetAtomicNum() != 6:\n symbol += '#{};'.format(atom.GetAtomicNum())\n if atom.GetIsAromatic():\n symbol += 'a;'\n elif atom.GetIsAromatic():\n symbol += 'c;'\n else:\n symbol += 'C;'\n\n # Charge is important\n if atom.GetFormalCharge() != 0:\n charges = re.search('([-+]+[1-9]?)', atom.GetSmarts())\n if charges: symbol += charges.group() + ';'\n\n # Strip extra semicolon\n if symbol[-1] == ';': symbol = symbol[:-1]\n\n # Close with label or with bracket\n label = re.search('\\:[0-9]+\\]', atom.GetSmarts())\n if label: \n symbol += label.group()\n else:\n symbol += ']'\n\n if VERBOSE: \n if symbol != atom.GetSmarts():\n print('Improved generality of atom SMARTS {} -> {}'.format(atom.GetSmarts(), symbol))\n\n return symbol\n\ndef reassign_atom_mapping(transform):\n '''This function takes an atom-mapped reaction SMILES and reassigns \n the atom-mapping labels (numbers) from left to right, once \n that transform has been canonicalized.'''\n\n all_labels = re.findall('\\:([0-9]+)\\]', transform)\n\n # Define list of replacements which matches all_labels *IN ORDER*\n replacements = []\n replacement_dict = {}\n counter = 1\n for label in all_labels: # keep in order! 
this is important\n if label not in replacement_dict:\n replacement_dict[label] = str(counter)\n counter += 1\n replacements.append(replacement_dict[label])\n\n # Perform replacements in order\n transform_newmaps = re.sub('\\:[0-9]+\\]', \n lambda match: (':' + replacements.pop(0) + ']'),\n transform)\n\n return transform_newmaps\n\ndef get_strict_smarts_for_atom(atom):\n '''\n For an RDkit atom object, generate a SMARTS pattern that\n matches the atom as strictly as possible\n '''\n\n symbol = atom.GetSmarts()\n if atom.GetSymbol() == 'H':\n symbol = '[#1]'\n\n if '[' not in symbol:\n symbol = '[' + symbol + ']'\n\n # Explicit stereochemistry - *before* H\n if USE_STEREOCHEMISTRY:\n if atom.GetChiralTag() != Chem.rdchem.ChiralType.CHI_UNSPECIFIED:\n if '@' not in symbol:\n # Be explicit when there is a tetrahedral chiral tag\n if atom.GetChiralTag() == Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW:\n tag = '@'\n elif atom.GetChiralTag() == Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW:\n tag = '@@'\n if ':' in symbol:\n symbol = symbol.replace(':', ';{}:'.format(tag))\n else:\n symbol = symbol.replace(']', ';{}]'.format(tag))\n\n if 'H' not in symbol:\n H_symbol = 'H{}'.format(atom.GetTotalNumHs())\n # Explicit number of hydrogens: include \"H0\" when no hydrogens present\n if ':' in symbol: # stick H0 before label\n symbol = symbol.replace(':', ';{}:'.format(H_symbol))\n else:\n symbol = symbol.replace(']', ';{}]'.format(H_symbol))\n \n # Explicit degree\n if ':' in symbol:\n symbol = symbol.replace(':', ';D{}:'.format(atom.GetDegree()))\n else:\n symbol = symbol.replace(']', ';D{}]'.format(atom.GetDegree()))\n\n # Explicit formal charge\n if '+' not in symbol and '-' not in symbol:\n charge = atom.GetFormalCharge()\n charge_symbol = '+' if (charge >= 0) else '-'\n charge_symbol += '{}'.format(abs(charge))\n if ':' in symbol: \n symbol = symbol.replace(':', ';{}:'.format(charge_symbol))\n else:\n symbol = symbol.replace(']', ';{}]'.format(charge_symbol))\n\n return symbol\n\ndef expand_changed_atom_tags(changed_atom_tags, reactant_fragments):\n '''Given a list of changed atom tags (numbers as strings) and a string consisting\n of the reactant_fragments to include in the reaction transform, this function \n adds any tagged atoms found in the reactant side of the template to the \n changed_atom_tags list so that those tagged atoms are included in the products'''\n\n expansion = []\n atom_tags_in_reactant_fragments = re.findall('\\:([0-9]+)\\]', reactant_fragments)\n for atom_tag in atom_tags_in_reactant_fragments:\n if atom_tag not in changed_atom_tags:\n expansion.append(atom_tag)\n if VERBOSE: print('after building reactant fragments, additional labels included: {}'.format(expansion))\n return expansion\n\ndef get_fragments_for_changed_atoms(mols, changed_atom_tags, radius=0, \n category='reactants', expansion=[]):\n '''Given a list of RDKit mols and a list of changed atom tags, this function\n computes the SMILES string of molecular fragments using MolFragmentToSmiles \n for all changed fragments.\n\n expansion: atoms added during reactant expansion that should be included and\n generalized in product fragment\n '''\n fragments = ''\n mols_changed = []\n for mol in mols:\n # Initialize list of replacement symbols (updated during expansion)\n symbol_replacements = []\n\n # Are we looking for special reactive groups? 
(reactants only)\n if category == 'reactants':\n groups = get_special_groups(mol)\n else:\n groups = []\n\n # Build list of atoms to use\n atoms_to_use = []\n for atom in mol.GetAtoms():\n # Check self (only tagged atoms)\n if ':' in atom.GetSmarts():\n if atom.GetSmarts().split(':')[1][:-1] in changed_atom_tags:\n atoms_to_use.append(atom.GetIdx())\n symbol = get_strict_smarts_for_atom(atom)\n if symbol != atom.GetSmarts():\n symbol_replacements.append((atom.GetIdx(), symbol))\n continue\n\n # Fully define leaving groups and this molecule participates?\n if INCLUDE_ALL_UNMAPPED_REACTANT_ATOMS and len(atoms_to_use) > 0:\n if category == 'reactants':\n for atom in mol.GetAtoms():\n if not atom.HasProp('molAtomMapNumber'):\n atoms_to_use.append(atom.GetIdx())\n\n # Check neighbors (any atom)\n for k in range(radius):\n atoms_to_use, symbol_replacements = expand_atoms_to_use(mol, atoms_to_use, \n groups=groups, symbol_replacements=symbol_replacements)\n\n if category == 'products':\n # Add extra labels to include (for products only)\n if expansion:\n for atom in mol.GetAtoms():\n if ':' not in atom.GetSmarts(): continue\n label = atom.GetSmarts().split(':')[1][:-1]\n if label in expansion and label not in changed_atom_tags:\n atoms_to_use.append(atom.GetIdx())\n # Make the expansion a wildcard\n symbol_replacements.append((atom.GetIdx(), convert_atom_to_wildcard(atom))) \n if VERBOSE: print('expanded label {} to wildcard in products'.format(label))\n \n # Make sure unmapped atoms are included (from products)\n for atom in mol.GetAtoms():\n if not atom.HasProp('molAtomMapNumber'): \n atoms_to_use.append(atom.GetIdx())\n symbol = get_strict_smarts_for_atom(atom)\n symbol_replacements.append((atom.GetIdx(), symbol))\n\n # Define new symbols based on symbol_replacements\n symbols = [atom.GetSmarts() for atom in mol.GetAtoms()]\n for (i, symbol) in symbol_replacements:\n symbols[i] = symbol\n\n if not atoms_to_use: \n continue\n \n # Keep flipping stereocenters until we are happy...\n # this is a sloppy fix during extraction to achieve consistency\n tetra_consistent = False\n num_tetra_flips = 0\n while not tetra_consistent and num_tetra_flips < 100:\n mol_copy = deepcopy(mol)\n [x.ClearProp('molAtomMapNumber') for x in mol_copy.GetAtoms()] \n this_fragment = AllChem.MolFragmentToSmiles(mol_copy, atoms_to_use, \n atomSymbols=symbols, allHsExplicit=True, \n isomericSmiles=USE_STEREOCHEMISTRY, allBondsExplicit=True)\n\n # Figure out what atom maps are tetrahedral centers\n # Set isotopes to make sure we're getting the *exact* match we want\n this_fragment_mol = AllChem.MolFromSmarts(this_fragment)\n tetra_map_nums = []\n for atom in this_fragment_mol.GetAtoms():\n if atom.HasProp('molAtomMapNumber'):\n atom.SetIsotope(int(atom.GetProp('molAtomMapNumber')))\n if atom.GetChiralTag() != Chem.rdchem.ChiralType.CHI_UNSPECIFIED:\n tetra_map_nums.append(atom.GetProp('molAtomMapNumber'))\n map_to_id = {}\n for atom in mol.GetAtoms():\n if atom.HasProp('molAtomMapNumber'):\n atom.SetIsotope(int(atom.GetProp('molAtomMapNumber')))\n map_to_id[atom.GetProp('molAtomMapNumber')] = atom.GetIdx()\n \n # Look for matches\n tetra_consistent = True\n all_matched_ids = []\n \n # skip substructure matching if there are a lot of fragments\n # this can help prevent GetSubstructMatches from hanging \n frag_smi = Chem.MolToSmiles(this_fragment_mol)\n if frag_smi.count('.') > 5:\n break\n \n for matched_ids in mol.GetSubstructMatches(this_fragment_mol, useChirality=True):\n all_matched_ids.extend(matched_ids)\n 
shuffle(tetra_map_nums)\n for tetra_map_num in tetra_map_nums:\n if VERBOSE: print('Checking consistency of tetrahedral {}'.format(tetra_map_num))\n #print('Using fragment {}'.format(Chem.MolToSmarts(this_fragment_mol, True)))\n if map_to_id[tetra_map_num] not in all_matched_ids:\n tetra_consistent = False\n if VERBOSE: print('@@@@@@@@@@@ FRAGMENT DOES NOT MATCH PARENT MOL @@@@@@@@@@@@@@')\n if VERBOSE: print('@@@@@@@@@@@ FLIPPING CHIRALITY SYMBOL NOW @@@@@@@@@@@@@@')\n prevsymbol = symbols[map_to_id[tetra_map_num]]\n if '@@' in prevsymbol:\n symbol = prevsymbol.replace('@@', '@')\n elif '@' in prevsymbol:\n symbol = prevsymbol.replace('@', '@@')\n else:\n raise ValueError('Need to modify symbol of tetra atom without @ or @@??')\n symbols[map_to_id[tetra_map_num]] = symbol\n num_tetra_flips += 1\n # IMPORTANT: only flip one at a time\n break \n\n # Clear isotopes\n for atom in mol.GetAtoms():\n atom.SetIsotope(0)\n \n if not tetra_consistent:\n raise ValueError('Could not find consistent tetrahedral mapping, {} centers'.format(len(tetra_map_nums)))\n\n fragments += '(' + this_fragment + ').'\n mols_changed.append(Chem.MolToSmiles(clear_mapnum(Chem.MolFromSmiles(Chem.MolToSmiles(mol, True))), True))\n\n # auxiliary template information: is this an intramolecular reaction or dimerization?\n intra_only = (1 == len(mols_changed))\n dimer_only = (1 == len(set(mols_changed))) and (len(mols_changed) == 2)\n \n return fragments[:-1], intra_only, dimer_only\n\ndef canonicalize_transform(transform):\n '''This function takes an atom-mapped SMARTS transform and\n converts it to a canonical form by, if nececssary, rearranging\n the order of reactant and product templates and reassigning\n atom maps.'''\n\n transform_reordered = '>>'.join([canonicalize_template(x) for x in transform.split('>>')])\n return reassign_atom_mapping(transform_reordered)\n\ndef canonicalize_template(template):\n '''This function takes one-half of a template SMARTS string \n (i.e., reactants or products) and re-orders them based on\n an equivalent string without atom mapping.'''\n\n # Strip labels to get sort orders\n template_nolabels = re.sub('\\:[0-9]+\\]', ']', template)\n\n # Split into separate molecules *WITHOUT wrapper parentheses*\n template_nolabels_mols = template_nolabels[1:-1].split(').(')\n template_mols = template[1:-1].split(').(')\n\n # Split into fragments within those molecules\n for i in range(len(template_mols)):\n nolabel_mol_frags = template_nolabels_mols[i].split('.')\n mol_frags = template_mols[i].split('.')\n\n # Get sort order within molecule, defined WITHOUT labels\n sortorder = [j[0] for j in sorted(enumerate(nolabel_mol_frags), key = lambda x:x[1])]\n\n # Apply sorting and merge list back into overall mol fragment\n template_nolabels_mols[i] = '.'.join([nolabel_mol_frags[j] for j in sortorder])\n template_mols[i] = '.'.join([mol_frags[j] for j in sortorder])\n\n # Get sort order between molecules, defined WITHOUT labels\n sortorder = [j[0] for j in sorted(enumerate(template_nolabels_mols), key = lambda x:x[1])]\n\n # Apply sorting and merge list back into overall transform\n template = '(' + ').('.join([template_mols[i] for i in sortorder]) + ')'\n\n return template\n\ndef bond_to_label(bond):\n '''This function takes an RDKit bond and creates a label describing\n the most important attributes'''\n a1_label = str(bond.GetBeginAtom().GetAtomicNum())\n a2_label = str(bond.GetEndAtom().GetAtomicNum())\n if bond.GetBeginAtom().HasProp('molAtomMapNumber'):\n a1_label += 
bond.GetBeginAtom().GetProp('molAtomMapNumber')\n if bond.GetEndAtom().HasProp('molAtomMapNumber'):\n a2_label += bond.GetEndAtom().GetProp('molAtomMapNumber')\n atoms = sorted([a1_label, a2_label])\n\n return '{}{}{}'.format(atoms[0], bond.GetSmarts(), atoms[1])\n\ndef extract_from_reaction(reaction):\n reactants = mols_from_smiles_list(replace_deuterated(reaction['reactants']).split('.'))\n products = mols_from_smiles_list(replace_deuterated(reaction['products']).split('.'))\n \n # if rdkit cant understand molecule, return\n if None in reactants: return {'reaction_id': reaction['_id']} \n if None in products: return {'reaction_id': reaction['_id']}\n \n # try to sanitize molecules\n try:\n for i in range(len(reactants)):\n reactants[i] = AllChem.RemoveHs(reactants[i]) # *might* not be safe\n for i in range(len(products)):\n products[i] = AllChem.RemoveHs(products[i]) # *might* not be safe\n [Chem.SanitizeMol(mol) for mol in reactants + products] # redundant w/ RemoveHs\n [mol.UpdatePropertyCache() for mol in reactants + products]\n except Exception as e:\n # can't sanitize -> skip\n print(e)\n print('Could not load SMILES or sanitize')\n print('ID: {}'.format(reaction['_id']))\n return {'reaction_id': reaction['_id']}\n \n are_unmapped_product_atoms = False\n extra_reactant_fragment = ''\n for product in products:\n prod_atoms = product.GetAtoms()\n if sum([a.HasProp('molAtomMapNumber') for a in prod_atoms]) < len(prod_atoms):\n if VERBOSE: print('Not all product atoms have atom mapping')\n if VERBOSE: print('ID: {}'.format(reaction['_id']))\n are_unmapped_product_atoms = True\n\n if are_unmapped_product_atoms: # add fragment to template\n for product in products:\n prod_atoms = product.GetAtoms()\n # Get unmapped atoms\n unmapped_ids = [\n a.GetIdx() for a in prod_atoms if not a.HasProp('molAtomMapNumber')\n ]\n if len(unmapped_ids) > MAXIMUM_NUMBER_UNMAPPED_PRODUCT_ATOMS:\n # Skip this example - too many unmapped product atoms!\n return {'reaction_id': reaction['_id']}\n # Define new atom symbols for fragment with atom maps, generalizing fully\n atom_symbols = ['[{}]'.format(a.GetSymbol()) for a in prod_atoms]\n # And bond symbols...\n bond_symbols = ['~' for b in product.GetBonds()]\n if unmapped_ids:\n extra_reactant_fragment += AllChem.MolFragmentToSmiles(\n product, unmapped_ids, \n allHsExplicit = False, isomericSmiles = USE_STEREOCHEMISTRY, \n atomSymbols = atom_symbols, bondSymbols = bond_symbols\n ) + '.'\n if extra_reactant_fragment:\n extra_reactant_fragment = extra_reactant_fragment[:-1]\n if VERBOSE: print(' extra reactant fragment: {}'.format(extra_reactant_fragment))\n\n # Consolidate repeated fragments (stoichometry)\n extra_reactant_fragment = '.'.join(sorted(list(set(extra_reactant_fragment.split('.')))))\n\n\n if None in reactants + products:\n print('Could not parse all molecules in reaction, skipping')\n print('ID: {}'.format(reaction['_id']))\n return {'reaction_id': reaction['_id']}\n\n # Calculate changed atoms\n changed_atoms, changed_atom_tags, err = get_changed_atoms(reactants, products)\n if err: \n if VERBOSE:\n print('Could not get changed atoms')\n print('ID: {}'.format(reaction['_id']))\n return {'reaction_id': reaction['_id']}\n if not changed_atom_tags:\n if VERBOSE:\n print('No atoms changed?')\n print('ID: {}'.format(reaction['_id']))\n # print('Reaction SMILES: {}'.format(example_doc['RXN_SMILES']))\n return {'reaction_id': reaction['_id']}\n\n try:\n # Get fragments for reactants\n reactant_fragments, intra_only, dimer_only = 
get_fragments_for_changed_atoms(reactants, changed_atom_tags, \n radius = 1, expansion = [], category = 'reactants')\n # Get fragments for products \n # (WITHOUT matching groups but WITH the addition of reactant fragments)\n product_fragments, _, _ = get_fragments_for_changed_atoms(products, changed_atom_tags, \n radius = 0, expansion = expand_changed_atom_tags(changed_atom_tags, reactant_fragments),\n category = 'products')\n except ValueError as e:\n if VERBOSE:\n print(e)\n print(reaction['_id'])\n return {'reaction_id': reaction['_id']}\n\n # Put together and canonicalize (as best as possible)\n rxn_string = '{}>>{}'.format(reactant_fragments, product_fragments)\n rxn_canonical = canonicalize_transform(rxn_string)\n # Change from inter-molecular to intra-molecular \n rxn_canonical_split = rxn_canonical.split('>>')\n rxn_canonical = rxn_canonical_split[0][1:-1].replace(').(', '.') + \\\n '>>' + rxn_canonical_split[1][1:-1].replace(').(', '.')\n\n reactants_string = rxn_canonical.split('>>')[0]\n products_string = rxn_canonical.split('>>')[1]\n\n retro_canonical = products_string + '>>' + reactants_string\n\n # Load into RDKit\n rxn = AllChem.ReactionFromSmarts(retro_canonical)\n if rxn.Validate()[1] != 0: \n print('Could not validate reaction successfully')\n print('ID: {}'.format(reaction['_id']))\n print('retro_canonical: {}'.format(retro_canonical))\n if VERBOSE: input('Pausing...') # raw_input is Python 2 only\n return {'reaction_id': reaction['_id']}\n\n template = {\n 'products': products_string,\n 'reactants': reactants_string,\n 'reaction_smarts': retro_canonical,\n 'intra_only': intra_only,\n 'dimer_only': dimer_only,\n 'reaction_id': reaction['_id'],\n 'necessary_reagent': extra_reactant_fragment,\n }\n \n return template\n"
]
| [
[
"numpy.random.shuffle"
]
]
|
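The row above extracts retrosynthetic reaction templates; its only numpy call is the `shuffle` over tetrahedral map numbers. Its atom-map relabeling step is pure Python and easy to sanity-check in isolation. A minimal sketch (the function body mirrors `reassign_atom_mapping` from the row above; the SMARTS transform fed to it is a made-up example):

```python
# Left-to-right atom-map relabeling used to canonicalize extracted templates.
import re

def reassign_atom_mapping(transform):
    all_labels = re.findall(r'\:([0-9]+)\]', transform)
    replacement_dict, replacements, counter = {}, [], 1
    for label in all_labels:  # order matters: first occurrence gets label 1
        if label not in replacement_dict:
            replacement_dict[label] = str(counter)
            counter += 1
        replacements.append(replacement_dict[label])
    return re.sub(r'\:[0-9]+\]',
                  lambda match: ':' + replacements.pop(0) + ']',
                  transform)

print(reassign_atom_mapping('[CH3:9][OH:4]>>[CH3:9][NH2:2]'))
# -> [CH3:1][OH:2]>>[CH3:1][NH2:3]
```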
cidimec/opencv-ai-competition-beavers | [
"4849219c7c1525d12c1e681185bbafcf3030079a"
]
| [
"codes/3_calculate_distance.py"
]
| [
"import threading\r\nfrom pathlib import Path\r\nimport math\r\nimport cv2\r\nimport depthai\r\nimport numpy as np\r\nfrom imutils.video import FPS\r\n\r\nnnPathPeople = str((Path(__file__).parent / Path('models/people.blob')).resolve().absolute()) #544x320 NN\r\nnnPathMask = str((Path(__file__).parent / Path('models/facemask.blob')).resolve().absolute()) #300x300 NN\r\nlabelMapMask = [\"background\", \"sin barbijo\", \"con barbijo\"]\r\nstepSize = 0.05\r\n\r\ndef frame_norm(frame, bbox):\r\n norm_vals = np.full(len(bbox), frame.shape[0])\r\n norm_vals[::2] = frame.shape[1]\r\n return (np.clip(np.array(bbox), 0, 1) * norm_vals).astype(int)\r\n\r\ndef to_planar(arr: np.ndarray, shape: tuple) -> list:\r\n return [val for channel in cv2.resize(arr, shape).transpose(2, 0, 1) for y_col in channel for val in y_col]\r\n\r\ndef calculate_distance(point1, point2):\r\n x1, y1, z1 = point1\r\n x2, y2, z2 = point2\r\n dx, dy, dz = x1 - x2, y1 - y2, z1 - z2\r\n distance = math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)\r\n return distance\r\n\r\ndef create_pipeline():\r\n print(\"Creating pipeline...\")\r\n pipeline = depthai.Pipeline()\r\n\r\n print(\"Creating Color Camera...\")\r\n cam = pipeline.createColorCamera()\r\n cam.setPreviewSize(544, 320)\r\n cam.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)\r\n cam.setInterleaved(False)\r\n cam.setBoardSocket(depthai.CameraBoardSocket.RGB)\r\n\r\n cam_xout = pipeline.createXLinkOut()\r\n cam_xout.setStreamName(\"cam_out\")\r\n cam.preview.link(cam_xout.input)\r\n\r\n # NeuralNetwork\r\n print(\"Creating People Detection Neural Network...\")\r\n people_nn = pipeline.createNeuralNetwork()\r\n people_nn.setBlobPath(nnPathPeople)\r\n\r\n cam.preview.link(people_nn.input)\r\n\r\n people_nn_xout = pipeline.createXLinkOut()\r\n people_nn_xout.setStreamName(\"people_nn\")\r\n people_nn.out.link(people_nn_xout.input)\r\n\r\n # NeuralNetwork\r\n print(\"Creating Mask Detection Neural Network...\")\r\n mask_nn = pipeline.createNeuralNetwork()\r\n mask_nn.setBlobPath(nnPathMask)\r\n\r\n mask_nn_xin = pipeline.createXLinkIn()\r\n mask_nn_xin.setStreamName(\"mask_in\")\r\n mask_nn_xin.out.link(mask_nn.input)\r\n land_nn_xout = pipeline.createXLinkOut()\r\n land_nn_xout.setStreamName(\"mask_nn\")\r\n mask_nn.out.link(land_nn_xout.input)\r\n\r\n #---------------------------------------spatial calculator\r\n # Define a source - two mono (grayscale) cameras\r\n monoLeft = pipeline.createMonoCamera()\r\n monoRight = pipeline.createMonoCamera()\r\n stereo = pipeline.createStereoDepth()\r\n spatialLocationCalculator = pipeline.createSpatialLocationCalculator()\r\n\r\n xoutDepth = pipeline.createXLinkOut()\r\n xoutSpatialData = pipeline.createXLinkOut()\r\n xinSpatialCalcConfig = pipeline.createXLinkIn()\r\n\r\n xoutDepth.setStreamName(\"depth\")\r\n xoutSpatialData.setStreamName(\"spatialData\")\r\n xinSpatialCalcConfig.setStreamName(\"spatialCalcConfig\")\r\n\r\n # MonoCamera\r\n monoLeft.setResolution(depthai.MonoCameraProperties.SensorResolution.THE_400_P)\r\n monoLeft.setBoardSocket(depthai.CameraBoardSocket.LEFT)\r\n monoRight.setResolution(depthai.MonoCameraProperties.SensorResolution.THE_400_P)\r\n monoRight.setBoardSocket(depthai.CameraBoardSocket.RIGHT)\r\n\r\n outputDepth = True\r\n outputRectified = False\r\n lrcheck = False\r\n subpixel = False\r\n\r\n # StereoDepth\r\n stereo.setOutputDepth(outputDepth)\r\n stereo.setOutputRectified(outputRectified)\r\n stereo.setConfidenceThreshold(255)\r\n\r\n stereo.setLeftRightCheck(lrcheck)\r\n 
stereo.setSubpixel(subpixel)\r\n\r\n monoLeft.out.link(stereo.left)\r\n monoRight.out.link(stereo.right)\r\n\r\n spatialLocationCalculator.passthroughDepth.link(xoutDepth.input)\r\n stereo.depth.link(spatialLocationCalculator.inputDepth)\r\n\r\n topLeft = depthai.Point2f(0.4, 0.4)\r\n bottomRight = depthai.Point2f(0.6, 0.6)\r\n\r\n spatialLocationCalculator.setWaitForConfigInput(False)\r\n config = depthai.SpatialLocationCalculatorConfigData()\r\n config.depthThresholds.lowerThreshold = 100\r\n config.depthThresholds.upperThreshold = 10000\r\n config.roi = depthai.Rect(topLeft, bottomRight)\r\n spatialLocationCalculator.initialConfig.addROI(config)\r\n spatialLocationCalculator.out.link(xoutSpatialData.input)\r\n xinSpatialCalcConfig.out.link(spatialLocationCalculator.inputConfig)\r\n\r\n # Pipeline defined, now the device is assigned and pipeline is started\r\n device = depthai.Device(pipeline)\r\n device.startPipeline()\r\n return pipeline, config\r\n\r\nclass Main:\r\n def __init__(self, device, config):\r\n self.device = device\r\n self.config = config\r\n print(\"Starting pipeline...\")\r\n self.device.startPipeline()\r\n self.cam_out = self.device.getOutputQueue(\"cam_out\")\r\n self.frame = None\r\n\r\n self.bboxes = []\r\n self.depth_dist = []\r\n self.min_dists = []\r\n self.mask_bboxes = []\r\n self.mask_detections = []\r\n self.current_bbox = None\r\n\r\n self.running = True\r\n self.fps = FPS()\r\n self.fps.start()\r\n\r\n def people_thread(self):\r\n print(\"people thread\")\r\n people_nn = self.device.getOutputQueue(\"people_nn\")\r\n mask_in = self.device.getInputQueue(\"mask_in\")\r\n while self.running:\r\n if self.frame is None:\r\n continue\r\n try:\r\n bboxes = np.array(people_nn.get().getFirstLayerFp16())\r\n except RuntimeError as ex:\r\n continue\r\n bboxes = bboxes.reshape((bboxes.size // 7, 7))\r\n self.bboxes = bboxes[bboxes[:, 2] > 0.7][:, 3:7] #fancy indexing\r\n\r\n cfg = depthai.SpatialLocationCalculatorConfig()\r\n for bbox in self.bboxes:\r\n bbox = frame_norm(self.frame, bbox)\r\n topLeft = depthai.Point2f(bbox[0], bbox[1])\r\n bottomRight = depthai.Point2f(bbox[2], bbox[3])\r\n self.config.roi = depthai.Rect(topLeft, bottomRight)\r\n cfg.addROI(self.config)\r\n\r\n if len(self.bboxes) > 0:\r\n self.device.getInputQueue(\"spatialCalcConfig\").send(cfg)\r\n\r\n maks_data = depthai.NNData()\r\n maks_data.setLayer(\"0\", to_planar(self.frame, (300, 300)))\r\n mask_in.send(maks_data)\r\n\r\n def mask_thread(self):\r\n print(\"mask thread\")\r\n\r\n mask_nn = self.device.getOutputQueue(name=\"mask_nn\", maxSize=1, blocking=False)\r\n\r\n while self.running:\r\n try:\r\n bboxes = np.array(mask_nn.get().getFirstLayerFp16())\r\n except RuntimeError as ex:\r\n continue\r\n bboxes = bboxes.reshape((bboxes.size // 7, 7))\r\n self.mask_bboxes = bboxes[bboxes[:, 2] > 0.7][:, 3:7]\r\n self.mask_detections = bboxes[bboxes[:, 2] > 0.7][:, 1]\r\n\r\n def depth_thread(self):\r\n print(\"depth thread\")\r\n # Output queue will be used to get the depth frames from the outputs defined above\r\n spatialCalcQueue = self.device.getOutputQueue(name=\"spatialData\", maxSize=1, blocking=False)\r\n\r\n while self.running:\r\n try:\r\n inDepthAvg = spatialCalcQueue.get() # blocking call, will wait until a new data has arrived\r\n except RuntimeError as ex:\r\n continue\r\n spatialData = inDepthAvg.getSpatialLocations()\r\n z_dists = list()\r\n\r\n for depthData in spatialData:\r\n z_dists.append([depthData.spatialCoordinates.x, depthData.spatialCoordinates.y, 
depthData.spatialCoordinates.z])\r\n\r\n min_dists = list()\r\n for p1 in z_dists:\r\n min_dist = math.inf\r\n for p2 in z_dists:\r\n if p1 != p2:\r\n dist = calculate_distance(p1,p2)\r\n min_dist = min(dist, min_dist)\r\n min_dists.append(min_dist)\r\n\r\n self.depth_dist = z_dists\r\n self.min_dists = min_dists\r\n\r\n\r\n def get_frame(self, retries=0):\r\n return np.array(self.cam_out.get().getData()).reshape((3, 320, 544)).transpose(1, 2, 0).astype(np.uint8)\r\n\r\n def run(self):\r\n depthQueue = self.device.getOutputQueue(name=\"depth\", maxSize=4, blocking=False)\r\n self.threads = [\r\n threading.Thread(target=self.people_thread),\r\n threading.Thread(target=self.mask_thread),\r\n threading.Thread(target=self.depth_thread)\r\n ]\r\n for thread in self.threads:\r\n thread.start()\r\n\r\n while True:\r\n try:\r\n new_frame = self.get_frame()\r\n inDepth = depthQueue.get()\r\n except RuntimeError:\r\n continue\r\n\r\n self.fps.update()\r\n self.frame = new_frame\r\n self.debug_frame = self.frame.copy()\r\n\r\n for raw_bbox, depth, min_dist in zip(self.bboxes, self.depth_dist, self.min_dists):\r\n bbox = frame_norm(self.frame, raw_bbox)\r\n cv2.rectangle(self.debug_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (10, 245, 10), 2)\r\n cv2.putText(self.debug_frame, \"x: \"+str(round(depth[0],2)), (bbox[0] + 10, bbox[1] + 20),\r\n cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 10))\r\n cv2.putText(self.debug_frame, \"y: \"+str(round(depth[1],2)), (bbox[0] + 10, bbox[1] + 35),\r\n cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 10))\r\n cv2.putText(self.debug_frame, \"z: \"+str(round(depth[2],2)), (bbox[0] + 10, bbox[1] + 50),\r\n cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 10))\r\n cv2.putText(self.debug_frame, \"D: \"+str(round(min_dist,2)), (bbox[0] + 10, bbox[1] + 65),\r\n cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 10))\r\n\r\n for raw_bbox, label in zip(self.mask_bboxes, self.mask_detections):\r\n bbox = frame_norm(self.frame, raw_bbox)\r\n cv2.rectangle(self.debug_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (10, 215, 10), 2)\r\n cv2.putText(self.debug_frame, str(labelMapMask[int(label)]), (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (10, 215, 10))\r\n\r\n cv2.imshow(\"Camera view\", self.debug_frame)\r\n\r\n if cv2.waitKey(1) == ord('q'):\r\n cv2.destroyAllWindows()\r\n break\r\n\r\n self.fps.stop()\r\n print(\"FPS: {:.2f}\".format(self.fps.fps()))\r\n cv2.destroyAllWindows()\r\n for i in range(1, 5): # https://stackoverflow.com/a/25794701/5494277\r\n cv2.waitKey(1)\r\n self.running = False\r\ndev, conf = create_pipeline()\r\nwith depthai.Device(dev) as device:\r\n app = Main(device, conf)\r\n app.run()\r\n\r\nfor thread in app.threads:\r\n thread.join()\r\n"
]
| [
[
"numpy.array"
]
]
|
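The DepthAI script above needs an OAK device to run end to end, but its two pure helpers can be exercised standalone. A small sketch (frame shape matches the 544x320 preview used above; the bbox and the two points are made up):

```python
# Standalone check of frame_norm and the 3-D distance helper from the row above.
import math
import numpy as np

def frame_norm(frame, bbox):
    # x coords (even indices) scale by frame width, y coords by frame height
    norm_vals = np.full(len(bbox), frame.shape[0])
    norm_vals[::2] = frame.shape[1]
    return (np.clip(np.array(bbox), 0, 1) * norm_vals).astype(int)

frame = np.zeros((320, 544, 3), dtype=np.uint8)
print(frame_norm(frame, [0.25, 0.5, 0.75, 1.0]))  # [136 160 408 320]

def calculate_distance(p1, p2):
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(p1, p2)))

# spatial coordinates come back in millimetres, so 500.0 here means 0.5 m apart
print(calculate_distance((0.0, 0.0, 1000.0), (300.0, 400.0, 1000.0)))  # 500.0
```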
bartoszptak/segmentation_models.pytorch | [
"7f443f7ae39a58841adce1f3a7973d6f4bcd052a"
]
| [
"tests/test_models_3d.py"
]
| [
"import os\nimport sys\nimport mock\nimport pytest\nimport torch\n\n# mock detection module\nsys.modules[\"torchvision._C\"] = mock.Mock()\nimport segmentation_models_pytorch as smp\n\n\ndef get_encoders_3d():\n exclude_encoders = [\n \"senet154\",\n \"resnext101_32x16d\",\n \"resnext101_32x32d\",\n \"resnext101_32x48d\",\n ]\n encoders = smp.encoders.get_encoder_names()\n\n encoders_3D = [e for e in encoders if e not in exclude_encoders and e[-3:] == '_3D']\n\n return encoders_3D\n\n\nENCODERS_3D = get_encoders_3d()\n\nDEFAULT_ENCODER_3D = \"resnet18_3D\"\n\n\ndef get_sample(model_class):\n\n if model_class == smp.Unet_3D:\n sample = torch.ones([2, 3, 1, 128, 128])\n else:\n raise ValueError(\"Not supported model class {}\".format(model_class))\n return sample\n\n\ndef _test_forward(model, sample, test_shape=False):\n with torch.no_grad():\n out = model(sample)\n if test_shape:\n assert out.shape[2:] == sample.shape[2:]\n\n\ndef _test_forward_backward(model, sample, test_shape=False):\n out = model(sample)\n out.mean().backward()\n if test_shape:\n assert out.shape[2:] == sample.shape[2:]\n\[email protected](\"encoder_name\", ENCODERS_3D)\[email protected](\"encoder_depth\", [3, 5])\[email protected](\"model_class\", [smp.Unet_3D])\ndef test_forward_3D(model_class, encoder_name, encoder_depth, **kwargs):\n decoder_channels = (256, 128, 64, 32, 16)\n\n model = model_class(\n encoder_name, encoder_depth=encoder_depth, encoder_weights=None, \n decoder_channels=decoder_channels[:encoder_depth], **kwargs\n )\n sample = get_sample(model_class)\n model.eval()\n\n if encoder_depth == 5:\n test_shape = True\n else:\n test_shape = False\n\n _test_forward(model, sample, test_shape)\n\[email protected](\n \"model_class\",\n [smp.Unet_3D]\n)\ndef test_forward_backward(model_class):\n sample = get_sample(model_class)\n model = model_class(DEFAULT_ENCODER_3D, encoder_weights=None)\n _test_forward_backward(model, sample)\n\[email protected](\"model_class\", [smp.Unet_3D])\ndef test_aux_output(model_class):\n model = model_class(\n DEFAULT_ENCODER_3D, encoder_weights=None, aux_params=dict(classes=2)\n )\n sample = get_sample(model_class)\n label_size = (sample.shape[0], 2)\n mask, label = model(sample)\n assert label.size() == label_size\n\n\[email protected](\"model_class\", [smp.Unet_3D])\[email protected](\"encoder_name\", ENCODERS_3D)\[email protected](\"in_channels\", [3])\[email protected](\"temporary\", [1,2,4,5])\ndef test_in_channels_and_temporary(model_class, encoder_name, in_channels, temporary):\n sample = torch.ones([1, in_channels, temporary, 64, 64])\n model = model_class(DEFAULT_ENCODER_3D, encoder_weights=None, in_channels=in_channels, temporal_size=temporary)\n model.eval()\n with torch.no_grad():\n model(sample)\n\n assert model.encoder._in_channels == in_channels\n\nif __name__ == \"__main__\":\n pytest.main([__file__])\n"
]
| [
[
"torch.no_grad",
"torch.ones"
]
]
|
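The tests above encode the fork's input convention: 5-D tensors shaped (batch, channels, temporal, height, width). A hedged sketch of a single forward pass (`Unet_3D` and the `*_3D` encoders exist only in this fork, so this assumes that fork is installed):

```python
# Sketch assuming the bartoszptak fork of segmentation_models.pytorch is installed.
import torch
import segmentation_models_pytorch as smp

model = smp.Unet_3D("resnet18_3D", encoder_weights=None)
model.eval()
with torch.no_grad():
    out = model(torch.ones([2, 3, 1, 128, 128]))  # (B, C, T, H, W)
print(out.shape)  # the tests above expect spatial dims to match the input at full depth
```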
glemaitre/hexrd | [
"b68b1ba72e0f480d29bdaae2adbd6c6e2380cc7c"
]
| [
"hexrd/fitting/peakfunctions.py"
]
| [
"# ============================================================\n# Copyright (c) 2012, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n# Written by Joel Bernier <[email protected]> and others.\n# LLNL-CODE-529294.\n# All rights reserved.\n#\n# This file is part of HEXRD. For details on dowloading the source,\n# see the file COPYING.\n#\n# Please also see the file LICENSE.\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License (as published by the Free\n# Software Foundation) version 2.1 dated February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program (see file LICENSE); if not, write to\n# the Free Software Foundation, Inc., 59 Temple Place, Suite 330,\n# Boston, MA 02111-1307 USA or visit <http://www.gnu.org/licenses/>.\n# ============================================================\n\nimport numpy as np\nimport copy\nfrom hexrd import constants\n\n\ngauss_width_fact = constants.sigma_to_fwhm\nlorentz_width_fact = 2.\n\n# FIXME: we need this for the time being to be able to parse multipeak fitting\n# results; need to wrap all this up in a class in the future!\nmpeak_nparams_dict = {\n 'gaussian': 3,\n 'lorentzian': 3,\n 'pvoigt': 4,\n 'split_pvoigt': 6\n}\n\n\n# =============================================================================\n# 1-D Gaussian Functions\n# =============================================================================\n# Split the unit gaussian so this can be called for 2d and 3d functions\ndef _unit_gaussian(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [x0,FWHM]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n x0 = p[0]\n FWHM = p[1]\n sigma = FWHM/gauss_width_fact\n\n f = np.exp(-(x-x0)**2/(2.*sigma**2.))\n return f\n\n\ndef _gaussian1d_no_bg(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n A = p[0]\n f = A*_unit_gaussian(p[[1, 2]], x)\n return f\n\n\ndef gaussian1d(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM,c0,c1]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n bg0 = p[3]\n bg1 = p[4]\n\n f = _gaussian1d_no_bg(p[:3], x)+bg0+bg1*x\n\n return f\n\n\ndef _gaussian1d_no_bg_deriv(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n d_mat -- (3 x n) ndarray of derivative values at positions x\n \"\"\"\n\n x0 = p[1]\n FWHM = p[2]\n\n sigma = FWHM/gauss_width_fact\n\n dydx0 = _gaussian1d_no_bg(p, x)*((x-x0)/(sigma**2.))\n dydA = _unit_gaussian(p[[1, 2]], x)\n dydFWHM = _gaussian1d_no_bg(p, x)*((x-x0)**2./(sigma**3.))/gauss_width_fact\n\n d_mat = np.zeros((len(p), len(x)))\n\n d_mat[0, :] = dydA\n d_mat[1, :] = dydx0\n d_mat[2, :] = dydFWHM\n\n return d_mat\n\n\ndef gaussian1d_deriv(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM,c0,c1]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n d_mat -- (5 x n) ndarray of 
derivative values at positions x\n \"\"\"\n\n d_mat = np.zeros((len(p), len(x)))\n d_mat[0:3, :] = _gaussian1d_no_bg_deriv(p[0:3], x)\n d_mat[3, :] = 1.\n d_mat[4, :] = x\n\n return d_mat\n\n\n# =============================================================================\n# 1-D Lorentzian Functions\n# =============================================================================\n# Split the unit function so this can be called for 2d and 3d functions\ndef _unit_lorentzian(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [x0,FWHM]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n x0 = p[0]\n FWHM = p[1]\n gamma = FWHM/lorentz_width_fact\n\n f = gamma**2 / ((x-x0)**2 + gamma**2)\n return f\n\n\ndef _lorentzian1d_no_bg(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n A = p[0]\n f = A*_unit_lorentzian(p[[1, 2]], x)\n\n return f\n\n\ndef lorentzian1d(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM,c0,c1]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n bg0 = p[3]\n bg1 = p[4]\n\n f = _lorentzian1d_no_bg(p[:3], x)+bg0+bg1*x\n\n return f\n\n\ndef _lorentzian1d_no_bg_deriv(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n d_mat -- (3 x n) ndarray of derivative values at positions x\n \"\"\"\n\n x0 = p[1]\n FWHM = p[2]\n\n gamma = FWHM/lorentz_width_fact\n\n dydx0 = _lorentzian1d_no_bg(p, x)*((2.*(x-x0))/((x-x0)**2 + gamma**2))\n dydA = _unit_lorentzian(p[[1, 2]], x)\n dydFWHM = _lorentzian1d_no_bg(p, x) \\\n * ((2.*(x-x0)**2.)/(gamma*((x-x0)**2 + gamma**2)))/lorentz_width_fact\n\n d_mat = np.zeros((len(p), len(x)))\n d_mat[0, :] = dydA\n d_mat[1, :] = dydx0\n d_mat[2, :] = dydFWHM\n\n return d_mat\n\n\ndef lorentzian1d_deriv(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM,c0,c1]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n d_mat -- (5 x n) ndarray of derivative values at positions x\n \"\"\"\n\n d_mat = np.zeros((len(p), len(x)))\n d_mat[0:3, :] = _lorentzian1d_no_bg_deriv(p[0:3], x)\n d_mat[3, :] = 1.\n d_mat[4, :] = x\n\n return d_mat\n\n\n# =============================================================================\n# 1-D Psuedo Voigt Functions\n# =============================================================================\n# Split the unit function so this can be called for 2d and 3d functions\ndef _unit_pvoigt1d(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [x0,FWHM,n]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n n = p[2]\n\n f = (n*_unit_gaussian(p[:2], x)+(1.-n)*_unit_lorentzian(p[:2], x))\n return f\n\n\ndef _pvoigt1d_no_bg(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM,n]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n A = p[0]\n f = A*_unit_pvoigt1d(p[[1, 2, 3]], x)\n return f\n\n\ndef pvoigt1d(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM,n,c0,c1]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n bg0 = p[4]\n bg1 = p[5]\n\n f = _pvoigt1d_no_bg(p[:4], x)+bg0+bg1*x\n\n return f\n\n\n# 
=============================================================================\n# 1-D Split Psuedo Voigt Functions\n# =============================================================================\n\ndef _split_pvoigt1d_no_bg(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM-,FWHM+,n-,n+]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n A = p[0]\n x0 = p[1]\n\n f = np.zeros(x.shape[0])\n\n # Define halves, using gthanorequal and lthan, choice is arbitrary\n xr = x >= x0\n xl = x < x0\n\n # +\n right = np.where(xr)[0]\n\n f[right] = A*_unit_pvoigt1d(p[[1, 3, 5]], x[right])\n\n # -\n left = np.where(xl)[0]\n f[left] = A*_unit_pvoigt1d(p[[1, 2, 4]], x[left])\n\n return f\n\n\ndef split_pvoigt1d(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,FWHM-,FWHM+,n-,n+,c0,c1]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n bg0 = p[6]\n bg1 = p[7]\n\n f = _split_pvoigt1d_no_bg(p[:6], x)+bg0+bg1*x\n\n return f\n\n\n# =============================================================================\n# Tanh Step Down\n# =============================================================================\n\ndef tanh_stepdown_nobg(p, x):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,w]\n x -- (n) ndarray of coordinate positions\n\n Outputs:\n f -- (n) ndarray of function values at positions x\n \"\"\"\n\n A = p[0]\n x0 = p[1]\n w = p[2]\n\n f = A*(0.5*(1.-np.tanh((x-x0)/w)))\n\n return f\n\n\n# =============================================================================\n# 2-D Rotation Coordinate Transform\n# =============================================================================\n\ndef _2d_coord_transform(theta, x0, y0, x, y):\n xprime = np.cos(theta)*x+np.sin(theta)*y\n yprime = -np.sin(theta)*x+np.cos(theta)*y\n\n x0prime = np.cos(theta)*x0+np.sin(theta)*y0\n y0prime = -np.sin(theta)*x0+np.cos(theta)*y0\n\n return x0prime, y0prime, xprime, yprime\n\n\n# =============================================================================\n# 2-D Gaussian Function\n# =============================================================================\n\ndef _gaussian2d_no_bg(p, x, y):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,y0,FWHMx,FWHMy]\n x -- (n x o) ndarray of coordinate positions for dimension 1\n y -- (n x o) ndarray of coordinate positions for dimension 1\n\n Outputs:\n f -- (n x 0) ndarray of function values at positions (x,y)\n \"\"\"\n\n A = p[0]\n f = A*_unit_gaussian(p[[1, 3]], x)*_unit_gaussian(p[[2, 4]], y)\n return f\n\n\ndef _gaussian2d_rot_no_bg(p, x, y):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,y0,FWHMx,FWHMy,theta]\n x -- (n x o) ndarray of coordinate positions for dimension 1\n y -- (n x o) ndarray of coordinate positions for dimension 2\n\n Outputs:\n f -- (n x o) ndarray of function values at positions (x,y)\n \"\"\"\n\n theta = p[5]\n\n x0prime, y0prime, xprime, yprime = _2d_coord_transform(\n theta, p[1], p[2], x, y)\n\n # this copy was needed so original parameters set isn't changed\n newp = copy.copy(p)\n\n newp[1] = x0prime\n newp[2] = y0prime\n\n f = _gaussian2d_no_bg(newp[:5], xprime, yprime)\n\n return f\n\n\ndef gaussian2d_rot(p, x, y):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,y0,FWHMx,FWHMy,theta,c0,c1x,c1y]\n x -- (n x o) ndarray of coordinate positions for dimension 1\n y -- (n x o) ndarray of coordinate positions for dimension 2\n\n Outputs:\n f -- (n x o) ndarray of function values at positions (x,y)\n 
\"\"\"\n\n bg0 = p[6]\n bg1x = p[7]\n bg1y = p[8]\n\n f = _gaussian2d_rot_no_bg(p[:6], x, y)+(bg0+bg1x*x+bg1y*y)\n return f\n\n\ndef gaussian2d(p, x, y):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,y0,FWHMx,FWHMy,c0,c1x,c1y]\n x -- (n x o) ndarray of coordinate positions for dimension 1\n y -- (n x o) ndarray of coordinate positions for dimension 2\n\n Outputs:\n f -- (n x o) ndarray of function values at positions (x,y)\n \"\"\"\n\n bg0 = p[5]\n bg1x = p[6]\n bg1y = p[7]\n\n f = _gaussian2d_no_bg(p[:5], x, y)+(bg0+bg1x*x+bg1y*y)\n return f\n\n\n# =============================================================================\n# 2-D Split Psuedo Voigt Function\n# =============================================================================\n\ndef _split_pvoigt2d_no_bg(p, x, y):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,y0,FWHMx-,FWHMx+,FWHMy-,FWHMy+,nx-,nx+,ny-,ny+]\n x -- (n x o) ndarray of coordinate positions for dimension 1\n y -- (n x o) ndarray of coordinate positions for dimension 2\n\n Outputs:\n f -- (n x o) ndarray of function values at positions (x,y)\n \"\"\"\n\n A = p[0]\n x0 = p[1]\n y0 = p[2]\n\n f = np.zeros([x.shape[0], x.shape[1]])\n\n # Define quadrants, using gthanorequal and lthan, choice is arbitrary\n xr = x >= x0\n xl = x < x0\n yr = y >= y0\n yl = y < y0\n\n # ++\n q1 = np.where(xr & yr)\n f[q1] = A*_unit_pvoigt1d(p[[1, 4, 8]], x[q1]) * \\\n _unit_pvoigt1d(p[[2, 6, 10]], y[q1])\n\n # +-\n q2 = np.where(xr & yl)\n f[q2] = A*_unit_pvoigt1d(p[[1, 4, 8]], x[q2]) * \\\n _unit_pvoigt1d(p[[2, 5, 9]], y[q2])\n\n # -+\n q3 = np.where(xl & yr)\n f[q3] = A*_unit_pvoigt1d(p[[1, 3, 7]], x[q3]) * \\\n _unit_pvoigt1d(p[[2, 6, 10]], y[q3])\n\n # --\n q4 = np.where(xl & yl)\n f[q4] = A*_unit_pvoigt1d(p[[1, 3, 7]], x[q4]) * \\\n _unit_pvoigt1d(p[[2, 5, 9]], y[q4])\n\n return f\n\n\ndef _split_pvoigt2d_rot_no_bg(p, x, y):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,y0,FWHMx-,FWHMx+,FWHMy-,FWHMy+,nx-,nx+,ny-,ny+,theta]\n x -- (n x o) ndarray of coordinate positions for dimension 1\n y -- (n x o) ndarray of coordinate positions for dimension 2\n\n Outputs:\n f -- (n x o) ndarray of function values at positions (x,y)\n \"\"\"\n\n theta = p[11]\n\n x0prime, y0prime, xprime, yprime = _2d_coord_transform(\n theta, p[1], p[2], x, y)\n\n # this copy was needed so original parameters set isn't changed\n newp = copy.copy(p)\n\n newp[1] = x0prime\n newp[2] = y0prime\n\n f = _split_pvoigt2d_no_bg(newp[:11], xprime, yprime)\n\n return f\n\n\ndef split_pvoigt2d_rot(p, x, y):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,y0,FWHMx-,FWHMx+,FWHMy-,FWHMy+,\n nx-,nx+,ny-,ny+,theta,c0,c1x,c1y]\n x -- (n x o) ndarray of coordinate positions for dimension 1\n y -- (n x o) ndarray of coordinate positions for dimension 2\n\n Outputs:\n f -- (n x o) ndarray of function values at positions (x,y)\n \"\"\"\n\n bg0 = p[12]\n bg1x = p[13]\n bg1y = p[14]\n\n f = _split_pvoigt2d_rot_no_bg(p[:12], x, y)+(bg0+bg1x*x+bg1y*y)\n\n return f\n\n\n# =============================================================================\n# 3-D Gaussian Function\n# =============================================================================\n\ndef _gaussian3d_no_bg(p, x, y, z):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,y0,z0,FWHMx,FWHMy,FWHMz]\n x -- (n x o x q) ndarray of coordinate positions for dimension 1\n y -- (n x o x q) ndarray of coordinate positions for dimension 2\n y -- (z x o x q) ndarray of coordinate positions for dimension 3\n\n Outputs:\n f -- (n x o x q) ndarray of function values at positions 
(x,y,z)\n \"\"\"\n\n A = p[0]\n f = A * _unit_gaussian(p[[1, 4]], x) \\\n * _unit_gaussian(p[[2, 5]], y) \\\n * _unit_gaussian(p[[3, 6]], z)\n return f\n\n\ndef gaussian3d(p, x, y, z):\n \"\"\"\n Required Arguments:\n p -- (m) [A,x0,y0,z0,FWHMx,FWHMy,FWHMz,c0,c1x,c1y,c1z]\n x -- (n x o x q) ndarray of coordinate positions for dimension 1\n y -- (n x o x q) ndarray of coordinate positions for dimension 2\n z -- (n x o x q) ndarray of coordinate positions for dimension 3\n\n Outputs:\n f -- (n x o x q) ndarray of function values at positions (x,y,z)\n \"\"\"\n\n bg0 = p[7]\n bg1x = p[8]\n bg1y = p[9]\n bg1z = p[10]\n\n f = _gaussian3d_no_bg(p[:7], x, y, z)+(bg0+bg1x*x+bg1y*y+bg1z*z)\n return f\n\n\n# =============================================================================\n# Multipeak\n# =============================================================================\n\ndef _mpeak_1d_no_bg(p, x, pktype, num_pks):\n \"\"\"\n Required Arguments:\n p -- (m x u) list of peak parameters for number of peaks\n where m is the number of parameters per peak\n - \"gaussian\" and \"lorentzian\" - 3\n - \"pvoigt\" - 4\n - \"split_pvoigt\" - 6\n x -- (n) ndarray of coordinate positions for dimension 1\n pktype -- string, type of analytic function; current options are\n \"gaussian\",\"lorentzian\",\"pvoigt\" (psuedo voigt), and\n \"split_pvoigt\" (split psuedo voigt)\n num_pks -- integer 'u' indicating the number of pks, must match length of p\n\n Outputs:\n f -- (n) ndarray of function values at positions (x)\n \"\"\"\n\n f = np.zeros(len(x))\n\n if pktype == 'gaussian' or pktype == 'lorentzian':\n p_fit = np.reshape(p[:3*num_pks], [num_pks, 3])\n elif pktype == 'pvoigt':\n p_fit = np.reshape(p[:4*num_pks], [num_pks, 4])\n elif pktype == 'split_pvoigt':\n p_fit = np.reshape(p[:6*num_pks], [num_pks, 6])\n\n for ii in np.arange(num_pks):\n if pktype == 'gaussian':\n f = f+_gaussian1d_no_bg(p_fit[ii], x)\n elif pktype == 'lorentzian':\n f = f+_lorentzian1d_no_bg(p_fit[ii], x)\n elif pktype == 'pvoigt':\n f = f+_pvoigt1d_no_bg(p_fit[ii], x)\n elif pktype == 'split_pvoigt':\n f = f+_split_pvoigt1d_no_bg(p_fit[ii], x)\n\n return f\n\n\ndef mpeak_1d(p, x, pktype, num_pks, bgtype=None):\n \"\"\"\n Required Arguments:\n p -- (m x u) list of peak parameters for number of peaks where m is the\n number of parameters per peak\n \"gaussian\" and \"lorentzian\" - 3\n \"pvoigt\" - 4\n \"split_pvoigt\" - 6\n x -- (n) ndarray of coordinate positions for dimension 1\n pktype -- string, type of analytic function that will be used;\n current options are \"gaussian\",\"lorentzian\",\"pvoigt\" (psuedo voigt), and\n \"split_pvoigt\" (split psuedo voigt)\n num_pks -- integer 'u' indicating the number of pks, must match length of p\n bgtype -- string, background function; available options are \"constant\",\n \"linear\", and \"quadratic\"\n\n Outputs:\n f -- (n) ndarray of function values at positions (x)\n \"\"\"\n f = _mpeak_1d_no_bg(p, x, pktype, num_pks)\n\n if bgtype == 'linear':\n f = f+p[-2]+p[-1]*x # c0=p[-2], c1=p[-1]\n elif bgtype == 'constant':\n f = f+p[-1] # c0=p[-1]\n elif bgtype == 'quadratic':\n f = f+p[-3]+p[-2]*x+p[-1]*x**2 # c0=p[-3], c1=p[-2], c2=p[-1],\n\n return f\n"
]
| [
[
"numpy.sin",
"numpy.reshape",
"numpy.zeros",
"numpy.exp",
"numpy.where",
"numpy.tanh",
"numpy.arange",
"numpy.cos"
]
]
|
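The parameter packing in the row above is positional, so a quick numeric check helps: for `gaussian1d`, p = [A, x0, FWHM, c0, c1], and the background-free peak should fall to A/2 at x0 ± FWHM/2. A small sketch (assumes hexrd is importable; otherwise the half-maximum identity can be checked by hand):

```python
# Half-maximum check of the [A, x0, FWHM, c0, c1] layout used above.
import numpy as np
from hexrd.fitting.peakfunctions import gaussian1d

p = np.array([2.0, 0.0, 1.0, 0.1, 0.0])  # A=2, x0=0, FWHM=1, c0=0.1, c1=0
x = np.array([-0.5, 0.0, 0.5])           # x0 +/- FWHM/2 and the peak centre
print(gaussian1d(p, x))                  # ~[1.1, 2.1, 1.1]: A/2 + c0 at the half-width
```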
bromjiri/Presto | [
"e5790f60d0935bb1182f676db414b0724ba35c1b"
]
| [
"predictor/diff.py"
]
| [
"import settings\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport os\n\n\nclass Stock:\n\n def __init__(self, subject):\n input_file = settings.PREDICTOR_STOCK + \"/\" + subject + \".csv\"\n self.stock_df = pd.read_csv(input_file, sep=',', index_col='Date')\n\n def get_diff(self, from_date, to_date):\n return self.stock_df['Diff'].loc[from_date:to_date]\n\n\nclass Sent:\n\n def __init__(self, subject, source):\n input_file = settings.PREDICTOR_SENTIMENT + \"/\" + source + \"/\" + source + \"-sent-\" + subject + \".csv\"\n self.sent_df = pd.read_csv(input_file, sep=',', index_col='Date')\n\n def get_weekend(self, col_name, stock_dates):\n\n weekend_df = np.round(self.sent_df, 2)\n\n aggreg = 0\n days = 1\n for idx, row in weekend_df.iterrows():\n value = row[col_name]\n date = pd.to_datetime(idx)\n date_plus = date + datetime.timedelta(days=1)\n if str(date_plus.date()) not in stock_dates:\n # print(\"weekend\")\n value += aggreg\n aggreg = value\n days += 1\n else:\n total = value + aggreg\n mean = total / days\n aggreg = 0\n days = 1\n weekend_df.set_value(idx, col_name, mean)\n\n\n # print(date.date(), row[col_name], value)\n\n return np.round(weekend_df[col_name].to_frame().diff(), 2)\n\n def create_diff(self, precision, stock_dates):\n\n sentiment_col = \"Sent\" + precision\n total_col = \"Tot\" + precision\n\n diff_df = pd.DataFrame(index=stock_dates, columns=['Friday', 'Sunday', \"Weekend\"])\n diff_df.index.name = \"Date\"\n\n # friday_df = self.sent_df[sentiment_col].loc[stock_dates]\n # diff_df['Friday'] = np.round(friday_df.diff(), 2)\n\n temp_df = pd.DataFrame(index=stock_dates, columns=['Friday', 'Sunday', 'Weekend' 'Total'])\n temp_df['Total'] = self.sent_df[total_col]\n\n friday_df = self.sent_df[sentiment_col].loc[stock_dates]\n temp_df['Friday'] = np.round(friday_df.diff(), 2)\n diff_df['Friday'] = temp_df.apply(func, args=('Friday',), axis=1)\n\n sunday_df = np.round(self.sent_df[sentiment_col].to_frame().diff(), 2)\n temp_df['Sunday'] = sunday_df[sentiment_col].loc[stock_dates]\n diff_df['Sunday'] = temp_df.apply(func, args=('Sunday',), axis=1)\n\n weekend_df = self.get_weekend(sentiment_col, stock_dates)\n temp_df['Weekend'] = weekend_df[sentiment_col].loc[stock_dates]\n diff_df['Weekend'] = temp_df.apply(func, args=('Weekend',), axis=1)\n\n return diff_df\n\n\ndef func(row, col):\n if row['Total'] >= 10:\n return row[col]\n else:\n return 0\n\ndef run_one(subject, from_date, to_date, precision):\n\n # stock dataframe\n stock = Stock(subject)\n stock_df = stock.get_diff(from_date, to_date)\n # print(stock_df)\n\n # sentiment dataframe\n sent = Sent(subject, source)\n diff_df = sent.create_diff(precision, stock_df.index.values)\n # print(diff_df)\n\n # combine\n diff_df['Stock'] = stock_df\n # print(diff_df)\n\n # save output\n output_file_path = settings.PREDICTOR_DIFF + '/' + source + '/' + subject + '/' + source + '-diff-' + subject + '-' + precision + '.csv'\n dir = os.path.dirname(os.path.realpath(output_file_path))\n os.makedirs(dir, exist_ok=True)\n diff_df.to_csv(output_file_path)\n\n\ndef run_the(subject, from_date, to_date, precision):\n\n stock = Stock('djia')\n stock_df = stock.get_diff(from_date, to_date)\n\n # sentiment dataframe\n sent = Sent(subject, source)\n diff_df = sent.create_diff(precision, stock_df.index.values)\n\n indexes = ['djia', 'nasdaq', 'snp']\n\n for index in indexes:\n\n # stock dataframe\n stock = Stock(index)\n stock_df = stock.get_diff(from_date, to_date)\n\n # combine\n diff_df[index] = stock_df\n\n # save output\n 
output_file_path = settings.PREDICTOR_DIFF + '/' + source + '/' + subject + '/' + source + '-diff-' + subject + '-' + precision + '.csv'\n dir = os.path.dirname(os.path.realpath(output_file_path))\n os.makedirs(dir, exist_ok=True)\n diff_df.to_csv(output_file_path)\n\n\n\nfrom_date = '2016-11-01'\nto_date = '2017-08-31'\nsource = \"stwits\"\nsubjects = [\"tesla\"]\n# subjects = [\"tesla\"]\n\nprecisions = [\"0.6\", \"0.8\", \"1.0\"]\n\nfor precision in precisions:\n for subject in subjects:\n print(subject, precision)\n run_one(subject, from_date, to_date, precision)\n # run_the('the', from_date, to_date, precision)\n"
]
| [
[
"numpy.round",
"pandas.DataFrame",
"pandas.read_csv",
"pandas.to_datetime"
]
]
|
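The core of `create_diff` in the row above is a row-wise gate: a day's sentiment diff counts only when at least 10 posts were scored (the `Tot*` column), and is zeroed otherwise. A minimal sketch with made-up data (column names mirror the snippet):

```python
# Row-wise gating of sentiment diffs by post volume, as in the row above.
import pandas as pd

def func(row, col):
    return row[col] if row['Total'] >= 10 else 0

temp_df = pd.DataFrame(
    {'Total': [25, 4], 'Friday': [0.31, -0.12]},
    index=['2017-01-02', '2017-01-03'],
)
print(temp_df.apply(func, args=('Friday',), axis=1))
# 2017-01-02    0.31
# 2017-01-03    0.00   <- too few posts, diff suppressed
```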
mpoziomska/MNE_poprawione | [
"f3b08753dfab3619a65250f6d2aff456a0e595c1"
]
| [
"mne/source_estimate.py"
]
| [
"# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hämäläinen <[email protected]>\n# Martin Luessi <[email protected]>\n# Mads Jensen <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport contextlib\nimport copy\nimport os.path as op\nimport numpy as np\nfrom scipy import linalg, sparse\nfrom scipy.sparse import coo_matrix, block_diag as sparse_block_diag\n\nfrom .cov import Covariance\nfrom .evoked import _get_peak\nfrom .filter import resample\nfrom .fixes import einsum\nfrom .surface import read_surface, _get_ico_surface, mesh_edges\nfrom .source_space import (_ensure_src, _get_morph_src_reordering,\n _ensure_src_subject, SourceSpaces)\nfrom .utils import (get_subjects_dir, _check_subject, logger, verbose,\n _time_mask, warn as warn_, copy_function_doc_to_method_doc,\n fill_doc, _check_option, _validate_type, _check_src_normal,\n _check_stc_units)\nfrom .viz import (plot_source_estimates, plot_vector_source_estimates,\n plot_volume_source_estimates)\nfrom .io.base import ToDataFrameMixin, TimeMixin\nfrom .io.meas_info import Info\nfrom .externals.h5io import read_hdf5, write_hdf5\n\n\ndef _read_stc(filename):\n \"\"\"Aux Function.\"\"\"\n with open(filename, 'rb') as fid:\n buf = fid.read()\n\n stc = dict()\n offset = 0\n num_bytes = 4\n\n # read tmin in ms\n stc['tmin'] = float(np.frombuffer(buf, dtype=\">f4\", count=1,\n offset=offset))\n stc['tmin'] /= 1000.0\n offset += num_bytes\n\n # read sampling rate in ms\n stc['tstep'] = float(np.frombuffer(buf, dtype=\">f4\", count=1,\n offset=offset))\n stc['tstep'] /= 1000.0\n offset += num_bytes\n\n # read number of vertices/sources\n vertices_n = int(np.frombuffer(buf, dtype=\">u4\", count=1, offset=offset))\n offset += num_bytes\n\n # read the source vector\n stc['vertices'] = np.frombuffer(buf, dtype=\">u4\", count=vertices_n,\n offset=offset)\n offset += num_bytes * vertices_n\n\n # read the number of timepts\n data_n = int(np.frombuffer(buf, dtype=\">u4\", count=1, offset=offset))\n offset += num_bytes\n\n if (vertices_n and # vertices_n can be 0 (empty stc)\n ((len(buf) // 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):\n raise ValueError('incorrect stc file size')\n\n # read the data matrix\n stc['data'] = np.frombuffer(buf, dtype=\">f4\", count=vertices_n * data_n,\n offset=offset)\n stc['data'] = stc['data'].reshape([data_n, vertices_n]).T\n\n return stc\n\n\ndef _write_stc(filename, tmin, tstep, vertices, data):\n \"\"\"Write an STC file.\n\n Parameters\n ----------\n filename : string\n The name of the STC file.\n tmin : float\n The first time point of the data in seconds.\n tstep : float\n Time between frames in seconds.\n vertices : array of integers\n Vertex indices (0 based).\n data : 2D array\n The data matrix (nvert * ntime).\n \"\"\"\n fid = open(filename, 'wb')\n\n # write start time in ms\n fid.write(np.array(1000 * tmin, dtype='>f4').tostring())\n # write sampling rate in ms\n fid.write(np.array(1000 * tstep, dtype='>f4').tostring())\n # write number of vertices\n fid.write(np.array(vertices.shape[0], dtype='>u4').tostring())\n # write the vertex indices\n fid.write(np.array(vertices, dtype='>u4').tostring())\n\n # write the number of timepts\n fid.write(np.array(data.shape[1], dtype='>u4').tostring())\n #\n # write the data\n #\n fid.write(np.array(data.T, dtype='>f4').tostring())\n\n # close the file\n fid.close()\n\n\ndef _read_3(fid):\n \"\"\"Read 3 byte integer from file.\"\"\"\n data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)\n\n out = np.left_shift(data[0], 16) + 
np.left_shift(data[1], 8) + data[2]\n\n return out\n\n\ndef _read_w(filename):\n \"\"\"Read a w file.\n\n w files contain activations or source reconstructions for a single time\n point.\n\n Parameters\n ----------\n filename : string\n The name of the w file.\n\n Returns\n -------\n data: dict\n The w structure. It has the following keys:\n vertices vertex indices (0 based)\n data The data matrix (nvert long)\n \"\"\"\n with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug\n # skip first 2 bytes\n fid.read(2)\n\n # read number of vertices/sources (3 byte integer)\n vertices_n = int(_read_3(fid))\n\n vertices = np.zeros((vertices_n), dtype=np.int32)\n data = np.zeros((vertices_n), dtype=np.float32)\n\n # read the vertices and data\n for i in range(vertices_n):\n vertices[i] = _read_3(fid)\n data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]\n\n w = dict()\n w['vertices'] = vertices\n w['data'] = data\n\n return w\n\n\ndef _write_3(fid, val):\n \"\"\"Write 3 byte integer to file.\"\"\"\n f_bytes = np.zeros((3), dtype=np.uint8)\n f_bytes[0] = (val >> 16) & 255\n f_bytes[1] = (val >> 8) & 255\n f_bytes[2] = val & 255\n fid.write(f_bytes.tostring())\n\n\ndef _write_w(filename, vertices, data):\n \"\"\"Write a w file.\n\n w files contain activations or source reconstructions for a single time\n point.\n\n Parameters\n ----------\n filename: string\n The name of the w file.\n vertices: array of int\n Vertex indices (0 based).\n data: 1D array\n The data array (nvert).\n \"\"\"\n assert (len(vertices) == len(data))\n\n fid = open(filename, 'wb')\n\n # write 2 zero bytes\n fid.write(np.zeros((2), dtype=np.uint8).tostring())\n\n # write number of vertices/sources (3 byte integer)\n vertices_n = len(vertices)\n _write_3(fid, vertices_n)\n\n # write the vertices and data\n for i in range(vertices_n):\n _write_3(fid, vertices[i])\n # XXX: without float() endianness is wrong, not sure why\n fid.write(np.array(float(data[i]), dtype='>f4').tostring())\n\n # close the file\n fid.close()\n\n\ndef read_source_estimate(fname, subject=None):\n \"\"\"Read a source estimate object.\n\n Parameters\n ----------\n fname : str\n Path to (a) source-estimate file(s).\n subject : str | None\n Name of the subject the source estimate(s) is (are) from.\n It is good practice to set this attribute to avoid combining\n incompatible labels and SourceEstimates (e.g., ones from other\n subjects). Note that due to file specification limitations, the\n subject name isn't saved to or loaded from files written to disk.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate | VolSourceEstimate | MixedSourceEstimate\n The source estimate object loaded from file.\n\n Notes\n -----\n - for volume source estimates, ``fname`` should provide the path to a\n single file named '*-vl.stc` or '*-vol.stc'\n - for surface source estimates, ``fname`` should either provide the\n path to the file corresponding to a single hemisphere ('*-lh.stc',\n '*-rh.stc') or only specify the asterisk part in these patterns. 
In any\n      case, the function expects files for both hemispheres with names\n      following this pattern.\n    - for vector surface source estimates, only HDF5 files are supported.\n    - for mixed source estimates, only HDF5 files are supported.\n    - for single time point .w files, ``fname`` should follow the same\n      pattern as for surface estimates, except that files are named\n      '*-lh.w' and '*-rh.w'.\n    \"\"\"  # noqa: E501\n    fname_arg = fname\n    _validate_type(fname, 'path-like', 'fname')\n    fname = str(fname)\n\n    # make sure corresponding file(s) can be found\n    ftype = None\n    if op.exists(fname):\n        if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \\\n                fname.endswith('-vl.w') or fname.endswith('-vol.w'):\n            ftype = 'volume'\n        elif fname.endswith('.stc'):\n            ftype = 'surface'\n            if fname.endswith(('-lh.stc', '-rh.stc')):\n                fname = fname[:-7]\n            else:\n                err = (\"Invalid .stc filename: %r; needs to end with \"\n                       \"hemisphere tag ('...-lh.stc' or '...-rh.stc')\"\n                       % fname)\n                raise IOError(err)\n        elif fname.endswith('.w'):\n            ftype = 'w'\n            if fname.endswith(('-lh.w', '-rh.w')):\n                fname = fname[:-5]\n            else:\n                err = (\"Invalid .w filename: %r; needs to end with \"\n                       \"hemisphere tag ('...-lh.w' or '...-rh.w')\"\n                       % fname)\n                raise IOError(err)\n        elif fname.endswith('.h5'):\n            ftype = 'h5'\n            fname = fname[:-3]\n        else:\n            raise RuntimeError('Unknown extension for file %s' % fname_arg)\n\n    if ftype != 'volume':\n        stc_exist = [op.exists(f)\n                     for f in [fname + '-rh.stc', fname + '-lh.stc']]\n        w_exist = [op.exists(f)\n                   for f in [fname + '-rh.w', fname + '-lh.w']]\n        if all(stc_exist) and ftype != 'w':\n            ftype = 'surface'\n        elif all(w_exist):\n            ftype = 'w'\n        elif op.exists(fname + '.h5'):\n            ftype = 'h5'\n        elif op.exists(fname + '-stc.h5'):\n            ftype = 'h5'\n            fname += '-stc'\n        elif any(stc_exist) or any(w_exist):\n            raise IOError(\"Hemisphere missing for %r\" % fname_arg)\n        else:\n            raise IOError(\"SourceEstimate File(s) not found for: %r\"\n                          % fname_arg)\n\n    # read the files\n    if ftype == 'volume':  # volume source space\n        if fname.endswith('.stc'):\n            kwargs = _read_stc(fname)\n        elif fname.endswith('.w'):\n            kwargs = _read_w(fname)\n            kwargs['data'] = kwargs['data'][:, np.newaxis]\n            kwargs['tmin'] = 0.0\n            kwargs['tstep'] = 0.0\n        else:\n            raise IOError('Volume source estimate must end with .stc or .w')\n    elif ftype == 'surface':  # stc file with surface source spaces\n        lh = _read_stc(fname + '-lh.stc')\n        rh = _read_stc(fname + '-rh.stc')\n        assert lh['tmin'] == rh['tmin']\n        assert lh['tstep'] == rh['tstep']\n        kwargs = lh.copy()\n        kwargs['data'] = np.r_[lh['data'], rh['data']]\n        kwargs['vertices'] = [lh['vertices'], rh['vertices']]\n    elif ftype == 'w':  # w file with surface source spaces\n        lh = _read_w(fname + '-lh.w')\n        rh = _read_w(fname + '-rh.w')\n        kwargs = lh.copy()\n        kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T\n        kwargs['vertices'] = [lh['vertices'], rh['vertices']]\n        # w files only have a single time point\n        kwargs['tmin'] = 0.0\n        kwargs['tstep'] = 1.0\n        ftype = 'surface'\n    elif ftype == 'h5':\n        kwargs = read_hdf5(fname + '.h5', title='mnepython')\n        ftype = kwargs.pop('src_type', 'surface')\n\n    if ftype != 'volume':\n        # Make sure the vertices are ordered\n        vertices = kwargs['vertices']\n        if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):\n            sidx = [np.argsort(verts) for verts in vertices]\n            vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]\n            data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]\n            kwargs['vertices'] = vertices\n            kwargs['data'] = data\n\n    if 'subject' not 
in kwargs:\n        kwargs['subject'] = subject\n    if subject is not None and subject != kwargs['subject']:\n        raise RuntimeError('provided subject name \"%s\" does not match '\n                           'subject name from the file \"%s\"'\n                           % (subject, kwargs['subject']))\n\n    vector = kwargs['data'].ndim == 3\n    if ftype in ('volume', 'discrete'):\n        klass = VolVectorSourceEstimate if vector else VolSourceEstimate\n    elif ftype == 'mixed':\n        if vector:\n            # XXX we should really support this at some point\n            raise NotImplementedError('Vector mixed source estimates not yet '\n                                      'supported')\n        klass = MixedSourceEstimate\n    else:\n        assert ftype == 'surface'\n        klass = VectorSourceEstimate if vector else SourceEstimate\n    return klass(**kwargs)\n\n\ndef _get_src_type(src, vertices, warn_text=None):\n    src_type = None\n    if src is None:\n        if warn_text is None:\n            warn_(\"src should not be None for a robust guess of stc type.\")\n        else:\n            warn_(warn_text)\n        if isinstance(vertices, list) and len(vertices) == 2:\n            src_type = 'surface'\n        elif isinstance(vertices, np.ndarray) or \\\n                (isinstance(vertices, list) and len(vertices) == 1):\n            src_type = 'volume'\n        elif isinstance(vertices, list) and len(vertices) > 2:\n            src_type = 'mixed'\n    else:\n        src_type = src.kind\n    assert src_type in ('surface', 'volume', 'mixed', 'discrete')\n    return src_type\n\n\ndef _make_stc(data, vertices, src_type=None, tmin=None, tstep=None,\n              subject=None, vector=False, source_nn=None, warn_text=None):\n    \"\"\"Generate a surface, vector-surface, volume or mixed source estimate.\"\"\"\n    def guess_src_type():\n        return _get_src_type(src=None, vertices=vertices, warn_text=warn_text)\n\n    src_type = guess_src_type() if src_type is None else src_type\n\n    if vector and src_type == 'mixed':  # XXX this should be supported someday\n        raise NotImplementedError(\n            'Vector source estimates for mixed source spaces are not supported'\n        )\n\n    if vector and src_type == 'surface' and source_nn is None:\n        raise RuntimeError('No source vectors supplied.')\n\n    # infer Klass from src_type\n    if src_type == 'surface':\n        Klass = VectorSourceEstimate if vector else SourceEstimate\n    elif src_type in ('volume', 'discrete'):\n        Klass = VolVectorSourceEstimate if vector else VolSourceEstimate\n    elif src_type == 'mixed':\n        Klass = MixedSourceEstimate\n    else:\n        raise ValueError('vertices has to be either a list with one or more '\n                         'arrays or an array')\n\n    # massage the data\n    if src_type == 'surface' and vector:\n        n_vertices = len(vertices[0]) + len(vertices[1])\n        data = np.matmul(\n            np.transpose(source_nn.reshape(n_vertices, 3, 3), axes=[0, 2, 1]),\n            data.reshape(n_vertices, 3, -1)\n        )\n    elif src_type in ('volume', 'discrete') and vector:\n        data = data.reshape((-1, 3, data.shape[-1]))\n    else:\n        pass  # noqa\n\n    return Klass(\n        data=data, vertices=vertices, tmin=tmin, tstep=tstep, subject=subject\n    )\n\n\ndef _verify_source_estimate_compat(a, b):\n    \"\"\"Make sure two SourceEstimates are compatible for arith. operations.\"\"\"\n    compat = False\n    if type(a) != type(b):\n        raise ValueError('Cannot combine %s and %s.' % (type(a), type(b)))\n    if len(a.vertices) == len(b.vertices):\n        if all(np.array_equal(av, vv)\n               for av, vv in zip(a.vertices, b.vertices)):\n            compat = True\n    if not compat:\n        raise ValueError('Cannot combine source estimates that do not have '\n                         'the same vertices. 
Consider using stc.expand().')\n if a.subject != b.subject:\n raise ValueError('source estimates do not have the same subject '\n 'names, %r and %r' % (a.subject, b.subject))\n\n\nclass _BaseSourceEstimate(ToDataFrameMixin, TimeMixin):\n \"\"\"Base class for all source estimates.\n\n Parameters\n ----------\n data : array, shape (n_dipoles, n_times) | tuple, shape (2,)\n The data in source space. The data can either be a single array or\n a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n \"sens_data\" shape (n_sensors, n_times). In this case, the source\n space data corresponds to ``np.dot(kernel, sens_data)``.\n vertices : array | list of array\n Vertex numbers corresponding to the data.\n tmin : float\n Time point of the first sample in data.\n tstep : float\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array, shape (n_times,)\n The time vector.\n vertices : array | list of array of shape (n_dipoles,)\n The indices of the dipoles in the different source spaces. Can\n be an array if there is only one source space (e.g., for volumes).\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n \"\"\"\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n assert hasattr(self, '_data_ndim'), self.__class__.__name__\n assert hasattr(self, '_src_type'), self.__class__.__name__\n kernel, sens_data = None, None\n if isinstance(data, tuple):\n if len(data) != 2:\n raise ValueError('If data is a tuple it has to be length 2')\n kernel, sens_data = data\n data = None\n if kernel.shape[1] != sens_data.shape[0]:\n raise ValueError('kernel and sens_data have invalid '\n 'dimensions')\n if sens_data.ndim != 2:\n raise ValueError('The sensor data must have 2 dimensions, got '\n '%s' % (sens_data.ndim,))\n\n if isinstance(vertices, list):\n vertices = [np.asarray(v, int) for v in vertices]\n if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):\n raise ValueError('Vertices must be ordered in increasing '\n 'order.')\n\n n_src = sum([len(v) for v in vertices])\n\n if len(vertices) == 1:\n vertices = vertices[0]\n elif isinstance(vertices, np.ndarray):\n n_src = len(vertices)\n else:\n raise ValueError('Vertices must be a list or numpy array')\n\n # safeguard the user against doing something silly\n if data is not None:\n if data.shape[0] != n_src:\n raise ValueError('Number of vertices (%i) and stc.shape[0] '\n '(%i) must match' % (n_src, data.shape[0]))\n if data.ndim == self._data_ndim - 1: # allow upbroadcasting\n data = data[..., np.newaxis]\n if data.ndim != self._data_ndim:\n raise ValueError('Data (shape %s) must have %s dimensions for '\n '%s' % (data.shape, self._data_ndim,\n self.__class__.__name__))\n\n self._data = data\n self._tmin = tmin\n self._tstep = tstep\n self.vertices = vertices\n self.verbose = verbose\n self._kernel = kernel\n self._sens_data = sens_data\n self._kernel_removed = False\n self._times = None\n self._update_times()\n self.subject = _check_subject(None, subject, False)\n\n def __repr__(self): # noqa: D105\n s = \"%d vertices\" % (sum(len(v) for v in self._vertices_list),)\n if self.subject is not None:\n s += \", subject : %s\" % self.subject\n s += \", 
tmin : %s (ms)\" % (1e3 * self.tmin)\n        s += \", tmax : %s (ms)\" % (1e3 * self.times[-1])\n        s += \", tstep : %s (ms)\" % (1e3 * self.tstep)\n        s += \", data shape : %s\" % (self.shape,)\n        return \"<%s | %s>\" % (type(self).__name__, s)\n\n    @property\n    def _vertices_list(self):\n        return self.vertices\n\n    @verbose\n    def save(self, fname, ftype='h5', verbose=None):\n        \"\"\"Save the full source estimate to an HDF5 file.\n\n        Parameters\n        ----------\n        fname : string\n            The file name to write the source estimate to, should end in\n            '-stc.h5'.\n        ftype : string\n            File format to use. Currently, the only allowed value is \"h5\".\n        %(verbose_meth)s\n        \"\"\"\n        _validate_type(fname, 'path-like', 'fname')\n        fname = str(fname)\n        if ftype != 'h5':\n            raise ValueError('%s objects can only be written as HDF5 files.'\n                             % (self.__class__.__name__,))\n        if not fname.endswith('.h5'):\n            fname += '-stc.h5'\n        write_hdf5(fname,\n                   dict(vertices=self.vertices, data=self.data, tmin=self.tmin,\n                        tstep=self.tstep, subject=self.subject,\n                        src_type=self._src_type),\n                   title='mnepython', overwrite=True)\n\n    @property\n    def sfreq(self):\n        \"\"\"Sample rate of the data.\"\"\"\n        return 1. / self.tstep\n\n    def _remove_kernel_sens_data_(self):\n        \"\"\"Remove kernel and sensor space data and compute self._data.\"\"\"\n        if self._kernel is not None or self._sens_data is not None:\n            self._kernel_removed = True\n            self._data = np.dot(self._kernel, self._sens_data)\n            self._kernel = None\n            self._sens_data = None\n\n    @fill_doc\n    def crop(self, tmin=None, tmax=None, include_tmax=True):\n        \"\"\"Restrict SourceEstimate to a time interval.\n\n        Parameters\n        ----------\n        tmin : float | None\n            The first time point in seconds. If None, the first available\n            time point is used.\n        tmax : float | None\n            The last time point in seconds. If None, the last available\n            time point is used.\n        %(include_tmax)s\n        \"\"\"\n        mask = _time_mask(self.times, tmin, tmax, sfreq=self.sfreq,\n                          include_tmax=include_tmax)\n        self.tmin = self.times[np.where(mask)[0][0]]\n        if self._kernel is not None and self._sens_data is not None:\n            self._sens_data = self._sens_data[..., mask]\n        else:\n            self.data = self.data[..., mask]\n\n        return self  # return self for chaining methods\n\n    @verbose\n    def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,\n                 verbose=None):\n        \"\"\"Resample data.\n\n        Parameters\n        ----------\n        sfreq : float\n            New sample rate to use.\n        npad : int | str\n            Amount to pad the start and end of the data.\n            Can also be \"auto\" to use a padding that will result in\n            a power-of-two size (can be much faster).\n        window : string or tuple\n            Window to use in resampling. See scipy.signal.resample.\n        %(n_jobs)s\n        %(verbose_meth)s\n\n        Notes\n        -----\n        For some data, it may be more accurate to use npad=0 to reduce\n        artifacts. 
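For example, ``stc.resample(100., npad=0)`` resamples the data in\n        place to 100 Hz with no padding. 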
This is dataset dependent -- check your data!\n\n Note that the sample rate of the original data is inferred from tstep.\n \"\"\"\n # resampling in sensor instead of source space gives a somewhat\n # different result, so we don't allow it\n self._remove_kernel_sens_data_()\n\n o_sfreq = 1.0 / self.tstep\n data = self.data\n if data.dtype == np.float32:\n data = data.astype(np.float64)\n self.data = resample(data, sfreq, o_sfreq, npad, n_jobs=n_jobs)\n\n # adjust indirectly affected variables\n self.tstep = 1.0 / sfreq\n return self\n\n @property\n def data(self):\n \"\"\"Numpy array of source estimate data.\"\"\"\n if self._data is None:\n # compute the solution the first time the data is accessed and\n # remove the kernel and sensor data\n self._remove_kernel_sens_data_()\n return self._data\n\n @data.setter\n def data(self, value):\n value = np.asarray(value)\n if self._data is not None and value.ndim != self._data.ndim:\n raise ValueError('Data array should have %d dimensions.' %\n self._data.ndim)\n\n # vertices can be a single number, so cast to ndarray\n if isinstance(self.vertices, list):\n n_verts = sum([len(v) for v in self.vertices])\n elif isinstance(self.vertices, np.ndarray):\n n_verts = len(self.vertices)\n else:\n raise ValueError('Vertices must be a list or numpy array')\n\n if value.shape[0] != n_verts:\n raise ValueError('The first dimension of the data array must '\n 'match the number of vertices (%d != %d)' %\n (value.shape[0], n_verts))\n\n self._data = value\n self._update_times()\n\n @property\n def shape(self):\n \"\"\"Shape of the data.\"\"\"\n if self._data is not None:\n return self._data.shape\n return (self._kernel.shape[0], self._sens_data.shape[1])\n\n @property\n def tmin(self):\n \"\"\"The first timestamp.\"\"\"\n return self._tmin\n\n @tmin.setter\n def tmin(self, value):\n self._tmin = float(value)\n self._update_times()\n\n @property\n def tstep(self):\n \"\"\"The change in time between two consecutive samples (1 / sfreq).\"\"\"\n return self._tstep\n\n @tstep.setter\n def tstep(self, value):\n if value <= 0:\n raise ValueError('.tstep must be greater than 0.')\n self._tstep = float(value)\n self._update_times()\n\n @property\n def times(self):\n \"\"\"A timestamp for each sample.\"\"\"\n return self._times\n\n @times.setter\n def times(self, value):\n raise ValueError('You cannot write to the .times attribute directly. 
'\n 'This property automatically updates whenever '\n '.tmin, .tstep or .data changes.')\n\n def _update_times(self):\n \"\"\"Update the times attribute after changing tmin, tmax, or tstep.\"\"\"\n self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))\n self._times.flags.writeable = False\n\n def __add__(self, a):\n \"\"\"Add source estimates.\"\"\"\n stc = self.copy()\n stc += a\n return stc\n\n def __iadd__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data += a.data\n else:\n self.data += a\n return self\n\n def mean(self):\n \"\"\"Make a summary stc file with mean over time points.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc.\n \"\"\"\n out = self.sum()\n out /= len(self.times)\n return out\n\n def sum(self):\n \"\"\"Make a summary stc file with sum over time points.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc.\n \"\"\"\n data = self.data\n tmax = self.tmin + self.tstep * data.shape[-1]\n tmin = (self.tmin + tmax) / 2.\n tstep = tmax - self.tmin\n sum_stc = self.__class__(self.data.sum(axis=-1, keepdims=True),\n vertices=self.vertices, tmin=tmin,\n tstep=tstep, subject=self.subject)\n return sum_stc\n\n def __sub__(self, a):\n \"\"\"Subtract source estimates.\"\"\"\n stc = self.copy()\n stc -= a\n return stc\n\n def __isub__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data -= a.data\n else:\n self.data -= a\n return self\n\n def __truediv__(self, a): # noqa: D105\n return self.__div__(a)\n\n def __div__(self, a): # noqa: D105\n \"\"\"Divide source estimates.\"\"\"\n stc = self.copy()\n stc /= a\n return stc\n\n def __itruediv__(self, a): # noqa: D105\n return self.__idiv__(a)\n\n def __idiv__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data /= a.data\n else:\n self.data /= a\n return self\n\n def __mul__(self, a):\n \"\"\"Multiply source estimates.\"\"\"\n stc = self.copy()\n stc *= a\n return stc\n\n def __imul__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n if isinstance(a, _BaseSourceEstimate):\n _verify_source_estimate_compat(self, a)\n self.data *= a.data\n else:\n self.data *= a\n return self\n\n def __pow__(self, a): # noqa: D105\n stc = self.copy()\n stc **= a\n return stc\n\n def __ipow__(self, a): # noqa: D105\n self._remove_kernel_sens_data_()\n self.data **= a\n return self\n\n def __radd__(self, a): # noqa: D105\n return self + a\n\n def __rsub__(self, a): # noqa: D105\n return self - a\n\n def __rmul__(self, a): # noqa: D105\n return self * a\n\n def __rdiv__(self, a): # noqa: D105\n return self / a\n\n def __neg__(self): # noqa: D105\n \"\"\"Negate the source estimate.\"\"\"\n stc = self.copy()\n stc._remove_kernel_sens_data_()\n stc.data *= -1\n return stc\n\n def __pos__(self): # noqa: D105\n return self\n\n def __abs__(self):\n \"\"\"Compute the absolute value of the data.\n\n Returns\n -------\n stc : instance of _BaseSourceEstimate\n A version of the source estimate, where the data attribute is set\n to abs(self.data).\n \"\"\"\n stc = self.copy()\n stc._remove_kernel_sens_data_()\n stc._data = abs(stc._data)\n return stc\n\n def sqrt(self):\n \"\"\"Take the square root.\n\n Returns\n -------\n stc : instance of SourceEstimate\n A copy of the 
SourceEstimate with sqrt(data).\n        \"\"\"\n        return self ** (0.5)\n\n    def copy(self):\n        \"\"\"Return copy of source estimate instance.\"\"\"\n        return copy.deepcopy(self)\n\n    def bin(self, width, tstart=None, tstop=None, func=np.mean):\n        \"\"\"Return a source estimate object with data summarized over time bins.\n\n        Time bins of ``width`` seconds. This method is intended for\n        visualization only. No filter is applied to the data before binning,\n        making the method inappropriate as a tool for downsampling data.\n\n        Parameters\n        ----------\n        width : scalar\n            Width of the individual bins in seconds.\n        tstart : scalar | None\n            Time point where the first bin starts. The default is the first\n            time point of the stc.\n        tstop : scalar | None\n            Last possible time point contained in a bin (if the last bin would\n            be shorter than width it is dropped). The default is the last time\n            point of the stc.\n        func : callable\n            Function that is applied to summarize the data. Needs to accept a\n            numpy.array as first input and an ``axis`` keyword argument.\n\n        Returns\n        -------\n        stc : SourceEstimate | VectorSourceEstimate\n            The binned source estimate.\n        \"\"\"\n        if tstart is None:\n            tstart = self.tmin\n        if tstop is None:\n            tstop = self.times[-1]\n\n        times = np.arange(tstart, tstop + self.tstep, width)\n        nt = len(times) - 1\n        data = np.empty(self.shape[:-1] + (nt,), dtype=self.data.dtype)\n        for i in range(nt):\n            idx = (self.times >= times[i]) & (self.times < times[i + 1])\n            data[..., i] = func(self.data[..., idx], axis=-1)\n\n        tmin = times[0] + width / 2.\n        stc = self.copy()\n        stc._data = data\n        stc.tmin = tmin\n        stc.tstep = width\n        return stc\n\n    def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):\n        \"\"\"Get data after a linear (time) transform has been applied.\n\n        The transform is applied to each source time course independently.\n\n        Parameters\n        ----------\n        func : callable\n            The transform to be applied, including parameters (see, e.g.,\n            :func:`functools.partial`). The first parameter of the function is\n            the input data. The first return value is the transformed data,\n            remaining outputs are ignored. The first dimension of the\n            transformed data has to be the same as the first dimension of the\n            input data.\n        idx : array | None\n            Indices of source time courses for which to compute transform.\n            If None, all time courses are used.\n        tmin_idx : int | None\n            Index of first time point to include. If None, the index of the\n            first time point is used.\n        tmax_idx : int | None\n            Index of the first time point not to include. 
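(Together with ``tmin_idx``, this selects\n            ``self.data[idx, ..., tmin_idx:tmax_idx]``.) 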
If None, time points\n up to (and including) the last time point are included.\n\n Returns\n -------\n data_t : ndarray\n The transformed data.\n\n Notes\n -----\n Applying transforms can be significantly faster if the\n SourceEstimate object was created using \"(kernel, sens_data)\", for\n the \"data\" parameter as the transform is applied in sensor space.\n Inverse methods, e.g., \"apply_inverse_epochs\", or \"apply_lcmv_epochs\"\n do this automatically (if possible).\n \"\"\"\n if idx is None:\n # use all time courses by default\n idx = slice(None, None)\n\n if self._kernel is None and self._sens_data is None:\n if self._kernel_removed:\n warn_('Performance can be improved by not accessing the data '\n 'attribute before calling this method.')\n\n # transform source space data directly\n data_t = func(self.data[idx, ..., tmin_idx:tmax_idx])\n\n if isinstance(data_t, tuple):\n # use only first return value\n data_t = data_t[0]\n else:\n # apply transform in sensor space\n sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])\n\n if isinstance(sens_data_t, tuple):\n # use only first return value\n sens_data_t = sens_data_t[0]\n\n # apply inverse\n data_shape = sens_data_t.shape\n if len(data_shape) > 2:\n # flatten the last dimensions\n sens_data_t = sens_data_t.reshape(data_shape[0],\n np.prod(data_shape[1:]))\n\n data_t = np.dot(self._kernel[idx, :], sens_data_t)\n\n # restore original shape if necessary\n if len(data_shape) > 2:\n data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])\n\n return data_t\n\n def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):\n \"\"\"Apply linear transform.\n\n The transform is applied to each source time course independently.\n\n Parameters\n ----------\n func : callable\n The transform to be applied, including parameters (see, e.g.,\n :func:`functools.partial`). The first parameter of the function is\n the input data. The first two dimensions of the transformed data\n should be (i) vertices and (ii) time. See Notes for details.\n idx : array | None\n Indices of source time courses for which to compute transform.\n If None, all time courses are used.\n tmin : float | int | None\n First time point to include (ms). If None, self.tmin is used.\n tmax : float | int | None\n Last time point to include (ms). If None, self.tmax is used.\n copy : bool\n If True, return a new instance of SourceEstimate instead of\n modifying the input inplace.\n\n Returns\n -------\n stcs : SourceEstimate | VectorSourceEstimate | list\n The transformed stc or, in the case of transforms which yield\n N-dimensional output (where N > 2), a list of stcs. For a list,\n copy must be True.\n\n Notes\n -----\n Transforms which yield 3D\n output (e.g. time-frequency transforms) are valid, so long as the\n first two dimensions are vertices and time. In this case, the\n copy parameter must be True and a list of\n SourceEstimates, rather than a single instance of SourceEstimate,\n will be returned, one for each index of the 3rd dimension of the\n transformed data. In the case of transforms yielding 2D output\n (e.g. 
filtering), the user has the option of modifying the input\n inplace (copy = False) or returning a new instance of\n SourceEstimate (copy = True) with the transformed data.\n\n Applying transforms can be significantly faster if the\n SourceEstimate object was created using \"(kernel, sens_data)\", for\n the \"data\" parameter as the transform is applied in sensor space.\n Inverse methods, e.g., \"apply_inverse_epochs\", or \"apply_lcmv_epochs\"\n do this automatically (if possible).\n \"\"\"\n # min and max data indices to include\n times = 1000. * self.times\n t_idx = np.where(_time_mask(times, tmin, tmax, sfreq=self.sfreq))[0]\n if tmin is None:\n tmin_idx = None\n else:\n tmin_idx = t_idx[0]\n\n if tmax is None:\n tmax_idx = None\n else:\n # +1, because upper boundary needs to include the last sample\n tmax_idx = t_idx[-1] + 1\n\n data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,\n tmax_idx=tmax_idx)\n\n # account for change in n_vertices\n if idx is not None:\n idx_lh = idx[idx < len(self.lh_vertno)]\n idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)\n verts_lh = self.lh_vertno[idx_lh]\n verts_rh = self.rh_vertno[idx_rh]\n else:\n verts_lh = self.lh_vertno\n verts_rh = self.rh_vertno\n verts = [verts_lh, verts_rh]\n\n tmin_idx = 0 if tmin_idx is None else tmin_idx\n tmin = self.times[tmin_idx]\n\n if data_t.ndim > 2:\n # return list of stcs if transformed data has dimensionality > 2\n if copy:\n stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,\n self.tstep, self.subject)\n for a in range(data_t.shape[-1])]\n else:\n raise ValueError('copy must be True if transformed data has '\n 'more than 2 dimensions')\n else:\n # return new or overwritten stc\n stcs = self if not copy else self.copy()\n stcs.vertices = verts\n stcs.data = data_t\n stcs.tmin = tmin\n\n return stcs\n\n\ndef _center_of_mass(vertices, values, hemi, surf, subject, subjects_dir,\n restrict_vertices):\n \"\"\"Find the center of mass on a surface.\"\"\"\n if (values == 0).all() or (values < 0).any():\n raise ValueError('All values must be non-negative and at least one '\n 'must be non-zero, cannot compute COM')\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n surf = read_surface(op.join(subjects_dir, subject, 'surf',\n hemi + '.' + surf))\n if restrict_vertices is True:\n restrict_vertices = vertices\n elif restrict_vertices is False:\n restrict_vertices = np.arange(surf[0].shape[0])\n elif isinstance(restrict_vertices, SourceSpaces):\n idx = 1 if restrict_vertices.kind == 'surface' and hemi == 'rh' else 0\n restrict_vertices = restrict_vertices[idx]['vertno']\n else:\n restrict_vertices = np.array(restrict_vertices, int)\n pos = surf[0][vertices, :].T\n c_o_m = np.sum(pos * values, axis=1) / np.sum(values)\n vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -\n c_o_m) ** 2, axis=1)))\n vertex = restrict_vertices[vertex]\n return vertex\n\n\n@fill_doc\nclass _BaseSurfaceSourceEstimate(_BaseSourceEstimate):\n \"\"\"Abstract base class for surface source estimates.\n\n Parameters\n ----------\n data : array\n The data in source space.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. 
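For example,\n        ``subject='sample'`` would be appropriate for data from the MNE\n        sample dataset. 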
While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n data : array\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n \"\"\"\n\n _data_ndim = 2\n _src_type = 'surface'\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n\n if not (isinstance(vertices, list) and len(vertices) == 2):\n raise ValueError('Vertices must be a list containing two '\n 'numpy arrays, got type %s (%s)'\n % (type(vertices), vertices))\n\n _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject,\n verbose=verbose)\n\n @property\n def lh_data(self):\n \"\"\"Left hemisphere data.\"\"\"\n return self.data[:len(self.lh_vertno)]\n\n @property\n def rh_data(self):\n \"\"\"Right hemisphere data.\"\"\"\n return self.data[len(self.lh_vertno):]\n\n @property\n def lh_vertno(self):\n \"\"\"Left hemisphere vertno.\"\"\"\n return self.vertices[0]\n\n @property\n def rh_vertno(self):\n \"\"\"Right hemisphere vertno.\"\"\"\n return self.vertices[1]\n\n def _hemilabel_stc(self, label):\n if label.hemi == 'lh':\n stc_vertices = self.vertices[0]\n else:\n stc_vertices = self.vertices[1]\n\n # find index of the Label's vertices\n idx = np.nonzero(np.in1d(stc_vertices, label.vertices))[0]\n\n # find output vertices\n vertices = stc_vertices[idx]\n\n # find data\n if label.hemi == 'rh':\n values = self.data[idx + len(self.vertices[0])]\n else:\n values = self.data[idx]\n\n return vertices, values\n\n def in_label(self, label):\n \"\"\"Get a source estimate object restricted to a label.\n\n SourceEstimate contains the time course of\n activation of all sources inside the label.\n\n Parameters\n ----------\n label : Label | BiHemiLabel\n The label (as created for example by mne.read_label). 
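For instance, a label\n            read with ``mne.read_label('Vis-lh.label')`` (hypothetical file\n            name) restricts the estimate to the corresponding left-hemisphere\n            sources. 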
If the label\n does not match any sources in the SourceEstimate, a ValueError is\n raised.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The source estimate restricted to the given label.\n \"\"\"\n # make sure label and stc are compatible\n if label.subject is not None and self.subject is not None \\\n and label.subject != self.subject:\n raise RuntimeError('label and stc must have same subject names, '\n 'currently \"%s\" and \"%s\"' % (label.subject,\n self.subject))\n\n if label.hemi == 'both':\n lh_vert, lh_val = self._hemilabel_stc(label.lh)\n rh_vert, rh_val = self._hemilabel_stc(label.rh)\n vertices = [lh_vert, rh_vert]\n values = np.vstack((lh_val, rh_val))\n elif label.hemi == 'lh':\n lh_vert, values = self._hemilabel_stc(label)\n vertices = [lh_vert, np.array([], int)]\n elif label.hemi == 'rh':\n rh_vert, values = self._hemilabel_stc(label)\n vertices = [np.array([], int), rh_vert]\n else:\n raise TypeError(\"Expected Label or BiHemiLabel; got %r\" % label)\n\n if sum([len(v) for v in vertices]) == 0:\n raise ValueError('No vertices match the label in the stc file')\n\n label_stc = self.__class__(values, vertices=vertices, tmin=self.tmin,\n tstep=self.tstep, subject=self.subject)\n return label_stc\n\n def expand(self, vertices):\n \"\"\"Expand SourceEstimate to include more vertices.\n\n This will add rows to stc.data (zero-filled) and modify stc.vertices\n to include all vertices in stc.vertices and the input vertices.\n\n Parameters\n ----------\n vertices : list of array\n New vertices to add. Can also contain old values.\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The modified stc (note: method operates inplace).\n \"\"\"\n if not isinstance(vertices, list):\n raise TypeError('vertices must be a list')\n if not len(self.vertices) == len(vertices):\n raise ValueError('vertices must have the same length as '\n 'stc.vertices')\n\n # can no longer use kernel and sensor data\n self._remove_kernel_sens_data_()\n\n inserters = list()\n offsets = [0]\n for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):\n v_new = np.setdiff1d(v_new, v_old)\n inds = np.searchsorted(v_old, v_new)\n # newer numpy might overwrite inds after np.insert, copy here\n inserters += [inds.copy()]\n offsets += [len(v_old)]\n self.vertices[vi] = np.insert(v_old, inds, v_new)\n inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]\n inds = np.concatenate(inds)\n new_data = np.zeros((len(inds),) + self.data.shape[1:])\n self.data = np.insert(self.data, inds, new_data, axis=0)\n return self\n\n @verbose\n def to_original_src(self, src_orig, subject_orig=None,\n subjects_dir=None, verbose=None):\n \"\"\"Get a source estimate from morphed source to the original subject.\n\n Parameters\n ----------\n src_orig : instance of SourceSpaces\n The original source spaces that were morphed to the current\n subject.\n subject_orig : str | None\n The original subject. For most source spaces this shouldn't need\n to be provided, since it is stored in the source space itself.\n subjects_dir : string, or None\n Path to SUBJECTS_DIR if it is not set in the environment.\n %(verbose_meth)s\n\n Returns\n -------\n stc : SourceEstimate | VectorSourceEstimate\n The transformed source estimate.\n\n See Also\n --------\n morph_source_spaces\n\n Notes\n -----\n .. 
versionadded:: 0.10.0\n \"\"\"\n if self.subject is None:\n raise ValueError('stc.subject must be set')\n src_orig = _ensure_src(src_orig, kind='surface')\n subject_orig = _ensure_src_subject(src_orig, subject_orig)\n data_idx, vertices = _get_morph_src_reordering(\n self.vertices, src_orig, subject_orig, self.subject, subjects_dir)\n return self.__class__(self._data[data_idx], vertices,\n self.tmin, self.tstep, subject_orig)\n\n\n@fill_doc\nclass SourceEstimate(_BaseSurfaceSourceEstimate):\n \"\"\"Container for surface source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | tuple, shape (2,)\n The data in source space. When it is a single array, the\n left hemisphere is stored in data[:len(vertices[0])] and the right\n hemisphere is stored in data[-len(vertices[1]):].\n When data is a tuple, it contains two arrays:\n\n - \"kernel\" shape (n_vertices, n_sensors) and\n - \"sens_data\" shape (n_sensors, n_times).\n\n In this case, the source space data corresponds to\n ``np.dot(kernel, sens_data)``.\n vertices : list of array, shape (2,)\n Vertex numbers corresponding to the data. The first element of the list\n contains vertices of left hemisphere and the second element contains\n vertices of right hemisphere.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of array, shape (2,)\n The indices of the dipoles in the left and right source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n VectorSourceEstimate : A container for vector source estimates.\n VolSourceEstimate : A container for volume source estimates.\n MixedSourceEstimate : A container for mixed surface + volume source\n estimates.\n \"\"\"\n\n @verbose\n def save(self, fname, ftype='stc', verbose=None):\n \"\"\"Save the source estimates to a file.\n\n Parameters\n ----------\n fname : string\n The stem of the file name. The file names used for surface source\n spaces are obtained by adding \"-lh.stc\" and \"-rh.stc\" (or \"-lh.w\"\n and \"-rh.w\") to the stem provided, for the left and the right\n hemisphere, respectively.\n ftype : string\n File format to use. Allowed values are \"stc\" (default), \"w\",\n and \"h5\". 
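For example, with the\n            default ``ftype='stc'``, ``stc.save('mydata')`` (hypothetical\n            stem) writes ``mydata-lh.stc`` and ``mydata-rh.stc``. 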
The \"w\" format only supports a single time point.\n %(verbose_meth)s\n \"\"\"\n _validate_type(fname, 'path-like', 'fname')\n fname = str(fname)\n _check_option('ftype', ftype, ['stc', 'w', 'h5'])\n\n lh_data = self.data[:len(self.lh_vertno)]\n rh_data = self.data[-len(self.rh_vertno):]\n\n if ftype == 'stc':\n logger.info('Writing STC to disk...')\n _write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,\n vertices=self.lh_vertno, data=lh_data)\n _write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,\n vertices=self.rh_vertno, data=rh_data)\n\n elif ftype == 'w':\n if self.shape[1] != 1:\n raise ValueError('w files can only contain a single time '\n 'point')\n logger.info('Writing STC to disk (w format)...')\n _write_w(fname + '-lh.w', vertices=self.lh_vertno,\n data=lh_data[:, 0])\n _write_w(fname + '-rh.w', vertices=self.rh_vertno,\n data=rh_data[:, 0])\n\n elif ftype == 'h5':\n super().save(fname)\n logger.info('[done]')\n\n @copy_function_doc_to_method_doc(plot_source_estimates)\n def plot(self, subject=None, surface='inflated', hemi='lh',\n colormap='auto', time_label='auto', smoothing_steps=10,\n transparent=True, alpha=1.0, time_viewer=False, subjects_dir=None,\n figure=None, views='lat', colorbar=True, clim='auto',\n cortex=\"classic\", size=800, background=\"black\",\n foreground=\"white\", initial_time=None, time_unit='s',\n backend='auto', spacing='oct6', title=None, verbose=None):\n brain = plot_source_estimates(\n self, subject, surface=surface, hemi=hemi, colormap=colormap,\n time_label=time_label, smoothing_steps=smoothing_steps,\n transparent=transparent, alpha=alpha, time_viewer=time_viewer,\n subjects_dir=subjects_dir, figure=figure, views=views,\n colorbar=colorbar, clim=clim, cortex=cortex, size=size,\n background=background, foreground=foreground,\n initial_time=initial_time, time_unit=time_unit, backend=backend,\n spacing=spacing, title=title, verbose=verbose)\n return brain\n\n @verbose\n def extract_label_time_course(self, labels, src, mode='mean_flip',\n allow_empty=False, verbose=None):\n \"\"\"Extract label time courses for lists of labels.\n\n This function will extract one time course for each label. The way the\n time courses are extracted depends on the mode parameter.\n\n Parameters\n ----------\n labels : Label | BiHemiLabel | list of Label or BiHemiLabel\n The labels for which to extract the time courses.\n src : list\n Source spaces for left and right hemisphere.\n mode : str\n Extraction mode, see explanation below.\n allow_empty : bool\n Instead of emitting an error, return all-zero time course for\n labels that do not have any vertices in the source estimate.\n %(verbose_meth)s\n\n Returns\n -------\n label_tc : array, shape=(n_labels, n_times)\n Extracted time course for each label.\n\n See Also\n --------\n extract_label_time_course : extract time courses for multiple STCs\n\n Notes\n -----\n Valid values for mode are:\n\n - 'mean'\n Average within each label.\n - 'mean_flip'\n Average within each label with sign flip depending\n on source orientation.\n - 'pca_flip'\n Apply an SVD to the time courses within each label\n and use the scaled and sign-flipped first right-singular vector\n as the label time course. The scaling is performed such that the\n power of the label time course is the same as the average\n per-vertex time course power within the label. 
The sign of the\n            resulting time course is adjusted by multiplying it with\n            \"sign(dot(u, flip))\" where u is the first left-singular vector,\n            and flip is a sign-flip vector based on the vertex normals. This\n            procedure assures that the phase does not randomly change by 180\n            degrees from one stc to the next.\n        - 'max'\n            Max value within each label.\n        \"\"\"\n        label_tc = extract_label_time_course(\n            self, labels, src, mode=mode, return_generator=False,\n            allow_empty=allow_empty, verbose=verbose)\n\n        return label_tc\n\n    @verbose\n    def estimate_snr(self, info, fwd, cov, verbose=None):\n        r\"\"\"Compute time-varying SNR in the source space.\n\n        This function should only be used with source estimates with units\n        nanoAmperes (i.e., MNE-like solutions, *not* dSPM or sLORETA).\n\n        .. warning:: This function currently only works properly for fixed\n                     orientation.\n\n        Parameters\n        ----------\n        info : instance of Info\n            The measurement info.\n        fwd : instance of Forward\n            The forward solution used to create the source estimate.\n        cov : instance of Covariance\n            The noise covariance used to estimate the resting cortical\n            activations. Should be an evoked covariance, not empty room.\n        %(verbose)s\n\n        Returns\n        -------\n        snr_stc : instance of SourceEstimate\n            The source estimate with the SNR computed.\n\n        Notes\n        -----\n        We define the SNR in decibels for each source location at each\n        time point as:\n\n        .. math::\n\n            {\rm SNR} = 10\log_{10}[\frac{a^2}{N}\sum_k\frac{b_k^2}{s_k^2}]\n\n        where :math:`b_k` is the signal on sensor :math:`k` provided by the\n        forward model for a source with unit amplitude, :math:`a` is the\n        source amplitude, :math:`N` is the number of sensors, and\n        :math:`s_k^2` is the noise variance on sensor :math:`k`.\n\n        References\n        ----------\n        .. [1] Goldenholz, D. M., Ahlfors, S. P., Hämäläinen, M. S., Sharon,\n               D., Ishitobi, M., Vaina, L. M., & Stufflebeam, S. M. (2009).\n               Mapping the Signal-To-Noise-Ratios of Cortical Sources in\n               Magnetoencephalography and Electroencephalography.\n               Human Brain Mapping, 30(4), 1077–1086. doi:10.1002/hbm.20571\n        \"\"\"\n        from .forward import convert_forward_solution, Forward\n        from .minimum_norm.inverse import _prepare_forward\n        _validate_type(fwd, Forward, 'fwd')\n        _validate_type(info, Info, 'info')\n        _validate_type(cov, Covariance, 'cov')\n        _check_stc_units(self)\n        if (self.data >= 0).all():\n            warn_('This STC appears to be from free orientation, currently SNR'\n                  ' function is valid only for fixed orientation')\n\n        fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)\n\n        # G is gain matrix [ch x src], cov is noise covariance [ch x ch]\n        G, _, _, _, _, _, _, cov, _ = _prepare_forward(\n            fwd, info, cov, fixed=True, loose=0, rank=None, pca=False,\n            use_cps=True, exp=None, limit_depth_chs=False, combine_xyz='fro',\n            allow_fixed_depth=False, limit=None)\n        G = G['sol']['data']\n        n_channels = cov['dim']  # number of sensors/channels\n        b_k2 = (G * G).T\n        s_k2 = np.diag(cov['data'])\n        scaling = (1 / n_channels) * np.sum(b_k2 / s_k2, axis=1, keepdims=True)\n        snr_stc = self.copy()\n        snr_stc._data[:] = 10 * np.log10((self.data * self.data) * scaling)\n        return snr_stc\n\n    def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',\n                 vert_as_index=False, time_as_index=False):\n        \"\"\"Get location and latency of peak amplitude.\n\n        Parameters\n        ----------\n        hemi : {'lh', 'rh', None}\n            The hemi to be considered. 
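For example, ``hemi='lh'``\n            restricts the search to the left hemisphere. 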
If None, the entire source space is\n            considered.\n        tmin : float | None\n            The minimum point in time to be considered for peak getting.\n        tmax : float | None\n            The maximum point in time to be considered for peak getting.\n        mode : {'pos', 'neg', 'abs'}\n            How to deal with the sign of the data. If 'pos' only positive\n            values will be considered. If 'neg' only negative values will\n            be considered. If 'abs' absolute values will be considered.\n            Defaults to 'abs'.\n        vert_as_index : bool\n            Whether to return the vertex index instead of its ID.\n            Defaults to False.\n        time_as_index : bool\n            Whether to return the time index instead of the latency.\n            Defaults to False.\n\n        Returns\n        -------\n        pos : int\n            The vertex exhibiting the maximum response, either ID or index.\n        latency : float | int\n            The time point of the maximum response, either latency in seconds\n            or index.\n        \"\"\"\n        data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}[hemi]\n        vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno,\n                  None: np.concatenate(self.vertices)}[hemi]\n\n        vert_idx, time_idx, _ = _get_peak(data, self.times, tmin, tmax, mode)\n\n        return (vert_idx if vert_as_index else vertno[vert_idx],\n                time_idx if time_as_index else self.times[time_idx])\n\n    def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,\n                       subjects_dir=None, surf='sphere'):\n        \"\"\"Compute the center of mass of activity.\n\n        This function computes the spatial center of mass on the surface\n        as well as the temporal center of mass as in [1]_.\n\n        .. note:: All activity must occur in a single hemisphere, otherwise\n                  an error is raised. The \"mass\" of each point in space for\n                  computing the spatial center of mass is computed by summing\n                  across time, and vice-versa for each point in time in\n                  computing the temporal center of mass. This is useful for\n                  quantifying spatio-temporal cluster locations, especially\n                  when combined with :func:`mne.vertex_to_mni`.\n\n        Parameters\n        ----------\n        subject : string | None\n            The subject the stc is defined for.\n        hemi : int, or None\n            Calculate the center of mass for the left (0) or right (1)\n            hemisphere. If None, one of the hemispheres must be all zeroes,\n            and the center of mass will be calculated for the other\n            hemisphere (useful for getting COM for clusters).\n        restrict_vertices : bool | array of int | instance of SourceSpaces\n            If True, returned vertex will be one from stc. Otherwise, it could\n            be any vertex from surf. If an array of int, the returned vertex\n            will come from that array. If instance of SourceSpaces (as of\n            0.13), the returned vertex will be from the given source space.\n            For most accurate estimates, do not restrict vertices.\n        subjects_dir : str, or None\n            Path to the SUBJECTS_DIR. If None, the path is obtained by using\n            the environment variable SUBJECTS_DIR.\n        surf : str\n            The surface to use for Euclidean distance center of mass\n            finding. The default here is \"sphere\", which finds the center\n            of mass on the spherical surface to help avoid potential issues\n            with cortical folding.\n\n        Returns\n        -------\n        vertex : int\n            Vertex of the spatial center of mass for the inferred hemisphere,\n            with each vertex weighted by the sum of the stc across time. 
For a\n boolean stc, then, this would be weighted purely by the duration\n each vertex was active.\n hemi : int\n Hemisphere the vertex was taken from.\n t : float\n Time of the temporal center of mass (weighted by the sum across\n source vertices).\n\n See Also\n --------\n mne.Label.center_of_mass\n mne.vertex_to_mni\n\n References\n ----------\n .. [1] Larson and Lee, \"The cortical dynamics underlying effective\n switching of auditory spatial attention\", NeuroImage 2012.\n \"\"\"\n if not isinstance(surf, str):\n raise TypeError('surf must be a string, got %s' % (type(surf),))\n subject = _check_subject(self.subject, subject)\n if np.any(self.data < 0):\n raise ValueError('Cannot compute COM with negative values')\n values = np.sum(self.data, axis=1) # sum across time\n vert_inds = [np.arange(len(self.vertices[0])),\n np.arange(len(self.vertices[1])) + len(self.vertices[0])]\n if hemi is None:\n hemi = np.where(np.array([np.sum(values[vi])\n for vi in vert_inds]))[0]\n if not len(hemi) == 1:\n raise ValueError('Could not infer hemisphere')\n hemi = hemi[0]\n _check_option('hemi', hemi, [0, 1])\n vertices = self.vertices[hemi]\n values = values[vert_inds[hemi]] # left or right\n del vert_inds\n vertex = _center_of_mass(\n vertices, values, hemi=['lh', 'rh'][hemi], surf=surf,\n subject=subject, subjects_dir=subjects_dir,\n restrict_vertices=restrict_vertices)\n # do time center of mass by using the values across space\n masses = np.sum(self.data, axis=0).astype(float)\n t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)\n t = self.tmin + self.tstep * t_ind\n return vertex, hemi, t\n\n\nclass _BaseVectorSourceEstimate(_BaseSourceEstimate):\n _data_ndim = 3\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n assert hasattr(self, '_scalar_class')\n super().__init__(data, vertices, tmin, tstep, subject, verbose)\n if self._data is not None and self._data.shape[1] != 3:\n raise ValueError('Data for VectorSourceEstimate must have second '\n 'dimension of length 3, got length %s'\n % (self._data.shape[1],))\n\n def magnitude(self):\n \"\"\"Compute magnitude of activity without directionality.\n\n Returns\n -------\n stc : instance of SourceEstimate\n The source estimate without directionality information.\n \"\"\"\n data_mag = np.linalg.norm(self.data, axis=1)\n return self._scalar_class(\n data_mag, self.vertices, self.tmin, self.tstep, self.subject,\n self.verbose)\n\n def normal(self, src):\n \"\"\"Compute activity orthogonal to the cortex.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space for which this source estimate is specified.\n\n Returns\n -------\n stc : instance of SourceEstimate\n The source estimate only retaining the activity orthogonal to the\n cortex.\n \"\"\"\n _check_src_normal('normal', src)\n normals = np.vstack([s['nn'][v] for s, v in\n zip(src, self._vertices_list)])\n data_norm = einsum('ijk,ij->ik', self.data, normals)\n return self._scalar_class(\n data_norm, self.vertices, self.tmin, self.tstep, self.subject,\n self.verbose)\n\n\nclass _BaseVolSourceEstimate(_BaseSourceEstimate):\n\n _data_ndim = 2\n _src_type = 'volume'\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n _validate_type(vertices, (np.ndarray, list), 'vertices')\n _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject,\n verbose=verbose)\n\n @property\n def 
_vertices_list(self):\n        return [self.vertices]\n\n    @copy_function_doc_to_method_doc(plot_volume_source_estimates)\n    def plot(self, src, subject=None, subjects_dir=None, mode='stat_map',\n             bg_img=None, colorbar=True, colormap='auto', clim='auto',\n             transparent='auto', show=True, initial_time=None,\n             initial_pos=None, verbose=None):\n        data = self.magnitude() if self._data_ndim == 3 else self\n        return plot_volume_source_estimates(\n            data, src=src, subject=subject, subjects_dir=subjects_dir,\n            mode=mode, bg_img=bg_img, colorbar=colorbar, colormap=colormap,\n            clim=clim, transparent=transparent, show=show,\n            initial_time=initial_time, initial_pos=initial_pos,\n            verbose=verbose)\n\n    def save_as_volume(self, fname, src, dest='mri', mri_resolution=False,\n                       format='nifti1'):\n        \"\"\"Save a volume source estimate in a NIfTI file.\n\n        Parameters\n        ----------\n        fname : string\n            The name of the generated nifti file.\n        src : list\n            The list of source spaces (should all be of type volume).\n        dest : 'mri' | 'surf'\n            If 'mri' the volume is defined in the coordinate system of\n            the original T1 image. If 'surf' the coordinate system\n            of the FreeSurfer surface is used (Surface RAS).\n        mri_resolution : bool\n            If True, the image is saved in MRI resolution.\n            WARNING: if you have many time points the file produced can be\n            huge.\n        format : str\n            Either 'nifti1' (default) or 'nifti2'.\n\n            .. versionadded:: 0.17\n\n        Notes\n        -----\n        .. versionadded:: 0.9.0\n        \"\"\"\n        import nibabel as nib\n        _validate_type(fname, 'path-like', 'fname')\n        fname = str(fname)\n        img = self.as_volume(src, dest=dest, mri_resolution=mri_resolution,\n                             format=format)\n        nib.save(img, fname)\n\n    def as_volume(self, src, dest='mri', mri_resolution=False,\n                  format='nifti1'):\n        \"\"\"Export volume source estimate as a nifti object.\n\n        Parameters\n        ----------\n        src : list\n            The list of source spaces (should all be of type volume).\n        dest : 'mri' | 'surf'\n            If 'mri' the volume is defined in the coordinate system of\n            the original T1 image. If 'surf' the coordinate system\n            of the FreeSurfer surface is used (Surface RAS).\n        mri_resolution : bool\n            If True, the image is saved in MRI resolution.\n            WARNING: if you have many time points the file produced can be\n            huge.\n        format : str\n            Either 'nifti1' (default) or 'nifti2'.\n\n        Returns\n        -------\n        img : instance of Nifti1Image\n            The image object.\n\n        Notes\n        -----\n        .. versionadded:: 0.9.0\n        \"\"\"\n        from .morph import _interpolate_data\n        data = self.magnitude() if self._data_ndim == 3 else self\n        return _interpolate_data(data, src, mri_resolution=mri_resolution,\n                                 mri_space=True, output=format)\n\n    def get_peak(self, tmin=None, tmax=None, mode='abs',\n                 vert_as_index=False, time_as_index=False):\n        \"\"\"Get location and latency of peak amplitude.\n\n        Parameters\n        ----------\n        tmin : float | None\n            The minimum point in time to be considered for peak getting.\n        tmax : float | None\n            The maximum point in time to be considered for peak getting.\n        mode : {'pos', 'neg', 'abs'}\n            How to deal with the sign of the data. If 'pos' only positive\n            values will be considered. If 'neg' only negative values will\n            be considered. 
If 'abs' absolute values will be considered.\n            Defaults to 'abs'.\n        vert_as_index : bool\n            Whether to return the vertex index instead of its ID.\n            Defaults to False.\n        time_as_index : bool\n            Whether to return the time index instead of the latency.\n            Defaults to False.\n\n        Returns\n        -------\n        pos : int\n            The vertex exhibiting the maximum response, either ID or index.\n        latency : float\n            The latency in seconds.\n        \"\"\"\n        stc = self.magnitude() if self._data_ndim == 3 else self\n        vert_idx, time_idx, _ = _get_peak(stc.data, self.times, tmin, tmax,\n                                          mode)\n\n        return (vert_idx if vert_as_index else self.vertices[vert_idx],\n                time_idx if time_as_index else self.times[time_idx])\n\n\n@fill_doc\nclass VolSourceEstimate(_BaseVolSourceEstimate):\n    \"\"\"Container for volume source estimates.\n\n    Parameters\n    ----------\n    data : array of shape (n_dipoles, n_times) | tuple, shape (2,)\n        The data in source space. The data can either be a single array or\n        a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n        \"sens_data\" shape (n_sensors, n_times). In this case, the source\n        space data corresponds to ``np.dot(kernel, sens_data)``.\n    vertices : array of shape (n_dipoles,)\n        The indices of the dipoles in the source space.\n    tmin : scalar\n        Time point of the first sample in data.\n    tstep : scalar\n        Time step between successive samples in data.\n    subject : str | None\n        The subject name. While not necessary, it is safer to set the\n        subject parameter to avoid analysis errors.\n    %(verbose)s\n\n    Attributes\n    ----------\n    subject : str | None\n        The subject name.\n    times : array of shape (n_times,)\n        The time vector.\n    vertices : array of shape (n_dipoles,)\n        The indices of the dipoles in the source space.\n    data : array of shape (n_dipoles, n_times)\n        The data in source space.\n    shape : tuple\n        The shape of the data. A tuple of int (n_dipoles, n_times).\n\n    See Also\n    --------\n    SourceEstimate : A container for surface source estimates.\n    VolVectorSourceEstimate : A container for volume vector source estimates.\n    MixedSourceEstimate : A container for mixed surface + volume source\n        estimates.\n\n    Notes\n    -----\n    .. versionadded:: 0.9.0\n    \"\"\"\n\n    @verbose\n    def save(self, fname, ftype='stc', verbose=None):\n        \"\"\"Save the source estimates to a file.\n\n        Parameters\n        ----------\n        fname : string\n            The stem of the file name. The stem is extended with \"-vl.stc\"\n            or \"-vl.w\".\n        ftype : string\n            File format to use. Allowed values are \"stc\" (default), \"w\",\n            and \"h5\". The \"w\" format only supports a single time point.\n        %(verbose_meth)s\n        \"\"\"\n        _validate_type(fname, 'path-like', 'fname')\n        fname = str(fname)\n        _check_option('ftype', ftype, ['stc', 'w', 'h5'])\n        if ftype == 'stc':\n            logger.info('Writing STC to disk...')\n            if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):\n                fname += '-vl.stc'\n            _write_stc(fname, tmin=self.tmin, tstep=self.tstep,\n                       vertices=self.vertices, data=self.data)\n        elif ftype == 'w':\n            logger.info('Writing STC to disk (w format)...')\n            if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):\n                fname += '-vl.w'\n            _write_w(fname, vertices=self.vertices, data=self.data)\n        elif ftype == 'h5':\n            super().save(fname, 'h5')\n        logger.info('[done]')\n\n\n@fill_doc\nclass VolVectorSourceEstimate(_BaseVectorSourceEstimate,\n                              _BaseVolSourceEstimate):\n    \"\"\"Container for volume vector source estimates.\n\n    Parameters\n    ----------\n    data : array of shape (n_dipoles, 3, n_times)\n        The data in source space. 
Each dipole contains three vectors that\n        denote the dipole strength in X, Y and Z directions over time.\n    vertices : array of shape (n_dipoles,)\n        The indices of the dipoles in the source space.\n    tmin : scalar\n        Time point of the first sample in data.\n    tstep : scalar\n        Time step between successive samples in data.\n    subject : str | None\n        The subject name. While not necessary, it is safer to set the\n        subject parameter to avoid analysis errors.\n    %(verbose)s\n\n    Attributes\n    ----------\n    subject : str | None\n        The subject name.\n    times : array of shape (n_times,)\n        The time vector.\n    vertices : array of shape (n_dipoles,)\n        The indices of the dipoles in the source space.\n    data : array of shape (n_dipoles, 3, n_times)\n        The data in source space.\n    shape : tuple\n        The shape of the data. A tuple of int (n_dipoles, 3, n_times).\n\n    See Also\n    --------\n    SourceEstimate : A container for surface source estimates.\n    VectorSourceEstimate : A container for vector source estimates.\n    MixedSourceEstimate : A container for mixed surface + volume source\n        estimates.\n\n    Notes\n    -----\n    .. versionadded:: 0.9.0\n    \"\"\"\n\n    _data_ndim = 3\n    _scalar_class = VolSourceEstimate\n\n\n@fill_doc\nclass VectorSourceEstimate(_BaseVectorSourceEstimate,\n                           _BaseSurfaceSourceEstimate):\n    \"\"\"Container for vector surface source estimates.\n\n    For each vertex, the magnitude of the current is defined in the X, Y and Z\n    directions.\n\n    Parameters\n    ----------\n    data : array of shape (n_dipoles, 3, n_times)\n        The data in source space. Each dipole contains three vectors that\n        denote the dipole strength in X, Y and Z directions over time.\n    vertices : list of array, shape (2,)\n        Vertex numbers corresponding to the data. The first element of the list\n        contains vertices of left hemisphere and the second element contains\n        vertices of right hemisphere.\n    tmin : float\n        Time point of the first sample in data.\n    tstep : float\n        Time step between successive samples in data.\n    subject : str | None\n        The subject name. While not necessary, it is safer to set the\n        subject parameter to avoid analysis errors.\n    %(verbose)s\n\n    Attributes\n    ----------\n    subject : str | None\n        The subject name.\n    times : array of shape (n_times,)\n        The time vector.\n    shape : tuple\n        The shape of the data. A tuple of int (n_dipoles, 3, n_times).\n\n    See Also\n    --------\n    SourceEstimate : A container for surface source estimates.\n    VolSourceEstimate : A container for volume source estimates.\n    MixedSourceEstimate : A container for mixed surface + volume source\n        estimates.\n\n    Notes\n    -----\n    .. 
versionadded:: 0.15\n \"\"\"\n\n _data_ndim = 3\n _scalar_class = SourceEstimate\n\n @copy_function_doc_to_method_doc(plot_vector_source_estimates)\n def plot(self, subject=None, hemi='lh', colormap='hot', time_label='auto',\n smoothing_steps=10, transparent=True, brain_alpha=0.4,\n overlay_alpha=None, vector_alpha=1.0, scale_factor=None,\n time_viewer=False, subjects_dir=None, figure=None, views='lat',\n colorbar=True, clim='auto', cortex='classic', size=800,\n background='black', foreground='white', initial_time=None,\n time_unit='s'): # noqa: D102\n\n return plot_vector_source_estimates(\n self, subject=subject, hemi=hemi, colormap=colormap,\n time_label=time_label, smoothing_steps=smoothing_steps,\n transparent=transparent, brain_alpha=brain_alpha,\n overlay_alpha=overlay_alpha, vector_alpha=vector_alpha,\n scale_factor=scale_factor, time_viewer=time_viewer,\n subjects_dir=subjects_dir, figure=figure, views=views,\n colorbar=colorbar, clim=clim, cortex=cortex, size=size,\n background=background, foreground=foreground,\n initial_time=initial_time, time_unit=time_unit\n )\n\n\n@fill_doc\nclass MixedSourceEstimate(_BaseSourceEstimate):\n \"\"\"Container for mixed surface and volume source estimates.\n\n Parameters\n ----------\n data : array of shape (n_dipoles, n_times) | tuple, shape (2,)\n The data in source space. The data can either be a single array or\n a tuple with two arrays: \"kernel\" shape (n_vertices, n_sensors) and\n \"sens_data\" shape (n_sensors, n_times). In this case, the source\n space data corresponds to ``np.dot(kernel, sens_data)``.\n vertices : list of array\n Vertex numbers corresponding to the data. The list contains arrays\n with one array per source space.\n tmin : scalar\n Time point of the first sample in data.\n tstep : scalar\n Time step between successive samples in data.\n subject : str | None\n The subject name. While not necessary, it is safer to set the\n subject parameter to avoid analysis errors.\n %(verbose)s\n\n Attributes\n ----------\n subject : str | None\n The subject name.\n times : array of shape (n_times,)\n The time vector.\n vertices : list of array\n Vertex numbers corresponding to the data. The list contains arrays\n with one array per source space.\n data : array of shape (n_dipoles, n_times)\n The data in source space.\n shape : tuple\n The shape of the data. A tuple of int (n_dipoles, n_times).\n\n See Also\n --------\n SourceEstimate : A container for surface source estimates.\n VectorSourceEstimate : A container for vector source estimates.\n VolSourceEstimate : A container for volume source estimates.\n VolVectorSourceEstimate : A container for Volume vector source estimates.\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n\n _data_ndim = 2\n _src_type = 'mixed'\n\n @verbose\n def __init__(self, data, vertices=None, tmin=None, tstep=None,\n subject=None, verbose=None): # noqa: D102\n if not isinstance(vertices, list) or len(vertices) < 2:\n raise ValueError('Vertices must be a list of numpy arrays with '\n 'one array per source space.')\n\n _BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,\n tstep=tstep, subject=subject,\n verbose=verbose)\n\n def plot_surface(self, src, subject=None, surface='inflated', hemi='lh',\n colormap='auto', time_label='time=%02.f ms',\n smoothing_steps=10,\n transparent=None, alpha=1.0, time_viewer=False,\n config_opts=None, subjects_dir=None, figure=None,\n views='lat', colorbar=True, clim='auto'):\n \"\"\"Plot surface source estimates with PySurfer.\n\n Note: PySurfer currently needs the SUBJECTS_DIR environment variable,\n which will automatically be set by this function. Plotting multiple\n SourceEstimates with different values for subjects_dir will cause\n PySurfer to use the wrong FreeSurfer surfaces when using methods of\n the returned Brain object. It is therefore recommended to set the\n SUBJECTS_DIR environment variable or always use the same value for\n subjects_dir (within the same Python session).\n\n Parameters\n ----------\n src : SourceSpaces\n The source spaces to plot.\n subject : str | None\n The subject name corresponding to FreeSurfer environment\n variable SUBJECT. If None stc.subject will be used. If that\n is None, the environment will be used.\n surface : str\n The type of surface (inflated, white etc.).\n hemi : str, 'lh' | 'rh' | 'split' | 'both'\n The hemisphere to display. Using 'both' or 'split' requires\n PySurfer version 0.4 or above.\n colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)\n Name of colormap to use. See `plot_source_estimates`.\n time_label : str\n How to print info about the time instant visualized.\n smoothing_steps : int\n The amount of smoothing.\n transparent : bool | None\n If True, use a linear transparency between fmin and fmid.\n None will choose automatically based on colormap type.\n alpha : float\n Alpha value to apply globally to the overlay.\n time_viewer : bool\n Display time viewer GUI.\n config_opts : dict\n Keyword arguments for Brain initialization.\n See pysurfer.viz.Brain.\n subjects_dir : str\n The path to the FreeSurfer subjects reconstructions.\n It corresponds to FreeSurfer environment variable SUBJECTS_DIR.\n figure : instance of mayavi.mlab.Figure | None\n If None, the last figure will be cleaned and a new figure will\n be created.\n views : str | list\n View to use. See `surfer.Brain`.\n colorbar : bool\n If True, display colorbar on scene.\n clim : str | dict\n Colorbar properties specification. 
See `plot_source_estimates`.\n\n Returns\n -------\n brain : instance of surfer.Brain\n A instance of `surfer.Brain` from PySurfer.\n \"\"\"\n # extract surface source spaces\n surf = _ensure_src(src, kind='surface')\n\n # extract surface source estimate\n data = self.data[:surf[0]['nuse'] + surf[1]['nuse']]\n vertices = [s['vertno'] for s in surf]\n\n stc = SourceEstimate(data, vertices, self.tmin, self.tstep,\n self.subject, self.verbose)\n\n return plot_source_estimates(stc, subject, surface=surface, hemi=hemi,\n colormap=colormap, time_label=time_label,\n smoothing_steps=smoothing_steps,\n transparent=transparent, alpha=alpha,\n time_viewer=time_viewer,\n config_opts=config_opts,\n subjects_dir=subjects_dir, figure=figure,\n views=views, colorbar=colorbar, clim=clim)\n\n\n###############################################################################\n# Morphing\n\n\ndef _get_vol_mask(src):\n \"\"\"Get the volume source space mask.\"\"\"\n assert len(src) == 1 # not a mixed source space\n shape = src[0]['shape'][::-1]\n mask = np.zeros(shape, bool)\n mask.flat[src[0]['vertno']] = True\n return mask\n\n\ndef _spatio_temporal_src_connectivity_vol(src, n_times):\n from sklearn.feature_extraction import grid_to_graph\n mask = _get_vol_mask(src)\n edges = grid_to_graph(*mask.shape, mask=mask)\n connectivity = _get_connectivity_from_edges(edges, n_times)\n return connectivity\n\n\ndef _spatio_temporal_src_connectivity_surf(src, n_times):\n if src[0]['use_tris'] is None:\n # XXX It would be nice to support non oct source spaces too...\n raise RuntimeError(\"The source space does not appear to be an ico \"\n \"surface. Connectivity cannot be extracted from\"\n \" non-ico source spaces.\")\n used_verts = [np.unique(s['use_tris']) for s in src]\n offs = np.cumsum([0] + [len(u_v) for u_v in used_verts])[:-1]\n tris = np.concatenate([np.searchsorted(u_v, s['use_tris']) + off\n for u_v, s, off in zip(used_verts, src, offs)])\n connectivity = spatio_temporal_tris_connectivity(tris, n_times)\n\n # deal with source space only using a subset of vertices\n masks = [np.in1d(u, s['vertno']) for s, u in zip(src, used_verts)]\n if sum(u.size for u in used_verts) != connectivity.shape[0] / n_times:\n raise ValueError('Used vertices do not match connectivity shape')\n if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:\n raise ValueError('Vertex mask does not match number of vertices')\n masks = np.concatenate(masks)\n missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)\n if missing:\n warn_('%0.1f%% of original source space vertices have been'\n ' omitted, tri-based connectivity will have holes.\\n'\n 'Consider using distance-based connectivity or '\n 'morphing data to all source space vertices.' % missing)\n masks = np.tile(masks, n_times)\n masks = np.where(masks)[0]\n connectivity = connectivity.tocsr()\n connectivity = connectivity[masks]\n connectivity = connectivity[:, masks]\n # return to original format\n connectivity = connectivity.tocoo()\n return connectivity\n\n\n@verbose\ndef spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None):\n \"\"\"Compute connectivity for a source space activation over time.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. It can be a surface source space or a\n volume source space.\n n_times : int\n Number of time instants.\n dist : float, or None\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors. 
If None, immediate neighbors\n are extracted from an ico surface.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatio-temporal\n graph structure. If N is the number of vertices in the\n source space, the N first nodes in the graph are the\n vertices are time 1, the nodes from 2 to 2N are the vertices\n during time 2, etc.\n \"\"\"\n # XXX we should compute connectivity for each source space and then\n # use scipy.sparse.block_diag to concatenate them\n if src[0]['type'] == 'vol':\n if dist is not None:\n raise ValueError('dist must be None for a volume '\n 'source space. Got %s.' % dist)\n\n connectivity = _spatio_temporal_src_connectivity_vol(src, n_times)\n elif dist is not None:\n # use distances computed and saved in the source space file\n connectivity = spatio_temporal_dist_connectivity(src, n_times, dist)\n else:\n connectivity = _spatio_temporal_src_connectivity_surf(src, n_times)\n return connectivity\n\n\n@verbose\ndef grade_to_tris(grade, verbose=None):\n \"\"\"Get tris defined for a certain grade.\n\n Parameters\n ----------\n grade : int\n Grade of an icosahedral mesh.\n %(verbose)s\n\n Returns\n -------\n tris : list\n 2-element list containing Nx3 arrays of tris, suitable for use in\n spatio_temporal_tris_connectivity.\n \"\"\"\n a = _get_ico_tris(grade, None, False)\n tris = np.concatenate((a, a + (np.max(a) + 1)))\n return tris\n\n\n@verbose\ndef spatio_temporal_tris_connectivity(tris, n_times, remap_vertices=False,\n verbose=None):\n \"\"\"Compute connectivity from triangles and time instants.\n\n Parameters\n ----------\n tris : array\n N x 3 array defining triangles.\n n_times : int\n Number of time points.\n remap_vertices : bool\n Reassign vertex indices based on unique values. Useful\n to process a subset of triangles. Defaults to False.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatio-temporal\n graph structure. If N is the number of vertices in the\n source space, the N first nodes in the graph are the\n vertices are time 1, the nodes from 2 to 2N are the vertices\n during time 2, etc.\n \"\"\"\n if remap_vertices:\n logger.info('Reassigning vertex indices.')\n tris = np.searchsorted(np.unique(tris), tris)\n\n edges = mesh_edges(tris).tocoo()\n return _get_connectivity_from_edges(edges, n_times)\n\n\n@verbose\ndef spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None):\n \"\"\"Compute connectivity from distances in a source space and time instants.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space must have distances between vertices computed, such\n that src['dist'] exists and is useful. This can be obtained\n with a call to :func:`mne.setup_source_space` with the\n ``add_dist=True`` option.\n n_times : int\n Number of time points.\n dist : float\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatio-temporal\n graph structure. 
If N is the number of vertices in the\n source space, the N first nodes in the graph are the\n vertices are time 1, the nodes from 2 to 2N are the vertices\n during time 2, etc.\n \"\"\"\n if src[0]['dist'] is None:\n raise RuntimeError('src must have distances included, consider using '\n 'setup_source_space with add_dist=True')\n edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']]\n for s in src])\n edges.data[:] = np.less_equal(edges.data, dist)\n # clean it up and put it in coo format\n edges = edges.tocsr()\n edges.eliminate_zeros()\n edges = edges.tocoo()\n return _get_connectivity_from_edges(edges, n_times)\n\n\n@verbose\ndef spatial_src_connectivity(src, dist=None, verbose=None):\n \"\"\"Compute connectivity for a source space activation.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. It can be a surface source space or a\n volume source space.\n dist : float, or None\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors. If None, immediate neighbors\n are extracted from an ico surface.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_src_connectivity(src, 1, dist)\n\n\n@verbose\ndef spatial_tris_connectivity(tris, remap_vertices=False, verbose=None):\n \"\"\"Compute connectivity from triangles.\n\n Parameters\n ----------\n tris : array\n N x 3 array defining triangles.\n remap_vertices : bool\n Reassign vertex indices based on unique values. Useful\n to process a subset of triangles. Defaults to False.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_tris_connectivity(tris, 1, remap_vertices)\n\n\n@verbose\ndef spatial_dist_connectivity(src, dist, verbose=None):\n \"\"\"Compute connectivity from distances in a source space.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space must have distances between vertices computed, such\n that src['dist'] exists and is useful. This can be obtained\n with a call to :func:`mne.setup_source_space` with the\n ``add_dist=True`` option.\n dist : float\n Maximal geodesic distance (in m) between vertices in the\n source space to consider neighbors.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatial graph structure.\n \"\"\"\n return spatio_temporal_dist_connectivity(src, 1, dist)\n\n\n@verbose\ndef spatial_inter_hemi_connectivity(src, dist, verbose=None):\n \"\"\"Get vertices on each hemisphere that are close to the other hemisphere.\n\n Parameters\n ----------\n src : instance of SourceSpaces\n The source space. Must be surface type.\n dist : float\n Maximal Euclidean distance (in m) between vertices in one hemisphere\n compared to the other to consider neighbors.\n %(verbose)s\n\n Returns\n -------\n connectivity : ~scipy.sparse.coo_matrix\n The connectivity matrix describing the spatial graph structure.\n Typically this should be combined (addititively) with another\n existing intra-hemispheric connectivity matrix, e.g. 
computed\n using geodesic distances.\n \"\"\"\n from scipy.spatial.distance import cdist\n src = _ensure_src(src, kind='surface')\n conn = cdist(src[0]['rr'][src[0]['vertno']],\n src[1]['rr'][src[1]['vertno']])\n conn = sparse.csr_matrix(conn <= dist, dtype=int)\n empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in conn.shape]\n conn = sparse.vstack([sparse.hstack([empties[0], conn]),\n sparse.hstack([conn.T, empties[1]])])\n return conn\n\n\n@verbose\ndef _get_connectivity_from_edges(edges, n_times, verbose=None):\n \"\"\"Given edges sparse matrix, create connectivity matrix.\"\"\"\n n_vertices = edges.shape[0]\n logger.info(\"-- number of connected vertices : %d\" % n_vertices)\n nnz = edges.col.size\n aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), np.int)\n col = (edges.col[None, :] + aux).ravel()\n row = (edges.row[None, :] + aux).ravel()\n if n_times > 1: # add temporal edges\n o = (n_vertices * np.arange(n_times - 1)[:, None] +\n np.arange(n_vertices)[None, :]).ravel()\n d = (n_vertices * np.arange(1, n_times)[:, None] +\n np.arange(n_vertices)[None, :]).ravel()\n row = np.concatenate((row, o, d))\n col = np.concatenate((col, d, o))\n data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),\n dtype=np.int)\n connectivity = coo_matrix((data, (row, col)),\n shape=(n_times * n_vertices,) * 2)\n return connectivity\n\n\n@verbose\ndef _get_ico_tris(grade, verbose=None, return_surf=False):\n \"\"\"Get triangles for ico surface.\"\"\"\n ico = _get_ico_surface(grade)\n if not return_surf:\n return ico['tris']\n else:\n return ico\n\n\ndef _pca_flip(flip, data):\n U, s, V = linalg.svd(data, full_matrices=False)\n # determine sign-flip\n sign = np.sign(np.dot(U[:, 0], flip))\n # use average power in label for scaling\n scale = linalg.norm(s) / np.sqrt(len(data))\n return sign * scale * V[0]\n\n\n_label_funcs = {\n 'mean': lambda flip, data: np.mean(data, axis=0),\n 'mean_flip': lambda flip, data: np.mean(flip * data, axis=0),\n 'max': lambda flip, data: np.max(np.abs(data), axis=0),\n 'pca_flip': _pca_flip,\n}\n\n\[email protected]\ndef _temporary_vertices(src, vertices):\n orig_vertices = [s['vertno'] for s in src]\n for s, v in zip(src, vertices):\n s['vertno'] = v\n try:\n yield\n finally:\n for s, v in zip(src, orig_vertices):\n s['vertno'] = v\n\n\ndef _prepare_label_extraction(stc, labels, src, mode, allow_empty):\n \"\"\"Prepare indices and flips for extract_label_time_course.\"\"\"\n # if src is a mixed src space, the first 2 src spaces are surf type and\n # the other ones are vol type. 
For mixed source space n_labels will be the\n # given by the number of ROIs of the cortical parcellation plus the number\n # of vol src space\n from .label import label_sign_flip\n\n # get vertices from source space, they have to be the same as in the stcs\n vertno = stc.vertices\n nvert = [len(vn) for vn in vertno]\n\n # do the initialization\n label_vertidx = list()\n label_flip = list()\n for s, v, hemi in zip(src, stc.vertices, ('left', 'right')):\n n_missing = (~np.in1d(v, s['vertno'])).sum()\n if n_missing:\n raise ValueError('%d/%d %s hemisphere stc vertices missing from '\n 'the source space, likely mismatch'\n % (n_missing, len(v), hemi))\n for label in labels:\n if label.hemi == 'both':\n # handle BiHemiLabel\n sub_labels = [label.lh, label.rh]\n else:\n sub_labels = [label]\n this_vertidx = list()\n for slabel in sub_labels:\n if slabel.hemi == 'lh':\n this_vertices = np.intersect1d(vertno[0], slabel.vertices)\n vertidx = np.searchsorted(vertno[0], this_vertices)\n elif slabel.hemi == 'rh':\n this_vertices = np.intersect1d(vertno[1], slabel.vertices)\n vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertices)\n else:\n raise ValueError('label %s has invalid hemi' % label.name)\n this_vertidx.append(vertidx)\n\n # convert it to an array\n this_vertidx = np.concatenate(this_vertidx)\n this_flip = None\n if len(this_vertidx) == 0:\n msg = ('source space does not contain any vertices for label %s'\n % label.name)\n if not allow_empty:\n raise ValueError(msg)\n else:\n warn_(msg + '. Assigning all-zero time series to label.')\n this_vertidx = None # to later check if label is empty\n elif mode not in ('mean', 'max'): # mode-dependent initialization\n # label_sign_flip uses two properties:\n #\n # - src[ii]['nn']\n # - src[ii]['vertno']\n #\n # So if we override vertno with the stc vertices, it will pick\n # the correct normals.\n with _temporary_vertices(src, stc.vertices):\n this_flip = label_sign_flip(label, src[:2])[:, None]\n\n label_vertidx.append(this_vertidx)\n label_flip.append(this_flip)\n\n return label_vertidx, label_flip\n\n\ndef _gen_extract_label_time_course(stcs, labels, src, mode='mean',\n allow_empty=False, verbose=None):\n # loop through source estimates and extract time series\n _check_option('mode', mode, sorted(_label_funcs.keys()))\n func = _label_funcs[mode]\n if len(src) > 2:\n if src[0]['type'] != 'surf' or src[1]['type'] != 'surf':\n raise ValueError('The first 2 source spaces have to be surf type')\n if any(np.any(s['type'] != 'vol') for s in src[2:]):\n raise ValueError('source spaces have to be of vol type')\n\n n_aparc = len(labels)\n n_aseg = len(src[2:])\n n_labels = n_aparc + n_aseg\n else:\n n_labels = len(labels)\n vertno = None\n for stc in stcs:\n if vertno is None:\n vertno = copy.deepcopy(stc.vertices)\n nvert = [len(v) for v in vertno]\n label_vertidx, src_flip = _prepare_label_extraction(\n stc, labels, src, mode, allow_empty)\n # make sure the stc is compatible with the source space\n for i in range(len(vertno)):\n if len(stc.vertices[i]) != nvert[i]:\n raise ValueError('stc not compatible with source space. '\n 'stc has %s time series but there are %s '\n 'vertices in source space'\n % (len(stc.vertices[i]), nvert[i]))\n\n if any(np.any(svn != vn) for svn, vn in zip(stc.vertices, vertno)):\n raise ValueError('stc not compatible with source space')\n if sum(nvert) != stc.shape[0]:\n raise ValueError('stc not compatible with source space. 
'\n                                 'stc has %s vertices but the source space '\n                                 'has %s vertices'\n                                 % (stc.shape[0], sum(nvert)))\n\n        logger.info('Extracting time courses for %d labels (mode: %s)'\n                    % (n_labels, mode))\n\n        # do the extraction\n        label_tc = np.zeros((n_labels, stc.data.shape[1]),\n                            dtype=stc.data.dtype)\n        for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)):\n            if vertidx is not None:\n                label_tc[i] = func(flip, stc.data[vertidx, :])\n\n        # extract label time series for the vol src space\n        if len(src) > 2:\n            v1 = nvert[0] + nvert[1]\n            for i, nv in enumerate(nvert[2:]):\n\n                v2 = v1 + nv\n                v = range(v1, v2)\n                if nv != 0:\n                    label_tc[n_aparc + i] = np.mean(stc.data[v, :], axis=0)\n\n                v1 = v2\n\n        # this is a generator!\n        yield label_tc\n\n\n@verbose\ndef extract_label_time_course(stcs, labels, src, mode='mean_flip',\n                              allow_empty=False, return_generator=False,\n                              verbose=None):\n    \"\"\"Extract label time course for lists of labels and source estimates.\n\n    This function will extract one time course for each label and source\n    estimate. The way the time courses are extracted depends on the mode\n    parameter (see Notes).\n\n    Parameters\n    ----------\n    stcs : SourceEstimate | list (or generator) of SourceEstimate\n        The source estimates from which to extract the time course.\n    labels : Label | BiHemiLabel | list of Label or BiHemiLabel\n        The labels for which to extract the time course.\n    src : list\n        Source spaces for left and right hemisphere.\n    mode : str\n        Extraction mode, see Notes below.\n    allow_empty : bool\n        Instead of emitting an error, return all-zero time courses for labels\n        that do not have any vertices in the source estimate.\n    return_generator : bool\n        If True, a generator instead of a list is returned.\n    %(verbose)s\n\n    Returns\n    -------\n    label_tc : array | list (or generator) of array, shape (n_labels, n_times)\n        Extracted time course for each label and source estimate.\n\n    Notes\n    -----\n    Valid values for mode are:\n\n    ``'mean'``\n        Average within each label.\n    ``'mean_flip'``\n        Average within each label with sign flip depending\n        on source orientation.\n    ``'pca_flip'``\n        Apply an SVD to the time courses within each label\n        and use the scaled and sign-flipped first right-singular vector\n        as the label time course. The scaling is performed such that the\n        power of the label time course is the same as the average\n        per-vertex time course power within the label. The sign of the\n        resulting time course is adjusted by multiplying it with\n        \"sign(dot(u, flip))\" where u is the first left-singular vector,\n        and flip is a sign-flip vector based on the vertex normals. 
This\n procedure assures that the phase does not randomly change by 180\n degrees from one stc to the next.\n ``'max'``\n Max value within each label.\n\n If encountering a ``ValueError`` due to mismatch between number of\n source points in the subject source space and computed ``stc`` object set\n ``src`` argument to ``fwd['src']`` to ensure the source space is\n compatible between forward and inverse routines.\n \"\"\"\n # convert inputs to lists\n if isinstance(stcs, SourceEstimate):\n stcs = [stcs]\n return_several = False\n return_generator = False\n else:\n return_several = True\n\n if not isinstance(labels, list):\n labels = [labels]\n\n label_tc = _gen_extract_label_time_course(stcs, labels, src, mode=mode,\n allow_empty=allow_empty)\n\n if not return_generator:\n # do the extraction and return a list\n label_tc = list(label_tc)\n\n if not return_several:\n # input was a single SoureEstimate, return single array\n label_tc = label_tc[0]\n\n return label_tc\n"
]
| [
[
"numpy.dot",
"numpy.array_equal",
"scipy.linalg.svd",
"numpy.tile",
"numpy.mean",
"numpy.where",
"sklearn.feature_extraction.grid_to_graph",
"numpy.frombuffer",
"numpy.concatenate",
"numpy.max",
"numpy.linalg.norm",
"numpy.empty",
"scipy.sparse.block_diag",
"numpy.prod",
"numpy.arange",
"numpy.in1d",
"numpy.log10",
"scipy.sparse.csr_matrix",
"numpy.vstack",
"numpy.atleast_2d",
"scipy.sparse.coo_matrix",
"numpy.array",
"numpy.zeros",
"numpy.fromfile",
"numpy.argsort",
"numpy.intersect1d",
"numpy.searchsorted",
"scipy.linalg.norm",
"scipy.spatial.distance.cdist",
"numpy.insert",
"numpy.left_shift",
"numpy.asarray",
"numpy.setdiff1d",
"numpy.sum",
"scipy.sparse.hstack",
"numpy.ones",
"numpy.less_equal",
"numpy.any",
"numpy.abs",
"numpy.diag",
"numpy.unique"
]
]
|
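The `source_estimate.py` entry above builds its spatio-temporal graphs by tiling a spatial edge set across time points and then linking each vertex to itself at the next time step (see `_get_connectivity_from_edges`). Below is a minimal, self-contained sketch of that expansion using only NumPy/SciPy; the function name `spatio_temporal_from_edges` and the toy path graph are illustrative, not part of the MNE API.

```python
import numpy as np
from scipy.sparse import coo_matrix

def spatio_temporal_from_edges(edges, n_times):
    """Expand a spatial COO adjacency into a spatio-temporal graph."""
    n_vertices = edges.shape[0]
    nnz = edges.col.size
    # replicate the spatial edges at every time point, offset by t * n_vertices
    aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), int)
    col = (edges.col[None, :] + aux).ravel()
    row = (edges.row[None, :] + aux).ravel()
    if n_times > 1:  # temporal edges: vertex v at time t <-> v at time t + 1
        o = (n_vertices * np.arange(n_times - 1)[:, None] +
             np.arange(n_vertices)[None, :]).ravel()
        d = (n_vertices * np.arange(1, n_times)[:, None] +
             np.arange(n_vertices)[None, :]).ravel()
        row = np.concatenate((row, o, d))
        col = np.concatenate((col, d, o))
    data = np.ones(nnz * n_times + 2 * n_vertices * (n_times - 1), int)
    return coo_matrix((data, (row, col)), shape=(n_times * n_vertices,) * 2)

# toy example: a 3-vertex path graph (edges given in one direction)
# over 2 time points -> a 6 x 6 matrix
spatial = coo_matrix(([1, 1], ([0, 1], [1, 2])), shape=(3, 3))
print(spatio_temporal_from_edges(spatial, 2).toarray())
```

For N vertices and T time points the result is an NT x NT matrix, which is why the docstrings above describe nodes 1..N as the vertices at time 1, nodes N+1..2N as the vertices at time 2, and so on.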
Sugoshnr/faiss | [
"48ae55348a58624337e0b5125fb865142d2b9e19"
]
| [
"tests/test_index_accuracy.py"
]
| [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom __future__ import absolute_import, division, print_function\n# noqa E741\n# translation of test_knn.lua\n\nimport numpy as np\nimport unittest\nimport faiss\n\nfrom common_faiss_tests import Randu10k, get_dataset_2, Randu10kUnbalanced\n\nev = Randu10k()\n\nd = ev.d\n\n# Parameters inverted indexes\nncentroids = int(4 * np.sqrt(ev.nb))\nkprobe = int(np.sqrt(ncentroids))\n\n# Parameters for LSH\nnbits = d\n\n# Parameters for indexes involving PQ\nM = int(d / 8) # for PQ: #subquantizers\nnbits_per_index = 8 # for PQ\n\n\nclass IndexAccuracy(unittest.TestCase):\n\n def test_IndexFlatIP(self):\n q = faiss.IndexFlatIP(d) # Ask inner product\n res = ev.launch('FLAT / IP', q)\n e = ev.evalres(res)\n assert e[1] == 1.0\n\n def test_IndexFlatL2(self):\n q = faiss.IndexFlatL2(d)\n res = ev.launch('FLAT / L2', q)\n e = ev.evalres(res)\n assert e[1] == 1.0\n\n def test_ivf_kmeans(self):\n ivfk = faiss.IndexIVFFlat(faiss.IndexFlatL2(d), d, ncentroids)\n ivfk.nprobe = kprobe\n res = ev.launch('IndexIVFFlat', ivfk)\n e = ev.evalres(res)\n # should give 0.260 0.260 0.260\n assert e[1] > 0.2\n\n # test parallel mode\n Dref, Iref = ivfk.search(ev.xq, 100)\n ivfk.parallel_mode = 1\n Dnew, Inew = ivfk.search(ev.xq, 100)\n print((Iref != Inew).sum(), Iref.size)\n assert (Iref != Inew).sum() < Iref.size / 5000.0\n assert np.all(Dref == Dnew)\n\n def test_indexLSH(self):\n q = faiss.IndexLSH(d, nbits)\n res = ev.launch('FLAT / LSH Cosine', q)\n e = ev.evalres(res)\n # should give 0.070 0.250 0.580\n assert e[10] > 0.2\n\n def test_IndexLSH_32_48(self):\n # CHECK: the difference between 32 and 48 does not make much sense\n for nbits2 in 32, 48:\n q = faiss.IndexLSH(d, nbits2)\n res = ev.launch('LSH half size', q)\n e = ev.evalres(res)\n # should give 0.003 0.019 0.108\n assert e[10] > 0.018\n\n def test_IndexPQ(self):\n q = faiss.IndexPQ(d, M, nbits_per_index)\n res = ev.launch('FLAT / PQ L2', q)\n e = ev.evalres(res)\n # should give 0.070 0.230 0.260\n assert e[10] > 0.2\n\n # Approximate search module: PQ with inner product distance\n def test_IndexPQ_ip(self):\n q = faiss.IndexPQ(d, M, nbits_per_index, faiss.METRIC_INNER_PRODUCT)\n res = ev.launch('FLAT / PQ IP', q)\n e = ev.evalres(res)\n # should give 0.070 0.230 0.260\n #(same result as regular PQ on normalized distances)\n assert e[10] > 0.2\n\n def test_IndexIVFPQ(self):\n ivfpq = faiss.IndexIVFPQ(faiss.IndexFlatL2(d), d, ncentroids, M, 8)\n ivfpq.nprobe = kprobe\n res = ev.launch('IVF PQ', ivfpq)\n e = ev.evalres(res)\n # should give 0.070 0.230 0.260\n assert e[10] > 0.2\n\n # TODO: translate evaluation of nested\n\n # Approximate search: PQ with full vector refinement\n def test_IndexPQ_refined(self):\n q = faiss.IndexPQ(d, M, nbits_per_index)\n res = ev.launch('PQ non-refined', q)\n e = ev.evalres(res)\n q.reset()\n\n rq = faiss.IndexRefineFlat(q)\n res = ev.launch('PQ refined', rq)\n e2 = ev.evalres(res)\n assert e2[10] >= e[10]\n rq.k_factor = 4\n\n res = ev.launch('PQ refined*4', rq)\n e3 = ev.evalres(res)\n assert e3[10] >= e2[10]\n\n def test_polysemous(self):\n index = faiss.IndexPQ(d, M, nbits_per_index)\n index.do_polysemous_training = True\n # reduce nb iterations to speed up training for the test\n index.polysemous_training.n_iter = 50000\n index.polysemous_training.n_redo = 1\n res = ev.launch('normal PQ', index)\n e_baseline = ev.evalres(res)\n 
index.search_type = faiss.IndexPQ.ST_polysemous\n\n index.polysemous_ht = int(M / 16. * 58)\n\n stats = faiss.cvar.indexPQ_stats\n stats.reset()\n\n res = ev.launch('Polysemous ht=%d' % index.polysemous_ht,\n index)\n e_polysemous = ev.evalres(res)\n print(e_baseline, e_polysemous, index.polysemous_ht)\n print(stats.n_hamming_pass, stats.ncode)\n # The randu dataset is difficult, so we are not too picky on\n # the results. Here we assert that we have < 10 % loss when\n # computing full PQ on fewer than 20% of the data.\n assert stats.n_hamming_pass < stats.ncode / 5\n # Test disabled because difference is 0.17 on aarch64\n # TODO check why???\n # assert e_polysemous[10] > e_baseline[10] - 0.1\n\n def test_ScalarQuantizer(self):\n quantizer = faiss.IndexFlatL2(d)\n ivfpq = faiss.IndexIVFScalarQuantizer(\n quantizer, d, ncentroids,\n faiss.ScalarQuantizer.QT_8bit)\n ivfpq.nprobe = kprobe\n res = ev.launch('IVF SQ', ivfpq)\n e = ev.evalres(res)\n # should give 0.234 0.236 0.236\n assert e[10] > 0.235\n\n def test_polysemous_OOM(self):\n \"\"\" this used to cause OOM when training polysemous with large\n nb bits\"\"\"\n d = 32\n xt, xb, xq = get_dataset_2(d, 10000, 0, 0)\n index = faiss.IndexPQ(d, M, 13)\n index.do_polysemous_training = True\n index.pq.cp.niter = 0\n index.polysemous_training.max_memory = 128 * 1024 * 1024\n self.assertRaises(RuntimeError, index.train, xt)\n\n\nclass TestSQFlavors(unittest.TestCase):\n \"\"\" tests IP in addition to L2, non multiple of 8 dimensions\n \"\"\"\n\n def add2columns(self, x):\n return np.hstack((\n x, np.zeros((x.shape[0], 2), dtype='float32')\n ))\n\n def subtest_add2col(self, xb, xq, index, qname):\n \"\"\"Test with 2 additional dimensions to take also the non-SIMD\n codepath. We don't retrain anything but add 2 dims to the\n queries, the centroids and the trained ScalarQuantizer.\n \"\"\"\n nb, d = xb.shape\n\n d2 = d + 2\n xb2 = self.add2columns(xb)\n xq2 = self.add2columns(xq)\n\n nlist = index.nlist\n quantizer = faiss.downcast_index(index.quantizer)\n quantizer2 = faiss.IndexFlat(d2, index.metric_type)\n centroids = faiss.vector_to_array(quantizer.xb).reshape(nlist, d)\n centroids2 = self.add2columns(centroids)\n quantizer2.add(centroids2)\n index2 = faiss.IndexIVFScalarQuantizer(\n quantizer2, d2, index.nlist, index.sq.qtype,\n index.metric_type)\n index2.nprobe = 4\n if qname in ('8bit', '4bit'):\n trained = faiss.vector_to_array(index.sq.trained).reshape(2, -1)\n nt = trained.shape[1]\n # 2 lines: vmins and vdiffs\n new_nt = int(nt * d2 / d)\n trained2 = np.hstack((\n trained,\n np.zeros((2, new_nt - nt), dtype='float32')\n ))\n trained2[1, nt:] = 1.0 # set vdiff to 1 to avoid div by 0\n faiss.copy_array_to_vector(trained2.ravel(), index2.sq.trained)\n else:\n index2.sq.trained = index.sq.trained\n\n index2.is_trained = True\n index2.add(xb2)\n return index2.search(xq2, 10)\n\n\n # run on Sept 18, 2018 with nprobe=4 + 4 bit bugfix\n ref_results = {\n (0, '8bit'): 984,\n (0, '4bit'): 978,\n (0, '8bit_uniform'): 985,\n (0, '4bit_uniform'): 979,\n (0, 'fp16'): 985,\n (1, '8bit'): 979,\n (1, '4bit'): 973,\n (1, '8bit_uniform'): 979,\n (1, '4bit_uniform'): 972,\n (1, 'fp16'): 979,\n # added 2019-06-26\n (0, '6bit'): 985,\n (1, '6bit'): 987,\n }\n\n def subtest(self, mt):\n d = 32\n xt, xb, xq = get_dataset_2(d, 2000, 1000, 200)\n nlist = 64\n\n gt_index = faiss.IndexFlat(d, mt)\n gt_index.add(xb)\n gt_D, gt_I = gt_index.search(xq, 10)\n quantizer = faiss.IndexFlat(d, mt)\n for qname in '8bit 4bit 8bit_uniform 4bit_uniform fp16 6bit'.split():\n 
qtype = getattr(faiss.ScalarQuantizer, 'QT_' + qname)\n index = faiss.IndexIVFScalarQuantizer(\n quantizer, d, nlist, qtype, mt)\n index.train(xt)\n index.add(xb)\n index.nprobe = 4 # hopefully more robust than 1\n D, I = index.search(xq, 10)\n ninter = faiss.eval_intersection(I, gt_I)\n print('(%d, %s): %d, ' % (mt, repr(qname), ninter))\n assert abs(ninter - self.ref_results[(mt, qname)]) <= 10\n\n if qname == '6bit':\n # the test below fails triggers ASAN. TODO check what's wrong\n continue\n\n D2, I2 = self.subtest_add2col(xb, xq, index, qname)\n assert np.all(I2 == I)\n\n # also test range search\n\n if mt == faiss.METRIC_INNER_PRODUCT:\n radius = float(D[:, -1].max())\n else:\n radius = float(D[:, -1].min())\n print('radius', radius)\n\n lims, D3, I3 = index.range_search(xq, radius)\n ntot = ndiff = 0\n for i in range(len(xq)):\n l0, l1 = lims[i], lims[i + 1]\n Inew = set(I3[l0:l1])\n if mt == faiss.METRIC_INNER_PRODUCT:\n mask = D2[i] > radius\n else:\n mask = D2[i] < radius\n Iref = set(I2[i, mask])\n ndiff += len(Inew ^ Iref)\n ntot += len(Iref)\n print('ndiff %d / %d' % (ndiff, ntot))\n assert ndiff < ntot * 0.01\n\n for pm in 1, 2:\n print('parallel_mode=%d' % pm)\n index.parallel_mode = pm\n lims4, D4, I4 = index.range_search(xq, radius)\n print('sizes', lims4[1:] - lims4[:-1])\n for qno in range(len(lims) - 1):\n Iref = I3[lims[qno]: lims[qno+1]]\n Inew = I4[lims4[qno]: lims4[qno+1]]\n assert set(Iref) == set(Inew), \"q %d ref %s new %s\" % (\n qno, Iref, Inew)\n\n def test_SQ_IP(self):\n self.subtest(faiss.METRIC_INNER_PRODUCT)\n\n def test_SQ_L2(self):\n self.subtest(faiss.METRIC_L2)\n\n def test_parallel_mode(self):\n d = 32\n xt, xb, xq = get_dataset_2(d, 2000, 1000, 200)\n\n index = faiss.index_factory(d, \"IVF64,SQ8\")\n index.train(xt)\n index.add(xb)\n index.nprobe = 4 # hopefully more robust than 1\n Dref, Iref = index.search(xq, 10)\n\n for pm in 1, 2, 3:\n index.parallel_mode = pm\n\n Dnew, Inew = index.search(xq, 10)\n np.testing.assert_array_equal(Iref, Inew)\n np.testing.assert_array_equal(Dref, Dnew)\n\n\nclass TestSQByte(unittest.TestCase):\n\n def subtest_8bit_direct(self, metric_type, d):\n xt, xb, xq = get_dataset_2(d, 500, 1000, 30)\n\n # rescale everything to get integer\n tmin, tmax = xt.min(), xt.max()\n\n def rescale(x):\n x = np.floor((x - tmin) * 256 / (tmax - tmin))\n x[x < 0] = 0\n x[x > 255] = 255\n return x\n\n xt = rescale(xt)\n xb = rescale(xb)\n xq = rescale(xq)\n\n gt_index = faiss.IndexFlat(d, metric_type)\n gt_index.add(xb)\n Dref, Iref = gt_index.search(xq, 10)\n\n index = faiss.IndexScalarQuantizer(\n d, faiss.ScalarQuantizer.QT_8bit_direct, metric_type)\n index.add(xb)\n D, I = index.search(xq, 10)\n\n assert np.all(I == Iref)\n assert np.all(D == Dref)\n\n # same, with IVF\n\n nlist = 64\n quantizer = faiss.IndexFlat(d, metric_type)\n\n gt_index = faiss.IndexIVFFlat(quantizer, d, nlist, metric_type)\n gt_index.nprobe = 4\n gt_index.train(xt)\n gt_index.add(xb)\n Dref, Iref = gt_index.search(xq, 10)\n\n index = faiss.IndexIVFScalarQuantizer(\n quantizer, d, nlist,\n faiss.ScalarQuantizer.QT_8bit_direct, metric_type)\n index.nprobe = 4\n index.by_residual = False\n index.train(xt)\n index.add(xb)\n D, I = index.search(xq, 10)\n\n assert np.all(I == Iref)\n assert np.all(D == Dref)\n\n def test_8bit_direct(self):\n for d in 13, 16, 24:\n for metric_type in faiss.METRIC_L2, faiss.METRIC_INNER_PRODUCT:\n self.subtest_8bit_direct(metric_type, d)\n\n\nclass TestNNDescent(unittest.TestCase):\n\n def test_L1(self):\n search_Ls = [10, 20, 30]\n 
thresholds = [0.83, 0.92, 0.95]\n for search_L, threshold in zip(search_Ls, thresholds):\n self.subtest(32, faiss.METRIC_L1, 10, search_L, threshold)\n\n def test_L2(self):\n search_Ls = [10, 20, 30]\n thresholds = [0.83, 0.92, 0.95]\n for search_L, threshold in zip(search_Ls, thresholds):\n self.subtest(32, faiss.METRIC_L2, 10, search_L, threshold)\n\n def test_IP(self):\n search_Ls = [10, 20, 30]\n thresholds = [0.80, 0.90, 0.93]\n for search_L, threshold in zip(search_Ls, thresholds):\n self.subtest(32, faiss.METRIC_INNER_PRODUCT, 10, search_L, threshold)\n\n def subtest(self, d, metric, topk, search_L, threshold):\n metric_names = {faiss.METRIC_L1: 'L1',\n faiss.METRIC_L2: 'L2',\n faiss.METRIC_INNER_PRODUCT: 'IP'}\n topk = 10\n nt, nb, nq = 2000, 1000, 200\n xt, xb, xq = get_dataset_2(d, nt, nb, nq)\n gt_index = faiss.IndexFlat(d, metric)\n gt_index.add(xb)\n gt_D, gt_I = gt_index.search(xq, topk)\n\n K = 16\n index = faiss.IndexNNDescentFlat(d, K, metric)\n index.nndescent.S = 10\n index.nndescent.R = 32\n index.nndescent.L = K + 20\n index.nndescent.iter = 5\n index.verbose = False\n\n index.nndescent.search_L = search_L\n\n index.add(xb)\n D, I = index.search(xq, topk)\n recalls = 0\n for i in range(nq):\n for j in range(topk):\n for k in range(topk):\n if I[i, j] == gt_I[i, k]:\n recalls += 1\n break\n recall = 1.0 * recalls / (nq * topk)\n print('Metric: {}, L: {}, Recall@{}: {}'.format(\n metric_names[metric], search_L, topk, recall))\n assert recall > threshold, '{} <= {}'.format(recall, threshold)\n\n\nclass TestPQFlavors(unittest.TestCase):\n\n # run on Dec 14, 2018\n ref_results = {\n (1, True): 800,\n (1, True, 20): 794,\n (1, False): 769,\n (0, True): 831,\n (0, True, 20): 828,\n (0, False): 829,\n }\n\n def test_IVFPQ_IP(self):\n self.subtest(faiss.METRIC_INNER_PRODUCT)\n\n def test_IVFPQ_L2(self):\n self.subtest(faiss.METRIC_L2)\n\n def subtest(self, mt):\n d = 32\n xt, xb, xq = get_dataset_2(d, 2000, 1000, 200)\n nlist = 64\n\n gt_index = faiss.IndexFlat(d, mt)\n gt_index.add(xb)\n gt_D, gt_I = gt_index.search(xq, 10)\n quantizer = faiss.IndexFlat(d, mt)\n for by_residual in True, False:\n\n index = faiss.IndexIVFPQ(\n quantizer, d, nlist, 4, 8)\n index.metric_type = mt\n index.by_residual = by_residual\n if by_residual:\n # perform cheap polysemous training\n index.do_polysemous_training = True\n pt = faiss.PolysemousTraining()\n pt.n_iter = 50000\n pt.n_redo = 1\n index.polysemous_training = pt\n\n index.train(xt)\n index.add(xb)\n index.nprobe = 4\n D, I = index.search(xq, 10)\n\n ninter = faiss.eval_intersection(I, gt_I)\n print('(%d, %s): %d, ' % (mt, by_residual, ninter))\n\n assert abs(ninter - self.ref_results[mt, by_residual]) <= 3\n\n index.use_precomputed_table = 0\n D2, I2 = index.search(xq, 10)\n assert np.all(I == I2)\n\n if by_residual:\n\n index.use_precomputed_table = 1\n index.polysemous_ht = 20\n D, I = index.search(xq, 10)\n ninter = faiss.eval_intersection(I, gt_I)\n print('(%d, %s, %d): %d, ' % (\n mt, by_residual, index.polysemous_ht, ninter))\n\n # polysemous behaves bizarrely on ARM\n assert (ninter >= self.ref_results[\n mt, by_residual, index.polysemous_ht] - 4)\n\n # also test range search\n\n if mt == faiss.METRIC_INNER_PRODUCT:\n radius = float(D[:, -1].max())\n else:\n radius = float(D[:, -1].min())\n print('radius', radius)\n\n lims, D3, I3 = index.range_search(xq, radius)\n ntot = ndiff = 0\n for i in range(len(xq)):\n l0, l1 = lims[i], lims[i + 1]\n Inew = set(I3[l0:l1])\n if mt == faiss.METRIC_INNER_PRODUCT:\n mask = D2[i] > radius\n 
else:\n mask = D2[i] < radius\n Iref = set(I2[i, mask])\n ndiff += len(Inew ^ Iref)\n ntot += len(Iref)\n print('ndiff %d / %d' % (ndiff, ntot))\n assert ndiff < ntot * 0.02\n\n def test_IVFPQ_non8bit(self):\n d = 16\n xt, xb, xq = get_dataset_2(d, 10000, 2000, 200)\n nlist = 64\n\n gt_index = faiss.IndexFlat(d)\n gt_index.add(xb)\n gt_D, gt_I = gt_index.search(xq, 10)\n\n quantizer = faiss.IndexFlat(d)\n ninter = {}\n for v in '2x8', '8x2':\n if v == '8x2':\n index = faiss.IndexIVFPQ(\n quantizer, d, nlist, 2, 8)\n else:\n index = faiss.IndexIVFPQ(\n quantizer, d, nlist, 8, 2)\n index.train(xt)\n index.add(xb)\n index.npobe = 16\n\n D, I = index.search(xq, 10)\n ninter[v] = faiss.eval_intersection(I, gt_I)\n print('ninter=', ninter)\n # this should be the case but we don't observe\n # that... Probavly too few test points\n # assert ninter['2x8'] > ninter['8x2']\n # ref numbers on 2019-11-02\n assert abs(ninter['2x8'] - 458) < 4\n assert abs(ninter['8x2'] - 465) < 4\n\n\nclass TestFlat1D(unittest.TestCase):\n\n def test_flat_1d(self):\n rs = np.random.RandomState(123545)\n k = 10\n xb = rs.uniform(size=(100, 1)).astype('float32')\n # make sure to test below and above\n xq = rs.uniform(size=(1000, 1)).astype('float32') * 1.1 - 0.05\n\n ref = faiss.IndexFlatL2(1)\n ref.add(xb)\n ref_D, ref_I = ref.search(xq, k)\n\n new = faiss.IndexFlat1D()\n new.add(xb)\n\n new_D, new_I = new.search(xq, 10)\n\n ndiff = (np.abs(ref_I - new_I) != 0).sum()\n\n assert(ndiff < 100)\n new_D = new_D ** 2\n max_diff_D = np.abs(ref_D - new_D).max()\n assert(max_diff_D < 1e-5)\n\n\nclass OPQRelativeAccuracy(unittest.TestCase):\n # translated from test_opq.lua\n\n def test_OPQ(self):\n\n M = 4\n\n ev = Randu10kUnbalanced()\n d = ev.d\n index = faiss.IndexPQ(d, M, 8)\n\n res = ev.launch('PQ', index)\n e_pq = ev.evalres(res)\n\n index_pq = faiss.IndexPQ(d, M, 8)\n opq_matrix = faiss.OPQMatrix(d, M)\n # opq_matrix.verbose = true\n opq_matrix.niter = 10\n opq_matrix.niter_pq = 4\n index = faiss.IndexPreTransform(opq_matrix, index_pq)\n\n res = ev.launch('OPQ', index)\n e_opq = ev.evalres(res)\n\n print('e_pq=%s' % e_pq)\n print('e_opq=%s' % e_opq)\n\n # verify that OPQ better than PQ\n for r in 1, 10, 100:\n assert(e_opq[r] > e_pq[r])\n\n def test_OIVFPQ(self):\n # Parameters inverted indexes\n ncentroids = 50\n M = 4\n\n ev = Randu10kUnbalanced()\n d = ev.d\n quantizer = faiss.IndexFlatL2(d)\n index = faiss.IndexIVFPQ(quantizer, d, ncentroids, M, 8)\n index.nprobe = 5\n\n res = ev.launch('IVFPQ', index)\n e_ivfpq = ev.evalres(res)\n\n quantizer = faiss.IndexFlatL2(d)\n index_ivfpq = faiss.IndexIVFPQ(quantizer, d, ncentroids, M, 8)\n index_ivfpq.nprobe = 5\n opq_matrix = faiss.OPQMatrix(d, M)\n opq_matrix.niter = 10\n index = faiss.IndexPreTransform(opq_matrix, index_ivfpq)\n\n res = ev.launch('O+IVFPQ', index)\n e_oivfpq = ev.evalres(res)\n\n # verify same on OIVFPQ\n for r in 1, 10, 100:\n print(e_oivfpq[r], e_ivfpq[r])\n assert(e_oivfpq[r] >= e_ivfpq[r])\n\n\nclass TestRoundoff(unittest.TestCase):\n\n def test_roundoff(self):\n # params that force use of BLAS implementation\n nb = 100\n nq = 25\n d = 4\n xb = np.zeros((nb, d), dtype='float32')\n\n xb[:, 0] = np.arange(nb) + 12345\n xq = xb[:nq] + 0.3\n\n index = faiss.IndexFlat(d)\n index.add(xb)\n\n D, I = index.search(xq, 1)\n\n # this does not work\n assert not np.all(I.ravel() == np.arange(nq))\n\n index = faiss.IndexPreTransform(\n faiss.CenteringTransform(d),\n faiss.IndexFlat(d))\n\n index.train(xb)\n index.add(xb)\n\n D, I = index.search(xq, 1)\n\n # this 
works\n assert np.all(I.ravel() == np.arange(nq))\n\n\nclass TestSpectralHash(unittest.TestCase):\n\n # run on 2019-04-02\n ref_results = {\n (32, 'global', 10): 505,\n (32, 'centroid', 10): 524,\n (32, 'centroid_half', 10): 21,\n (32, 'median', 10): 510,\n (32, 'global', 1): 8,\n (32, 'centroid', 1): 20,\n (32, 'centroid_half', 1): 26,\n (32, 'median', 1): 14,\n (64, 'global', 10): 768,\n (64, 'centroid', 10): 767,\n (64, 'centroid_half', 10): 21,\n (64, 'median', 10): 765,\n (64, 'global', 1): 28,\n (64, 'centroid', 1): 21,\n (64, 'centroid_half', 1): 20,\n (64, 'median', 1): 29,\n (128, 'global', 10): 968,\n (128, 'centroid', 10): 945,\n (128, 'centroid_half', 10): 21,\n (128, 'median', 10): 958,\n (128, 'global', 1): 271,\n (128, 'centroid', 1): 279,\n (128, 'centroid_half', 1): 171,\n (128, 'median', 1): 253,\n }\n\n def test_sh(self):\n d = 32\n xt, xb, xq = get_dataset_2(d, 2000, 1000, 200)\n nlist, nprobe = 1, 1\n\n gt_index = faiss.IndexFlatL2(d)\n gt_index.add(xb)\n gt_D, gt_I = gt_index.search(xq, 10)\n\n for nbit in 32, 64, 128:\n quantizer = faiss.IndexFlatL2(d)\n\n index_lsh = faiss.IndexLSH(d, nbit, True)\n index_lsh.add(xb)\n D, I = index_lsh.search(xq, 10)\n ninter = faiss.eval_intersection(I, gt_I)\n\n print('LSH baseline: %d' % ninter)\n\n for period in 10.0, 1.0:\n\n for tt in 'global centroid centroid_half median'.split():\n index = faiss.IndexIVFSpectralHash(quantizer, d, nlist,\n nbit, period)\n index.nprobe = nprobe\n index.threshold_type = getattr(\n faiss.IndexIVFSpectralHash,\n 'Thresh_' + tt\n )\n\n index.train(xt)\n index.add(xb)\n D, I = index.search(xq, 10)\n\n ninter = faiss.eval_intersection(I, gt_I)\n key = (nbit, tt, period)\n\n print('(%d, %s, %g): %d, ' % (nbit, repr(tt), period, ninter))\n assert abs(ninter - self.ref_results[key]) <= 12\n\n\nclass TestRefine(unittest.TestCase):\n\n def do_test(self, metric):\n d = 32\n xt, xb, xq = get_dataset_2(d, 2000, 1000, 200)\n index1 = faiss.index_factory(d, \"PQ4x4np\", metric)\n Dref, Iref = faiss.knn(xq, xb, 10, metric)\n\n index1.train(xt)\n index1.add(xb)\n\n D1, I1 = index1.search(xq, 100)\n\n recall1 = (I1 == Iref[:, :1]).sum()\n\n # add refine index on top\n index_flat = faiss.IndexFlat(d, metric)\n index_flat.add(xb)\n\n index2 = faiss.IndexRefine(index1, index_flat)\n index2.k_factor = 10.0\n D2, I2 = index2.search(xq, 10)\n\n # check distance is computed properly\n for i in range(len(xq)):\n x1 = xq[i]\n x2 = xb[I2[i, 5]]\n if metric == faiss.METRIC_L2:\n dref = ((x1 - x2) ** 2).sum()\n else:\n dref = np.dot(x1, x2)\n np.testing.assert_almost_equal(dref, D2[i, 5], decimal=5)\n\n # check that with refinement, the recall@10 is the same as\n # the original recall@100\n recall2 = (I2 == Iref[:, :1]).sum()\n # print(\"recalls\", recall1, recall2)\n self.assertEqual(recall1, recall2)\n\n def test_IP(self):\n self.do_test(faiss.METRIC_INNER_PRODUCT)\n\n def test_L2(self):\n self.do_test(faiss.METRIC_L2)\n"
]
| [
[
"numpy.dot",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.testing.assert_almost_equal",
"numpy.testing.assert_array_equal",
"numpy.arange",
"numpy.sqrt",
"numpy.abs",
"numpy.all",
"numpy.floor"
]
]
|
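The faiss test file above repeats one pattern throughout: compute exact neighbors with a flat index, then measure how much of that ground truth an approximate index recovers. A stripped-down sketch of that pattern follows (random data; the intersection ratio printed here is illustrative, not one of the reference numbers hard-coded in the tests).

```python
import numpy as np
import faiss

d, nb, nq, k = 32, 1000, 100, 10
rs = np.random.RandomState(0)
xb = rs.rand(nb, d).astype('float32')
xq = rs.rand(nq, d).astype('float32')

gt = faiss.IndexFlatL2(d)              # exact search provides ground truth
gt.add(xb)
_, gt_I = gt.search(xq, k)

quantizer = faiss.IndexFlatL2(d)
index = faiss.IndexIVFFlat(quantizer, d, 16)   # 16 inverted lists
index.train(xb)
index.add(xb)
index.nprobe = 4                       # probe more lists -> higher recall
_, I = index.search(xq, k)

# same quantity the tests obtain from faiss.eval_intersection(I, gt_I)
ninter = sum(len(set(I[i]) & set(gt_I[i])) for i in range(nq))
print('intersection ratio: %.3f' % (ninter / (nq * k)))
```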
monish33/Continuous-Sign-Language-Recognition | [
"66f137c7066cbf738c7859e00d34a2ca248208a3"
]
| [
"mm.py"
]
| [
"#!/usr/bin/env python3\n\n# Continuous Sign Language Recognition\n# Created by Monish Murale, https://github.com/monish33\n\nimport os\nfrom os.path import join, exists\nfrom tqdm import tqdm\nimport hand as h\nimport find_frames as ff\nimport numpy as np\nimport cv2\nimport pickle\nimport argparse\n\nhc = []\n# assumption only first @default_fps frames are important\ndefault_fps = fps = 183 # most test cases fall under 183 frames\nfull_load = False # Process only upto @default_fps frames in a video\n# Uncomment the below line if you want to process every frame (Might cause huge runtimes)\n# full_load = True # Process every frame in a video\n\n# Perform Auto Canny in automatic mode\ndef auto_canny(image, sigma=0.33):\n # compute the median of the single channel pixel intensities\n v = np.median(image)\n # apply automatic Canny edge detection using the computed median\n lower = int(max(0, (1.0 - sigma) * v))\n upper = int(min(255, (1.0 + sigma) * v))\n edged = cv2.Canny(image, lower, upper)\n return(edged)\n # return the edged image\n\n\n# Extract Edges from Hand Frames\ndef convertToEdge(gesture_folder, target_folder, swap_):\n rP = os.getcwd()\n mData = os.path.abspath(target_folder)\n if not exists(mData):\n os.makedirs(mData)\n gesture_folder = os.path.abspath(gesture_folder)\n os.chdir(gesture_folder)\n gestures = os.listdir(os.getcwd())\n print(\"Source Directory containing gestures: %s\" % (gesture_folder))\n print(\"Destination Directory containing frames: %s\\n\" % (mData))\n for gesture in tqdm(gestures, unit='actions', ascii=True):\n gesture_path = os.path.join(gesture_folder, gesture)\n os.chdir(gesture_path)\n gesture_frames_path = os.path.join(mData, gesture)\n if not os.path.exists(gesture_frames_path):\n os.makedirs(gesture_frames_path)\n framedir = os.listdir(os.getcwd())\n for imagePath in framedir:\n if(imagePath.endswith(\".jpeg\") or imagePath.endswith(\".jpg\")):\n fName = (os.getcwd()+ \"\\\\\" +imagePath)\n fName = fName.replace(swap_,target_folder)\n print(\"Extracting edges in \",fName)\n # load the image, convert it to grayscale, and blur it slightly\n image = cv2.imread(imagePath)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(gray, (3, 3), 0)\n # apply Canny edge detection using a wide threshold, tight\n # threshold, and automatically determined threshold\n wide = cv2.Canny(blurred, 10, 200)\n tight = cv2.Canny(blurred, 225, 250)\n auto = auto_canny(blurred)\n # show the images\n # fName = mData + \"\\\\\" + imagePath\n # print(\"Storing: \", fName)\n cv2.imwrite(fName , wide)\n #cv2.imshow(\"Original\", image)\n #cv2.imshow(\"Edges\", np.hstack([wide, tight, auto]))\n\n\n# Extract Hands from Frames\ndef convertToHand(gesture_folder, target_folder):\n rP = os.getcwd()\n mData = os.path.abspath(target_folder)\n if not exists(mData):\n os.makedirs(mData)\n gesture_folder = os.path.abspath(gesture_folder)\n os.chdir(gesture_folder)\n gestures = os.listdir(os.getcwd())\n print(\"Source Directory containing gestures: %s\" % (gesture_folder))\n print(\"Destination Directory containing frames: %s\\n\" % (mData))\n for gesture in tqdm(gestures, unit='actions', ascii=True):\n #gesture_path = os.path.join(gesture_folder, gesture)\n gesture_path = gesture_folder\n #print(gesture_folder)\n os.chdir(gesture_path)\n gesture_frames_path = os.path.join(mData, gesture)\n if not os.path.exists(gesture_frames_path):\n os.makedirs(gesture_frames_path)\n videos = os.listdir(os.getcwd())\n videos = [video for video in videos 
if(os.path.isfile(video))]\n        for video in tqdm(videos, unit='videos', ascii=True):\n            name = os.path.abspath(video)\n            cap = cv2.VideoCapture(name)  # capturing input video\n            frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n            lastFrame = None\n            os.chdir(gesture_frames_path)\n            count = 0\n            if full_load:\n                fps = frameCount  # process every frame in the video\n            else:\n                fps = ff.find_frames(name)  # frame budget for this video\n            # assumption: only the first @fps frames are important\n            while count < fps:\n                ret, f = cap.read()  # extract frame\n                if ret is False:\n                    break\n                fName = os.path.splitext(video)[0]\n                fName = fName + \"_frame_\" + str(count) + \".jpeg\"\n                hc.append([join(gesture_frames_path, fName), gesture, frameCount])\n\n                if not os.path.exists(fName):\n                    f = h.handsegment(f)\n                    lastFrame = f\n                    cv2.imwrite(fName, f)\n\n                if cv2.waitKey(1) & 0xFF == ord('q'):\n                    break\n                count += 1\n            # repeat the last frame until the @fps count is reached\n            # (pads short videos when full_load is False)\n            while count < fps:\n                fName = os.path.splitext(video)[0]\n                fName = fName + \"_frame_\" + str(count) + \".jpeg\"\n                hc.append([join(gesture_frames_path, fName), gesture, frameCount])\n                if not os.path.exists(fName):\n                    cv2.imwrite(fName, lastFrame)\n                count += 1\n            os.chdir(gesture_path)\n            cap.release()\n        cv2.destroyAllWindows()\n    os.chdir(rP)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Extract Individual Frames from gesture videos.')\n    parser.add_argument('gesture_folder', help='folders of videos of different gestures.')\n    parser.add_argument('target_folder', help='folder where extracted frames should be kept.')\n    parser.add_argument('final_folder', help='folder where the final edge frames should be kept.')\n    #parser.add_argument('sum_folder', help='folder where the summated frames should be kept.')\n\n    args = parser.parse_args()\n    convertToHand(args.gesture_folder, args.target_folder)\n    convertToEdge(args.target_folder, args.final_folder, args.target_folder)\n    #summateEdge(args.final_folder, args.sum_folder, args.target_folder)\n"
]
| [
[
"numpy.median"
]
]
|
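`mm.py` above derives its Canny thresholds from the median pixel intensity rather than hard-coding them. The snippet below isolates that `auto_canny` step on a synthetic frame so it can be run without the gesture videos; only OpenCV and NumPy are assumed.

```python
import numpy as np
import cv2

def auto_canny(image, sigma=0.33):
    v = np.median(image)                      # median pixel intensity
    lower = int(max(0, (1.0 - sigma) * v))    # thresholds bracket the median
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)

# synthetic grayscale frame: a bright square on a mid-gray background
img = np.full((64, 64), 50, dtype=np.uint8)
img[16:48, 16:48] = 200
edges = auto_canny(cv2.GaussianBlur(img, (3, 3), 0))
print('edge pixels found:', int((edges > 0).sum()))
```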
AbuBakkar32/ML-DL-NLP-TP-FE-MP | [
"2525b6b32fc1876e65643b8c221ffda591981623"
]
| [
"Dimensionality/pca.py"
]
| [
"import pandas as pd # pandas is a dataframe library\nimport matplotlib.pyplot as plt\n\n\n#Read the data\ndf = pd.read_csv(\"pima-data.csv\")\n\n#Check the Correlation\n#df.corr()\n#Delete the correlated feature\ndel df['skin']\n\n#Data Molding\ndiabetes_map = {True : 1, False : 0}\ndf['diabetes'] = df['diabetes'].map(diabetes_map)\n\n#Splitting the data\nfrom sklearn.model_selection import train_test_split\n\n#This will copy all columns from 0 to 7(8 - second place counts from 1)\nX = df.iloc[:, 0:8]\ny = df.iloc[:, 8]\n\nsplit_test_size = 0.30\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split_test_size, random_state=42) \n\n#Imputing\nfrom sklearn.impute import SimpleImputer \n\n#Impute with mean all 0 readings\nfill_0 = SimpleImputer(missing_values=0, strategy=\"mean\")\n\nX_train = fill_0.fit_transform(X_train)\nX_test = fill_0.transform(X_test)\n\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nX_train = ss_X.fit_transform(X_train)\nX_test = ss_X.transform(X_test)\n\n#Applying PCA\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=2)\nX_train = pca.fit_transform(X_train)\nX_test = pca.transform(X_test)\nexplain_var = pca.explained_variance_ratio_\nprint(explain_var)\n\nfrom sklearn.naive_bayes import GaussianNB\n\nnb_model = GaussianNB()\nnb_model.fit(X_train, y_train.ravel())\n\nnb_predict_train = nb_model.predict(X_train)\n\nfrom sklearn import metrics\nprint(metrics.accuracy_score(y_train, nb_predict_train))\n\n\n\nnb_predict_test = nb_model.predict(X_test)\n\nfrom sklearn import metrics\nprint(metrics.accuracy_score(y_test, nb_predict_test))\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, nb_predict_test)\nprint(cm)\n\n#import numpy as np\n#plt.plot(np.cumsum(pca.explained_variance_ratio_))\n#plt.xlabel('Number of components')\n#plt.ylabel('cumulative explained variance')\n\nfrom matplotlib.colors import ListedColormap\nimport numpy as np\n#Define Variables\nclf = nb_model\nh = 0.01\nX_plot, z_plot = X_test, y_test \n\n#Standard Template to draw graph\nx_min, x_max = X_plot[:, 0].min() - 1, X_plot[:, 0].max() + 1\ny_min, y_max = X_plot[:, 1].min() - 1, X_plot[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n\n# Plot the decision boundary. For that, we will assign a color to each\n# point in the mesh\nZ = clf.predict(np.array([xx.ravel(), yy.ravel()]).T)\nZ = Z.reshape(xx.shape)\nplt.contourf(xx, yy, Z,\n alpha = 0.7, cmap = ListedColormap(('blue', 'red')))\n\n\nfor i, j in enumerate(np.unique(z_plot)):\n plt.scatter(X_plot[z_plot == j, 0], X_plot[z_plot == j, 1],\n c = ['blue', 'red'][i], cmap = ListedColormap(('blue', 'red')), label = j)\n #X[:, 0], X[:, 1] \nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max())\nplt.title('Naive Bayes with PCA')\nplt.xlabel('PC1')\nplt.ylabel('PC2')\nplt.legend()\n\nplt.show()\n\n\n\n\n\n\n\n\n"
]
| [
[
"sklearn.impute.SimpleImputer",
"sklearn.metrics.confusion_matrix",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"sklearn.naive_bayes.GaussianNB",
"matplotlib.colors.ListedColormap",
"sklearn.metrics.accuracy_score",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.show",
"pandas.read_csv",
"sklearn.decomposition.PCA",
"numpy.unique"
]
]
|
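`pca.py` above is, in essence, standardize -> project to two principal components -> Gaussian Naive Bayes. Since `pima-data.csv` is not bundled here, the sketch below reproduces the same chain on synthetic data using scikit-learn's pipeline helper, which also removes the need to transform the train and test splits by hand.

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

# synthetic stand-in for the 8-feature Pima table
X, y = make_classification(n_samples=500, n_features=8, random_state=42)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.30,
                                          random_state=42)

clf = make_pipeline(StandardScaler(), PCA(n_components=2), GaussianNB())
clf.fit(X_tr, y_tr)
print('explained variance:',
      clf.named_steps['pca'].explained_variance_ratio_)
print('test accuracy: %.3f' % accuracy_score(y_te, clf.predict(X_te)))
```

Fitting the scaler and PCA inside the pipeline guarantees they are learned on the training split only, the same discipline the original script enforces manually with `fit_transform`/`transform`.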
caibobit/monte-carlo-tree-search | [
"1657c2fb1b182c0802e278a7d40467349998a91f"
]
| [
"tests/test_result.py"
]
| [
"import numpy as np\n\nfrom mctspy.tree.nodes import TwoPlayersGameMonteCarloTreeSearchNode\nfrom mctspy.tree.search import MonteCarloTreeSearch\nfrom mctspy.games.examples.tictactoe import TicTacToeGameState\n\n\ndef test_tic_tac_toe_best_action():\n state = np.zeros((10, 10))\n initial_board_state = TicTacToeGameState(state=state, next_to_move=1)\n\n root = TwoPlayersGameMonteCarloTreeSearchNode(state=initial_board_state,\n parent=None)\n mcts = MonteCarloTreeSearch(root)\n return mcts.best_action(1000)\n\n\nif __name__ == \"__main__\":\n best_node = test_tic_tac_toe_best_action()\n for node in best_node.children:\n print(node.q)\n print(node.state.runningx, node.state.runningy)\n"
]
| [
[
"numpy.zeros"
]
]
|
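A usage sketch of the same search on a conventional 3x3 board rather than the 10x10 grid used in the test; it assumes the `mctspy` package from the test above is installed, and it inspects the same per-node statistics (`q`, the total reward, and `n`, the visit count) that the test prints:

```python
import numpy as np
from mctspy.tree.nodes import TwoPlayersGameMonteCarloTreeSearchNode
from mctspy.tree.search import MonteCarloTreeSearch
from mctspy.games.examples.tictactoe import TicTacToeGameState

# empty 3x3 board, player 1 to move
state = TicTacToeGameState(state=np.zeros((3, 3)), next_to_move=1)
root = TwoPlayersGameMonteCarloTreeSearchNode(state=state, parent=None)

best_child = MonteCarloTreeSearch(root).best_action(100)
print(best_child.q, best_child.n)  # reward and visit count of the chosen move
```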
vusd/vizer-unmix | [
"436ab0ed171995371740771898d15cb9bcaf2c35"
]
| [
"eval.py"
]
| [
"import argparse\nimport musdb\nimport museval\nimport test\nimport multiprocessing\nimport functools\nfrom pathlib import Path\nimport torch\nimport tqdm\n\n\ndef separate_and_evaluate(\n track,\n targets,\n model_name,\n niter,\n alpha,\n softmask,\n output_dir,\n eval_dir,\n device='cpu'\n):\n estimates = test.separate(\n audio=track.audio,\n targets=targets,\n model_name=model_name,\n niter=niter,\n alpha=alpha,\n softmask=softmask,\n device=device\n )\n if output_dir:\n mus.save_estimates(estimates, track, output_dir)\n\n scores = museval.eval_mus_track(\n track, estimates, output_dir=eval_dir\n )\n return scores\n\n\nif __name__ == '__main__':\n # Training settings\n parser = argparse.ArgumentParser(\n description='MUSDB18 Evaluation',\n add_help=False\n )\n\n parser.add_argument(\n '--targets',\n nargs='+',\n default=['vocals', 'drums', 'bass', 'other'],\n type=str,\n help='provide targets to be processed. \\\n If none, all available targets will be computed'\n )\n\n parser.add_argument(\n '--model',\n default='umxhq',\n type=str,\n help='path to mode base directory of pretrained models'\n )\n\n parser.add_argument(\n '--outdir',\n type=str,\n help='Results path where audio evaluation results are stored'\n )\n\n parser.add_argument(\n '--evaldir',\n type=str,\n help='Results path for museval estimates'\n )\n\n parser.add_argument(\n '--root',\n type=str,\n help='Path to MUSDB18'\n )\n\n parser.add_argument(\n '--subset',\n type=str,\n default='test',\n help='MUSDB subset (`train`/`test`)'\n )\n\n parser.add_argument(\n '--cores',\n type=int,\n default=1\n )\n\n parser.add_argument(\n '--no-cuda',\n action='store_true',\n default=False,\n help='disables CUDA inference'\n )\n\n parser.add_argument(\n '--is-wav',\n action='store_true', default=False,\n help='flags wav version of the dataset'\n )\n\n args, _ = parser.parse_known_args()\n args = test.inference_args(parser, args)\n\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n mus = musdb.DB(\n root=args.root,\n download=args.root is None,\n subsets=args.subset,\n is_wav=args.is_wav\n )\n if args.cores > 1:\n pool = multiprocessing.Pool(args.cores)\n results = museval.EvalStore()\n scores_list = list(\n pool.imap_unordered(\n func=functools.partial(\n separate_and_evaluate,\n targets=args.targets,\n model_name=args.model,\n niter=args.niter,\n alpha=args.alpha,\n softmask=args.softmask,\n output_dir=args.outdir,\n eval_dir=args.evaldir,\n device=device\n ),\n iterable=mus.tracks[:2],\n chunksize=1\n )\n )\n pool.close()\n pool.join()\n for scores in scores_list:\n results.add_track(scores)\n\n else:\n results = museval.EvalStore()\n for track in tqdm.tqdm(mus.tracks):\n scores = separate_and_evaluate(\n track,\n targets=args.targets,\n model_name=args.model,\n niter=args.niter,\n alpha=args.alpha,\n softmask=args.softmask,\n output_dir=args.outdir,\n eval_dir=args.evaldir,\n device=device\n )\n results.add_track(scores)\n\n print(results)\n method = museval.MethodStore()\n method.add_evalstore(results, args.model)\n method.save(args.model + '.pandas')\n"
]
| [
[
"torch.device",
"torch.cuda.is_available"
]
]
|
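A sketch of running the evaluation above serially on a single track; it assumes the same `test.separate` helper from the repository is importable and uses the 7-second MUSDB18 preview set that `musdb` can download automatically. The `niter`/`alpha` values are illustrative:

```python
import musdb
import museval

mus = musdb.DB(download=True)  # 7-second preview tracks
results = museval.EvalStore()

track = mus.tracks[0]
# `separate_and_evaluate` is the function defined in eval.py above;
# output_dir=None skips saving estimates, so the global `mus` is not needed
scores = separate_and_evaluate(
    track, targets=['vocals'], model_name='umxhq', niter=1,
    alpha=1.0, softmask=False, output_dir=None, eval_dir=None,
)
results.add_track(scores)
print(results)
```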
zbwxp/confidence-aware-learning | [
"30a0c8a5f4e7929e8760cede05988093a2fd585f"
]
| [
"metrics.py"
]
| [
"import numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom sklearn import metrics\r\n\r\ndef calc_metrics(loader, label, label_onehot, model, criterion):\r\n acc, softmax, correct, logit = get_metric_values(loader, model, criterion)\r\n # aurc, eaurc\r\n aurc, eaurc = calc_aurc_eaurc(softmax, correct)\r\n # fpr, aupr\r\n aupr, fpr = calc_fpr_aupr(softmax, correct)\r\n # calibration measure ece , mce, rmsce\r\n ece = calc_ece(softmax, label, bins=15)\r\n # brier, nll\r\n nll, brier = calc_nll_brier(softmax, logit, label, label_onehot)\r\n\r\n return acc, aurc, eaurc, aupr, fpr, ece, nll, brier\r\n\r\n# AURC, EAURC\r\ndef calc_aurc_eaurc(softmax, correct):\r\n softmax = np.array(softmax)\r\n correctness = np.array(correct)\r\n softmax_max = np.max(softmax, 1)\r\n\r\n sort_values = sorted(zip(softmax_max[:], correctness[:]), key=lambda x:x[0], reverse=True)\r\n sort_softmax_max, sort_correctness = zip(*sort_values)\r\n risk_li, coverage_li = coverage_risk(sort_softmax_max, sort_correctness)\r\n aurc, eaurc = aurc_eaurc(risk_li)\r\n\r\n return aurc, eaurc\r\n\r\n# AUPR ERROR\r\ndef calc_fpr_aupr(softmax, correct):\r\n softmax = np.array(softmax)\r\n correctness = np.array(correct)\r\n softmax_max = np.max(softmax, 1)\r\n\r\n fpr, tpr, thresholds = metrics.roc_curve(correctness, softmax_max)\r\n idx_tpr_95 = np.argmin(np.abs(tpr - 0.95))\r\n fpr_in_tpr_95 = fpr[idx_tpr_95]\r\n\r\n aupr_err = metrics.average_precision_score(-1 * correctness + 1, -1 * softmax_max)\r\n\r\n print(\"AUPR {0:.2f}\".format(aupr_err*100))\r\n print('FPR {0:.2f}'.format(fpr_in_tpr_95*100))\r\n\r\n return aupr_err, fpr_in_tpr_95\r\n\r\n# ECE\r\ndef calc_ece(softmax, label, bins=15):\r\n bin_boundaries = torch.linspace(0, 1, bins + 1)\r\n bin_lowers = bin_boundaries[:-1]\r\n bin_uppers = bin_boundaries[1:]\r\n\r\n softmax = torch.tensor(softmax)\r\n labels = torch.tensor(label)\r\n\r\n softmax_max, predictions = torch.max(softmax, 1)\r\n correctness = predictions.eq(labels)\r\n\r\n ece = torch.zeros(1)\r\n\r\n for bin_lower, bin_upper in zip(bin_lowers, bin_uppers):\r\n in_bin = softmax_max.gt(bin_lower.item()) * softmax_max.le(bin_upper.item())\r\n prop_in_bin = in_bin.float().mean()\r\n\r\n if prop_in_bin.item() > 0.0:\r\n accuracy_in_bin = correctness[in_bin].float().mean()\r\n avg_confidence_in_bin = softmax_max[in_bin].mean()\r\n\r\n ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin\r\n\r\n print(\"ECE {0:.2f} \".format(ece.item()*100))\r\n\r\n return ece.item()\r\n\r\n# NLL & Brier Score\r\ndef calc_nll_brier(softmax, logit, label, label_onehot):\r\n brier_score = np.mean(np.sum((softmax - label_onehot) ** 2, axis=1))\r\n\r\n logit = torch.tensor(logit, dtype=torch.float)\r\n label = torch.tensor(label, dtype=torch.int)\r\n logsoftmax = torch.nn.LogSoftmax(dim=1)\r\n\r\n log_softmax = logsoftmax(logit)\r\n nll = calc_nll(log_softmax, label)\r\n\r\n print(\"NLL {0:.2f} \".format(nll.item()*10))\r\n print('Brier {0:.2f}'.format(brier_score*100))\r\n\r\n return nll.item(), brier_score\r\n\r\n# Calc NLL\r\ndef calc_nll(log_softmax, label):\r\n out = torch.zeros_like(label, dtype=torch.float)\r\n for i in range(len(label)):\r\n out[i] = log_softmax[i][label[i]]\r\n\r\n return -out.sum()/len(out)\r\n\r\n# Calc coverage, risk\r\ndef coverage_risk(confidence, correctness):\r\n risk_list = []\r\n coverage_list = []\r\n risk = 0\r\n for i in range(len(confidence)):\r\n coverage = (i + 1) / len(confidence)\r\n coverage_list.append(coverage)\r\n\r\n if correctness[i] == 0:\r\n risk += 
1\r\n\r\n risk_list.append(risk / (i + 1))\r\n\r\n return risk_list, coverage_list\r\n\r\n# Calc aurc, eaurc\r\ndef aurc_eaurc(risk_list):\r\n r = risk_list[-1]\r\n risk_coverage_curve_area = 0\r\n optimal_risk_area = r + (1 - r) * np.log(1 - r)\r\n for risk_value in risk_list:\r\n risk_coverage_curve_area += risk_value * (1 / len(risk_list))\r\n\r\n aurc = risk_coverage_curve_area\r\n eaurc = risk_coverage_curve_area - optimal_risk_area\r\n\r\n print(\"AURC {0:.2f}\".format(aurc*1000))\r\n print(\"EAURC {0:.2f}\".format(eaurc*1000))\r\n\r\n return aurc, eaurc\r\n\r\n# Get softmax, logit\r\ndef get_metric_values(loader, model, criterion):\r\n model.eval()\r\n with torch.no_grad():\r\n total_loss = 0\r\n total_acc = 0\r\n accuracy = 0\r\n\r\n list_softmax = []\r\n list_correct = []\r\n list_logit = []\r\n\r\n for input, target, idx in loader:\r\n input = input.cuda()\r\n target = target.cuda()\r\n\r\n output = model(input)\r\n loss = criterion(output, target).cuda()\r\n\r\n total_loss += loss.mean().item()\r\n pred = output.data.max(1, keepdim=True)[1]\r\n\r\n total_acc += pred.eq(target.data.view_as(pred)).sum()\r\n\r\n for i in output:\r\n list_logit.append(i.cpu().data.numpy())\r\n\r\n list_softmax.extend(F.softmax(output).cpu().data.numpy())\r\n\r\n for j in range(len(pred)):\r\n if pred[j] == target[j]:\r\n accuracy += 1\r\n cor = 1\r\n else:\r\n cor = 0\r\n list_correct.append(cor)\r\n\r\n total_loss /= len(loader)\r\n total_acc = 100. * total_acc / len(loader.dataset)\r\n\r\n print('Accuracy {:.2f}'.format(total_acc))\r\n\r\n return total_acc.item(), list_softmax, list_correct, list_logit\r\n"
]
| [
[
"numpy.max",
"torch.zeros",
"numpy.array",
"torch.nn.LogSoftmax",
"numpy.log",
"torch.max",
"numpy.sum",
"torch.no_grad",
"torch.linspace",
"torch.abs",
"sklearn.metrics.average_precision_score",
"torch.tensor",
"numpy.abs",
"torch.nn.functional.softmax",
"torch.zeros_like",
"sklearn.metrics.roc_curve"
]
]
|
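`calc_ece` above can be exercised stand-alone on a synthetic batch; a small sketch, assuming the file is importable as `metrics`. With three correct predictions at confidences 0.9, 0.8 and 0.7 and five bins, the confidence/accuracy gaps are |0.75 - 1.0| * (2/3) + |0.9 - 1.0| * (1/3) = 0.2:

```python
import numpy as np
from metrics import calc_ece  # assumes metrics.py above is on the path

softmax = np.array([[0.9, 0.1],
                    [0.8, 0.2],
                    [0.3, 0.7]])
labels = np.array([0, 0, 1])   # all three predictions are correct

ece = calc_ece(softmax, labels, bins=5)
print(ece)  # ~0.2: the model is over-cautious despite being always right
```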
swapnilmastekar/ga-learner-dsmp-repo | [
"6442f75d11dc0662fff2a4f36cc1840a3903fc16"
]
| [
"Feature-Selection/Forest-Type-Cover-Prediction/code.py"
]
| [
"# --------------\nimport pandas as pd\nfrom sklearn import preprocessing\n\n#path : File path\n\n# Code starts here\n\n\n# read the dataset\ndataset=pd.read_csv(path)\n\n# look at the first five columns\ndataset.head()\n\n# Check if there's any column which is not useful and remove it like the column id\ndataset.drop(columns='Id',inplace=True)\n\n# check the statistical description\ndataset.describe()\n\n\n\n# --------------\n# We will visualize all the attributes using Violin Plot - a combination of box and density plots\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\n\n#names of all the attributes \ncols=dataset.columns\n\n#number of attributes (exclude target)\nsize=dataset.iloc[:,0:54].shape\n\n#x-axis has target attribute to distinguish between classes\nx=dataset['Cover_Type'].to_string()\n\n#y-axis shows values of an attribute\ny=dataset.iloc[:,0:54]\n\n\n#Plot violin for all attributes\nax = sns.violinplot(size,data=dataset)\n\n\n\n# --------------\nimport numpy\nupper_threshold = 0.5\nlower_threshold = -0.5\n\n\n# Code Starts Here\n# create a subset of dataframe with only the first 10 features\nsubset_train = dataset.iloc[:, :10]\n\n# Calculate the Pearson correlation\ndata_corr = subset_train.corr()\n\n# Plot a heatmap\nf, ax = plt.subplots(figsize = (10,8))\nsns.heatmap(data_corr,vmax=0.8,square=True);\n\n# List the correlation pairs\ncorrelation = data_corr.unstack().sort_values(kind='quicksort')\n\n# Select the highest correlation pairs using slicing\ncorr_var_list = correlation[((correlation>upper_threshold) | (correlation<lower_threshold)) & (correlation!=1)]\nprint(corr_var_list)\n# Code ends here\n# Code ends here\n\n\n\n\n# --------------\n#Import libraries\nimport numpy \nfrom sklearn import cross_validation\nfrom sklearn.preprocessing import StandardScaler\n\n# Identify the unnecessary columns and remove it \ndataset.drop(columns=['Soil_Type7', 'Soil_Type15'], inplace=True)\n\n\n# Scales are not the same for all variables. Hence, rescaling and standardization may be necessary for some a\n\nr,c = dataset.shape\nX = dataset.iloc[:,:-1]\nY = dataset.iloc[:,-1]\n\n# Scales are not the same for all variables. 
Hence, rescaling and standardization may be necessary for some algorithm to be applied on it.\nX_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size=0.2, random_state=0)\n\n\n\n#Standardized\n#Apply transform only for non-categorical data\nscaler = StandardScaler()\nX_train_temp = scaler.fit_transform(X_train.iloc[:,:10])\nX_test_temp = scaler.fit_transform(X_test.iloc[:,:10])\n\n#Concatenate non-categorical data and categorical\nX_train1 = numpy.concatenate((X_train_temp,X_train.iloc[:,10:c-1]),axis=1)\nX_test1 = numpy.concatenate((X_test_temp,X_test.iloc[:,10:c-1]),axis=1)\n\nscaled_features_train_df = pd.DataFrame(X_train1, index=X_train.index, columns=X_train.columns)\nscaled_features_test_df = pd.DataFrame(X_test1, index=X_test.index, columns=X_test.columns)\n\n\n# --------------\nfrom sklearn.feature_selection import SelectPercentile\nfrom sklearn.feature_selection import f_classif\nimport numpy as np\n\n# Write your solution here:\n\n\nskb = SelectPercentile(score_func=f_classif,percentile=90)\npredictors = skb.fit_transform(X_train1, Y_train)\nscores = list(skb.scores_)\n\nFeatures = scaled_features_train_df.columns\n\ndataframe = pd.DataFrame({'Features':Features,'Scores':scores})\n\ndataframe=dataframe.sort_values(by='Scores',ascending=False)\n\ntop_k_predictors = list(dataframe['Features'][:predictors.shape[1]])\n\nprint(top_k_predictors)\n\n\n# --------------\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score\nclf = OneVsRestClassifier(LogisticRegression())\nclf1 = OneVsRestClassifier(LogisticRegression())\n\nmodel_fit_all_features = clf1.fit(X_train, Y_train)\n\npredictions_all_features = model_fit_all_features.predict(X_test)\n\nscore_all_features = accuracy_score(Y_test, predictions_all_features)\n\nprint(score_all_features)\n\nmodel_fit_top_features = clf.fit(scaled_features_train_df[top_k_predictors], Y_train)\n\npredictions_top_features = model_fit_top_features.predict(scaled_features_test_df[top_k_predictors])\n\nscore_top_features = accuracy_score(Y_test, predictions_top_features)\n\nprint(score_top_features)\n\n\n"
]
| [
[
"numpy.concatenate",
"sklearn.preprocessing.StandardScaler",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"sklearn.metrics.accuracy_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.feature_selection.SelectPercentile",
"pandas.read_csv",
"sklearn.cross_validation.train_test_split"
]
]
|
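A self-contained sketch of the SelectPercentile pattern used above, run on synthetic data since the script's `path` is supplied by the course platform:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectPercentile, f_classif

X, y = make_classification(n_samples=200, n_features=20,
                           n_informative=5, random_state=0)

skb = SelectPercentile(score_func=f_classif, percentile=25)
X_top = skb.fit_transform(X, y)

print(X_top.shape)  # roughly the top quarter of the 20 features survive
print(np.argsort(skb.scores_)[::-1][:5])  # indices of the strongest features
```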
ucb-bar/autophase | [
"0c2f6e3cdedf41da95a8c8166b740005e3d67e25"
]
| [
"gym-hls/gym_hls/envs/geomean.py"
]
| [
"import numpy as np\ndef geomean(iterable):\n \"\"\"\n Examples :\n >>> print(geomean([1, 3, 27]))\n 4.32674871092\n \n\t\t>>> print(geomean([1,9,5,6,6,7])\n 4.73989632394\n\n\n Args:\n iterable (iterable): This parameter can be a list, a tuple, a dictionary, .. etc any type of object that we can iterate through.\n Returns:\n\t\treturn the prod of all elements of the array to the power of (1/number of all \telements of array)\n\n \"\"\"\n\n a = np.array(iterable).astype(float)\n prod = a.prod()\n prod = -prod if prod < 0 else prod\n return prod**(1.0/len(a))\n\n"
]
| [
[
"numpy.array"
]
]
|
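A quick cross-check of `geomean` against the exp-mean-log identity, which holds for positive inputs; it assumes the file above is importable as `geomean`:

```python
import numpy as np
from geomean import geomean  # the module above

values = [1, 3, 27]
# for positive values, the geometric mean equals exp(mean(log(values)))
assert np.isclose(geomean(values), np.exp(np.mean(np.log(values))))
print(geomean(values))  # 4.3267... == 81 ** (1/3)
```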
ashleefv/covid19fibrosis | [
"3cdd4871708346d0a866340f64290b6a75302c89"
]
| [
"Analysis/mean_response_comparison_between_cases.py"
]
| [
"import glob\nfrom Butyrate.mcds.pyMCDS import pyMCDS\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.pyplot as plt # if you want to plot results\nimport random, pickle\nimport os\nimport seaborn as sns\nfrom pylab import array, linspace, subplots\n\n#path = [\"\" for _ in range(4)]\npath = [\"\" for _ in range(2)]\n\n\npath[0] = 'C:/Users/maislam4/OneDrive - University at Buffalo/Desktop/PhysiCell_R/Final_updated/replicate_51/plot/'\npath[1] = 'C:/Users/maislam4/OneDrive - University at Buffalo/Desktop/PhysiCell_R/Final_updated/replicate_61/plot/'\n#path[2] = 'C:/Users/maislam4/OneDrive - University at Buffalo/Desktop/PhysiCell_R/Final_updated/replicate_72/plot/'\n#path[3] = 'C:/Users/maislam4/OneDrive - University at Buffalo/Desktop/PhysiCell_R/Final_updated/replicate_71/plot/'\n\nplt.rcParams.update({'font.size': 15})\nfig1, ax1 = plt.subplots()\nfig2, ax2 = plt.subplots()\nfig3, ax3 = plt.subplots()\nfig4, ax4 = plt.subplots()\nfig5, ax5 = plt.subplots()\nfig6, ax6 = plt.subplots()\n\nfor i in range(len(path)):\n os.chdir(path[i])\n\n cell1 = np.array(pickle.load(open('cell1.p', 'rb')))\n cell2 = np.array(pickle.load(open('cell2.p', 'rb')))\n cell3 = np.array(pickle.load(open('cell3.p', 'rb')))\n cell4 = np.array(pickle.load(open('cell4.p', 'rb')))\n cell5 = np.array(pickle.load(open('cell5.p', 'rb')))\n cell6 = np.array(pickle.load(open('cell6.p', 'rb')))\n cell7 = np.array(pickle.load(open('cell7.p', 'rb')))\n cell8 = np.array(pickle.load(open('cell8.p', 'rb')))\n cell9 = np.array(pickle.load(open('cell9.p', 'rb')))\n cell10 = np.array(pickle.load(open('cell10.p', 'rb')))\n cell11 = np.array(pickle.load(open('cell11.p', 'rb')))\n cell12 = np.array(pickle.load(open('cell12.p', 'rb')))\n cell13 = np.array(pickle.load(open('cell13.p', 'rb')))\n cell14 = np.array(pickle.load(open('cell14.p', 'rb')))\n cell15 = np.array(pickle.load(open('cell15.p', 'rb')))\n time = np.array(pickle.load(open('time.p', 'rb')))\n\n CD8 = np.array(\n [cell1[0], cell2[0], cell3[0], cell4[0], cell5[0], cell6[0], cell7[0], cell8[0], cell9[0], cell10[0], cell11[0],\n cell12[0], cell13[0], cell14[0], cell15[0]])\n macrophage = np.array(\n [cell1[1], cell2[1], cell3[1], cell4[1], cell5[1], cell6[1], cell7[1], cell8[1], cell9[1], cell10[1], cell11[1],\n cell12[1], cell13[1], cell14[1], cell15[1]])\n secreteing_agent = np.array(\n [cell1[2], cell2[2], cell3[2], cell4[2], cell5[2], cell6[2], cell7[2], cell8[2], cell9[2], cell10[2], cell11[2],\n cell12[2], cell13[2], cell14[2], cell15[2]])\n fibroblast = np.array(\n [cell1[3], cell2[3], cell3[3], cell4[3], cell5[3], cell6[3], cell7[3], cell8[3], cell9[3], cell10[3], cell11[3],\n cell12[3], cell13[3], cell14[3], cell15[3]])\n TGF = np.array(\n [cell1[7], cell2[7], cell3[7], cell4[7], cell5[7], cell6[7], cell7[7], cell8[7], cell9[7], cell10[7], cell11[7],\n cell12[7], cell13[7], cell14[7], cell15[7]])\n collagen = np.array(\n [cell1[8], cell2[8], cell3[8], cell4[8], cell5[8], cell6[8], cell7[8], cell8[8], cell9[8], cell10[8], cell11[8],\n cell12[8], cell13[8], cell14[8], cell15[8]])\n t = np.array([time, time, time, time, time, time, time, time, time, time, time, time, time, time, time])\n\n mean_CD8 = np.mean(CD8, axis=0)\n mean_macrophage = np.mean(macrophage, axis=0)\n mean_secreteing_agent = np.mean(secreteing_agent, axis=0)\n mean_fibroblast = np.mean(fibroblast, axis=0)\n mean_TGF = np.mean(TGF, axis=0)\n mean_collagen = np.mean(collagen, axis=0)\n\n\n 
ax1.plot(time, mean_CD8, linewidth=2)\n ax2.plot(time, mean_macrophage, linewidth=2)\n ax3.plot(time, mean_secreteing_agent, linewidth=2)\n ax4.plot(time, mean_fibroblast, linewidth=2)\n ax5.plot(time, mean_TGF, linewidth=2)\n ax6.plot(time, mean_collagen, linewidth=2)\n\npathC = 'C:/Users/maislam4/OneDrive - University at Buffalo/Desktop/PhysiCell_R/Final_updated/plot_average/'\nos.chdir(pathC)\n#plt.legend(loc='upper left', prop={\"size\":15})\nax1.set_xlabel('Time (day)')\nax1.set_ylabel('Number of CD8+ T')\nax1.set_ylim([-20,450])\nax1.legend(['Baseline', 'Delayed rule'])\n#ax1.legend(['$D = 3000$ $μm^2/min$', '$D = 300$ $μm^2/min$', '$D = 30$ $μm^2/min$', '$D = 3$ $μm^2/min$'])\nfig1.savefig(\"CD8.png\", dpi=300, bbox_inches='tight')\n\nax2.set_xlabel('Time (day)')\nax2.set_ylabel('Number of Macrophage')\nax2.set_ylim([-20,450])\nax2.legend(['Baseline', 'Delayed rule'])\n#ax2.legend(['$D = 3000$ $μm^2/min$', '$D = 300$ $μm^2/min$', '$D = 30$ $μm^2/min$', '$D = 3$ $μm^2/min$'])\nfig2.savefig(\"Macrophage.png\", dpi=300, bbox_inches='tight')\n\nax3.set_xlabel('Time (day)')\nax3.set_ylabel('Number of Secreting agent')\nax3.set_ylim([-20,450])\nax3.legend(['Baseline', 'Delayed rule'])\n#ax3.legend(['$D = 3000$ $μm^2/min$', '$D = 300$ $μm^2/min$', '$D = 30$ $μm^2/min$', '$D = 3$ $μm^2/min$'])\nfig3.savefig(\"Secreting agent.png\", dpi=300, bbox_inches='tight')\n\nax4.set_xlabel('Time (day)')\nax4.set_ylabel('Number of Fibroblast')\nax4.set_ylim([-20,450])\nax4.legend(['Baseline', 'Delayed rule'])\n#ax4.legend(['$D = 3000$ $μm^2/min$', '$D = 300$ $μm^2/min$', '$D = 30$ $μm^2/min$', '$D = 3$ $μm^2/min$'])\nfig4.savefig(\"Fibroblast\", dpi=300, bbox_inches='tight')\n\nax5.set_xlabel('Time (day)')\nax5.set_ylabel('TGF-β ($ng/mL$)')\nax5.set_ylim([-0.25, 11])\nax5.legend(['Baseline', 'Delayed rule'])\n#ax5.legend(['$D = 3000$ $μm^2/min$', '$D = 300$ $μm^2/min$', '$D = 30$ $μm^2/min$', '$D = 3$ $μm^2/min$'])\nfig5.savefig(\"TGF\", dpi=300, bbox_inches='tight')\n\nax6.set_xlabel('Time (day)')\nax6.set_ylabel('Collagen ($μg/μm^3$)')\nax6.set_ylim([-0.25e-8,11e-8])\nax6.legend(['Baseline', 'Delayed rule'])\n#ax6.legend(['$D = 3000$ $μm^2/min$', '$D = 300$ $μm^2/min$', '$D = 30$ $μm^2/min$', '$D = 3$ $μm^2/min$'])\nfig6.savefig(\"Collagen\", dpi=300, bbox_inches='tight')\n\n"
]
| [
[
"matplotlib.use",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"numpy.mean",
"matplotlib.pyplot.subplots"
]
]
|
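The script above averages 15 pickled replicates per case and overlays the mean curves; a compact sketch of that pattern with synthetic data standing in for the PhysiCell outputs:

```python
import numpy as np
import matplotlib.pyplot as plt

time = np.linspace(0, 15, 50)                      # days
replicates = np.random.rand(15, time.size) * 100   # 15 runs of one cell count

fig, ax = plt.subplots()
ax.plot(time, replicates.mean(axis=0), linewidth=2)  # mean across replicates
ax.set_xlabel('Time (day)')
ax.set_ylabel('Number of CD8+ T')
fig.savefig('CD8_mean.png', dpi=300, bbox_inches='tight')
```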
phosseini/bert | [
"6d91a1578a2c963e26a7d057fc673b258585d3c4"
]
| [
"modeling.py"
]
| [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The main BERT model and related functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport json\nimport math\nimport re\nimport numpy as np\nimport six\nimport tensorflow as tf\n\n# making changes compatible with tensorflow v2\ntf.variable_scope = tf.compat.v1.variable_scope\ntf.get_variable = tf.compat.v1.get_variable\ntf.truncated_normal_initializer = tf.compat.v1.truncated_normal_initializer\ntf.assert_less_equal = tf.compat.v1.assert_less_equal\ntf.layers = tf.compat.v1.layers\n\n\nclass BertConfig(object):\n \"\"\"Configuration for `BertModel`.\"\"\"\n\n def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02):\n \"\"\"Constructs BertConfig.\n\n Args:\n vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e., feed-forward)\n layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string) in the\n encoder and pooler.\n hidden_dropout_prob: The dropout probability for all fully connected\n layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this model might\n ever be used with. 
Typically set this to something large just in case\n (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed into\n `BertModel`.\n initializer_range: The stdev of the truncated_normal_initializer for\n initializing all weight matrices.\n \"\"\"\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n with tf.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\nclass BertModel(object):\n \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\n\n Example usage:\n\n ```python\n # Already been converted into WordPiece token ids\n input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\n input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])\n token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])\n\n config = modeling.BertConfig(vocab_size=32000, hidden_size=512,\n num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\n\n model = modeling.BertModel(config=config, is_training=True,\n input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)\n\n label_embeddings = tf.get_variable(...)\n pooled_output = model.get_pooled_output()\n logits = tf.matmul(pooled_output, label_embeddings)\n ...\n ```\n \"\"\"\n\n def __init__(self,\n config,\n is_training,\n input_ids,\n input_mask=None,\n token_type_ids=None,\n use_one_hot_embeddings=False,\n scope=None):\n \"\"\"Constructor for BertModel.\n\n Args:\n config: `BertConfig` instance.\n is_training: bool. true for training model, false for eval model. Controls\n whether dropout will be applied.\n input_ids: int32 Tensor of shape [batch_size, seq_length].\n input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].\n token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n use_one_hot_embeddings: (optional) bool. Whether to use one-hot word\n embeddings or tf.embedding_lookup() for the word embeddings.\n scope: (optional) variable scope. 
Defaults to \"bert\".\n\n Raises:\n ValueError: The config is invalid or one of the input tensor shapes\n is invalid.\n \"\"\"\n config = copy.deepcopy(config)\n if not is_training:\n config.hidden_dropout_prob = 0.0\n config.attention_probs_dropout_prob = 0.0\n\n input_shape = get_shape_list(input_ids, expected_rank=2)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n\n if input_mask is None:\n input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)\n\n if token_type_ids is None:\n token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)\n\n with tf.variable_scope(scope, default_name=\"bert\"):\n with tf.variable_scope(\"embeddings\"):\n # Perform embedding lookup on the word ids.\n (self.embedding_output, self.embedding_table) = embedding_lookup(\n input_ids=input_ids,\n vocab_size=config.vocab_size,\n embedding_size=config.hidden_size,\n initializer_range=config.initializer_range,\n word_embedding_name=\"word_embeddings\",\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # Add positional embeddings and token type embeddings, then layer\n # normalize and perform dropout.\n self.embedding_output = embedding_postprocessor(\n input_tensor=self.embedding_output,\n use_token_type=True,\n token_type_ids=token_type_ids,\n token_type_vocab_size=config.type_vocab_size,\n token_type_embedding_name=\"token_type_embeddings\",\n use_position_embeddings=True,\n position_embedding_name=\"position_embeddings\",\n initializer_range=config.initializer_range,\n max_position_embeddings=config.max_position_embeddings,\n dropout_prob=config.hidden_dropout_prob)\n\n with tf.variable_scope(\"encoder\"):\n # This converts a 2D mask of shape [batch_size, seq_length] to a 3D\n # mask of shape [batch_size, seq_length, seq_length] which is used\n # for the attention scores.\n attention_mask = create_attention_mask_from_input_mask(\n input_ids, input_mask)\n\n # Run the stacked transformer.\n # `sequence_output` shape = [batch_size, seq_length, hidden_size].\n self.all_encoder_layers = transformer_model(\n input_tensor=self.embedding_output,\n attention_mask=attention_mask,\n hidden_size=config.hidden_size,\n num_hidden_layers=config.num_hidden_layers,\n num_attention_heads=config.num_attention_heads,\n intermediate_size=config.intermediate_size,\n intermediate_act_fn=get_activation(config.hidden_act),\n hidden_dropout_prob=config.hidden_dropout_prob,\n attention_probs_dropout_prob=config.attention_probs_dropout_prob,\n initializer_range=config.initializer_range,\n do_return_all_layers=True)\n\n self.sequence_output = self.all_encoder_layers[-1]\n # The \"pooler\" converts the encoded sequence tensor of shape\n # [batch_size, seq_length, hidden_size] to a tensor of shape\n # [batch_size, hidden_size]. This is necessary for segment-level\n # (or segment-pair-level) classification tasks where we need a fixed\n # dimensional representation of the segment.\n with tf.variable_scope(\"pooler\"):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token. 
We assume that this has been pre-trained\n first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)\n self.pooled_output = tf.layers.dense(\n first_token_tensor,\n config.hidden_size,\n activation=tf.tanh,\n kernel_initializer=create_initializer(config.initializer_range))\n\n def get_pooled_output(self):\n return self.pooled_output\n\n def get_sequence_output(self):\n \"\"\"Gets final hidden layer of encoder.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the final hidden of the transformer encoder.\n \"\"\"\n return self.sequence_output\n\n def get_all_encoder_layers(self):\n return self.all_encoder_layers\n\n def get_embedding_output(self):\n \"\"\"Gets output of the embedding lookup (i.e., input to the transformer).\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size] corresponding\n to the output of the embedding layer, after summing the word\n embeddings with the positional embeddings and the token type embeddings,\n then performing layer normalization. This is the input to the transformer.\n \"\"\"\n return self.embedding_output\n\n def get_embedding_table(self):\n return self.embedding_table\n\n\ndef gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n Args:\n x: float Tensor to perform activation.\n\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf\n\n\ndef get_activation(activation_string):\n \"\"\"Maps a string to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n Args:\n activation_string: String name of the activation function.\n\n Returns:\n A Python function corresponding to the activation function. If\n `activation_string` is None, empty, or \"linear\", this will return None.\n If `activation_string` is not a string, it will return `activation_string`.\n\n Raises:\n ValueError: The `activation_string` does not correspond to a known\n activation.\n \"\"\"\n\n # We assume that anything that\"s not a string is already an activation\n # function, so we just return it.\n if not isinstance(activation_string, six.string_types):\n return activation_string\n\n if not activation_string:\n return None\n\n act = activation_string.lower()\n if act == \"linear\":\n return None\n elif act == \"relu\":\n return tf.nn.relu\n elif act == \"gelu\":\n return gelu\n elif act == \"tanh\":\n return tf.tanh\n else:\n raise ValueError(\"Unsupported activation: %s\" % act)\n\n\ndef get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n \"\"\"Compute the union of the current variables and checkpoint variables.\"\"\"\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)\n\n\ndef dropout(input_tensor, dropout_prob):\n \"\"\"Perform dropout.\n\n Args:\n input_tensor: float Tensor.\n dropout_prob: Python float. 
The probability of dropping out a value (NOT of\n      *keeping* a dimension as in `tf.nn.dropout`).\n\n  Returns:\n    A version of `input_tensor` with dropout applied.\n  \"\"\"\n  if dropout_prob is None or dropout_prob == 0.0:\n    return input_tensor\n\n  output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)\n  return output\n\n\ndef layer_norm(input_tensor, name=None):\n  \"\"\"Run layer normalization on the last dimension of the tensor.\"\"\"\n  # `name` is kept for API compatibility but unused by the Keras layer.\n  layer_norma = tf.keras.layers.LayerNormalization(axis=-1)\n  return layer_norma(input_tensor)\n\n\ndef layer_norm_and_dropout(input_tensor, dropout_prob, name=None):\n  \"\"\"Runs layer normalization followed by dropout.\"\"\"\n  output_tensor = layer_norm(input_tensor, name)\n  output_tensor = dropout(output_tensor, dropout_prob)\n  return output_tensor\n\n\ndef create_initializer(initializer_range=0.02):\n  \"\"\"Creates a `truncated_normal_initializer` with the given range.\"\"\"\n  return tf.truncated_normal_initializer(stddev=initializer_range)\n\n\ndef embedding_lookup(input_ids,\n                     vocab_size,\n                     embedding_size=128,\n                     initializer_range=0.02,\n                     word_embedding_name=\"word_embeddings\",\n                     use_one_hot_embeddings=False):\n  \"\"\"Looks up word embeddings for an id tensor.\n\n  Args:\n    input_ids: int32 Tensor of shape [batch_size, seq_length] containing word\n      ids.\n    vocab_size: int. Size of the embedding vocabulary.\n    embedding_size: int. Width of the word embeddings.\n    initializer_range: float. Embedding initialization range.\n    word_embedding_name: string. Name of the embedding table.\n    use_one_hot_embeddings: bool. If True, use one-hot method for word\n      embeddings. If False, use `tf.gather()`.\n\n  Returns:\n    float Tensor of shape [batch_size, seq_length, embedding_size].\n  \"\"\"\n  # This function assumes that the input is of shape [batch_size, seq_length,\n  # num_inputs].\n  #\n  # If the input is a 2D tensor of shape [batch_size, seq_length], we\n  # reshape to [batch_size, seq_length, 1].\n  if input_ids.shape.ndims == 2:\n    input_ids = tf.expand_dims(input_ids, axis=[-1])\n\n  embedding_table = tf.get_variable(\n      name=word_embedding_name,\n      shape=[vocab_size, embedding_size],\n      initializer=create_initializer(initializer_range))\n\n  flat_input_ids = tf.reshape(input_ids, [-1])\n  if use_one_hot_embeddings:\n    one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)\n    output = tf.matmul(one_hot_input_ids, embedding_table)\n  else:\n    output = tf.gather(embedding_table, flat_input_ids)\n\n  input_shape = get_shape_list(input_ids)\n\n  output = tf.reshape(output,\n                      input_shape[0:-1] + [input_shape[-1] * embedding_size])\n  return (output, embedding_table)\n\n\ndef embedding_postprocessor(input_tensor,\n                            use_token_type=False,\n                            token_type_ids=None,\n                            token_type_vocab_size=16,\n                            token_type_embedding_name=\"token_type_embeddings\",\n                            use_position_embeddings=True,\n                            position_embedding_name=\"position_embeddings\",\n                            initializer_range=0.02,\n                            max_position_embeddings=512,\n                            dropout_prob=0.1):\n  \"\"\"Performs various post-processing on a word embedding tensor.\n\n  Args:\n    input_tensor: float Tensor of shape [batch_size, seq_length,\n      embedding_size].\n    use_token_type: bool. Whether to add embeddings for `token_type_ids`.\n    token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].\n      Must be specified if `use_token_type` is True.\n    token_type_vocab_size: int. The vocabulary size of `token_type_ids`.\n    token_type_embedding_name: string.
The name of the embedding table variable\n for token type ids.\n use_position_embeddings: bool. Whether to add position embeddings for the\n position of each token in the sequence.\n position_embedding_name: string. The name of the embedding table variable\n for positional embeddings.\n initializer_range: float. Range of the weight initialization.\n max_position_embeddings: int. Maximum sequence length that might ever be\n used with this model. This can be longer than the sequence length of\n input_tensor, but cannot be shorter.\n dropout_prob: float. Dropout probability applied to the final output tensor.\n\n Returns:\n float tensor with same shape as `input_tensor`.\n\n Raises:\n ValueError: One of the tensor shapes or input values is invalid.\n \"\"\"\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = input_tensor\n\n if use_token_type:\n if token_type_ids is None:\n raise ValueError(\"`token_type_ids` must be specified if\"\n \"`use_token_type` is True.\")\n token_type_table = tf.get_variable(\n name=token_type_embedding_name,\n shape=[token_type_vocab_size, width],\n initializer=create_initializer(initializer_range))\n # This vocab will be small so we always do one-hot here, since it is always\n # faster for a small vocabulary.\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)\n token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if use_position_embeddings:\n assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)\n with tf.control_dependencies([assert_op]):\n full_position_embeddings = tf.get_variable(\n name=position_embedding_name,\n shape=[max_position_embeddings, width],\n initializer=create_initializer(initializer_range))\n # Since the position embedding table is a learned variable, we create it\n # using a (long) sequence length `max_position_embeddings`. The actual\n # sequence length might be shorter than this, for faster training of\n # tasks that do not have long sequences.\n #\n # So `full_position_embeddings` is effectively an embedding table\n # for position [0, 1, 2, ..., max_position_embeddings-1], and the current\n # sequence has positions [0, 1, 2, ... 
seq_length-1], so we can just\n      # perform a slice.\n      position_embeddings = tf.slice(full_position_embeddings, [0, 0],\n                                     [seq_length, -1])\n      num_dims = len(output.shape.as_list())\n\n      # Only the last two dimensions are relevant (`seq_length` and `width`), so\n      # we broadcast among the first dimensions, which is typically just\n      # the batch size.\n      position_broadcast_shape = []\n      for _ in range(num_dims - 2):\n        position_broadcast_shape.append(1)\n      position_broadcast_shape.extend([seq_length, width])\n      position_embeddings = tf.reshape(position_embeddings,\n                                       position_broadcast_shape)\n      output += position_embeddings\n\n  output = layer_norm_and_dropout(output, dropout_prob)\n  return output\n\n\ndef create_attention_mask_from_input_mask(from_tensor, to_mask):\n  \"\"\"Create 3D attention mask from a 2D tensor mask.\n\n  Args:\n    from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n    to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n\n  Returns:\n    float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n  \"\"\"\n  from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n  batch_size = from_shape[0]\n  from_seq_length = from_shape[1]\n\n  to_shape = get_shape_list(to_mask, expected_rank=2)\n  to_seq_length = to_shape[1]\n\n  to_mask = tf.cast(\n      tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)\n\n  # We don't assume that `from_tensor` is a mask (although it could be). We\n  # don't actually care if we attend *from* padding tokens (only *to* padding\n  # tokens) so we create a tensor of all ones.\n  #\n  # `broadcast_ones` = [batch_size, from_seq_length, 1]\n  broadcast_ones = tf.ones(\n      shape=[batch_size, from_seq_length, 1], dtype=tf.float32)\n\n  # Here we broadcast along two dimensions to create the mask.\n  mask = broadcast_ones * to_mask\n\n  return mask\n\n\ndef attention_layer(from_tensor,\n                    to_tensor,\n                    attention_mask=None,\n                    num_attention_heads=1,\n                    size_per_head=512,\n                    query_act=None,\n                    key_act=None,\n                    value_act=None,\n                    attention_probs_dropout_prob=0.0,\n                    initializer_range=0.02,\n                    do_return_2d_tensor=False,\n                    batch_size=None,\n                    from_seq_length=None,\n                    to_seq_length=None):\n  \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n\n  This is an implementation of multi-headed attention based on \"Attention\n  is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n  this is self-attention. Each timestep in `from_tensor` attends to the\n  corresponding sequence in `to_tensor`, and returns a fixed-width vector.\n\n  This function first projects `from_tensor` into a \"query\" tensor and\n  `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n  of tensors of length `num_attention_heads`, where each tensor is of shape\n  [batch_size, seq_length, size_per_head].\n\n  Then, the query and key tensors are dot-producted and scaled. These are\n  softmaxed to obtain attention probabilities. The value tensors are then\n  interpolated by these probabilities, then concatenated back to a single\n  tensor and returned.\n\n  In practice, the multi-headed attention is done with transposes and\n  reshapes rather than actual separate tensors.\n\n  Args:\n    from_tensor: float Tensor of shape [batch_size, from_seq_length,\n      from_width].\n    to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].\n    attention_mask: (optional) int32 Tensor of shape [batch_size,\n      from_seq_length, to_seq_length]. The values should be 1 or 0.
The\n attention scores will effectively be set to -infinity for any positions in\n the mask that are 0, and will be unchanged for positions that are 1.\n num_attention_heads: int. Number of attention heads.\n size_per_head: int. Size of each attention head.\n query_act: (optional) Activation function for the query transform.\n key_act: (optional) Activation function for the key transform.\n value_act: (optional) Activation function for the value transform.\n attention_probs_dropout_prob: (optional) float. Dropout probability of the\n attention probabilities.\n initializer_range: float. Range of the weight initializer.\n do_return_2d_tensor: bool. If True, the output will be of shape [batch_size\n * from_seq_length, num_attention_heads * size_per_head]. If False, the\n output will be of shape [batch_size, from_seq_length, num_attention_heads\n * size_per_head].\n batch_size: (Optional) int. If the input is 2D, this might be the batch size\n of the 3D version of the `from_tensor` and `to_tensor`.\n from_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `from_tensor`.\n to_seq_length: (Optional) If the input is 2D, this might be the seq length\n of the 3D version of the `to_tensor`.\n\n Returns:\n float Tensor of shape [batch_size, from_seq_length,\n num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is\n true, this will be of shape [batch_size * from_seq_length,\n num_attention_heads * size_per_head]).\n\n Raises:\n ValueError: Any of the arguments or tensor shapes are invalid.\n \"\"\"\n\n def transpose_for_scores(input_tensor, batch_size, num_attention_heads,\n seq_length, width):\n output_tensor = tf.reshape(\n input_tensor, [batch_size, seq_length, num_attention_heads, width])\n\n output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])\n return output_tensor\n\n from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])\n to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])\n\n if len(from_shape) != len(to_shape):\n raise ValueError(\n \"The rank of `from_tensor` must match the rank of `to_tensor`.\")\n\n if len(from_shape) == 3:\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n to_seq_length = to_shape[1]\n elif len(from_shape) == 2:\n if (batch_size is None or from_seq_length is None or to_seq_length is None):\n raise ValueError(\n \"When passing in rank 2 tensors to attention_layer, the values \"\n \"for `batch_size`, `from_seq_length`, and `to_seq_length` \"\n \"must all be specified.\")\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n\n from_tensor_2d = reshape_to_matrix(from_tensor)\n to_tensor_2d = reshape_to_matrix(to_tensor)\n\n # `query_layer` = [B*F, N*H]\n query_layer = tf.layers.dense(\n from_tensor_2d,\n num_attention_heads * size_per_head,\n activation=query_act,\n name=\"query\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `key_layer` = [B*T, N*H]\n key_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=key_act,\n name=\"key\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `value_layer` = [B*T, N*H]\n value_layer = tf.layers.dense(\n to_tensor_2d,\n num_attention_heads * size_per_head,\n activation=value_act,\n name=\"value\",\n kernel_initializer=create_initializer(initializer_range))\n\n # `query_layer` = [B, N, F, H]\n query_layer = 
transpose_for_scores(query_layer, batch_size,\n num_attention_heads, from_seq_length,\n size_per_head)\n\n # `key_layer` = [B, N, T, H]\n key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,\n to_seq_length, size_per_head)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n # `attention_scores` = [B, N, F, T]\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n # Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = dropout(attention_probs, attention_probs_dropout_prob)\n\n # `value_layer` = [B, T, N, H]\n value_layer = tf.reshape(\n value_layer,\n [batch_size, to_seq_length, num_attention_heads, size_per_head])\n\n # `value_layer` = [B, N, T, H]\n value_layer = tf.transpose(value_layer, [0, 2, 1, 3])\n\n # `context_layer` = [B, N, F, H]\n context_layer = tf.matmul(attention_probs, value_layer)\n\n # `context_layer` = [B, F, N, H]\n context_layer = tf.transpose(context_layer, [0, 2, 1, 3])\n\n if do_return_2d_tensor:\n # `context_layer` = [B*F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size * from_seq_length, num_attention_heads * size_per_head])\n else:\n # `context_layer` = [B, F, N*H]\n context_layer = tf.reshape(\n context_layer,\n [batch_size, from_seq_length, num_attention_heads * size_per_head])\n\n return context_layer\n\n\ndef transformer_model(input_tensor,\n attention_mask=None,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_act_fn=gelu,\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n initializer_range=0.02,\n do_return_all_layers=False):\n \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n\n This is almost an exact implementation of the original Transformer encoder.\n\n See the original paper:\n https://arxiv.org/abs/1706.03762\n\n Also see:\n https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py\n\n Args:\n input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].\n attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,\n seq_length], with 1 for positions that can be attended to and 0 in\n positions that should not be.\n hidden_size: int. Hidden size of the Transformer.\n num_hidden_layers: int. Number of layers (blocks) in the Transformer.\n num_attention_heads: int. Number of attention heads in the Transformer.\n intermediate_size: int. The size of the \"intermediate\" (a.k.a., feed\n forward) layer.\n intermediate_act_fn: function. 
The non-linear activation function to apply\n to the output of the intermediate/feed-forward layer.\n hidden_dropout_prob: float. Dropout probability for the hidden layers.\n attention_probs_dropout_prob: float. Dropout probability of the attention\n probabilities.\n initializer_range: float. Range of the initializer (stddev of truncated\n normal).\n do_return_all_layers: Whether to also return all layers or just the final\n layer.\n\n Returns:\n float Tensor of shape [batch_size, seq_length, hidden_size], the final\n hidden layer of the Transformer.\n\n Raises:\n ValueError: A Tensor shape or parameter is invalid.\n \"\"\"\n if hidden_size % num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (hidden_size, num_attention_heads))\n\n attention_head_size = int(hidden_size / num_attention_heads)\n input_shape = get_shape_list(input_tensor, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n input_width = input_shape[2]\n\n # The Transformer performs sum residuals on all layers so the input needs\n # to be the same as the hidden size.\n if input_width != hidden_size:\n raise ValueError(\"The width of the input tensor (%d) != hidden size (%d)\" %\n (input_width, hidden_size))\n\n # We keep the representation as a 2D tensor to avoid re-shaping it back and\n # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on\n # the GPU/CPU but may not be free on the TPU, so we want to minimize them to\n # help the optimizer.\n prev_output = reshape_to_matrix(input_tensor)\n\n all_layer_outputs = []\n for layer_idx in range(num_hidden_layers):\n with tf.variable_scope(\"layer_%d\" % layer_idx):\n layer_input = prev_output\n\n with tf.variable_scope(\"attention\"):\n attention_heads = []\n with tf.variable_scope(\"self\"):\n attention_head = attention_layer(\n from_tensor=layer_input,\n to_tensor=layer_input,\n attention_mask=attention_mask,\n num_attention_heads=num_attention_heads,\n size_per_head=attention_head_size,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n initializer_range=initializer_range,\n do_return_2d_tensor=True,\n batch_size=batch_size,\n from_seq_length=seq_length,\n to_seq_length=seq_length)\n attention_heads.append(attention_head)\n\n attention_output = None\n if len(attention_heads) == 1:\n attention_output = attention_heads[0]\n else:\n # In the case where we have other sequences, we just concatenate\n # them to the self-attention head before the projection.\n attention_output = tf.concat(attention_heads, axis=-1)\n\n # Run a linear projection of `hidden_size` then add a residual\n # with `layer_input`.\n with tf.variable_scope(\"output\"):\n attention_output = tf.layers.dense(\n attention_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n attention_output = dropout(attention_output, hidden_dropout_prob)\n attention_output = layer_norm(attention_output + layer_input)\n\n # The activation is only applied to the \"intermediate\" hidden layer.\n with tf.variable_scope(\"intermediate\"):\n intermediate_output = tf.layers.dense(\n attention_output,\n intermediate_size,\n activation=intermediate_act_fn,\n kernel_initializer=create_initializer(initializer_range))\n\n # Down-project back to `hidden_size` then add the residual.\n with tf.variable_scope(\"output\"):\n layer_output = tf.layers.dense(\n intermediate_output,\n hidden_size,\n kernel_initializer=create_initializer(initializer_range))\n layer_output = 
dropout(layer_output, hidden_dropout_prob)\n layer_output = layer_norm(layer_output + attention_output)\n prev_output = layer_output\n all_layer_outputs.append(layer_output)\n\n if do_return_all_layers:\n final_outputs = []\n for layer_output in all_layer_outputs:\n final_output = reshape_from_matrix(layer_output, input_shape)\n final_outputs.append(final_output)\n return final_outputs\n else:\n final_output = reshape_from_matrix(prev_output, input_shape)\n return final_output\n\n\ndef get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if name is None:\n name = tensor.name\n\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape\n\n\ndef reshape_to_matrix(input_tensor):\n \"\"\"Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).\"\"\"\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2. Shape = %s\" %\n (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor\n\n\ndef reshape_from_matrix(output_tensor, orig_shape_list):\n \"\"\"Reshapes a rank 2 tensor back to its original rank >= 2 tensor.\"\"\"\n if len(orig_shape_list) == 2:\n return output_tensor\n\n output_shape = get_shape_list(output_tensor)\n\n orig_dims = orig_shape_list[0:-1]\n width = output_shape[-1]\n\n return tf.reshape(output_tensor, orig_dims + [width])\n\n\ndef assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n scope_name = tf.get_variable_scope().name\n raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))\n"
]
| [
[
"tensorflow.ones",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.control_dependencies",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.cast",
"tensorflow.shape",
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.train.list_variables",
"tensorflow.variable_scope",
"tensorflow.squeeze",
"tensorflow.get_variable_scope",
"numpy.sqrt",
"tensorflow.nn.dropout",
"tensorflow.zeros",
"tensorflow.expand_dims",
"tensorflow.gfile.GFile",
"tensorflow.assert_less_equal",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.gather",
"tensorflow.truncated_normal_initializer",
"tensorflow.slice",
"tensorflow.pow"
]
]
|
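A small sketch, assuming the file above is importable as `modeling` under TF2 (the compat aliases at its top make that work), that round-trips `BertConfig` through a dictionary and spot-checks the tanh GELU approximation it defines:

```python
import tensorflow as tf
import modeling  # the modeling.py above

config = modeling.BertConfig(vocab_size=32000, hidden_size=128,
                             num_hidden_layers=2, num_attention_heads=2,
                             intermediate_size=256)
restored = modeling.BertConfig.from_dict(config.to_dict())
assert restored.hidden_size == 128

x = tf.constant([-1.0, 0.0, 1.0])
# tanh approximation of GELU: roughly [-0.159, 0.0, 0.841]
print(modeling.gelu(x))
```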
shijun18/torch_deploy | [
"2055ec5a9be79bb55c8fbbf459abb2173b3f9e0f"
]
| [
"test_deploy/transformer.py"
]
| [
"import torch \nimport numpy as np \nfrom skimage.transform import resize\n\n\n\n\nclass Trunc_and_Normalize(object):\n '''\n truncate gray scale and normalize to [0,1]\n '''\n def __init__(self, scale):\n self.scale = scale\n assert len(self.scale) == 2, 'scale error'\n\n def __call__(self, image):\n \n # gray truncation\n image = image - self.scale[0]\n gray_range = self.scale[1] - self.scale[0]\n image[image < 0] = 0\n image[image > gray_range] = gray_range\n \n image = image / gray_range\n\n return image\n\n\n\n\nclass CropResize(object):\n '''\n Data preprocessing.\n Adjust the size of input data to fixed size by cropping and resize\n Args:\n - dim: tuple of integer, fixed size\n - crop: single integer, factor of cropping, H/W ->[crop:-crop,crop:-crop]\n '''\n def __init__(self, dim=None,crop=0):\n self.dim = dim\n self.crop = crop\n\n def __call__(self, image):\n\n # image: numpy array\n # crop\n if self.crop != 0:\n image = image[self.crop:-self.crop, self.crop:-self.crop]\n # resize\n if self.dim is not None and image.shape != self.dim:\n image = resize(image, self.dim, anti_aliasing=True)\n\n return image\n\n\nclass To_Tensor(object):\n '''\n Convert the data in sample to torch Tensor.\n '''\n\n def __call__(self,image):\n # expand dims\n if len(image.shape) == 2:\n image = np.expand_dims(image,axis=0)\n else:\n image = np.transpose(image,(2,0,1))\n # convert to Tensor\n image = torch.from_numpy(image)\n \n return image"
]
| [
[
"numpy.transpose",
"numpy.expand_dims",
"torch.from_numpy"
]
]
|
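A toy run of the three transforms in this row chained together; the gray-scale window and target size are arbitrary, and the classes are assumed importable from `test_deploy/transformer.py`:

```python
import numpy as np
from test_deploy.transformer import Trunc_and_Normalize, CropResize, To_Tensor

image = np.random.uniform(-100.0, 500.0, size=(256, 256)).astype(np.float32)

for transform in (Trunc_and_Normalize(scale=(0, 400)),  # clip to [0, 400], scale to [0, 1]
                  CropResize(dim=(128, 128), crop=10),  # crop border, resize
                  To_Tensor()):                         # HxW -> 1xHxW torch tensor
    image = transform(image)

print(type(image), tuple(image.shape))  # <class 'torch.Tensor'> (1, 128, 128)
```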
chrisnbattista/multi-agent-kinetics | [
"01af3bbd8a44038e7e8744975000e5474fa1124b"
]
| [
"experiments.py"
]
| [
"import numpy as np\nimport torch\nimport random, math\nfrom . import worlds\n\ndef initialize_random_sphere(n_particles,\n radius,\n center=None,\n min_dist=4,\n random_speed=0,\n spatial_dims=3,\n mass=1):\n '''\n n_particles: initial number of particles []\n radius: initial radius of particle distribution [m]\n '''\n\n schema = worlds.schemas[str(spatial_dims) + 'd']\n\n if center == None:\n center = np.zeros((spatial_dims,))\n\n ## Set up initial conditions (ICs)\n world_state = torch.empty ( (n_particles, len(schema)) )\n\n # create a random distribution of particles\n for i in range(n_particles):\n\n try:\n iter(mass)\n m_i = mass[i] * random.random()\n except TypeError: m_i = mass * random.random()\n\n smallest_interparticle_distance = 0\n\n vs = []\n for j in range(spatial_dims):\n vs.append(2*(random.random()*0.5) * random_speed)\n \n pos = worlds.pos[spatial_dims]\n \n if spatial_dims == 1:\n while smallest_interparticle_distance < min_dist:\n r = 2*(random.random()-0.5) * radius\n candidate_b_1 = center[0] + r\n distances = np.abs(world_state[:i, 3] - candidate_b_1)\n if i > 0:\n smallest_interparticle_distance = distances.min()\n else:\n break\n \n world_state[i, :] = torch.tensor((0, i, m_i, candidate_b_1, vs[0]))\n\n elif spatial_dims > 1:\n while smallest_interparticle_distance < min_dist:\n test_pos = torch.zeros(spatial_dims)\n for j in range(spatial_dims):\n test_pos[j] = center[j] + 2*(random.random()-0.5) * radius\n offset = world_state[:i, pos] - test_pos\n norms = np.linalg.norm(offset, axis=1)\n if i > 0:\n smallest_interparticle_distance = norms.min()\n else:\n break\n \n world_state[i, :] = torch.tensor((0, i, m_i, *test_pos, *vs))\n\n return world_state"
]
| [
[
"torch.zeros",
"numpy.linalg.norm",
"numpy.zeros",
"torch.tensor",
"numpy.abs"
]
]
|
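An illustrative call of `initialize_random_sphere` above; the import path and all numbers are assumptions, and the package's `worlds` module (which defines the per-dimension schemas) must be importable:

```python
# Hypothetical import path; the function lives in the repo's experiments.py.
from multi_agent_kinetics.experiments import initialize_random_sphere

# Each returned row follows the schema used above: an initial 0, the particle
# index, its mass, then the position and velocity components.
state = initialize_random_sphere(n_particles=10, radius=50.0,
                                 min_dist=4, random_speed=1.0,
                                 spatial_dims=3, mass=1)
print(state.shape)  # torch.Size([10, len(schema)])
```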
sebi06/czi_demos | [
"b3f7801f46de0138a8a1ac245e9c80787e0a3f17"
]
| [
"test_stardist_cziRGB.py"
]
| [
"from __future__ import print_function, unicode_literals, absolute_import, division\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n# %matplotlib inline\n# %config InlineBackend.figure_format = 'retina'\n\nfrom glob import glob\nfrom tifffile import imread\nfrom csbdeep.utils import Path, normalize\nfrom csbdeep.io import save_tiff_imagej_compatible\n\n#from stardist import random_label_cmap, _draw_polygons, export_imagej_rois\n#from stardist.models import StarDist2D\n\nfrom aicspylibczi import CziFile\n\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport imgfile_tools as imf\nimport segmentation_tools as sgt\nfrom aicsimageio import AICSImage, imread\nfrom skimage import measure, segmentation\nfrom skimage.measure import regionprops\nfrom skimage.color import label2rgb\nfrom skimage.color import rgb2gray\nimport progressbar\nfrom IPython.display import display, HTML\nfrom MightyMosaic import MightyMosaic\n\n# specify the filename of the CZI file\n#filename = r\"C:\\Users\\m1srh\\OneDrive - Carl Zeiss AG\\Testdata_Zeiss\\Atomic\\Nuclei\\nuclei_RGB\\H&E\\Tumor_H&E_small2.czi\"\nfilename = r\"/datadisk1/tuxedo/testpictures/Testdata_Zeiss/Nuclei/nuclei_RGB/H+E/Tumor_H+E_small2.czi\"\n\n\n# get the metadata from the czi file\nmd, addmd = imf.get_metadata(filename)\n\n# show some metainformation\nprint('------------------ Show Metainformation ------------------')\n\n# shape and dimension entry from CZI file as returned by czifile.py\nprint('Array Shape (czifile) : ', md['Shape'])\nprint('Dimension Entry (czifile) : ', md['Axes'])\nprint('Array Shape (aicsimageio) : ', md['Shape_aics'])\nprint('Dimension Entry (aicsimageio) : ', md['Axes_aics'])\nprint('------------------------------------------------------------')\nprint('SizeS : ', md['SizeS'])\nprint('SizeT : ', md['SizeT'])\nprint('SizeZ : ', md['SizeZ'])\nprint('SizeC : ', md['SizeC'])\nprint('SizeX (czifile) : ', md['SizeX'])\nprint('SizeY (czifile) : ', md['SizeY'])\nprint('SizeY (aicsimageio) : ', md['SizeX_aics'])\nprint('SizeY (aicsimageio) : ', md['SizeY_aics'])\nprint('XScale : ', md['XScale'])\nprint('Yscale : ', md['YScale'])\nprint('Zscale : ', md['ZScale'])\n\n\n####################\n\nczi = CziFile(filename)\n\n# Get the shape of the data, the coordinate pairs are (start index, size)\ndimensions = czi.dims_shape()\nprint(dimensions)\nprint(czi.dims)\nprint(czi.size)\nprint(czi.is_mosaic()) # True\n# Mosaic files ignore the S dimension and use an internal mIndex to reconstruct, the scale factor allows one to generate a manageable image\nmosaic_data = czi.read_mosaic(C=0, scale_factor=1)\nprint('CZI Mosaic Data Shape : ', mosaic_data.shape)\n\n\nplt.figure(figsize=(8, 8))\nimage2d = mosaic_data[0, :, 0, :, :]\nimage2d = np.moveaxis(image2d, 0, -1)\n\n# convert ZEN BGR into RGB\nimage2d = image2d[..., ::-1]\n\n\"\"\"\nplt.imshow(image2d)\nplt.axis('off')\nplt.show()\n\"\"\"\n\n\"\"\"\n# Load the image slice I want from the file\nfor m in range(0, 4):\n img, shp = czi.read_image(M=m, C=0)\n print('CZI Single Tile Shape : ', img.shape)\n # print(shp)\n\n bgr = img[0, 0, :, 0, :, :]\n bgr = np.moveaxis(bgr, 0, -1)\n # convert ZEN BGR into RGB\n rgb = bgr[..., ::-1]\n\n plt.imshow(rgb)\n plt.axis('off')\n plt.show()\n\n################\n\"\"\"\n\n\n# get the current plane indicies and store them\nvalues = {'S': 0, 'T': 0, 'Z': 0, 'C': 0, 'Number': 0}\n\n\nn_channel = 1 if image2d.ndim == 2 else image2d.shape[-1]\naxis_norm = (0, 1) # normalize channels 
independently\n# axis_norm = (0, 1, 2) # normalize channels jointly\nif n_channel > 1:\n print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n\n\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\n\"\"\"Predict instance segmentation from input image.\nParameters\n----------\nimg : :class:`numpy.ndarray`\n Input image\naxes : str or None\n Axes of the input ``img``.\n ``None`` denotes that axes of img are the same as denoted in the config.\nnormalizer : :class:`csbdeep.data.Normalizer` or None\n (Optional) normalization of input image before prediction.\n Note that the default (``None``) assumes ``img`` to be already normalized.\nprob_thresh : float or None\n Consider only object candidates from pixels with predicted object probability\n above this threshold (also see `optimize_thresholds`).\nnms_thresh : float or None\n Perform non-maximum suppression that considers two objects to be the same\n when their area/surface overlap exceeds this threshold (also see `optimize_thresholds`).\nn_tiles : iterable or None\n Out of memory (OOM) errors can occur if the input image is too large.\n To avoid this problem, the input image is broken up into (overlapping) tiles\n that are processed independently and re-assembled.\n This parameter denotes a tuple of the number of tiles for every image axis (see ``axes``).\n ``None`` denotes that no tiling should be used.\nshow_tile_progress: bool\n Whether to show progress during tiled prediction.\npredict_kwargs: dict\n Keyword arguments for ``predict`` function of Keras model.\nnms_kwargs: dict\n Keyword arguments for non-maximum suppression.\noverlap_label: scalar or None\n if not None, label the regions where polygons overlap with that value\nReturns\n-------\n(:class:`numpy.ndarray`, dict)\n Returns a tuple of the label instances image and also\n a dictionary with the details (coordinates, etc.) 
of all remaining polygons/polyhedra.\n\"\"\"\n\n\ndemo_model = True\n\nif demo_model:\n print(\n \"NOTE: This is loading a previously trained demo model!\\n\"\n \" Please set the variable 'demo_model = False' to load your own trained model.\",\n file=sys.stderr, flush=True\n )\n model = StarDist2D.from_pretrained('Versatile (H&E nuclei)')\n\nelse:\n model = StarDist2D(None, name='stardist', basedir='models')\nNone\n\nimg = normalize(image2d,\n pmin=1,\n pmax=99.8,\n axis=axis_norm,\n clip=False,\n eps=1e-20,\n dtype=np.float32)\n\nmask, details = model.predict_instances(img,\n axes=None,\n normalizer=None,\n prob_thresh=0.7,\n nms_thresh=0.3,\n n_tiles=None,\n show_tile_progress=True,\n overlap_label=None\n )\n\n#plt.figure(figsize=(8, 8))\n#plt.imshow(img if img.ndim == 2 else img[..., 0], clim=(0, 1), cmap='gray')\n#plt.imshow(mask, cmap=lbl_cmap, alpha=0.5)\n# plt.axis('off')\n\n\n# define measure region properties\nto_measure = ('label',\n 'area',\n 'centroid',\n 'max_intensity',\n 'mean_intensity',\n 'min_intensity',\n 'bbox')\n\n# measure the specified parameters store in dataframe\nprops = pd.DataFrame(\n measure.regionprops_table(\n mask,\n # intensity_image=rgb2gray(image2d),\n intensity_image=image2d[:, :, 2],\n properties=to_measure\n )\n).set_index('label')\n\n# filter objects by size and intensity\n\nmaxR = 120\nmaxG = 130\nmaxB = 220\n\nmax_meanint = 0.2125 * maxR + 0.7154 * maxG + 0.0721 * maxB\nprint('MeanIntensty (max) : ', max_meanint)\n\n\nprops = props[(props['area'] >= 50) & (props['area'] <= 1000)]\nprops = props[(props['mean_intensity'] <= max_meanint)]\n\n# add plane indices\nprops['S'] = 0\nprops['T'] = 0\nprops['Z'] = 0\nprops['C'] = 0\n\n# count the number of objects\nvalues['Number'] = props.shape[0]\n\nprint(values)\nprint(props)\n\nax = sgt.plot_results(image2d, mask, props, add_bbox=True)\n"
]
| [
[
"numpy.moveaxis",
"matplotlib.pyplot.figure"
]
]
|
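The channel-handling steps from the script above, replayed on a dummy mosaic so the indexing is visible without a CZI file on hand:

```python
import numpy as np

# Stand-in for czi.read_mosaic(C=0, scale_factor=1): (scene, channel, Z, Y, X).
mosaic_data = np.random.randint(0, 255, size=(1, 3, 1, 64, 64), dtype=np.uint8)

image2d = mosaic_data[0, :, 0, :, :]   # (3, H, W)
image2d = np.moveaxis(image2d, 0, -1)  # (H, W, 3), channels last
image2d = image2d[..., ::-1]           # ZEN stores BGR; flip to RGB

print(image2d.shape)  # (64, 64, 3)
```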
Richard-L-Johnson/pyalgotrader | [
"ad2bcc6b25c06c66eee4a8d522ce844504d8ec62"
]
| [
"testcases/technical_hurst_test.py"
]
| [
"# PyAlgoTrade\n#\n# Copyright 2011-2018 Gabriel Martin Becedillas Ruiz\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>\n\"\"\"\n\nimport numpy as np\n\nfrom . import common\n\nfrom pyalgotrade.technical import hurst\nfrom pyalgotrade import dataseries\n\n\ndef build_hurst(values, period, minLags, maxLags):\n ds = dataseries.SequenceDataSeries()\n ret = hurst.HurstExponent(ds, period, minLags, maxLags)\n for value in values:\n ds.append(value)\n return ret\n\n\nclass TestCase(common.TestCase):\n def testHurstExpFunRandomWalk(self):\n values = np.cumsum(np.random.randn(50000)) + 1000\n h = hurst.hurst_exp(np.log10(values), 2, 20)\n self.assertEquals(round(h, 1), 0.5)\n\n def testHurstExpFunTrending(self):\n values = np.cumsum(np.random.randn(50000)+1) + 1000\n h = hurst.hurst_exp(np.log10(values), 2, 20)\n self.assertEquals(round(h), 1)\n\n def testHurstExpFunMeanRev(self):\n values = (np.random.randn(50000)) + 1000\n h = hurst.hurst_exp(np.log10(values), 2, 20)\n self.assertEquals(round(h), 0)\n\n def testRandomWalk(self):\n num_values = 10000\n values = np.cumsum(np.random.randn(num_values)) + 1000\n hds = build_hurst(values, num_values - 10, 2, 20)\n self.assertEquals(round(hds[-1], 1), 0.5)\n self.assertEquals(round(hds[-2], 1), 0.5)\n\n def testTrending(self):\n num_values = 10000\n values = np.cumsum(np.random.randn(num_values) + 10) + 1000\n hds = build_hurst(values, num_values - 10, 2, 20)\n self.assertEquals(round(hds[-1], 1), 1)\n self.assertEquals(round(hds[-2], 1), 1)\n\n def testMeanRev(self):\n num_values = 10000\n values = np.random.randn(num_values) + 100\n hds = build_hurst(values, num_values - 10, 2, 20)\n self.assertEquals(round(hds[-1], 1), 0)\n self.assertEquals(round(hds[-2], 1), 0)\n"
]
| [
[
"numpy.log10",
"numpy.random.randn"
]
]
|
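The three synthetic series behind the tests above, reduced to their construction: a random walk, a drifting walk, and white noise around a level, with expected Hurst exponents of roughly 0.5, 1, and 0:

```python
import numpy as np

random_walk = np.cumsum(np.random.randn(50000)) + 1000      # H ~= 0.5
trending    = np.cumsum(np.random.randn(50000) + 1) + 1000  # H ~= 1
mean_rev    = np.random.randn(50000) + 1000                 # H ~= 0
```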
mfalkiewicz/nipype | [
"373bdddba9f675ef153951afa368729e2d8950d2"
]
| [
"nipype/interfaces/cmtk/tests/test_nbs.py"
]
| [
"from __future__ import unicode_literals\nfrom ..nbs import NetworkBasedStatistic\nfrom ....utils.misc import package_check\nimport numpy as np\nimport networkx as nx\nimport pytest\n\nhave_cv = True\ntry:\n package_check('cviewer')\nexcept Exception as e:\n have_cv = False\n\[email protected]()\ndef creating_graphs(tmpdir):\n graphlist = []\n graphnames = [\"name\"+str(i) for i in range(6)]\n for idx, name in enumerate(graphnames):\n graph = np.random.rand(10,10)\n G = nx.from_numpy_matrix(graph)\n out_file = tmpdir.strpath + graphnames[idx] + '.pck'\n # Save as pck file\n nx.write_gpickle(G, out_file)\n graphlist.append(out_file)\n return graphlist\n\n\[email protected](have_cv, reason=\"tests for import error, cviewer available\")\ndef test_importerror(creating_graphs, tmpdir):\n tmpdir.chdir()\n graphlist = creating_graphs\n group1 = graphlist[:3]\n group2 = graphlist[3:]\n\n nbs = NetworkBasedStatistic()\n nbs.inputs.in_group1 = group1\n nbs.inputs.in_group2 = group2\n nbs.inputs.edge_key = \"weight\"\n\n with pytest.raises(ImportError) as e:\n nbs.run()\n assert \"cviewer library is not available\" == str(e.value)\n\n\[email protected](not have_cv, reason=\"cviewer has to be available\")\ndef test_keyerror(creating_graphs):\n graphlist =creating_graphs\n\n group1 = graphlist[:3]\n group2 = graphlist[3:]\n\n nbs = NetworkBasedStatistic()\n nbs.inputs.in_group1 = group1\n nbs.inputs.in_group2 = group2\n nbs.inputs.edge_key = \"Your_edge\"\n\n with pytest.raises(KeyError) as e:\n nbs.run()\n assert \"the graph edges do not have Your_edge attribute\" in str(e.value)\n"
]
| [
[
"numpy.random.rand"
]
]
|
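What the `creating_graphs` fixture above does for one graph, assuming the pre-3.0 networkx API the test relies on (`from_numpy_matrix` and `write_gpickle` were removed in networkx 3.0):

```python
import numpy as np
import networkx as nx

adjacency = np.random.rand(10, 10)  # random weighted 10-node graph
G = nx.from_numpy_matrix(adjacency)
nx.write_gpickle(G, 'name0.pck')    # saved as a pickle, as in the fixture
```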
johnamcleod/tensorflow | [
"e25272e74334cb2a4c6b256c132786f72c28fcd0"
]
| [
"tensorflow/python/distribute/strategy_common_test.py"
]
| [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for common methods in strategy classes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import multi_worker_test_base\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import strategy_combinations\nfrom tensorflow.python.distribute import strategy_test_lib\nfrom tensorflow.python.distribute.collective_all_reduce_strategy import CollectiveAllReduceStrategy\nfrom tensorflow.python.distribute.tpu_strategy import TPUStrategy\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import test\n\n\nclass StrategyReduceTest(test.TestCase, parameterized.TestCase):\n\n @combinations.generate(\n combinations.combine(\n strategy=[\n strategy_combinations.multi_worker_mirrored_2x1_cpu,\n strategy_combinations.multi_worker_mirrored_2x1_gpu,\n ] + strategy_combinations.strategies_minus_tpu,\n mode=['eager']))\n def testSimpleReduce(self, strategy):\n\n def fn_eager():\n\n def replica_fn():\n return array_ops.ones((), dtypes.float32)\n\n per_replica_value = strategy.run(replica_fn)\n return strategy.reduce(\n reduce_util.ReduceOp.SUM, value=per_replica_value, axis=None)\n\n fn_graph = def_function.function(fn_eager)\n\n # Run reduce under the strategy scope to explicitly enter\n # strategy default_device scope.\n with strategy.scope():\n self.assertEqual(fn_eager().numpy(), 1.0 * strategy.num_replicas_in_sync)\n self.assertEqual(fn_graph().numpy(), 1.0 * strategy.num_replicas_in_sync)\n\n # Run reduce without a strategy scope to implicitly enter\n # strategy default_device scope.\n self.assertEqual(fn_eager().numpy(), 1.0 * strategy.num_replicas_in_sync)\n self.assertEqual(fn_graph().numpy(), 1.0 * strategy.num_replicas_in_sync)\n\n\[email protected](\n combinations.combine(\n strategy=[\n strategy_combinations.multi_worker_mirrored_2x1_cpu,\n strategy_combinations.multi_worker_mirrored_2x1_gpu,\n ],\n mode=['eager']))\nclass DistributedCollectiveAllReduceStrategyTest(\n strategy_test_lib.DistributionTestBase,\n parameterized.TestCase):\n\n def testDatasetFromFunction(self, strategy):\n def dataset_fn(input_context):\n global_batch_size = 10\n batch_size = input_context.get_per_replica_batch_size(global_batch_size)\n d = dataset_ops.DatasetV2.range(100).repeat().batch(batch_size)\n return d.shard(input_context.num_input_pipelines,\n input_context.input_pipeline_id)\n\n expected_sum_on_workers = {'chief': 10, 'worker': 35}\n input_iterator = iter(\n 
strategy.experimental_distribute_datasets_from_function(dataset_fn))\n\n @def_function.function\n def run(iterator):\n return strategy.experimental_local_results(iterator.get_next())\n\n result = run(input_iterator)\n sum_value = math_ops.reduce_sum(result)\n self.assertEqual(\n sum_value.numpy(),\n expected_sum_on_workers[multi_worker_test_base.get_task_type()])\n\n def testReduceHostTensor(self, strategy):\n reduced = strategy.reduce(\n reduce_util.ReduceOp.SUM, array_ops.identity(1.), axis=None)\n self.assertEqual(reduced.numpy(), 2.)\n\n def testReduceToHostTensor(self, strategy):\n value = array_ops.identity(1.)\n reduced = strategy.extended.reduce_to(reduce_util.ReduceOp.SUM, value,\n value)\n self.assertEqual(reduced.numpy(), 2.)\n\n def testBatchReduceToHostTensor(self, strategy):\n value = array_ops.identity(1.)\n reduced = strategy.extended.batch_reduce_to(reduce_util.ReduceOp.SUM,\n [(value, value),\n (value, value)])\n self.assertAllEqual(reduced, [2., 2.])\n\n def testReduceDeviceTensors(self, strategy):\n value = strategy.run(lambda: array_ops.identity(1.))\n reduced = strategy.reduce(reduce_util.ReduceOp.SUM, value, axis=None)\n self.assertEqual(reduced.numpy(), 2.)\n\n def testReduceToDeviceTensors(self, strategy):\n value = strategy.run(lambda: array_ops.identity(1.))\n reduced = strategy.extended.reduce_to(reduce_util.ReduceOp.SUM, value,\n value)\n self.assertEqual(reduced.numpy(), 2.)\n\n def testBatchReduceToDeviceTensors(self, strategy):\n value = strategy.run(lambda: array_ops.identity(1.))\n reduced = strategy.extended.batch_reduce_to(reduce_util.ReduceOp.SUM,\n [(value, value),\n (value, value)])\n self.assertAllEqual(reduced, [2., 2.])\n\n # TODO(crccw): add a test that mixes device and host tensors after multi\n # worker strategy combinations can run on a fixed number of GPUs.\n\n\nclass StrategyClusterResolverTest(test.TestCase, parameterized.TestCase):\n\n @combinations.generate(\n combinations.combine(\n strategy=[strategy_combinations.multi_worker_mirrored_2x1_cpu] +\n strategy_combinations.all_strategies,\n mode=['eager']))\n def testClusterResolverProperty(self, strategy):\n # CollectiveAllReduceStrategy and TPUStrategy must have a cluster resolver.\n # `None` otherwise.\n resolver = strategy.cluster_resolver\n if not isinstance(strategy, CollectiveAllReduceStrategy) and not isinstance(\n strategy, TPUStrategy):\n self.assertIsNone(resolver)\n return\n\n with strategy.scope():\n self.assertIs(strategy.cluster_resolver, resolver)\n self.assertTrue(hasattr(resolver, 'cluster_spec'))\n if isinstance(strategy, TPUStrategy):\n self.skipTest('b/159747888')\n self.assertTrue(hasattr(resolver, 'environment'))\n self.assertTrue(hasattr(resolver, 'master'))\n self.assertTrue(hasattr(resolver, 'num_accelerators'))\n self.assertIsNone(resolver.rpc_layer)\n if isinstance(strategy, CollectiveAllReduceStrategy):\n self.assertEqual(resolver.task_id, 0)\n self.assertAllInSet(resolver.task_type, ['chief', 'worker'])\n elif isinstance(strategy, TPUStrategy):\n # TPUStrategy does not have task_id and task_type applicable.\n self.assertIsNone(resolver.task_id)\n self.assertIsNone(resolver.task_type)\n\n\nif __name__ == '__main__':\n combinations.main()\n"
]
| [
[
"tensorflow.python.distribute.multi_worker_test_base.get_task_type",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.distribute.combinations.combine",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.data.ops.dataset_ops.DatasetV2.range",
"tensorflow.python.distribute.combinations.main"
]
]
|
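The core pattern exercised by `testSimpleReduce` above, sketched with the public TF 2 API and a single-machine `MirroredStrategy` instead of the test's multi-worker combinations:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

per_replica = strategy.run(lambda: tf.ones((), tf.float32))
total = strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=None)

print(total.numpy())  # 1.0 * strategy.num_replicas_in_sync
```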
dburkhardt/slalom | [
"547a56316e5c3ccc63e592eb907dc53b00212466"
]
| [
"slalom/bayesnet/misc.py"
]
| [
"# Copyright(c) 2014, The scLVM developers (Forian Buettner, Paolo Francesco Casale, Oliver Stegle)\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\n# Helper functions\n\nimport sys\nimport scipy as SP\nimport h5py\nimport pdb\nimport scipy.linalg as LA\n\ndef smartAppend(table,name,value):\n\t\"\"\"\n\thelper function for apppending in a dictionary\t\n\t\"\"\"\t\n\tif name not in list(table.keys()):\n\t\ttable[name] = []\n\ttable[name].append(value)\n\n\ndef dumpDictHdf5(RV,o):\n\t\"\"\" Dump a dictionary where each page is a list or an array \"\"\"\n\tfor key in list(RV.keys()):\n\t\to.create_dataset(name=key,data=SP.array(RV[key]),chunks=True,compression='gzip')\n\ndef smartDumpDictHdf5(RV,o, chunks=True, close_file=True):\n\t\"\"\" Dump a dictionary where each page is a list or an array or still a dictionary (in this case, it iterates)\"\"\"\n\tfor key in list(RV.keys()):\n\t\tif type(RV[key])==dict:\n\t\t\tg = o.create_group(key)\n\t\t\tsmartDumpDictHdf5(RV[key],g)\n\t\telse:\n\t\t\tif SP.isscalar(RV[key]):\n\t\t\t\to.create_dataset(name=key,data=SP.array(RV[key]),chunks=False)\n\t\t\telse:\n\t\t\t\to.create_dataset(name=key,data=SP.array(RV[key]),chunks=True,compression='gzip')\n\t#if close_file==True: \n\t\t#o.close()\n \ndef smartGetDictHdf5(o):\n RV={} \n for key in list(o.keys()):\n if type(o[key])==dict:\n smartGetDictHdf5(RV[key],o[key])\n else:\n if len(o[key].shape)==0:\n RV[key] = o[key][()]\n else:\n RV[key] = o[key][:]\n return RV\n\ndef warning_on_one_line(message, category, filename, lineno, file=None, line=None):\n return ' %s:%s: %s:%s' % (filename, lineno, category.__name__, message)\n\ndef regressOut(Y,X):\n\t\"\"\"\n\tregresses out X from Y\n\t\"\"\"\n\tXd = LA.pinv(X)\n\tY_out = Y-X.dot(Xd.dot(Y))\n\treturn Y_out\n\ndef PCA(Y, components):\n\t\"\"\"run PCA, retrieving the first (components) principle components\n\treturn [s0, eig, w0]\n\ts0: factors\n\tw0: weights\n\t\"\"\"\n\tsv = LA.svd(Y, full_matrices=0);\n\t[s0, w0] = [sv[0][:, 0:components], SP.dot(SP.diag(sv[1]), sv[2]).T[:, 0:components]]\n\tv = s0.std(axis=0)\n\ts0 /= v;\n\tw0 *= v;\n\treturn [s0, w0]\n"
]
| [
[
"scipy.linalg.svd",
"scipy.isscalar",
"scipy.array",
"scipy.linalg.pinv",
"scipy.diag"
]
]
|
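A toy call of the `PCA` helper above on a random samples-by-features matrix (illustrative only; the import path mirrors the row's file path):

```python
import numpy as np
from slalom.bayesnet.misc import PCA

Y = np.random.randn(100, 20)
s0, w0 = PCA(Y, components=3)  # factors and weights for the top 3 PCs
print(s0.shape, w0.shape)      # (100, 3) (20, 3)
```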
james-choncholas/coral-webcam-segmentation | [
"dac2f3d0d84b12a14c41d660000db9b89171cfcf"
]
| [
"bodypix.py"
]
| [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport svgwrite\nimport re\nimport PIL\nimport argparse\nfrom functools import partial\nfrom collections import deque\n\nimport numpy as np\nimport scipy.ndimage\nimport scipy.misc\nfrom PIL import Image\n\nimport gstreamer\n#from pose_engine import PoseEngine, EDGES, BODYPIX_PARTS\n\nfrom pycoral.adapters import common\nfrom pycoral.adapters import segment\nfrom pycoral.utils.edgetpu import make_interpreter\n\n# Color mapping for bodyparts\n#RED_BODYPARTS = [k for k,v in BODYPIX_PARTS.items() if \"right\" in v]\n#GREEN_BODYPARTS = [k for k,v in BODYPIX_PARTS.items() if \"hand\" in v or \"torso\" in v]\n#BLUE_BODYPARTS = [k for k,v in BODYPIX_PARTS.items() if \"leg\" in v or \"arm\" in v or \"face\" in v or \"hand\" in v]\n\n#def shadow_text(dwg, x, y, text, font_size=16):\n# dwg.add(dwg.text(text, insert=(x + 1, y + 1), fill='black',\n# font_size=font_size, style='font-family:sans-serif'))\n# dwg.add(dwg.text(text, insert=(x, y), fill='white',\n# font_size=font_size, style='font-family:sans-serif'))\n#\n#def draw_pose(dwg, pose, color='blue', threshold=0.2):\n# xys = {}\n# for label, keypoint in pose.keypoints.items():\n# if keypoint.score < threshold: continue\n# xys[label] = (int(keypoint.yx[1]), int(keypoint.yx[0]))\n# dwg.add(dwg.circle(center=(int(keypoint.yx[1]), int(keypoint.yx[0])), r=5,\n# fill='cyan', stroke=color))\n# for a, b in EDGES:\n# if a not in xys or b not in xys: continue\n# ax, ay = xys[a]\n# bx, by = xys[b]\n# dwg.add(dwg.line(start=(ax, ay), end=(bx, by), stroke=color, stroke_width=2))\n\n\ndef create_pascal_label_colormap():\n \"\"\"Creates a label colormap used in PASCAL VOC segmentation benchmark.\n Returns:\n A Colormap for visualizing segmentation results.\n \"\"\"\n #colormap = np.zeros((256, 3), dtype=int)\n #indices = np.arange(256, dtype=int)\n\n #for shift in reversed(range(8)):\n # for channel in range(3):\n # colormap[:, channel] |= ((indices >> channel) & 1) << shift\n # indices >>= 3\n\n\n\n #15 is person, 20 is wall?\n colormap = np.zeros((256, 3), dtype=np.uint8)\n colormap[15] = (255,255,255)\n return colormap\n\n\ndef label_to_color_image(label):\n \"\"\"Adds color defined by the dataset colormap to the label.\n Args:\n label: A 2D array with integer type, storing the segmentation label.\n Returns:\n result: A 2D array with floating type. 
The element of the array\n is the color indexed by the corresponding element in the input label\n to the PASCAL color map.\n Raises:\n ValueError: If label is not of rank 2 or its value is larger than color\n map maximum entry.\n \"\"\"\n if label.ndim != 2:\n raise ValueError('Expect 2-D input label')\n\n colormap = create_pascal_label_colormap()\n\n if np.max(label) >= len(colormap):\n raise ValueError('label value too large.')\n\n return colormap[label]\n\nclass Callback:\n ##def __init__(self, engine, anonymize=True, bodyparts=True):\n def __init__(self, interpreter):\n ##self.engine = engine\n self.interpreter = interpreter\n ##self.anonymize = anonymize\n ##self.bodyparts = bodyparts\n self.background_image = None\n self.last_time = time.monotonic()\n self.frames = 0\n self.sum_fps = 0\n self.sum_process_time = 0\n #self.sum_inference_time = 0\n\n def __call__(self, image):\n #i = Image.frombytes('RGB', (image.shape[1], image.shape[0]), image.copy())#, \"raw\", 'RGB', stride) # this works\n #i.save(\"/tmp/wtf.jpg\")\n\n common.set_input(self.interpreter, np.ascontiguousarray(image))\n self.interpreter.invoke()\n result = segment.get_output(self.interpreter)\n if len(result.shape) == 3:\n result = np.argmax(result, axis=-1)\n\n ##mask = Image.fromarray(label_to_color_image(result).astype(np.uint8))\n mask = label_to_color_image(result)\n #output_image = mask\n\n bg = np.full(image.shape, [0, 255, 0], dtype=np.uint8)\n #output_image = bg\n\n # use mask to combine with background\n tmp1 = np.bitwise_and(image, mask)\n tmp2 = np.bitwise_and(bg, ~mask)\n output_image = tmp1+tmp2\n\n end_time = time.monotonic()\n\n self.frames += 1\n self.sum_fps += 1.0 / (end_time - self.last_time)\n ##self.sum_process_time += 1000 * (end_time - start_time) - inference_time\n ##self.sum_inference_time += inference_time\n self.last_time = end_time\n text_line = 'PoseNet: %.1fms Frame IO: %.2fms TrueFPS: %.2f Nposes %d' % (\n ##self.sum_inference_time / self.frames,\n 0,\n ##self.sum_process_time / self.frames,\n 0,\n self.sum_fps / self.frames,\n #len(poses)\n 0\n )\n\n print(text_line)\n return output_image\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--mirror', help='flip video horizontally', action='store_true')\n parser.add_argument('--model', help='.tflite model path.', required=False)\n parser.add_argument('--width', help='Source width', default='640')\n parser.add_argument('--height', help='Source height', default='480')\n parser.add_argument('--videosrc', help='Which video source to use', default='/dev/video0')\n\n parser.add_argument('--anonymize', dest='anonymize', action='store_true', help='Use anonymizer mode [--noanonymize]')\n parser.add_argument('--noanonymize', dest='anonymize', action='store_false', help=argparse.SUPPRESS)\n parser.set_defaults(anonymize=False)\n\n parser.add_argument('--bodyparts', dest='bodyparts', action='store_true', help='Color by bodyparts [--nobodyparts]')\n parser.add_argument('--nobodyparts', dest='bodyparts', action='store_false', help=argparse.SUPPRESS)\n parser.set_defaults(bodyparts=True)\n\n parser.add_argument('--h264', help='Use video/x-h264 input', action='store_true')\n parser.add_argument('--jpeg', help='Use video/jpeg input', action='store_true')\n args = parser.parse_args()\n\n if args.h264 and args.jpeg:\n print('Error: both mutually exclusive options h264 and jpeg set')\n sys.exit(1)\n\n #default_model = 
'models/bodypix_mobilenet_v1_075_640_480_16_quant_edgetpu_decoder.tflite'\n #default_model = 'models/deeplabv3_mnv2_dm05_pascal_quant_edgetpu.tflite'\n default_model = 'models/deeplabv3_mnv2_pascal_quant_edgetpu.tflite'\n model = args.model if args.model else default_model\n print('Model: {}'.format(model))\n\n\n ##engine = PoseEngine(model)\n ##inference_size = (engine.image_width, engine.image_height)\n interpreter = make_interpreter(model, device=':0')\n interpreter.allocate_tensors()\n inference_size = common.input_size(interpreter)\n #inference_size = [512,512]\n print('Inference size: {}'.format(inference_size))\n\n src_size = (int(args.width), int(args.height))\n if args.videosrc.startswith('/dev/video'):\n print('Source size: {}'.format(src_size))\n\n gstreamer.run_pipeline(##Callback(engine,\n ## anonymize=args.anonymize,\n ## bodyparts=args.bodyparts),\n Callback(interpreter),\n src_size, inference_size,\n mirror=args.mirror,\n videosrc=args.videosrc,\n h264=args.h264,\n jpeg=args.jpeg)\n\n\nif __name__ == '__main__':\n main()\n"
]
| [
[
"numpy.max",
"numpy.full",
"numpy.zeros",
"numpy.ascontiguousarray",
"numpy.bitwise_and",
"numpy.argmax"
]
]
|
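The masking step used in `Callback.__call__` above, on a tiny synthetic label map; class 15 is the only non-black entry in the colormap built by `create_pascal_label_colormap`:

```python
import numpy as np
# Assumes label_to_color_image is copied from bodypix.py (importing the whole
# module would also pull in gstreamer and the Edge TPU runtime).

label = np.zeros((4, 4), dtype=np.int64)
label[1:3, 1:3] = 15                # 15 = person in the PASCAL map above
mask = label_to_color_image(label)  # white where person, black elsewhere
print(mask.shape, mask.dtype)       # (4, 4, 3) uint8
```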
holmesal/fastai | [
"cbd2a0c91d01842fb2e780072aed510b1325d1e5"
]
| [
"alonso/planet.py"
]
| [
"from fastai.imports import *\nfrom fastai.transforms import *\nfrom fastai.dataset import *\nfrom sklearn.metrics import fbeta_score\nimport warnings\n\ndef f2(preds, targs, start=0.17, end=0.24, step=0.01):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return max([fbeta_score(targs, (preds>th), 2, average='samples')\n for th in np.arange(start,end,step)])\n\ndef opt_th(preds, targs, start=0.17, end=0.24, step=0.01):\n ths = np.arange(start,end,step)\n idx = np.argmax([fbeta_score(targs, (preds>th), 2, average='samples')\n for th in ths])\n return ths[idx]\n\ndef get_data(path, tfms,bs, n, cv_idx):\n val_idxs = get_cv_idxs(n, cv_idx)\n return ImageClassifierData.from_csv(path, 'train-jpg', f'{path}train_v2.csv', bs, tfms,\n suffix='.jpg', val_idxs=val_idxs, test_name='test-jpg')\n\ndef get_data_zoom(f_model, path, sz, bs, n, cv_idx):\n tfms = tfms_from_model(f_model, sz, aug_tfms=transforms_top_down, max_zoom=1.05)\n return get_data(path, tfms, bs, n, cv_idx)\n\ndef get_data_pad(f_model, path, sz, bs, n, cv_idx):\n transforms_pt = [RandomRotateZoom(9, 0.18, 0.1), RandomLighting(0.05, 0.1), RandomDihedral()]\n tfms = tfms_from_model(f_model, sz, aug_tfms=transforms_pt, pad=sz//12)\n return get_data(path, tfms, bs, n, cv_idx)\n"
]
| [
[
"sklearn.metrics.fbeta_score"
]
]
|
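A toy invocation of the threshold search above; predictions and labels are random, so the scores are meaningless and only illustrate shapes and the call signatures:

```python
import numpy as np
# Assumes f2 and opt_th are available, e.g. copied from alonso/planet.py.

preds = np.random.rand(32, 17)                      # per-class probabilities
targs = (np.random.rand(32, 17) > 0.8).astype(int)  # multi-label targets

print(f2(preds, targs))      # best F2 over thresholds in [0.17, 0.24)
print(opt_th(preds, targs))  # the threshold that achieves it
```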
raphael0202/models | [
"2bc90622ab8b278103a3692ac760e6e9aefb38f3"
]
| [
"research/object_detection/model_lib.py"
]
| [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Constructs model, inputs, and training environment.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport functools\nimport os\n\nimport tensorflow as tf\n\nfrom object_detection import eval_util\nfrom object_detection import exporter as exporter_lib\nfrom object_detection import inputs\nfrom object_detection.builders import graph_rewriter_builder\nfrom object_detection.builders import model_builder\nfrom object_detection.builders import optimizer_builder\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.utils import config_util\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\nfrom object_detection.utils import variables_helper\nfrom object_detection.utils import visualization_utils as vis_utils\n\n# A map of names to methods that help build the model.\nMODEL_BUILD_UTIL_MAP = {\n 'get_configs_from_pipeline_file':\n config_util.get_configs_from_pipeline_file,\n 'create_pipeline_proto_from_configs':\n config_util.create_pipeline_proto_from_configs,\n 'merge_external_params_with_configs':\n config_util.merge_external_params_with_configs,\n 'create_train_input_fn':\n inputs.create_train_input_fn,\n 'create_eval_input_fn':\n inputs.create_eval_input_fn,\n 'create_predict_input_fn':\n inputs.create_predict_input_fn,\n 'detection_model_fn_base': model_builder.build,\n}\n\n\ndef _prepare_groundtruth_for_eval(detection_model, class_agnostic,\n max_number_of_boxes):\n \"\"\"Extracts groundtruth data from detection_model and prepares it for eval.\n\n Args:\n detection_model: A `DetectionModel` object.\n class_agnostic: Whether the detections are class_agnostic.\n max_number_of_boxes: Max number of groundtruth boxes.\n\n Returns:\n A tuple of:\n groundtruth: Dictionary with the following fields:\n 'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes,\n in normalized coordinates.\n 'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed\n classes.\n 'groundtruth_masks': 4D float32 tensor of instance masks (if provided in\n groundtruth)\n 'groundtruth_is_crowd': [batch_size, num_boxes] bool tensor indicating\n is_crowd annotations (if provided in groundtruth).\n 'num_groundtruth_boxes': [batch_size] tensor containing the maximum number\n of groundtruth boxes per image..\n class_agnostic: Boolean indicating whether detections are class agnostic.\n \"\"\"\n input_data_fields = fields.InputDataFields()\n groundtruth_boxes = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.boxes))\n groundtruth_boxes_shape = tf.shape(groundtruth_boxes)\n # For class-agnostic models, groundtruth one-hot encodings collapse to all\n # ones.\n if class_agnostic:\n 
groundtruth_classes_one_hot = tf.ones(\n [groundtruth_boxes_shape[0], groundtruth_boxes_shape[1], 1])\n else:\n groundtruth_classes_one_hot = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.classes))\n label_id_offset = 1 # Applying label id offset (b/63711816)\n groundtruth_classes = (\n tf.argmax(groundtruth_classes_one_hot, axis=2) + label_id_offset)\n groundtruth = {\n input_data_fields.groundtruth_boxes: groundtruth_boxes,\n input_data_fields.groundtruth_classes: groundtruth_classes\n }\n if detection_model.groundtruth_has_field(fields.BoxListFields.masks):\n groundtruth[input_data_fields.groundtruth_instance_masks] = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.masks))\n\n if detection_model.groundtruth_has_field(fields.BoxListFields.is_crowd):\n groundtruth[input_data_fields.groundtruth_is_crowd] = tf.stack(\n detection_model.groundtruth_lists(fields.BoxListFields.is_crowd))\n\n groundtruth[input_data_fields.num_groundtruth_boxes] = (\n tf.tile([max_number_of_boxes], multiples=[groundtruth_boxes_shape[0]]))\n return groundtruth\n\n\ndef unstack_batch(tensor_dict, unpad_groundtruth_tensors=True):\n \"\"\"Unstacks all tensors in `tensor_dict` along 0th dimension.\n\n Unstacks tensor from the tensor dict along 0th dimension and returns a\n tensor_dict containing values that are lists of unstacked, unpadded tensors.\n\n Tensors in the `tensor_dict` are expected to be of one of the three shapes:\n 1. [batch_size]\n 2. [batch_size, height, width, channels]\n 3. [batch_size, num_boxes, d1, d2, ... dn]\n\n When unpad_groundtruth_tensors is set to true, unstacked tensors of form 3\n above are sliced along the `num_boxes` dimension using the value in tensor\n field.InputDataFields.num_groundtruth_boxes.\n\n Note that this function has a static list of input data fields and has to be\n kept in sync with the InputDataFields defined in core/standard_fields.py\n\n Args:\n tensor_dict: A dictionary of batched groundtruth tensors.\n unpad_groundtruth_tensors: Whether to remove padding along `num_boxes`\n dimension of the groundtruth tensors.\n\n Returns:\n A dictionary where the keys are from fields.InputDataFields and values are\n a list of unstacked (optionally unpadded) tensors.\n\n Raises:\n ValueError: If unpad_tensors is True and `tensor_dict` does not contain\n `num_groundtruth_boxes` tensor.\n \"\"\"\n unbatched_tensor_dict = {\n key: tf.unstack(tensor) for key, tensor in tensor_dict.items()\n }\n if unpad_groundtruth_tensors:\n if (fields.InputDataFields.num_groundtruth_boxes not in\n unbatched_tensor_dict):\n raise ValueError('`num_groundtruth_boxes` not found in tensor_dict. '\n 'Keys available: {}'.format(\n unbatched_tensor_dict.keys()))\n unbatched_unpadded_tensor_dict = {}\n unpad_keys = set([\n # List of input data fields that are padded along the num_boxes\n # dimension. 
This list has to be kept in sync with InputDataFields in\n # standard_fields.py.\n fields.InputDataFields.groundtruth_instance_masks,\n fields.InputDataFields.groundtruth_classes,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_keypoints,\n fields.InputDataFields.groundtruth_group_of,\n fields.InputDataFields.groundtruth_difficult,\n fields.InputDataFields.groundtruth_is_crowd,\n fields.InputDataFields.groundtruth_area,\n fields.InputDataFields.groundtruth_weights\n ]).intersection(set(unbatched_tensor_dict.keys()))\n\n for key in unpad_keys:\n unpadded_tensor_list = []\n for num_gt, padded_tensor in zip(\n unbatched_tensor_dict[fields.InputDataFields.num_groundtruth_boxes],\n unbatched_tensor_dict[key]):\n tensor_shape = shape_utils.combined_static_and_dynamic_shape(\n padded_tensor)\n slice_begin = tf.zeros([len(tensor_shape)], dtype=tf.int32)\n slice_size = tf.stack(\n [num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])\n unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)\n unpadded_tensor_list.append(unpadded_tensor)\n unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list\n unbatched_tensor_dict.update(unbatched_unpadded_tensor_dict)\n\n return unbatched_tensor_dict\n\n\ndef provide_groundtruth(model, labels):\n \"\"\"Provides the labels to a model as groundtruth.\n\n This helper function extracts the corresponding boxes, classes,\n keypoints, weights, masks, etc. from the labels, and provides it\n as groundtruth to the models.\n\n Args:\n model: The detection model to provide groundtruth to.\n labels: The labels for the training or evaluation inputs.\n \"\"\"\n gt_boxes_list = labels[fields.InputDataFields.groundtruth_boxes]\n gt_classes_list = labels[fields.InputDataFields.groundtruth_classes]\n gt_masks_list = None\n if fields.InputDataFields.groundtruth_instance_masks in labels:\n gt_masks_list = labels[\n fields.InputDataFields.groundtruth_instance_masks]\n gt_keypoints_list = None\n if fields.InputDataFields.groundtruth_keypoints in labels:\n gt_keypoints_list = labels[fields.InputDataFields.groundtruth_keypoints]\n gt_weights_list = None\n if fields.InputDataFields.groundtruth_weights in labels:\n gt_weights_list = labels[fields.InputDataFields.groundtruth_weights]\n gt_confidences_list = None\n if fields.InputDataFields.groundtruth_confidences in labels:\n gt_confidences_list = labels[\n fields.InputDataFields.groundtruth_confidences]\n gt_is_crowd_list = None\n if fields.InputDataFields.groundtruth_is_crowd in labels:\n gt_is_crowd_list = labels[fields.InputDataFields.groundtruth_is_crowd]\n model.provide_groundtruth(\n groundtruth_boxes_list=gt_boxes_list,\n groundtruth_classes_list=gt_classes_list,\n groundtruth_confidences_list=gt_confidences_list,\n groundtruth_masks_list=gt_masks_list,\n groundtruth_keypoints_list=gt_keypoints_list,\n groundtruth_weights_list=gt_weights_list,\n groundtruth_is_crowd_list=gt_is_crowd_list)\n\n\ndef create_model_fn(detection_model_fn, configs, hparams, use_tpu=False,\n postprocess_on_cpu=False):\n \"\"\"Creates a model function for `Estimator`.\n\n Args:\n detection_model_fn: Function that returns a `DetectionModel` instance.\n configs: Dictionary of pipeline config objects.\n hparams: `HParams` object.\n use_tpu: Boolean indicating whether model should be constructed for\n use on TPU.\n postprocess_on_cpu: When use_tpu and postprocess_on_cpu is true, postprocess\n is scheduled on the host cpu.\n\n Returns:\n `model_fn` for `Estimator`.\n \"\"\"\n train_config = 
configs['train_config']\n eval_input_config = configs['eval_input_config']\n eval_config = configs['eval_config']\n\n def model_fn(features, labels, mode, params=None):\n \"\"\"Constructs the object detection model.\n\n Args:\n features: Dictionary of feature tensors, returned from `input_fn`.\n labels: Dictionary of groundtruth tensors if mode is TRAIN or EVAL,\n otherwise None.\n mode: Mode key from tf.estimator.ModeKeys.\n params: Parameter dictionary passed from the estimator.\n\n Returns:\n An `EstimatorSpec` that encapsulates the model and its serving\n configurations.\n \"\"\"\n params = params or {}\n total_loss, train_op, detections, export_outputs = None, None, None, None\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n\n # Make sure to set the Keras learning phase. True during training,\n # False for inference.\n tf.keras.backend.set_learning_phase(is_training)\n # Set policy for mixed-precision training with Keras-based models.\n if use_tpu and train_config.use_bfloat16:\n from tensorflow.python.keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top\n # Enable v2 behavior, as `mixed_bfloat16` is only supported in TF 2.0.\n base_layer_utils.enable_v2_dtype_behavior()\n tf.compat.v2.keras.mixed_precision.experimental.set_policy(\n 'mixed_bfloat16')\n detection_model = detection_model_fn(\n is_training=is_training, add_summaries=(not use_tpu))\n scaffold_fn = None\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n labels = unstack_batch(\n labels,\n unpad_groundtruth_tensors=train_config.unpad_groundtruth_tensors)\n elif mode == tf.estimator.ModeKeys.EVAL:\n # For evaling on train data, it is necessary to check whether groundtruth\n # must be unpadded.\n boxes_shape = (\n labels[fields.InputDataFields.groundtruth_boxes].get_shape()\n .as_list())\n unpad_groundtruth_tensors = boxes_shape[1] is not None and not use_tpu\n labels = unstack_batch(\n labels, unpad_groundtruth_tensors=unpad_groundtruth_tensors)\n\n if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):\n provide_groundtruth(detection_model, labels)\n\n preprocessed_images = features[fields.InputDataFields.image]\n if use_tpu and train_config.use_bfloat16:\n with tf.contrib.tpu.bfloat16_scope():\n prediction_dict = detection_model.predict(\n preprocessed_images,\n features[fields.InputDataFields.true_image_shape])\n prediction_dict = ops.bfloat16_to_float32_nested(prediction_dict)\n else:\n prediction_dict = detection_model.predict(\n preprocessed_images,\n features[fields.InputDataFields.true_image_shape])\n\n def postprocess_wrapper(args):\n return detection_model.postprocess(args[0], args[1])\n\n if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):\n if use_tpu and postprocess_on_cpu:\n detections = tf.contrib.tpu.outside_compilation(\n postprocess_wrapper,\n (prediction_dict,\n features[fields.InputDataFields.true_image_shape]))\n else:\n detections = postprocess_wrapper((\n prediction_dict,\n features[fields.InputDataFields.true_image_shape]))\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n load_pretrained = hparams.load_pretrained if hparams else False\n if train_config.fine_tune_checkpoint and load_pretrained:\n if not train_config.fine_tune_checkpoint_type:\n # train_config.from_detection_checkpoint field is deprecated. 
For\n # backward compatibility, set train_config.fine_tune_checkpoint_type\n # based on train_config.from_detection_checkpoint.\n if train_config.from_detection_checkpoint:\n train_config.fine_tune_checkpoint_type = 'detection'\n else:\n train_config.fine_tune_checkpoint_type = 'classification'\n asg_map = detection_model.restore_map(\n fine_tune_checkpoint_type=train_config.fine_tune_checkpoint_type,\n load_all_detection_checkpoint_vars=(\n train_config.load_all_detection_checkpoint_vars))\n available_var_map = (\n variables_helper.get_variables_available_in_checkpoint(\n asg_map,\n train_config.fine_tune_checkpoint,\n include_global_step=False))\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,\n available_var_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(train_config.fine_tune_checkpoint,\n available_var_map)\n\n if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):\n losses_dict = detection_model.loss(\n prediction_dict, features[fields.InputDataFields.true_image_shape])\n losses = [loss_tensor for loss_tensor in losses_dict.values()]\n if train_config.add_regularization_loss:\n regularization_losses = detection_model.regularization_losses()\n if use_tpu and train_config.use_bfloat16:\n regularization_losses = ops.bfloat16_to_float32_nested(\n regularization_losses)\n if regularization_losses:\n regularization_loss = tf.add_n(\n regularization_losses, name='regularization_loss')\n losses.append(regularization_loss)\n losses_dict['Loss/regularization_loss'] = regularization_loss\n total_loss = tf.add_n(losses, name='total_loss')\n losses_dict['Loss/total_loss'] = total_loss\n\n if 'graph_rewriter_config' in configs:\n graph_rewriter_fn = graph_rewriter_builder.build(\n configs['graph_rewriter_config'], is_training=is_training)\n graph_rewriter_fn()\n\n # TODO(rathodv): Stop creating optimizer summary vars in EVAL mode once we\n # can write learning rate summaries on TPU without host calls.\n global_step = tf.train.get_or_create_global_step()\n training_optimizer, optimizer_summary_vars = optimizer_builder.build(\n train_config.optimizer)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n if use_tpu:\n training_optimizer = tf.contrib.tpu.CrossShardOptimizer(\n training_optimizer)\n\n # Optionally freeze some layers by setting their gradients to be zero.\n trainable_variables = None\n include_variables = (\n train_config.update_trainable_variables\n if train_config.update_trainable_variables else None)\n exclude_variables = (\n train_config.freeze_variables\n if train_config.freeze_variables else None)\n trainable_variables = tf.contrib.framework.filter_variables(\n tf.trainable_variables(),\n include_patterns=include_variables,\n exclude_patterns=exclude_variables)\n\n clip_gradients_value = None\n if train_config.gradient_clipping_by_norm > 0:\n clip_gradients_value = train_config.gradient_clipping_by_norm\n\n if not use_tpu:\n for var in optimizer_summary_vars:\n tf.summary.scalar(var.op.name, var)\n summaries = [] if use_tpu else None\n if train_config.summarize_gradients:\n summaries = ['gradients', 'gradient_norm', 'global_gradient_norm']\n train_op = tf.contrib.layers.optimize_loss(\n loss=total_loss,\n global_step=global_step,\n learning_rate=None,\n clip_gradients=clip_gradients_value,\n optimizer=training_optimizer,\n update_ops=detection_model.updates(),\n variables=trainable_variables,\n summaries=summaries,\n name='') # Preventing scope prefix on all 
variables.\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n exported_output = exporter_lib.add_output_tensor_nodes(detections)\n export_outputs = {\n tf.saved_model.signature_constants.PREDICT_METHOD_NAME:\n tf.estimator.export.PredictOutput(exported_output)\n }\n\n eval_metric_ops = None\n scaffold = None\n if mode == tf.estimator.ModeKeys.EVAL:\n class_agnostic = (\n fields.DetectionResultFields.detection_classes not in detections)\n groundtruth = _prepare_groundtruth_for_eval(\n detection_model, class_agnostic,\n eval_input_config.max_number_of_boxes)\n use_original_images = fields.InputDataFields.original_image in features\n if use_original_images:\n eval_images = features[fields.InputDataFields.original_image]\n true_image_shapes = tf.slice(\n features[fields.InputDataFields.true_image_shape], [0, 0], [-1, 3])\n original_image_spatial_shapes = features[fields.InputDataFields\n .original_image_spatial_shape]\n else:\n eval_images = features[fields.InputDataFields.image]\n true_image_shapes = None\n original_image_spatial_shapes = None\n\n eval_dict = eval_util.result_dict_for_batched_example(\n eval_images,\n features[inputs.HASH_KEY],\n detections,\n groundtruth,\n class_agnostic=class_agnostic,\n scale_to_absolute=True,\n original_image_spatial_shapes=original_image_spatial_shapes,\n true_image_shapes=true_image_shapes)\n\n if fields.InputDataFields.image_additional_channels in features:\n eval_dict[fields.InputDataFields.image_additional_channels] = features[\n fields.InputDataFields.image_additional_channels]\n\n if class_agnostic:\n category_index = label_map_util.create_class_agnostic_category_index()\n else:\n category_index = label_map_util.create_category_index_from_labelmap(\n eval_input_config.label_map_path)\n vis_metric_ops = None\n if not use_tpu and use_original_images:\n eval_metric_op_vis = vis_utils.VisualizeSingleFrameDetections(\n category_index,\n max_examples_to_draw=eval_config.num_visualizations,\n max_boxes_to_draw=eval_config.max_num_boxes_to_visualize,\n min_score_thresh=eval_config.min_score_threshold,\n use_normalized_coordinates=False)\n vis_metric_ops = eval_metric_op_vis.get_estimator_eval_metric_ops(\n eval_dict)\n\n # Eval metrics on a single example.\n eval_metric_ops = eval_util.get_eval_metric_ops_for_evaluators(\n eval_config, list(category_index.values()), eval_dict)\n for loss_key, loss_tensor in iter(losses_dict.items()):\n eval_metric_ops[loss_key] = tf.metrics.mean(loss_tensor)\n for var in optimizer_summary_vars:\n eval_metric_ops[var.op.name] = (var, tf.no_op())\n if vis_metric_ops is not None:\n eval_metric_ops.update(vis_metric_ops)\n eval_metric_ops = {str(k): v for k, v in eval_metric_ops.items()}\n\n if eval_config.use_moving_averages:\n variable_averages = tf.train.ExponentialMovingAverage(0.0)\n variables_to_restore = variable_averages.variables_to_restore()\n keep_checkpoint_every_n_hours = (\n train_config.keep_checkpoint_every_n_hours)\n saver = tf.train.Saver(\n variables_to_restore,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)\n scaffold = tf.train.Scaffold(saver=saver)\n\n # EVAL executes on CPU, so use regular non-TPU EstimatorSpec.\n if use_tpu and mode != tf.estimator.ModeKeys.EVAL:\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n scaffold_fn=scaffold_fn,\n predictions=detections,\n loss=total_loss,\n train_op=train_op,\n eval_metrics=eval_metric_ops,\n export_outputs=export_outputs)\n else:\n if scaffold is None:\n keep_checkpoint_every_n_hours = (\n train_config.keep_checkpoint_every_n_hours)\n 
saver = tf.train.Saver(\n sharded=True,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,\n save_relative_paths=True)\n tf.add_to_collection(tf.GraphKeys.SAVERS, saver)\n scaffold = tf.train.Scaffold(saver=saver)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=detections,\n loss=total_loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=export_outputs,\n scaffold=scaffold)\n\n return model_fn\n\n\ndef create_estimator_and_inputs(run_config,\n hparams,\n pipeline_config_path,\n config_override=None,\n train_steps=None,\n sample_1_of_n_eval_examples=None,\n sample_1_of_n_eval_on_train_examples=1,\n model_fn_creator=create_model_fn,\n use_tpu_estimator=False,\n use_tpu=False,\n num_shards=1,\n params=None,\n override_eval_num_epochs=True,\n save_final_config=False,\n postprocess_on_cpu=False,\n export_to_tpu=None,\n **kwargs):\n \"\"\"Creates `Estimator`, input functions, and steps.\n\n Args:\n run_config: A `RunConfig`.\n hparams: A `HParams`.\n pipeline_config_path: A path to a pipeline config file.\n config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to\n override the config from `pipeline_config_path`.\n train_steps: Number of training steps. If None, the number of training steps\n is set from the `TrainConfig` proto.\n sample_1_of_n_eval_examples: Integer representing how often an eval example\n should be sampled. If 1, will sample all examples.\n sample_1_of_n_eval_on_train_examples: Similar to\n `sample_1_of_n_eval_examples`, except controls the sampling of training\n data for evaluation.\n model_fn_creator: A function that creates a `model_fn` for `Estimator`.\n Follows the signature:\n\n * Args:\n * `detection_model_fn`: Function that returns `DetectionModel` instance.\n * `configs`: Dictionary of pipeline config objects.\n * `hparams`: `HParams` object.\n * Returns:\n `model_fn` for `Estimator`.\n\n use_tpu_estimator: Whether a `TPUEstimator` should be returned. If False,\n an `Estimator` will be returned.\n use_tpu: Boolean, whether training and evaluation should run on TPU. Only\n used if `use_tpu_estimator` is True.\n num_shards: Number of shards (TPU cores). Only used if `use_tpu_estimator`\n is True.\n params: Parameter dictionary passed from the estimator. Only used if\n `use_tpu_estimator` is True.\n override_eval_num_epochs: Whether to overwrite the number of epochs to 1 for\n eval_input.\n save_final_config: Whether to save final config (obtained after applying\n overrides) to `estimator.model_dir`.\n postprocess_on_cpu: When use_tpu and postprocess_on_cpu are true,\n postprocess is scheduled on the host cpu.\n export_to_tpu: When use_tpu and export_to_tpu are true,\n `export_savedmodel()` exports a metagraph for serving on TPU besides the\n one on CPU.\n **kwargs: Additional keyword arguments for configuration override.\n\n Returns:\n A dictionary with the following fields:\n 'estimator': An `Estimator` or `TPUEstimator`.\n 'train_input_fn': A training input function.\n 'eval_input_fns': A list of all evaluation input functions.\n 'eval_input_names': A list of names for each evaluation input.\n 'eval_on_train_input_fn': An evaluation-on-train input function.\n 'predict_input_fn': A prediction input function.\n 'train_steps': Number of training steps. 
Either directly from input or from\n configuration.\n \"\"\"\n get_configs_from_pipeline_file = MODEL_BUILD_UTIL_MAP[\n 'get_configs_from_pipeline_file']\n merge_external_params_with_configs = MODEL_BUILD_UTIL_MAP[\n 'merge_external_params_with_configs']\n create_pipeline_proto_from_configs = MODEL_BUILD_UTIL_MAP[\n 'create_pipeline_proto_from_configs']\n create_train_input_fn = MODEL_BUILD_UTIL_MAP['create_train_input_fn']\n create_eval_input_fn = MODEL_BUILD_UTIL_MAP['create_eval_input_fn']\n create_predict_input_fn = MODEL_BUILD_UTIL_MAP['create_predict_input_fn']\n detection_model_fn_base = MODEL_BUILD_UTIL_MAP['detection_model_fn_base']\n\n configs = get_configs_from_pipeline_file(\n pipeline_config_path, config_override=config_override)\n kwargs.update({\n 'train_steps': train_steps,\n 'use_bfloat16': configs['train_config'].use_bfloat16 and use_tpu\n })\n if sample_1_of_n_eval_examples >= 1:\n kwargs.update({\n 'sample_1_of_n_eval_examples': sample_1_of_n_eval_examples\n })\n if override_eval_num_epochs:\n kwargs.update({'eval_num_epochs': 1})\n tf.logging.warning(\n 'Forced number of epochs for all eval validations to be 1.')\n configs = merge_external_params_with_configs(\n configs, hparams, kwargs_dict=kwargs)\n model_config = configs['model']\n train_config = configs['train_config']\n train_input_config = configs['train_input_config']\n eval_config = configs['eval_config']\n eval_input_configs = configs['eval_input_configs']\n eval_on_train_input_config = copy.deepcopy(train_input_config)\n eval_on_train_input_config.sample_1_of_n_examples = (\n sample_1_of_n_eval_on_train_examples)\n if override_eval_num_epochs and eval_on_train_input_config.num_epochs != 1:\n tf.logging.warning('Expected number of evaluation epochs is 1, but '\n 'instead encountered `eval_on_train_input_config'\n '.num_epochs` = '\n '{}. 
Overwriting `num_epochs` to 1.'.format(\n eval_on_train_input_config.num_epochs))\n eval_on_train_input_config.num_epochs = 1\n\n # update train_steps from config but only when non-zero value is provided\n if train_steps is None and train_config.num_steps != 0:\n train_steps = train_config.num_steps\n\n detection_model_fn = functools.partial(\n detection_model_fn_base, model_config=model_config)\n\n # Create the input functions for TRAIN/EVAL/PREDICT.\n train_input_fn = create_train_input_fn(\n train_config=train_config,\n train_input_config=train_input_config,\n model_config=model_config)\n eval_input_fns = [\n create_eval_input_fn(\n eval_config=eval_config,\n eval_input_config=eval_input_config,\n model_config=model_config) for eval_input_config in eval_input_configs\n ]\n eval_input_names = [\n eval_input_config.name for eval_input_config in eval_input_configs\n ]\n eval_on_train_input_fn = create_eval_input_fn(\n eval_config=eval_config,\n eval_input_config=eval_on_train_input_config,\n model_config=model_config)\n predict_input_fn = create_predict_input_fn(\n model_config=model_config, predict_input_config=eval_input_configs[0])\n\n # Read export_to_tpu from hparams if not passed.\n if export_to_tpu is None:\n export_to_tpu = hparams.get('export_to_tpu', False)\n tf.logging.info('create_estimator_and_inputs: use_tpu %s, export_to_tpu %s',\n use_tpu, export_to_tpu)\n model_fn = model_fn_creator(detection_model_fn, configs, hparams, use_tpu,\n postprocess_on_cpu)\n if use_tpu_estimator:\n estimator = tf.contrib.tpu.TPUEstimator(\n model_fn=model_fn,\n train_batch_size=train_config.batch_size,\n # For each core, only batch size 1 is supported for eval.\n eval_batch_size=num_shards * 1 if use_tpu else 1,\n use_tpu=use_tpu,\n config=run_config,\n export_to_tpu=export_to_tpu,\n eval_on_tpu=False, # Eval runs on CPU, so disable eval on TPU\n params=params if params else {})\n else:\n estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)\n\n # Write the as-run pipeline config to disk.\n if run_config.is_chief and save_final_config:\n pipeline_config_final = create_pipeline_proto_from_configs(configs)\n config_util.save_pipeline_config(pipeline_config_final, estimator.model_dir)\n\n return dict(\n estimator=estimator,\n train_input_fn=train_input_fn,\n eval_input_fns=eval_input_fns,\n eval_input_names=eval_input_names,\n eval_on_train_input_fn=eval_on_train_input_fn,\n predict_input_fn=predict_input_fn,\n train_steps=train_steps)\n\n\ndef create_train_and_eval_specs(train_input_fn,\n eval_input_fns,\n eval_on_train_input_fn,\n predict_input_fn,\n train_steps,\n eval_on_train_data=False,\n final_exporter_name='Servo',\n eval_spec_names=None):\n \"\"\"Creates a `TrainSpec` and `EvalSpec`s.\n\n Args:\n train_input_fn: Function that produces features and labels on train data.\n eval_input_fns: A list of functions that produce features and labels on eval\n data.\n eval_on_train_input_fn: Function that produces features and labels for\n evaluation on train data.\n predict_input_fn: Function that produces features for inference.\n train_steps: Number of training steps.\n eval_on_train_data: Whether to evaluate model on training data. Default is\n False.\n final_exporter_name: String name given to `FinalExporter`.\n eval_spec_names: A list of string names for each `EvalSpec`.\n\n Returns:\n Tuple of `TrainSpec` and list of `EvalSpecs`. If `eval_on_train_data` is\n True, the last `EvalSpec` in the list will correspond to training data. 
The\n rest EvalSpecs in the list are evaluation datas.\n \"\"\"\n train_spec = tf.estimator.TrainSpec(\n input_fn=train_input_fn, max_steps=train_steps)\n\n if eval_spec_names is None:\n eval_spec_names = [str(i) for i in range(len(eval_input_fns))]\n\n eval_specs = []\n for index, (eval_spec_name, eval_input_fn) in enumerate(\n zip(eval_spec_names, eval_input_fns)):\n # Uses final_exporter_name as exporter_name for the first eval spec for\n # backward compatibility.\n if index == 0:\n exporter_name = final_exporter_name\n else:\n exporter_name = '{}_{}'.format(final_exporter_name, eval_spec_name)\n\n exporter = tf.estimator.BestExporter(\n name=exporter_name,\n serving_input_receiver_fn=predict_input_fn,\n event_file_pattern='eval_0/*.tfevents.*',\n exports_to_keep=5)\n\n eval_specs.append(\n tf.estimator.EvalSpec(\n name=eval_spec_name,\n input_fn=eval_input_fn,\n steps=None,\n exporters=exporter))\n\n if eval_on_train_data:\n eval_specs.append(\n tf.estimator.EvalSpec(\n name='eval_on_train', input_fn=eval_on_train_input_fn, steps=None))\n\n return train_spec, eval_specs\n\n\ndef continuous_eval(estimator, model_dir, input_fn, train_steps, name):\n \"\"\"Perform continuous evaluation on checkpoints written to a model directory.\n\n Args:\n estimator: Estimator object to use for evaluation.\n model_dir: Model directory to read checkpoints for continuous evaluation.\n input_fn: Input function to use for evaluation.\n train_steps: Number of training steps. This is used to infer the last\n checkpoint and stop evaluation loop.\n name: Namescope for eval summary.\n \"\"\"\n\n def terminate_eval():\n tf.logging.info('Terminating eval after 180 seconds of no checkpoints')\n return True\n\n for ckpt in tf.contrib.training.checkpoints_iterator(\n model_dir, min_interval_secs=180, timeout=None,\n timeout_fn=terminate_eval):\n\n tf.logging.info('Starting Evaluation.')\n try:\n eval_results = estimator.evaluate(\n input_fn=input_fn, steps=None, checkpoint_path=ckpt, name=name)\n tf.logging.info('Eval results: %s' % eval_results)\n\n # Terminate eval job when final checkpoint is reached\n current_step = int(os.path.basename(ckpt).split('-')[1])\n if current_step >= train_steps:\n tf.logging.info(\n 'Evaluation finished after training step %d' % current_step)\n break\n\n except tf.errors.NotFoundError:\n tf.logging.info(\n 'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)\n\n\ndef populate_experiment(run_config,\n hparams,\n pipeline_config_path,\n train_steps=None,\n eval_steps=None,\n model_fn_creator=create_model_fn,\n **kwargs):\n \"\"\"Populates an `Experiment` object.\n\n EXPERIMENT CLASS IS DEPRECATED. Please switch to\n tf.estimator.train_and_evaluate. As an example, see model_main.py.\n\n Args:\n run_config: A `RunConfig`.\n hparams: A `HParams`.\n pipeline_config_path: A path to a pipeline config file.\n train_steps: Number of training steps. If None, the number of training steps\n is set from the `TrainConfig` proto.\n eval_steps: Number of evaluation steps per evaluation cycle. 
If None, the\n number of evaluation steps is set from the `EvalConfig` proto.\n model_fn_creator: A function that creates a `model_fn` for `Estimator`.\n Follows the signature:\n\n * Args:\n * `detection_model_fn`: Function that returns `DetectionModel` instance.\n * `configs`: Dictionary of pipeline config objects.\n * `hparams`: `HParams` object.\n * Returns:\n `model_fn` for `Estimator`.\n\n **kwargs: Additional keyword arguments for configuration override.\n\n Returns:\n An `Experiment` that defines all aspects of training, evaluation, and\n export.\n \"\"\"\n tf.logging.warning('Experiment is being deprecated. Please use '\n 'tf.estimator.train_and_evaluate(). See model_main.py for '\n 'an example.')\n train_and_eval_dict = create_estimator_and_inputs(\n run_config,\n hparams,\n pipeline_config_path,\n train_steps=train_steps,\n eval_steps=eval_steps,\n model_fn_creator=model_fn_creator,\n save_final_config=True,\n **kwargs)\n estimator = train_and_eval_dict['estimator']\n train_input_fn = train_and_eval_dict['train_input_fn']\n eval_input_fns = train_and_eval_dict['eval_input_fns']\n predict_input_fn = train_and_eval_dict['predict_input_fn']\n train_steps = train_and_eval_dict['train_steps']\n\n export_strategies = [\n tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy(\n serving_input_fn=predict_input_fn)\n ]\n\n return tf.contrib.learn.Experiment(\n estimator=estimator,\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fns[0],\n train_steps=train_steps,\n eval_steps=None,\n export_strategies=export_strategies,\n eval_delay_secs=120,\n )\n"
]
| [
[
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.contrib.tpu.CrossShardOptimizer",
"tensorflow.ones",
"tensorflow.metrics.mean",
"tensorflow.stack",
"tensorflow.tile",
"tensorflow.logging.warning",
"tensorflow.add_to_collection",
"tensorflow.no_op",
"tensorflow.trainable_variables",
"tensorflow.shape",
"tensorflow.estimator.TrainSpec",
"tensorflow.python.keras.engine.base_layer_utils.enable_v2_dtype_behavior",
"tensorflow.argmax",
"tensorflow.train.Saver",
"tensorflow.logging.info",
"tensorflow.add_n",
"tensorflow.contrib.training.checkpoints_iterator",
"tensorflow.contrib.tpu.bfloat16_scope",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.contrib.learn.utils.saved_model_export_utils.make_export_strategy",
"tensorflow.contrib.learn.Experiment",
"tensorflow.summary.scalar",
"tensorflow.estimator.export.PredictOutput",
"tensorflow.estimator.EvalSpec",
"tensorflow.train.init_from_checkpoint",
"tensorflow.unstack",
"tensorflow.compat.v2.keras.mixed_precision.experimental.set_policy",
"tensorflow.contrib.tpu.outside_compilation",
"tensorflow.train.Scaffold",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.train.get_or_create_global_step",
"tensorflow.estimator.Estimator",
"tensorflow.slice",
"tensorflow.keras.backend.set_learning_phase",
"tensorflow.estimator.BestExporter"
]
]
|
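A minimal usage sketch for the model_lib entry above, assuming the TF 1.x Object Detection API layout (object_detection.model_lib and object_detection.model_hparams, mirroring the API's own model_main.py); the model directory and pipeline path are placeholders, not values taken from this row.

    import tensorflow as tf
    # Both imports are assumptions about the surrounding package; they are not
    # defined in the code cell above.
    from object_detection import model_hparams, model_lib

    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=tf.estimator.RunConfig(model_dir='/tmp/od_model'),  # placeholder
        hparams=model_hparams.create_hparams(None),
        pipeline_config_path='pipeline.config')                        # placeholder
    train_spec, eval_specs = model_lib.create_train_and_eval_specs(
        train_and_eval_dict['train_input_fn'],
        train_and_eval_dict['eval_input_fns'],
        train_and_eval_dict['eval_on_train_input_fn'],
        train_and_eval_dict['predict_input_fn'],
        train_and_eval_dict['train_steps'])
    # train_and_evaluate consumes one EvalSpec; the first is used, as in model_main.py.
    tf.estimator.train_and_evaluate(train_and_eval_dict['estimator'],
                                    train_spec, eval_specs[0])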
DLPerf/graphics | [
"c42eb846f1a9b2b326c86ec08c2ba10f5903a460"
]
| [
"tensorflow_graphics/projects/points_to_3Dobjects/utils/evaluator.py"
]
| [
"# Copyright 2020 The TensorFlow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Evaluator computing metrics over given pairs of predictions and labels.\"\"\"\n\nimport os\nimport pickle\nfrom absl import logging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_graphics.geometry.representation import grid\nfrom tensorflow_graphics.math.interpolation import trilinear\nfrom tensorflow_graphics.projects.points_to_3Dobjects.models import centernet_utils\nfrom tensorflow_graphics.projects.points_to_3Dobjects.utils import tf_utils\nfrom google3.pyglib import gfile\nfrom google3.third_party.google_research.google_research.tf3d.object_detection.box_utils import np_box_ops\n\n\nclass ShapeAccuracyMetric:\n \"\"\"Computes the accuracy of shpe prediction.\"\"\"\n\n def __init__(self, k=1):\n self.metric = tf.keras.metrics.SparseTopKCategoricalAccuracy(k)\n\n def update(self, sparse_labels, predicted_probabilities, sample_weights=None):\n self.metric.update_state(sparse_labels, predicted_probabilities,\n sample_weights)\n\n def evaluate(self):\n return self.metric.result().numpy()\n\n def reset(self):\n self.metric.reset_states()\n\n\ndef get_2d_bounding_box_iou(box1, box2):\n \"\"\"Compute IoU between two 2D bounding boxes.\n\n Args:\n box1: Input tensor with shape [4] [x_min, y_min, x_max, y_max]\n box2: Input tensor with shape [4] [x_min, y_min, x_max, y_max]\n\n Returns:\n The intersection over union as a float.\n \"\"\"\n x_min1, y_min1, x_max1, y_max1 = box1\n x_min2, y_min2, x_max2, y_max2 = box2\n ma = np.maximum\n mi = np.minimum\n intersection = ma(0, mi(x_max1, x_max2) - ma(x_min1, x_min2)) * \\\n ma(0, mi(y_max1, y_max2) - ma(y_min1, y_min2))\n area1 = (x_max1 - x_min1) * (y_max1 - y_min1)\n area2 = (x_max2 - x_min2) * (y_max2 - y_min2)\n union = area1 + area2 - intersection\n print(intersection / union)\n return intersection / (union + 1e-5)\n\n\ndef get_3d_bounding_box_iou(box1, box2):\n \"\"\"Computes intersection between two given 3d bounding boxes.\n\n Args:\n box1: Input tensor with shape [B, 7] where the inner dimensions are as\n follows:[x, y, z, length, width, height, yaw].\n box2: Input tensor with shape [B, 7] where the inner dimensions are as\n follows:[x, y, z, length, width, height, yaw].\n\n Returns:\n The IoU between the two bounding boxes.\n \"\"\"\n box1 = box1.numpy() if isinstance(box1, tf.Tensor) else box1\n box2 = box2.numpy() if isinstance(box2, tf.Tensor) else box2\n\n box1 = box1.astype(np.float32)\n box2 = box2.astype(np.float32)\n\n # rotates around z, while we rotate around y so need to swap\n center_1 = tf.reshape(box1[0:3][[0, 2, 1]], [1, 3])\n center_2 = tf.reshape(box2[0:3][[0, 2, 1]], [1, 3])\n\n rotation_z_1 = tf.reshape(box1[-1], [1])\n rotation_z_2 = tf.reshape(box2[-1], [1])\n\n length_1 = tf.reshape(box1[3 + 0], [1])\n height_1 = tf.reshape(box1[3 + 2], [1])\n width_1 = tf.reshape(box1[3 + 1], [1])\n\n length_2 = tf.reshape(box2[3 + 0], [1])\n height_2 = tf.reshape(box2[3 + 2], [1])\n 
width_2 = tf.reshape(box2[3 + 1], [1])\n\n iou = np.squeeze(np_box_ops.iou3d_7dof_box(\n length_1, height_1, width_1, center_1, rotation_z_1,\n length_2, height_2, width_2, center_2, rotation_z_2))\n\n return iou\n\n\nclass IoUMetric:\n \"\"\"IoU metric.\"\"\"\n\n def __init__(self, max_num_classes=6, resolution=128, tol=0.05, slave=False,\n path=None):\n self.max_num_classes = max_num_classes\n self.iou_per_class = {i: [] for i in range(self.max_num_classes)}\n self.resolution = resolution\n self.slave = slave\n self.path = path\n self.tol = tol\n\n def update(self, labeled_sdfs, labeled_classes, labeled_poses,\n predicted_sdfs, predicted_classes, predicted_poses):\n \"\"\"Update.\"\"\"\n labeled_rotations = labeled_poses[0]\n labeled_translations = labeled_poses[1]\n labeled_sizes = labeled_poses[2]\n\n status = True\n if status:\n box_limits_x = [100, -100]\n # box_limits_y = [100, -100]\n box_limits_z = [100, -100]\n for i in range(labeled_translations.shape[0]):\n rot = tf.reshape(tf.gather(labeled_rotations[i], [0, 2, 6, 8]), [2, 2])\n\n min_x = tf.cast(0.0 - labeled_sizes[i][0] / 2.0, dtype=tf.float32)\n max_x = tf.cast(0.0 + labeled_sizes[i][0] / 2.0, dtype=tf.float32)\n # min_y = tf.cast(0.0 - labeled_sizes[i][1] / 2.0, dtype=tf.float32)\n # max_y = tf.cast(0.0 + labeled_sizes[i][1] / 2.0, dtype=tf.float32)\n min_z = tf.cast(0.0 - labeled_sizes[i][2] / 2.0, dtype=tf.float32)\n max_z = tf.cast(0.0 + labeled_sizes[i][2] / 2.0, dtype=tf.float32)\n\n translation = tf.reshape([labeled_translations[i][0],\n labeled_translations[i][2]], [2, 1])\n\n pt_0 = rot @ tf.reshape([min_x, min_z], [2, 1]) + translation\n pt_1 = rot @ tf.reshape([min_x, max_z], [2, 1]) + translation\n pt_2 = rot @ tf.reshape([max_x, min_z], [2, 1]) + translation\n pt_3 = rot @ tf.reshape([max_x, max_z], [2, 1]) + translation\n\n for pt in [pt_0, pt_1, pt_2, pt_3]:\n if pt[0] < box_limits_x[0]:\n box_limits_x[0] = pt[0]\n\n if pt[0] > box_limits_x[1]:\n box_limits_x[1] = pt[0]\n\n if pt[1] < box_limits_z[0]:\n box_limits_z[0] = pt[1]\n\n if pt[1] > box_limits_z[1]:\n box_limits_z[1] = pt[1]\n mean_x = tf.reduce_mean(box_limits_x)\n mean_z = tf.reduce_mean(box_limits_z)\n else:\n mean_x = tf.reduce_mean(labeled_translations[:, 0])\n mean_z = tf.reduce_mean(labeled_translations[:, 2])\n samples_world = grid.generate(\n (mean_x - 0.5, 0.0, mean_z - 0.5), (mean_x + 0.5, 1.0, mean_z + 0.5),\n [self.resolution, self.resolution, self.resolution])\n # samples_world = grid.generate(\n # (box_limits_x[0][0], box_limits_y[0], box_limits_z[0][0]),\n # (box_limits_x[1][0], box_limits_y[1], box_limits_z[1][0]),\n # [self.resolution, self.resolution, self.resolution])\n # samples_world = grid.generate(\n # (-5.0, -5.0, -5.0),\n # (5.0, 5.0, 5.0),\n # [self.resolution, self.resolution, self.resolution])\n samples_world = tf.reshape(samples_world, [-1, 3])\n ious = []\n\n status = False\n if status:\n _, axs = plt.subplots(labeled_translations.shape[0], 5)\n fig_obj_count = 0\n for class_id in range(self.max_num_classes):\n # Do the same for the ground truth and predictions\n sdf_values = tf.zeros_like(samples_world)[:, 0:1]\n for mtype, (classes, sdfs, poses) in enumerate([\n (labeled_classes, labeled_sdfs, labeled_poses),\n (predicted_classes, predicted_sdfs, predicted_poses)]):\n for i in range(classes.shape[0]):\n if class_id == classes[i]:\n sdf = tf.expand_dims(sdfs[i], -1)\n sdf = sdf * -1.0 # inside positive, outside zero\n samples_object = centernet_utils.transform_pointcloud(\n tf.reshape(samples_world, [1, 1, -1, 3]),\n 
tf.reshape(poses[2][i], [1, 1, 3]),\n tf.reshape(poses[0][i], [1, 1, 3, 3]),\n tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0\n samples_object = \\\n (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5\n samples = tf.squeeze(samples_object)\n interpolated = trilinear.interpolate(sdf, samples)\n\n sdf_values += tf.math.sign(tf.nn.relu(interpolated + self.tol))\n status2 = False\n if status2:\n a = 2\n values = interpolated\n inter = tf.reshape(values, [self.resolution,\n self.resolution,\n self.resolution])\n inter = tf.transpose(tf.reduce_max(inter, axis=a))\n im = axs[fig_obj_count, mtype * 2 + 0].matshow(inter.numpy())\n plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 0])\n print(mtype, fig_obj_count, 0)\n\n values = tf.math.sign(tf.nn.relu(interpolated + self.tol))\n inter = tf.reshape(values, [self.resolution,\n self.resolution,\n self.resolution])\n inter = tf.transpose(tf.reduce_max(inter, axis=a))\n im = axs[fig_obj_count, mtype * 2 + 1].matshow(inter.numpy())\n plt.colorbar(im, ax=axs[fig_obj_count, mtype * 2 + 1])\n print(mtype, fig_obj_count, 1)\n\n if mtype == 1:\n values = sdf_values\n inter = tf.reshape(values, [self.resolution,\n self.resolution,\n self.resolution])\n inter = tf.transpose(tf.reduce_max(inter, axis=a))\n im = axs[fig_obj_count, 4].matshow(inter.numpy())\n plt.colorbar(im, ax=axs[fig_obj_count, 4])\n print(mtype, fig_obj_count, 2)\n fig_obj_count += 1\n\n intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))\n union = tf.reduce_sum(tf.math.sign(sdf_values))\n iou = intersection / union\n if not tf.math.is_nan(iou):\n ious.append(iou)\n status3 = False\n if status3:\n _ = plt.figure(figsize=(5, 5))\n plt.clf()\n # mask = (sdf_values.numpy() > 0)[:, 0]\n # plt.scatter(samples_world.numpy()[mask, 0],\n # samples_world.numpy()[mask, 1],\n # marker='.', c=sdf_values.numpy()[mask, 0])\n\n plt.scatter(samples_world.numpy()[:, 0],\n samples_world.numpy()[:, 1],\n marker='.', c=sdf_values.numpy()[:, 0])\n plt.colorbar()\n if not tf.math.is_nan(iou):\n self.iou_per_class[class_id].append(iou)\n if not ious: # guard the empty case so np.mean/np.min below are defined\n ious = [0]\n return np.mean(ious), np.min(ious)\n\n def evaluate(self):\n \"\"\"Evaluate.\"\"\"\n if self.slave:\n data = self.iou_per_class\n with gfile.Open(self.path, 'wb') as file:\n pickle.dump(data, file)\n logging.info(file)\n return\n else:\n iou_per_class_means = []\n for _, v in self.iou_per_class.items():\n if v:\n iou_per_class_means.append(np.mean(v))\n return np.mean(iou_per_class_means)\n\n def reset(self):\n self.iou_per_class = {i: [] for i in range(self.max_num_classes)}\n\n\nclass CollisionMetric:\n \"\"\"Collision.\"\"\"\n\n def __init__(self, max_num_classes=6, resolution=128,\n tol=0.04, slave=False, path=None):\n self.max_num_classes = max_num_classes\n self.collisions = []\n self.intersections = []\n self.ious = []\n self.resolution = resolution\n self.slave = slave\n self.path = path\n self.tol = tol\n\n def update(self, labeled_sdfs, labeled_classes, labeled_poses,\n predicted_sdfs, predicted_classes, predicted_poses):\n \"\"\"Update.\"\"\"\n if labeled_sdfs or labeled_classes:\n print(labeled_sdfs)\n mean_x = tf.reduce_mean(labeled_poses[1][:, 0])\n mean_z = tf.reduce_mean(labeled_poses[1][:, 2])\n samples_world = grid.generate(\n (mean_x - 0.5, 0.0, mean_z - 0.5), (mean_x + 0.5, 1.0, mean_z + 0.5),\n [self.resolution, self.resolution, self.resolution])\n samples_world = tf.reshape(samples_world, [-1, 3])\n\n status = False\n if status:\n _, axs = plt.subplots(3, 3)\n fig_obj_count = 0\n\n # Do the same for the 
ground truth and predictions\n num_collisions = 0\n prev_intersection = 0\n sdf_values = tf.zeros_like(samples_world)[:, 0:1]\n for classes, sdfs, poses in [(predicted_classes,\n predicted_sdfs,\n predicted_poses)]:\n for i in range(classes.shape[0]):\n sdf = tf.expand_dims(sdfs[i], -1)\n sdf = sdf * -1.0 # inside positive, outside zero\n samples_object = centernet_utils.transform_pointcloud(\n tf.reshape(samples_world, [1, 1, -1, 3]),\n tf.reshape(poses[2][i], [1, 1, 3]),\n tf.reshape(poses[0][i], [1, 1, 3, 3]),\n tf.reshape(poses[1][i], [1, 1, 3]), inverse=True) * 2.0\n samples_object = (samples_object * (29.0/32.0) / 2.0 + 0.5) * 32.0 - 0.5\n samples = tf.squeeze(samples_object)\n interpolated = trilinear.interpolate(sdf, samples)\n occupancy_value = tf.math.sign(tf.nn.relu(interpolated + self.tol))\n sdf_values += occupancy_value\n intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))\n if intersection > prev_intersection:\n prev_intersection = intersection\n num_collisions += 1\n status2 = False\n if status2:\n a = 1\n values = interpolated\n inter = tf.reshape(values, [self.resolution,\n self.resolution,\n self.resolution])\n inter = tf.transpose(tf.reduce_max(inter, axis=a))\n im = axs[fig_obj_count, 0].matshow(inter.numpy())\n plt.colorbar(im, ax=axs[fig_obj_count, 0])\n\n values = tf.math.sign(tf.nn.relu(interpolated + self.tol))\n inter = tf.reshape(values, [self.resolution,\n self.resolution,\n self.resolution])\n inter = tf.transpose(tf.reduce_max(inter, axis=a))\n im = axs[fig_obj_count, 1].matshow(inter.numpy())\n plt.colorbar(im, ax=axs[fig_obj_count, 1])\n\n values = sdf_values\n inter = tf.reshape(values, [self.resolution,\n self.resolution,\n self.resolution])\n inter = tf.transpose(tf.reduce_max(inter, axis=a))\n im = axs[fig_obj_count, 2].matshow(inter.numpy())\n plt.colorbar(im, ax=axs[fig_obj_count, 2])\n\n fig_obj_count += 1\n\n intersection = tf.reduce_sum(tf.math.sign(tf.nn.relu(sdf_values - 1)))\n union = tf.reduce_sum(tf.math.sign(sdf_values))\n iou = intersection / union\n self.collisions.append(num_collisions)\n self.intersections.append(intersection)\n self.ious.append(iou)\n return num_collisions, intersection, iou\n\n def evaluate(self):\n \"\"\"Evaluate.\"\"\"\n if self.slave:\n data = {'collisions': self.collisions,\n 'intersections': self.intersections,\n 'ious': self.ious}\n with gfile.Open(self.path, 'wb') as file:\n pickle.dump(data, file)\n logging.info(file)\n return\n else:\n # self.collisions = []\n # for k, v in self.iou_per_class.items():\n # if len(v) > 0:\n # iou_per_class_means.append(np.mean(v))\n return np.sum(self.collisions)\n\n def reset(self):\n self.intersections = []\n self.ious = []\n self.collisions = []\n\n\nclass BoxIoUMetric:\n \"\"\"BoxIOU.\"\"\"\n\n def __init__(self, t=0.5, threed=False):\n self.labeled_boxes = {}\n self.predicted_boxes = {}\n self.threshold = t\n self.threed = threed\n self.get_iou_func = get_2d_bounding_box_iou\n if self.threed:\n self.get_iou_func = get_3d_bounding_box_iou\n\n def update(self, scene_id, labeled_boxes, labeled_classes, predicted_boxes,\n predicted_classes, confidences):\n \"\"\"For one scene, provide all ground-truth and all predicted detections.\"\"\"\n self.labeled_boxes[scene_id] = (labeled_boxes, labeled_classes)\n self.predicted_boxes[scene_id] = (predicted_boxes, predicted_classes,\n confidences)\n\n def evaluate(self):\n \"\"\"Eval.\"\"\"\n predictions_per_class = {} # map {classname: pred}\n labels_per_class = {} # map {classname: gt}\n\n for scene_id in 
self.predicted_boxes:\n bboxes, classnames, scores = self.predicted_boxes[scene_id]\n classnames = classnames.numpy()\n bboxes = bboxes.numpy()\n scores = scores.numpy()\n for i in range(classnames.shape[0]):\n classname = classnames[i]\n bbox = bboxes[i]\n score = scores[i]\n # for classname, bbox, score in self.predicted_boxes[scene_id]:\n if classname not in predictions_per_class:\n predictions_per_class[classname] = {}\n if scene_id not in predictions_per_class[classname]:\n predictions_per_class[classname][scene_id] = []\n if classname not in labels_per_class:\n labels_per_class[classname] = {}\n if scene_id not in labels_per_class[classname]:\n labels_per_class[classname][scene_id] = []\n predictions_per_class[classname][scene_id].append((bbox, score))\n\n for scene_id in self.labeled_boxes:\n bboxes, classnames = self.labeled_boxes[scene_id]\n classnames = classnames.numpy()\n bboxes = bboxes.numpy()\n for i in range(classnames.shape[0]):\n classname = classnames[i]\n bbox = bboxes[i]\n if classname not in labels_per_class:\n labels_per_class[classname] = {}\n if scene_id not in labels_per_class[classname]:\n labels_per_class[classname][scene_id] = []\n labels_per_class[classname][scene_id].append(bbox)\n\n recall_per_class = {}\n precision_per_class = {}\n ap_per_class = {}\n for classname in labels_per_class:\n print('Computing AP for class: ', classname)\n if classname in predictions_per_class:\n recall, precision, ap = self._eval_detections_per_class(\n # this does not work when class was never predicted\n predictions_per_class[classname],\n labels_per_class[classname],\n self.threshold)\n else:\n recall, precision, ap = 0.0, 0.0, 0.0\n recall_per_class[classname] = recall\n precision_per_class[classname] = precision\n ap_per_class[classname] = ap\n print(classname, ap)\n # return recall_per_class, precision_per_class, ap_per_class\n mean = np.mean(np.array([v for k, v in ap_per_class.items()]))\n print(mean)\n return mean\n\n def _get_iou_main(self, get_iou_func, args):\n return get_iou_func(*args)\n\n def _voc_ap(self, rec, prec):\n \"\"\"Compute VOC AP given precision and recall.\"\"\"\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n # and sum (\\Delta recall) * prec\n return np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n\n def _eval_detections_per_class(self, pred, gt, ovthresh=0.25):\n \"\"\"Generic functions to compute precision/recall for object detection.\"\"\"\n\n # construct gt objects\n class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}\n npos = 0\n for img_id in gt.keys():\n bbox = np.array(gt[img_id])\n det = [False] * len(bbox)\n npos += len(bbox)\n class_recs[img_id] = {'bbox': bbox, 'det': det}\n # pad empty list to all other imgids\n for img_id in pred:\n if img_id not in gt:\n class_recs[img_id] = {'bbox': np.array([]), 'det': []}\n\n # construct dets\n image_ids = []\n confidence = []\n bb = []\n for img_id in pred:\n for box, score in pred[img_id]:\n image_ids.append(img_id)\n confidence.append(score)\n bb.append(box)\n confidence = np.array(confidence)\n bb = np.array(bb) # (nd,4 or 8,3 or 6)\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n bb = bb[sorted_ind, ...]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down 
dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n r = class_recs[image_ids[d]]\n bb = bb[d, ...].astype(float)\n ovmax = -np.inf\n bbgt = r['bbox'].astype(float)\n\n if bbgt.size > 0:\n # compute overlaps\n for j in range(bbgt.shape[0]):\n iou = self._get_iou_main(self.get_iou_func, (bb, bbgt[j, ...]))\n if iou > ovmax:\n ovmax = iou\n jmax = j\n\n if ovmax > ovthresh:\n if not r['det'][jmax]:\n tp[d] = 1.\n r['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos + 1e-5)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = self._voc_ap(rec, prec)\n\n return rec, prec, ap\n\n def reset(self):\n self.labeled_boxes = {}\n self.predicted_boxes = {}\n\n\nclass Evaluator:\n \"\"\"Evaluator for specified metrics.\"\"\"\n\n def __init__(self, metrics, split, shapenet_dir):\n self.metrics = metrics\n self.split = split\n self.shapenet_dir = shapenet_dir\n\n def add_detections(self, sample, detections):\n \"\"\"Add detections to evaluation.\n\n Args:\n sample: the ground truth information\n detections: the predicted detections\n\n Returns:\n dict of intermediate results.\n\n \"\"\"\n result_dict = {'iou_mean': -1, 'iou_min': -1, 'collisions': 0,\n 'collision_intersection': 0, 'collision_iou': 0}\n num_boxes = sample['num_boxes'].numpy()\n labeled_boxes_init = tf.gather(\n sample['groundtruth_boxes'], axis=1, indices=[1, 0, 3, 2]) * 256.0\n\n for _, metric in self.metrics.items():\n if isinstance(metric, ShapeAccuracyMetric):\n labels = sample['shapes']\n weights = tf.math.sign(labels + 1) # -1 is mapped to zero, else 1\n metric.update(labels, detections['shapes_logits'], weights)\n elif isinstance(metric, BoxIoUMetric):\n scene_id = str(sample['scene_filename'].numpy(), 'utf-8')\n\n # Get ground truth boxes\n labeled_boxes = labeled_boxes_init\n if metric.threed:\n rotations_y = tf.concat([tf_utils.euler_from_rotation_matrix(\n tf.reshape(detections['rotations_3d'][i], [3, 3]),\n 1) for i in range(num_boxes)], axis=0)\n rotations_y = tf.reshape(rotations_y, [-1, 1])\n labeled_boxes = tf.concat([sample['translations_3d'],\n sample['sizes_3d'],\n rotations_y], axis=1)\n\n # Get predicted boxes\n predicted_boxes = detections['detection_boxes']\n if metric.threed:\n rotations_y = tf.concat([tf_utils.euler_from_rotation_matrix(\n tf.reshape(detections['rotations_3d'][i], [3, 3]),\n 1) for i in range(num_boxes)], axis=0)\n rotations_y = tf.reshape(rotations_y, [-1, 1])\n predicted_boxes = tf.concat([detections['translations_3d'],\n detections['sizes_3d'],\n rotations_y], axis=1)\n\n labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64)\n predicted_classes = tf.cast(detections['detection_classes'], tf.int64)\n confidences = detections['detection_scores']\n metric.update(scene_id, labeled_boxes, labeled_classes, predicted_boxes,\n predicted_classes, confidences)\n elif isinstance(metric, IoUMetric):\n classes = sample['classes']\n mesh_names = sample['mesh_names']\n labeled_sdfs = []\n for i in range(num_boxes):\n class_id = str(classes[i].numpy()).zfill(8)\n model_name = str(mesh_names[i].numpy(), 'utf-8')\n path_prefix = os.path.join(self.shapenet_dir, class_id, model_name)\n file_sdf = os.path.join(path_prefix, 'model_normalized_sdf.npy')\n with gfile.Open(file_sdf, 'rb') as f:\n 
labeled_sdfs.append(tf.expand_dims(np.load(f).astype(np.float32),\n 0))\n labeled_sdfs = tf.concat(labeled_sdfs, axis=0)\n\n labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64)\n labeled_permutation = np.argsort(labeled_classes)\n\n labeled_sdfs = labeled_sdfs.numpy()[labeled_permutation]\n labeled_classes = labeled_classes.numpy()[labeled_permutation]\n labeled_rotations_3d = sample['rotations_3d'].numpy()\n labeled_rotations_3d = labeled_rotations_3d[labeled_permutation]\n labeled_translations_3d = sample['translations_3d'].numpy()\n labeled_translations_3d = labeled_translations_3d[labeled_permutation]\n labeled_sizes_3d = sample['sizes_3d'].numpy()[labeled_permutation]\n labeled_poses = (labeled_rotations_3d, labeled_translations_3d,\n labeled_sizes_3d)\n\n # Predictions\n predicted_classes = tf.cast(detections['detection_classes'], tf.int64)\n predicted_permutation = np.argsort(predicted_classes)\n predicted_classes = predicted_classes.numpy()[predicted_permutation]\n\n predicted_sdfs = \\\n detections['predicted_sdfs'].numpy()[predicted_permutation]\n predicted_rotations_3d = \\\n detections['rotations_3d'].numpy()[predicted_permutation]\n predicted_translations_3d = \\\n detections['translations_3d'].numpy()[predicted_permutation]\n predicted_sizes_3d = \\\n detections['sizes_3d'].numpy()[predicted_permutation]\n predicted_poses = (predicted_rotations_3d, predicted_translations_3d,\n predicted_sizes_3d)\n\n full_oracle = False\n if full_oracle:\n predicted_sdfs = detections['groundtruth_sdfs'].numpy()\n predicted_sdfs = predicted_sdfs[labeled_permutation]\n predicted_classes = labeled_classes\n predicted_poses = labeled_poses\n\n print('----------------------------')\n print(predicted_sdfs.shape)\n print(predicted_classes.shape)\n print(predicted_poses[0].shape)\n print(predicted_poses[1].shape)\n print(predicted_poses[2].shape)\n\n pose_oracle = False\n if pose_oracle:\n predicted_sdfs = detections['predicted_sdfs'].numpy()\n predicted_sdfs = predicted_sdfs[predicted_permutation]\n predicted_poses = (labeled_rotations_3d, labeled_translations_3d,\n labeled_sizes_3d)\n\n class_oracle = True\n if class_oracle:\n predicted_classes *= 0\n labeled_classes *= 0\n\n iou_mean, iou_min = metric.update(\n labeled_sdfs, labeled_classes, labeled_poses, predicted_sdfs,\n predicted_classes, predicted_poses, sample['dot'])\n result_dict['iou_mean'] = iou_mean\n result_dict['iou_min'] = iou_min\n elif isinstance(metric, CollisionMetric):\n\n labeled_sdfs = detections['groundtruth_sdfs']\n labeled_classes = tf.cast(sample['groundtruth_valid_classes'], tf.int64)\n labeled_poses = (sample['rotations_3d'],\n sample['translations_3d'],\n sample['sizes_3d'])\n\n predicted_classes = tf.cast(detections['detection_classes'], tf.int64)\n predicted_sdfs = detections['predicted_sdfs']\n predicted_poses = (detections['rotations_3d'],\n detections['translations_3d'],\n detections['sizes_3d'])\n\n full_oracle = False\n if full_oracle:\n predicted_sdfs = detections['groundtruth_sdfs'].numpy()\n predicted_classes = labeled_classes\n predicted_poses = labeled_poses\n\n num_collisions, intersection, iou = metric.update(\n labeled_sdfs, labeled_classes, labeled_poses,\n predicted_sdfs, predicted_classes, predicted_poses)\n result_dict['collisions'] = num_collisions\n result_dict['collision_intersection'] = intersection\n result_dict['collision_iou'] = iou\n\n return result_dict\n\n def evaluate(self):\n \"\"\"Runs metrics over provided pairs and returns metric dict.\"\"\"\n metrics = {}\n for 
name, metric in self.metrics.items():\n metrics[name] = metric.evaluate()\n return metrics\n\n def reset_metrics(self):\n for _, metric in self.metrics.items():\n metric.reset()\n"
]
| [
[
"numpy.load",
"numpy.min",
"tensorflow.reshape",
"numpy.mean",
"numpy.where",
"tensorflow.zeros_like",
"numpy.finfo",
"numpy.cumsum",
"tensorflow.cast",
"numpy.concatenate",
"matplotlib.pyplot.colorbar",
"tensorflow.concat",
"matplotlib.pyplot.subplots",
"tensorflow.math.is_nan",
"tensorflow.squeeze",
"tensorflow.keras.metrics.SparseTopKCategoricalAccuracy",
"numpy.array",
"tensorflow.nn.relu",
"numpy.zeros",
"tensorflow.expand_dims",
"matplotlib.pyplot.figure",
"numpy.argsort",
"matplotlib.pyplot.clf",
"numpy.sum",
"tensorflow.math.sign",
"tensorflow.reduce_max",
"tensorflow.gather",
"tensorflow.reduce_mean",
"numpy.maximum"
]
]
|
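The 2D IoU helper in the evaluator entry above can be sanity-checked in isolation; this is a pure-Python restatement of the same arithmetic with my own example boxes, not code from the row.

    def iou_2d(box1, box2):
        # Same arithmetic as get_2d_bounding_box_iou above; boxes are
        # [x_min, y_min, x_max, y_max] and 1e-5 guards a degenerate union.
        x_min1, y_min1, x_max1, y_max1 = box1
        x_min2, y_min2, x_max2, y_max2 = box2
        inter = (max(0.0, min(x_max1, x_max2) - max(x_min1, x_min2)) *
                 max(0.0, min(y_max1, y_max2) - max(y_min1, y_min2)))
        area1 = (x_max1 - x_min1) * (y_max1 - y_min1)
        area2 = (x_max2 - x_min2) * (y_max2 - y_min2)
        return inter / (area1 + area2 - inter + 1e-5)

    # Two unit squares overlapping in a 0.5 x 1.0 strip:
    # intersection 0.5, union 1.5, IoU ~ 1/3.
    print(iou_2d([0, 0, 1, 1], [0.5, 0, 1.5, 1]))  # ~0.333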
CEAC33/sanic-skeleton-api | [
"0003789e467f6bb111dd68629959e0a6caa70f74"
]
| [
"apps/utils/mongo_utils.py"
]
| [
"from pymongo import MongoClient\nimport pandas as pd\nfrom sanic.log import logger\nfrom config import *\nfrom bson.objectid import ObjectId\nimport copy\nimport json\n\n\nasync def save_csv_to_mongo(config, csv_file, config_id): \n client = MongoClient(config.MONGO_HOST, config.MONGO_PORT)\n db=client.exago\n\n if config_id:\n logger.info('CREATING MONGO COLLECTION')\n logger.info(config_id)\n reports = db[config_id]\n else:\n reports = db.report\n x = reports.delete_many({}) #delete all previous documents\n logger.info(\"{} documents to be updated\".format(x.deleted_count))\n df = pd.read_csv(csv_file, low_memory=False) #csv file which you want to import\n df = df.where((pd.notnull(df)), None) # changa NaN value for None (null in mongo)\n records_ = df.to_dict(orient = 'records')\n result = reports.insert_many(records_)\n\nasync def save_to_mongo(config, data, collection):\n client = MongoClient(config.MONGO_HOST, config.MONGO_PORT)\n db=client.exago\n my_collection = db[collection]\n my_collection_id = my_collection.insert_one(data).inserted_id\n return str(my_collection_id)\n\nasync def get_from_mongo(config, collection, value_match, key_match='_id'):\n client = MongoClient(config.MONGO_HOST, config.MONGO_PORT)\n db=client.exago\n my_collection = db[collection]\n\n if key_match == '_id':\n for x in my_collection.find({ key_match : ObjectId(value_match)}):\n result = x\n\n result[key_match] = str(result[key_match])\n else:\n for x in my_collection.find({ key_match : value_match}):\n result = x\n\n return result\n\n"
]
| [
[
"pandas.notnull",
"pandas.read_csv"
]
]
|
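The NaN-to-None conversion that save_csv_to_mongo relies on, shown in isolation with my own toy frame; Mongo stores None as BSON null, while NaN would be inserted as a float. The astype(object) cast is an addition here, since recent pandas versions keep NaN in float columns when None is the replacement value.

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({'a': [1.0, np.nan], 'b': ['x', None]})
    records = df.astype(object).where(pd.notnull(df), None).to_dict(orient='records')
    print(records)  # [{'a': 1.0, 'b': 'x'}, {'a': None, 'b': None}]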
sidd0529/COSMOS_Lens | [
"203c97948f8ba30d8874ec6bbc0d00e3c82639da"
]
| [
"gauss_smooth.py"
]
| [
"from __future__ import division\nimport numpy as np\n\n# ----------- Gaussian Smoothing (self-defined) ------------------------------------------------------------\n''' https://matthew-brett.github.io/teaching/smoothing_intro.html '''\n\ndef sigma2fwhm(sigmaa):\n\treturn sigmaa * np.sqrt(8 * np.log(2))\n\n\ndef fwhm2sigma(fwhm):\n\treturn fwhm / np.sqrt(8 * np.log(2))\n\t\n\ndef gauss_smooth_func(x_vals, y_vals, sigmaa):\n smoothed_vals = np.zeros(y_vals.shape)\n \n for i in range(len(y_vals)):\n x_position = x_vals[i]\n kernel = (1.0/np.sqrt(2*np.pi *sigmaa**2)) * np.exp( -(x_vals-x_position)** 2 / (2 * sigmaa ** 2))\n kernel = kernel / sum(kernel)\n smoothed_vals[i] = np.sum(y_vals * kernel)\n \n return smoothed_vals \n"
]
| [
[
"numpy.log",
"numpy.zeros",
"numpy.sum",
"numpy.exp",
"numpy.sqrt"
]
]
|
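A quick check of the helpers in the gauss_smooth entry above, assuming gauss_smooth.py is importable as a module; the signal and sigma values are my own.

    import numpy as np
    from gauss_smooth import fwhm2sigma, gauss_smooth_func, sigma2fwhm

    # sigma and FWHM convert by the constant sqrt(8 ln 2) ~ 2.3548.
    assert abs(sigma2fwhm(fwhm2sigma(3.0)) - 3.0) < 1e-12  # round trip

    x = np.linspace(0.0, 10.0, 101)
    y = np.sin(x) + np.random.normal(scale=0.3, size=x.shape)  # noisy signal
    y_smooth = gauss_smooth_func(x, y, sigmaa=0.3)  # larger sigmaa -> smoother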
johnrbnsn/Composite-Plate | [
"aa59af3d8cde768ead8212c75f45846e80030e4a"
]
| [
"composite_plate/classical_plate_theory_test.py"
]
| [
"import unittest\nimport numpy as np\nfrom classical_plate_theory import Ply, InputError, Laminae, Laminate\n\nclass TestCompositePlateClasses(unittest.TestCase):\n \"\"\" Defines a series of tests for the CompositePlate Module\n \n \"\"\"\n def test_ply_E1_value(self):\n \"\"\" Checks to see if the input value for E1 is taken by the class\n \"\"\"\n test_ply = Ply(E1=1.0,E2=1.0,G12=1.0,nu12=1.0,h=1.0);\n self.assertEqual(test_ply.E1, 1.0)\n\n def test_ply_h_gt_0(self):\n \"\"\" Checks to see that an InputError is raised for a h=0 input\n \"\"\"\n with self.assertRaises(InputError):\n test_ply = Ply(E1=1.0,E2=1.0,G12=1.0,nu12=1.0,h=0.0);\n \n def test_laminae_matrix_orotropic(self):\n \"\"\" Test the laminae matrix against a known example\n \n 'Fiber-Reinforced Composites' by Mallick (3rd edition), \n Example 3.6\n \n Test will check to see that the laminae stiffness matrix \n matches the expected stiffness matrix from Example 3.6 with \n a maximum error of 1 decimal place (values in book given to \n 1 decimal place).\n \"\"\"\n \n ply = Ply(E1=133.4, E2=8.78, nu12=0.26, G12=3.254, h=1.0) # h is not used, just adding 1 as a placeholder\n laminae = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))\n Q_bar = laminae.Q_bar\n Q_bar_expected = np.matrix([[40.11, 33.61, 31.3],\n [33.61, 40.11, 31.3],\n [31.3, 31.3, 34.57]])\n Q_max_diff = np.max(np.abs(Q_bar -Q_bar_expected))\n \n self.assertAlmostEqual(Q_max_diff,0,places=1)\n \n \n def test_laminate_matrix_angleply(self):\n \"\"\" Test an angle ply laminate matrix against a known example.\n \n 'Fiber-Reinforced Composites' by Mallick (3rd edition), \n Example 3.7a\n \n Test will check that the laminate stiffness matrix matches \n the expected stiffness matrices from example 3.7a with a \n maximum normalized error of 3 decimal places (< 0.1% error)\n \n This test will throw a 'RuntimeWarning: invalid value encountered\n in divide' because of the zero elements in the A, B and D \n matrices, this is okay, ignore the warning.\n \"\"\"\n \n ply = Ply(E1=133.4e9, E2=8.78e9, nu12=0.26, G12=3.254e9, h=0.006) # h is in [mm]\n \n laminae_pos45 = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))\n laminae_neg45 = Laminae(ply=ply, theta_rad=(-45.0*np.pi/180.0))\n \n laminae_list = [laminae_pos45, laminae_neg45]\n laminate = Laminate(laminae_list)\n \n A_expected = np.power(10.0,6.0)*np.matrix([[481.32, 403.32, 0.0], \n [403.32, 481.32, 0.0],\n [0.0, 0.0, 414.84]]);\n \n A_diff_norm_max = np.nanmax(np.abs(A_expected -laminate.A)/laminate.A)\n\n B_expected = np.power(10.0,3.0)*np.matrix([[0.0, 0.0, -1126.8],\n [0.0, 0.0, -1126.8],\n [-1126.8, -1126.8, 0.0]])\n \n B_diff_norm_max = np.nanmax(np.abs(B_expected -laminate.B)/laminate.B) \n \n D_expected = np.matrix([[5775.84, 4839.84, 0.0],\n [4839.84, 5775.84, 0.0],\n [0.0, 0.0, 4978.08]])\n D_diff_norm_max = np.nanmax(np.abs(D_expected -laminate.D)/laminate.D)\n \n max_norm_diff = np.max([A_diff_norm_max,B_diff_norm_max,D_diff_norm_max])\n \n self.assertAlmostEqual(max_norm_diff,0.0,places=3) \n \n def test_laminate_matrix_symmetricbalanced(self):\n \"\"\" Test a symmetric balanced ply laminate matrix against a \n known example.\n \n 'Fiber-Reinforced Composites' by Mallick (3rd edition), \n Example 3.7b\n \n Test will check that the laminate stiffness matrix matches \n the expected stiffness matrices from example 3.7b with a \n maximum normalized error of 3 decimal places (< 0.1% error)\n \n Symmetric Laminate should have A16, A26 = 0, B = 0\n \"\"\"\n \n ply = Ply(E1=133.4e9, E2=8.78e9, nu12=0.26, G12=3.254e9, 
h=0.006) # h is in [mm]\n \n laminae_pos45 = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))\n laminae_neg45 = Laminae(ply=ply, theta_rad=(-45.0*np.pi/180.0))\n \n laminae_list = [laminae_pos45, laminae_neg45, \n laminae_neg45, laminae_pos45]\n \n laminate = Laminate(laminae_list)\n \n A_expected = np.power(10.0,6.0)*np.matrix([[962.64, 806.64, 0.0], \n [806.64, 962.64, 0.0],\n [0.0, 0.0, 829.68]]);\n \n A_diff_norm_max = np.nanmax(np.abs(A_expected -laminate.A)/laminate.A)\n\n B_expected = np.matrix([[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]])\n \n B_diff_norm_max = np.max(np.abs(B_expected -laminate.B)) \n \n D_expected = np.power(10.,3.)*np.matrix([[46.21, 38.72, 27.04],\n [38.72, 46.21, 27.04],\n [27.04, 27.04, 39.82]])\n D_diff_norm_max = np.nanmax(np.abs(D_expected -laminate.D)/laminate.D)\n \n max_norm_diff = np.max([A_diff_norm_max,B_diff_norm_max,D_diff_norm_max])\n \n self.assertAlmostEqual(max_norm_diff,0.0,places=3) \n \n \n def test_laminate_matrix_inverse(self):\n \"\"\"Checks the inverse (A1,B1,C1,D1) matrices to see they form properly.\n \n 'Fiber-Reinforced Composites' by Mallick (3rd edition), \n Example 3.13\n \n Test will use data from expample 3.13 to check the \"inverse\"\n relationship stiffness matrices with a maximum normalized \n error of 2 decimal places (< 1% error).\n \"\"\"\n \n ply = Ply(E1=133.4e9, E2=8.78e9, nu12=0.26, G12=3.254e9, h=0.006) # h is in [mm]\n \n laminae_pos45 = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))\n laminae_neg45 = Laminae(ply=ply, theta_rad=(-45.0*np.pi/180.0))\n \n laminae_list = [laminae_pos45, laminae_neg45]\n laminate = Laminate(laminae_list)\n \n A1_expected = np.power(10.0,-9.0)*np.matrix([[7.7385, -5.0715, 0.0], \n [-5.0715, 7.7385, 0.0],\n [0.0, 0.0, 5.683]]);\n \n A1_diff_norm_max = np.nanmax(np.abs(A1_expected -laminate.A_1)/laminate.A_1)\n\n B1_expected = np.power(10.0,-9.0)*np.matrix([[0.0, 0.0, 603.54],\n [0.0, 0.0, 603.54],\n [602.74, 602.74, 0.0]])\n \n B1_diff_norm_max = np.nanmax(np.abs(B1_expected -laminate.B_1)/laminate.B_1) \n \n C1_expected = np.power(10.0,-9.0)*np.matrix([[0.0, 0.0, 602.74],\n [0.0, 0.0, 602.74],\n [603.54, 603.54, 0.0]])\n C1_diff_norm_max = np.nanmax(np.abs(C1_expected -laminate.C_1)/laminate.C_1)\n \n D1_expected = np.power(10.,-4.0)*np.matrix([[6.45, -4.23, 0.0],\n [-4.23, 6.45, 0.0],\n [0.0, 0.0, 4.74]])\n D1_diff_norm_max = np.nanmax(np.abs(D1_expected -laminate.D_1)/laminate.D_1)\n \n max_norm_diff = np.nanmax([A1_diff_norm_max,B1_diff_norm_max,C1_diff_norm_max,D1_diff_norm_max])\n\n self.assertAlmostEqual(max_norm_diff,0.0,places=2)\n \n \n def test_laminate_applied_force_for_strains(self):\n \"\"\"Apply a force and calculate the resultant laminate midplane strains.\n \n 'Fiber-Reinforced Composites' by Mallick (3rd edition), \n Example 3.13\n \n Test will check that the resultant strains match the expected \n strains from example 3.13 with a maximum normalized error of\n 2 decimal places (< 1% error)\n \"\"\"\n \n ply = Ply(E1=133.4e9, E2=8.78e9, nu12=0.26, G12=3.254e9, h=0.006) # h is in [mm]\n \n laminae_pos45 = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))\n laminae_neg45 = Laminae(ply=ply, theta_rad=(-45.0*np.pi/180.0))\n \n laminae_list = [laminae_pos45, laminae_neg45]\n laminate = Laminate(laminae_list)\n \n N = np.matrix.transpose(np.matrix([100.0e3, 0.0, 0.0])); # N[0] is in [N/m]\n M = np.matrix.transpose(np.matrix([0.0, 0.0, 0.0]));\n strain_dictionary = laminate.applied_stress(N,M)\n \n Epsilon = strain_dictionary['Epsilon']\n Kappa = 
strain_dictionary['Kappa']\n \n Epsilon_expected = np.matrix.transpose(np.matrix([77.385e-5, -50.715e-5, 0.0]))\n Kappa_expected = np.matrix.transpose(np.matrix([0.0, 0.0, 0.060354]))\n \n Epsilon_diff_norm_max = np.nanmax(np.abs(Epsilon_expected -Epsilon)/Epsilon)\n Kappa_diff_norm_max = np.nanmax(np.abs(Kappa_expected -Kappa)/Kappa)\n \n max_norm_diff = np.nanmax([Epsilon_diff_norm_max, Kappa_diff_norm_max])\n \n self.assertAlmostEqual(max_norm_diff,0.0,places=2)\n \n \n def test_laminate_applied_force_for_laminae_midplane_stress(self):\n \"\"\"Apply a force to a laminate and determine the midplane stresses \n of the laminae in the laminate.\n \n 'Fiber-Reinforced Composites' by Mallick (3rd edition), \n Example 3.13\n \n Test will look at the midplane stresses of the two laminae in\n the laminate.\n \"\"\"\n \n ply = Ply(E1=133.4e9, E2=8.78e9, nu12=0.26, G12=3.254e9, h=0.006) # h is in [mm]\n \n laminae_pos45 = Laminae(ply=ply, theta_rad=(45.0*np.pi/180.0))\n laminae_neg45 = Laminae(ply=ply, theta_rad=(-45.0*np.pi/180.0))\n \n laminae_list = [laminae_pos45, laminae_neg45]\n laminate = Laminate(laminae_list)\n \n N = np.matrix.transpose(np.matrix([100.0e3, 0.0, 0.0])) # N[0] is in [N/m]\n M = np.matrix.transpose(np.matrix([0.0, 0.0, 0.0]))\n strain_dictionary = laminate.applied_stress(N,M)\n \n Epsilon = strain_dictionary['Epsilon']\n Kappa = strain_dictionary['Kappa']\n \n laminae_midplane_strains = laminate.laminae_midplane_strain(Epsilon, Kappa)\n \n laminae_midplane_stresses = laminate.laminae_stress(Epsilon, Kappa)\n \n laminae_midplane_stress_expected = np.power(10.0,6.0)*np.matrix.transpose(np.matrix([8.33, 0.0, 2.09]))\n self.assertMatrixAlmostEqualPercent(laminae_midplane_stresses[0],laminae_midplane_stress_expected)\n \n laminae_midplane_stress_expected = np.power(10.0,6.0)*np.matrix.transpose(np.matrix([8.33, 0.0, -2.09]))\n self.assertMatrixAlmostEqualPercent(laminae_midplane_stresses[1],laminae_midplane_stress_expected)\n \n \n def assertMatrixAlmostEqualPercent(self, matrix, matrix_expected, msg=None, percent=0.01, places=7):\n \"\"\"Will compare a numpy matrix to see if all values are equal within \n the defined percentage (default = 1%)\n \"\"\"\n self.assertEqual(matrix.size,matrix_expected.size)\n \n if (matrix_expected == 0.0).any(): # Check any expected = 0 values against a fixed number of decimal places, not a percent (percent can be high when expecting = 0)\n matrix_expected_neq0 = matrix_expected[matrix_expected != 0.0]\n matrix_neq0 = matrix[matrix_expected != 0.0]\n matrix_expected_eq0 = matrix_expected[matrix_expected == 0.0]\n matrix_eq0 = matrix[matrix_expected == 0.0]\n \n if (matrix_expected_neq0.any()):\n self.assertMatrixAlmostEqualPercent(matrix_neq0, matrix_expected_neq0, msg=msg, percent=percent)\n \n if (matrix_expected_eq0.any()):\n self.assertMatrixAlmostEqual(matrix_eq0, matrix_expected_eq0, msg=msg, places=places)\n \n else:\n diff_norm_max = np.nanmax(np.abs(matrix_expected -matrix)/matrix_expected)\n self.assertAlmostEqual(diff_norm_max,0.0, msg=msg, delta=percent)\n \n \n def assertMatrixAlmostEqual(self, matrix, matrix_expected, places=7, msg=None):\n \"\"\"Will compare a numpy matrix to see if all values are equal within \n the defined number of decimal places (default = 7)\n \"\"\"\n self.assertEqual(matrix.size,matrix_expected.size)\n \n diff_norm_max = np.nanmax(np.abs(matrix_expected -matrix))\n 
self.assertAlmostEqual(diff_norm_max,0.0,places=places, msg=msg)\n \n \nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCompositePlateClasses)\n unittest.TextTestRunner(verbosity=2).run(suite)\n"
]
| [
[
"numpy.max",
"numpy.matrix",
"numpy.power",
"numpy.abs",
"numpy.nanmax"
]
]
|
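A hedged sketch of the on-axis reduced-stiffness computation behind the composite-plate tests above, using the same Mallick carbon/epoxy constants (in GPa); this is the standard classical-lamination-theory formula that Ply/Laminae presumably implement internally, not code from the repo itself.

    import numpy as np

    E1, E2, nu12, G12 = 133.4, 8.78, 0.26, 3.254
    nu21 = nu12 * E2 / E1          # reciprocity: nu21 = nu12 * E2 / E1
    denom = 1.0 - nu12 * nu21
    Q = np.array([[E1 / denom,        nu12 * E2 / denom, 0.0],
                  [nu12 * E2 / denom, E2 / denom,        0.0],
                  [0.0,               0.0,               G12]])
    print(np.round(Q, 2))  # Q11 ~ 134.0, Q22 ~ 8.82, Q12 ~ 2.29, Q66 = 3.254

The Q_bar matrix checked in test_laminae_matrix_orotropic is this Q rotated to 45 degrees via the usual transformation.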
RevanMacQueen/LearningFromHumans | [
"2a899f103a329dba17ea132ce4de167da3f34752"
]
| [
"lfh/envs/atari.py"
]
| [
"import numpy as np\nfrom collections import deque\nimport gym\nfrom gym import spaces\nimport cv2\ncv2.ocl.setUseOpenCL(False)\n# from dqn.environment.monitor import Monitor\n\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env, noop_max=30):\n \"\"\"Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'\n\n def step(self, action):\n return self.env.step(action)\n\n def reset(self, **kwargs):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)\n # pylint: disable=E1101\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Take action on reset for environments that are fixed until firing.\"\"\"\n gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def step(self, action):\n return self.env.step(action)\n\n def reset(self, **kwargs):\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset(**kwargs)\n return obs\n\n\nclass EpisodicLifeEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. 
since it helps value estimation.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = done\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped.ale.lives()\n if self.lives > lives > 0:\n # for Qbert somtimes we stay in lives == 0 condtion for a few frames\n # so its important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.lives = lives\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs\n\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env, skip=4, render_on_action=False):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape,\n dtype=np.uint8)\n self._skip = skip\n self._render = render_on_action\n self._window_open = False\n\n @property\n def window_open(self):\n return self._window_open\n\n def step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done, info = None, None\n\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n if i == self._skip - 2:\n self._obs_buffer[0] = obs\n if i == self._skip - 1:\n self._obs_buffer[1] = obs\n total_reward += reward\n\n if self._render:\n self._window_open = self.env.render()\n\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = self._obs_buffer.max(axis=0)\n\n return max_frame, total_reward, done, info\n\n def reset(self, **kwargs):\n if self._render:\n self._window_open = self.env.render()\n return self.env.reset(**kwargs)\n\n\nclass ClipRewardEnv(gym.RewardWrapper):\n def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)\n\n def reward(self, reward):\n \"\"\"Bin reward to {+1, 0, -1} by its sign.\"\"\"\n return np.sign(reward)\n\n\nclass WarpFrame(gym.ObservationWrapper):\n def __init__(self, env):\n \"\"\"Warp frames to 84x84 as done in the Nature paper and later work.\"\"\"\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n self.observation_space = spaces.Box(\n low=0, high=255,\n shape=(self.height, self.width, 1), dtype=np.uint8)\n\n def observation(self, frame):\n _frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n _frame = cv2.resize(_frame, (self.width, self.height),\n interpolation=cv2.INTER_AREA)\n return _frame[:, :, None], frame[:, :, :, None]\n\n\nclass FrameStack(gym.Wrapper):\n def __init__(self, env, k):\n \"\"\"Stack k last frames.\n\n Returns lazy array, which is much more memory efficient.\n\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(\n low=0, high=255, shape=(shp[0]*k, shp[1], shp[2]),\n dtype=np.uint8)\n\n def 
reset(self):\n ob = self.env.reset()\n for _ in range(self.k):\n self.frames.append(ob)\n return self._get_ob()\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n self.frames.append(ob)\n return self._get_ob(), reward, done, info\n\n def _get_ob(self):\n assert len(self.frames) == self.k\n _gray_frames = [self.frames[i][0] for i in range(self.k)]\n _colored_frames = [self.frames[i][1] for i in range(self.k)]\n return np.asarray(np.concatenate(_gray_frames, axis=0)), np.asarray(\n np.concatenate(_colored_frames, axis=0))\n # return LazyFrames(_gray_frames), LazyFrames(_colored_frames)\n\n\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"This object ensures that common frames between the observations\n are only stored once. It exists purely to optimize memory usage which\n can be huge for DQN's 1M frames replay buffers.\n\n This object should only be converted to numpy array before being passed\n to the model.\n\n You'd not believe how complex the previous solution was.\"\"\"\n self._frames = frames\n self._out = None\n\n def _force(self):\n if self._out is None:\n self._out = np.concatenate(self._frames, axis=0)\n self._frames = None\n return self._out\n\n def __array__(self, dtype=None):\n out = self._force()\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n def __len__(self):\n return len(self._force())\n\n def __getitem__(self, i):\n return self._force()[i]\n\n\nclass ImageToPyTorch(gym.ObservationWrapper):\n \"\"\"\n Change image shape to CWH\n \"\"\"\n def __init__(self, env):\n super(ImageToPyTorch, self).__init__(env)\n old_shape = self.observation_space.shape\n self.observation_space = gym.spaces.Box(\n low=0, high=255,\n shape=(old_shape[-1], old_shape[0], old_shape[1]),\n dtype=np.uint8)\n\n def observation(self, observation):\n return [observation[0].transpose(2, 0, 1),\n observation[1].transpose(3, 0, 1, 2)]\n\n\ndef make_atari(env_id, skip_frame=4, render_every_frame=False):\n\n env = gym.make(env_id)\n assert 'NoFrameskip' in env.spec.id\n env = NoopResetEnv(env, noop_max=30)\n env = MaxAndSkipEnv(env, skip=skip_frame, render_on_action=render_every_frame)\n return env\n\n\ndef wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=None):\n \"\"\"Configure environment for DeepMind-style Atari.\n \"\"\"\n if episode_life:\n env = EpisodicLifeEnv(env)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrame(env)\n env = ImageToPyTorch(env)\n if frame_stack:\n env = FrameStack(env, frame_stack)\n if clip_rewards:\n env = ClipRewardEnv(env)\n return env\n\n\n# def make_env(env_name, episode_life, skip_frame, clip_rewards, frame_stack, logdir):\ndef make_env(env_name, episode_life, skip_frame, clip_rewards, frame_stack, render_every_frame=False):\n _env = make_atari(env_name, skip_frame, render_every_frame=render_every_frame)\n\n # Added by Daniel ----------------------------------------------------------\n # _env = Monitor(_env, logdir, allow_early_resets=True)\n # --------------------------------------------------------------------------\n\n _env = wrap_deepmind(\n _env,\n episode_life=episode_life,\n clip_rewards=clip_rewards,\n frame_stack=frame_stack)\n\n return _env\n"
]
| [
[
"numpy.concatenate",
"numpy.sign",
"numpy.zeros"
]
]
|
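A minimal usage sketch for the Atari wrapper stack serialized above, assuming the classic gym (pre-0.26) reset/step API these wrappers are written against and that make_env is importable from that module; the environment id, frame-stack depth, and the random-action loop are illustrative choices, not part of the original file:

    # Sketch: composing the wrappers defined above (classic gym API assumed).
    env = make_env(
        "BreakoutNoFrameskip-v4",  # hypothetical id; make_atari asserts 'NoFrameskip' is in env.spec.id
        episode_life=True,         # EpisodicLifeEnv: a lost life ends the learning episode
        skip_frame=4,              # MaxAndSkipEnv: repeat each action 4 raw frames, max-pool the last two
        clip_rewards=True,         # ClipRewardEnv: rewards binned to {-1, 0, +1} via np.sign
        frame_stack=4,             # FrameStack: keep the last 4 processed frames
    )

    gray, colored = env.reset()    # FrameStack._get_ob returns a (gray_stack, colored_stack) pair
    # gray is (4, 84, 84) uint8 after WarpFrame -> ImageToPyTorch -> FrameStack;
    # colored stacks the raw RGB frames along the first axis.
    done = False
    while not done:
        action = env.action_space.sample()  # random policy, only to exercise the wrapper stack
        (gray, colored), reward, done, info = env.step(action)
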
mark-nick-o/mavlink_experiments | [
"a9e6d4a87ef7c8e58ad36ece2533b3fecb39dbb1"
]
| [
"newtests/250222_hb4.py"
]
| [
"# ===============================================================================================================================\r\n#\r\n# Name : mavlinkSonyCamWriteVals.py\r\n# Desc : Global memory value class for use to write mavlink to sony cam\r\n# Auth : AIR-obots Ai-Robots\r\n#\r\n# ===============================================================================================================================\r\n\r\n#\r\n# for paralel tasking of the camera action routines\r\n#\r\n# from multiprocessing import Process\r\nimport multiprocessing\r\n\r\n# for debug\r\nimport logging\r\n\r\n# for signal interypt handling\r\nimport signal\r\n\r\nimport time\r\n\r\nclass fastGlobals:\r\n __slots__ = ('take_picture','start_video',) # __slots__ defines a fast variable\r\n take_picture: int \r\n start_video: int \r\n\r\n# ==== enumerated camera state class\r\n#\r\nimport enum\r\nclass camStateClass(enum.IntEnum):\r\n\r\n idle = 0\r\n taking_photo = 1\r\n photo_ack = 2\r\n uploading_photo = 3\r\n photo_complete = 4\r\n recording_vid = 5\r\n video_ack = 6\r\n uploading_vid = 7\r\n video_complete = 8\r\n configuring_photo = 9\r\n configuring_video = 10\r\n photo_continuos = 11\r\n \r\nclass mavlinkSonyCamWriteVals():\r\n\r\n # state for multi-process object\r\n STATE_INIT = 99\r\n STATE_READY = 1\r\n STATE_CAM_WRITING = 2\r\n STATE_MAV_READING = 3\r\n STATE_MAV_WRITING = 4\r\n STATE_CAM_READING = 5\r\n # global counter for values\r\n numberOfVals = 8\r\n \r\n # flags to incdicate write action for previous store register\r\n WRITE_PREV_DATA = 1\r\n DONT_WRITE_PREV_DATA = 0\r\n\r\n # mavlink write actions (requests from GCS)\r\n MAV_REQ_ALL_PARAM = 255\r\n ParamStillCap = 1\r\n ParamWhiteBala = 2\r\n ParamShutSpd = 4\r\n ParamIso = 8\r\n ParamFocus = 16\r\n ParamFocusArea = 32\r\n ParamAperture = 64\r\n ParamExPro = 128\r\n\r\n # indiviual states when a sequential priority queue is required\r\n FUNC_EX_PRO = 7\r\n FUNC_APER = 8\r\n FUNC_FOCUS = 9\r\n FUNC_ISO = 10\r\n FUNC_SS = 11\r\n FUNC_WB = 12\r\n FUNC_SC = 13\r\n\r\n # bit numbers representing the write protect status for each feature\r\n # true is write_protect state on\r\n #\r\n WriPro_EX_PRO = 1\r\n WriPro_APER = 2\r\n WriPro_FOCUS = 3\r\n WriPro_ISO = 4\r\n WriPro_SS = 5\r\n WriPro_WB = 6\r\n WriPro_SC = 7\r\n WriPro_FOCUSA = 8\r\n \r\n def __init__ (self):\r\n self.set_sony_iso = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.set_sony_aperture = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.set_sony_ex_pro = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.set_sony_focus_area = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.set_sony_focus = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.set_sony_shutter = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.set_sony_white_bal = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.set_sony_still_cap_mode = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.prev_sony_iso = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.prev_sony_aperture = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.prev_sony_ex_pro = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.prev_sony_focus_area = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.prev_sony_focus = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n 
self.prev_sony_shutter = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.prev_sony_white_bal = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.prev_sony_still_cap_mode = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n self.mav_req_all_param = multiprocessing.Value('i', 0)\r\n self.mav_ext_req_all_param = multiprocessing.Value('i', 0)\r\n self.mav_write_pro_word = multiprocessing.Value('l', 0)\r\n self.take_photo = multiprocessing.Value('b', False)\r\n self.take_continuos = multiprocessing.Value('b', False) \r\n self.reset_cam = multiprocessing.Value('b', False) \r\n self.state = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n mavlinkSonyCamWriteVals.numberOfVals += 8 # global counter of the number of values\r\n \r\n def __del__(self): \r\n class_name = self.__class__.__name__ \r\n mavlinkSonyCamWriteVals.numberOfVals -= 1 # global counter of the number of values\r\n print('{} Deleted'.format(class_name))\r\n\r\n def get_value_counter(self): \r\n print('mavlink to sony writes has %d set-points' % (mavlinkSonyCamWriteVals.numberOfVals))\r\n return mavlinkSonyCamWriteVals.numberOfVals\t\r\n\r\n def init_class_state( self ):\r\n if (self.state.value == mavlinkSonyCamWriteVals.STATE_INIT):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n\r\n def set_WritePro(self, myId, bit, timeout=20, reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.mav_write_pro_word.get_lock(): \r\n self.mav_write_pro_word.value |= (1 << bit)\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n print(f\"\\033[37m set the write protect word for {bit} \\033[0m\")\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False \r\n\r\n def clear_WritePro(self, myId, bit, timeout=20, reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.mav_write_pro_word.get_lock(): \r\n self.mav_write_pro_word.value &= ~(1 << bit)\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n print(f\"\\033[37m cleared the write protect word for {bit} \\033[0m\")\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False\r\n \r\n def setVal_sony_iso(self,value,myId,mode=0,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n if mode == 1:\r\n with self.prev_sony_iso.get_lock(): \r\n self.prev_sony_iso.value = self.set_sony_iso.value\r\n with self.set_sony_iso.get_lock(): \r\n self.set_sony_iso.value = int(value)\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n print(f\"\\033[37m wrote the value {value} to 
{self.prev_sony_iso.value} {self.set_sony_iso.value} \\033[0m\")\r\n #exit(99)\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n #exit(90)\r\n return False \r\n\r\n def clearReq_sony_iso(self,myId,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.set_sony_iso.get_lock():\r\n self.set_sony_iso.value = self.prev_sony_iso.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n print(f\"\\033[37m Reset the sony Iso request, it's not writable \\033[0m\")\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False \r\n \r\n def getVal_sony_iso(self,YourID,timeout=20,reset_state=False): \r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = YourID\r\n print(' value: {} previous: {}'.format(self.set_sony_iso.value,self.prev_sony_iso.value))\r\n c = self.set_sony_iso.value\r\n p = self.prev_sony_iso.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return c,p,True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return self.set_sony_iso.value,self.prev_sony_iso.value,False\r\n \r\n def setVal_sony_aperture(self,value,myId,mode=0,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n if mode == 1:\r\n with self.prev_sony_aperture.get_lock():\r\n self.prev_sony_aperture.value = self.set_sony_aperture.value\r\n with self.set_sony_aperture.get_lock():\r\n self.set_sony_aperture.value = int(value)\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False \r\n\r\n def clearReq_sony_aperture(self,myId,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.set_sony_aperture.get_lock():\r\n self.set_sony_aperture.value = self.prev_sony_aperture.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False \r\n \r\n def getVal_sony_aperture(self,YourID,timeout=20,reset_state=False): \r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with 
self.state.get_lock():\r\n self.state.value = YourID\r\n print(' value: {} previous: {}'.format(self.set_sony_aperture.value,self.prev_sony_aperture.value))\r\n c = self.set_sony_aperture.value\r\n p = self.prev_sony_aperture.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return c,p,True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return self.set_sony_aperture.value,self.prev_sony_aperture.value,False\r\n \r\n def setVal_sony_ex_pro(self,value,myId,mode=0,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n if mode == 1:\r\n with self.prev_sony_ex_pro.get_lock():\r\n self.prev_sony_ex_pro.value = self.set_sony_ex_pro.value\r\n with self.set_sony_ex_pro.get_lock():\r\n self.set_sony_ex_pro.value = int(value)\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False \r\n\r\n def clearReq_sony_ex_pro(self,myId,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.set_sony_ex_pro.get_lock():\r\n self.set_sony_ex_pro.value = self.prev_sony_ex_pro.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False \r\n \r\n def getVal_sony_ex_pro(self,YourID,timeout=20,reset_state=False): \r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = YourID\r\n print(' value: {} previous: {}'.format(self.set_sony_ex_pro.value,self.prev_sony_ex_pro.value))\r\n c = self.set_sony_ex_pro.value\r\n p = self.prev_sony_ex_pro.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return c,p,True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return self.set_sony_ex_pro.value,self.prev_sony_ex_pro.value,False\r\n \r\n def setVal_sony_focus_area(self,value,myId,mode=0,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n if mode == 1:\r\n with self.prev_sony_focus_area.get_lock():\r\n self.prev_sony_focus_area.value = self.set_sony_focus_area.value\r\n with self.set_sony_focus_area.get_lock():\r\n self.set_sony_focus_area.value = int(value)\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with 
self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False \r\n\r\n def clearReq_sony_focus_area(self,myId,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.set_sony_focus_area.get_lock():\r\n self.set_sony_focus_area.value = self.prev_sony_focus_area.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False \r\n \r\n def getVal_sony_focus_area(self,YourID,timeout=20,reset_state=False): \r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = YourID\r\n print(' value: {} previous: {}'.format(self.set_sony_focus_area.value,self.prev_sony_focus_area.value))\r\n c = self.set_sony_focus_area.value\r\n p = self.prev_sony_focus_area.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return c,p,True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return self.set_sony_focus_area.value,self.prev_sony_focus_area.value,False\r\n \r\n def setVal_sony_focus(self,value,myId,mode=0,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n if mode == 1:\r\n with self.prev_sony_focus.get_lock():\r\n self.prev_sony_focus.value = self.set_sony_focus.value\r\n with self.set_sony_focus.get_lock():\r\n self.set_sony_focus.value = int(value)\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock(): \r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False \r\n\r\n def clearReq_sony_focus(self,myId,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.set_sony_focus.get_lock():\r\n self.set_sony_focus.value = self.prev_sony_focus.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock(): \r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False\r\n \r\n def getVal_sony_focus(self,YourID,timeout=20,reset_state=False): \r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = YourID\r\n print(' value: {} previous: {}'.format(self.set_sony_focus.value,self.prev_sony_focus.value))\r\n c = self.set_sony_focus.value\r\n p = 
self.prev_sony_focus.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return c,p,True\r\n else:\r\n if (reset_state == True): \r\n with self.state.get_lock(): \r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return self.set_sony_focus.value,self.prev_sony_focus.value,False\r\n \r\n def setVal_sony_shutter(self,value,myId,mode=0,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n if mode == 1:\r\n with self.prev_sony_shutter.get_lock():\r\n self.prev_sony_shutter.value = self.set_sony_shutter.value\r\n with self.set_sony_shutter.get_lock():\r\n self.set_sony_shutter.value = int(value)\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock(): \r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False\r\n\r\n def clearReq_sony_shutter(self,myId,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.set_sony_shutter.get_lock():\r\n self.set_sony_shutter.value = self.prev_sony_shutter.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock(): \r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False\r\n \r\n def getVal_sony_shutter(self,YourID,timeout=20,reset_state=False): \r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = YourID\r\n print('value: {} previous: {}'.format(self.set_sony_shutter.value,self.prev_sony_shutter.value))\r\n c = self.set_sony_shutter.value\r\n p = self.prev_sony_shutter.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return c,p,True\r\n else:\r\n if (reset_state == True): \r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return self.set_sony_shutter.value,self.prev_sony_shutter.value,False\r\n \r\n def setVal_sony_white_bal(self,value,myId,mode=0,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n if mode == 1:\r\n with self.prev_sony_white_bal.get_lock():\r\n self.prev_sony_white_bal.value = self.set_sony_white_bal.value\r\n with self.set_sony_white_bal.get_lock():\r\n self.set_sony_white_bal.value = int(value)\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock(): \r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False\r\n\r\n def clearReq_sony_white_bal(self,myId,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not 
(self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.set_sony_white_bal.get_lock():\r\n self.set_sony_white_bal.value = self.prev_sony_white_bal.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock(): \r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False\r\n \r\n def getVal_sony_white_bal(self,YourID,timeout=20,reset_state=False): \r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = YourID\r\n print('value: {} previous: {}'.format(self.set_sony_white_bal.value,self.prev_sony_white_bal.value))\r\n c = self.set_sony_white_bal.value\r\n p = self.prev_sony_white_bal.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return c,p,True\r\n else:\r\n if (reset_state == True): \r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return self.set_sony_white_bal.value,self.prev_sony_white_bal.value,False\r\n \r\n def setVal_sony_still_cap_mode(self,value,myId,mode=0,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n if mode == 1:\r\n with self.prev_sony_still_cap_mode.get_lock():\r\n self.prev_sony_still_cap_mode.value = self.set_sony_still_cap_mode.value\r\n with self.set_sony_still_cap_mode.get_lock():\r\n self.set_sony_still_cap_mode.value = int(value)\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True): \r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False\r\n\r\n def clearReq_sony_still_cap_mode(self,myId,timeout=20,reset_state=False):\r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.set_sony_still_cap_mode.get_lock():\r\n self.set_sony_still_cap_mode.value = self.prev_sony_still_cap_mode.value\r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return True\r\n else:\r\n if (reset_state == True): \r\n with self.state.get_lock():\r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return False\r\n \r\n def getVal_sony_still_cap_mode(self,YourID,timeout=20,reset_state=False): \r\n timeCnt = 0\r\n while (not (self.state.value == mavlinkSonyCamWriteVals.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = YourID\r\n print('value: {} previous: {}'.format(self.set_sony_still_cap_mode.value,self.prev_sony_still_cap_mode.value))\r\n c = self.set_sony_still_cap_mode.value\r\n p = self.prev_sony_still_cap_mode.value\r\n with self.state.get_lock():\r\n self.state.value = 
mavlinkSonyCamWriteVals.STATE_READY\r\n return c,p,True\r\n else:\r\n if (reset_state == True):\r\n with self.state.get_lock(): \r\n self.state.value = mavlinkSonyCamWriteVals.STATE_READY\r\n return self.set_sony_still_cap_mode.value,self.prev_sony_still_cap_mode.value,False\r\n\r\n def setMavIsoModeData( self, dataRcv ):\r\n \r\n ret = False \r\n ret = self.setVal_sony_iso(dataRcv,mavlinkSonyCamWriteVals.STATE_MAV_WRITING,mavlinkSonyCamWriteVals.DONT_WRITE_PREV_DATA,5) \r\n return ret\r\n\r\n def getMavIsoModeData( self ):\r\n \r\n ret = False \r\n set_sony_iso = 0\r\n prev_sony_iso = 0 \r\n set_sony_iso,prev_sony_iso,ret = self.getVal_sony_iso(mavlinkSonyCamWriteVals.STATE_MAV_READING) \r\n return set_sony_iso,prev_sony_iso,ret\r\n \r\n def setMavApertureData( self, dataRcv ):\r\n \r\n ret = False \r\n ret = self.setVal_sony_aperture(dataRcv,mavlinkSonyCamWriteVals.STATE_MAV_WRITING,mavlinkSonyCamWriteVals.DONT_WRITE_PREV_DATA,5) \r\n return ret\r\n\r\n def getMavApertureData( self ):\r\n \r\n ret = False \r\n set_sony = 0\r\n prev_sony = 0 \r\n set_sony,prev_sony,ret = self.getVal_sony_aperture(mavlinkSonyCamWriteVals.STATE_MAV_READING) \r\n return set_sony,prev_sony,ret\r\n \r\n def setMavExProData( self, dataRcv ):\r\n \r\n ret = False \r\n ret = self.setVal_sony_ex_pro(dataRcv,mavlinkSonyCamWriteVals.STATE_MAV_WRITING,mavlinkSonyCamWriteVals.DONT_WRITE_PREV_DATA,5) \r\n return ret\r\n\r\n def getMavExProData( self ):\r\n \r\n ret = False \r\n set_sony = 0\r\n prev_sony = 0 \r\n set_sony,prev_sony,ret = self.getVal_sony_ex_pro(mavlinkSonyCamWriteVals.STATE_MAV_READING) \r\n return set_sony,prev_sony,ret\r\n \r\n def setMavFocusAreaData( self, dataRcv ):\r\n \r\n ret = False \r\n ret = self.setVal_sony_focus_area(dataRcv,mavlinkSonyCamWriteVals.STATE_MAV_WRITING,mavlinkSonyCamWriteVals.DONT_WRITE_PREV_DATA,5) \r\n return ret\r\n\r\n def getMavFocusAreaData( self ):\r\n \r\n ret = False \r\n set_sony = 0\r\n prev_sony = 0 \r\n set_sony,prev_sony,ret = self.getVal_sony_focus_area(mavlinkSonyCamWriteVals.STATE_MAV_READING) \r\n return set_sony,prev_sony,ret\r\n \r\n def setMavFocusData( self, dataRcv ):\r\n \r\n ret = False \r\n ret = self.setVal_sony_focus(dataRcv,mavlinkSonyCamWriteVals.STATE_MAV_WRITING,mavlinkSonyCamWriteVals.DONT_WRITE_PREV_DATA,5) \r\n return ret\r\n\r\n def getMavFocusData( self ):\r\n \r\n ret = False \r\n set_sony = 0\r\n prev_sony = 0 \r\n set_sony,prev_sony,ret = self.getVal_sony_focus(mavlinkSonyCamWriteVals.STATE_MAV_READING) \r\n return set_sony,prev_sony,ret\r\n \r\n def setMavShutterData( self, dataRcv ):\r\n \r\n ret = False \r\n ret = self.setVal_sony_shutter(dataRcv,mavlinkSonyCamWriteVals.STATE_MAV_WRITING,mavlinkSonyCamWriteVals.DONT_WRITE_PREV_DATA,5) \r\n return ret\r\n\r\n def getMavShutterData( self ):\r\n \r\n ret = False \r\n set_sony = 0\r\n prev_sony = 0 \r\n set_sony,prev_sony,ret = self.getVal_sony_shutter(mavlinkSonyCamWriteVals.STATE_MAV_READING) \r\n return set_sony,prev_sony,ret\r\n \r\n def setMavWhiteBalData( self, dataRcv ):\r\n \r\n ret = False \r\n ret = self.setVal_sony_white_bal(dataRcv,mavlinkSonyCamWriteVals.STATE_MAV_WRITING,mavlinkSonyCamWriteVals.DONT_WRITE_PREV_DATA,5) \r\n return ret\r\n\r\n def getMavWhiteBalData( self ):\r\n \r\n ret = False \r\n set_sony = 0\r\n prev_sony = 0 \r\n set_sony,prev_sony,ret = self.getVal_sony_white_bal(mavlinkSonyCamWriteVals.STATE_MAV_READING) \r\n return set_sony,prev_sony,ret\r\n \r\n def setMavStillCapModeData( self, dataRcv ):\r\n \r\n ret = False \r\n ret = 
self.setVal_sony_still_cap_mode(dataRcv,mavlinkSonyCamWriteVals.STATE_MAV_WRITING,mavlinkSonyCamWriteVals.DONT_WRITE_PREV_DATA,5) \r\n return ret\r\n\r\n def getMavStillCapModeData( self ):\r\n \r\n ret = False \r\n set_sony = 0\r\n prev_sony = 0 \r\n set_sony,prev_sony,ret = self.getVal_sony_still_cap_mode(mavlinkSonyCamWriteVals.STATE_MAV_READING) \r\n return set_sony,prev_sony,ret\r\n \r\n# ===============================================================================================================================\r\n#\r\n# Name : MemoryValueClass.py\r\n# Desc : Global memory value class for use with cameras and mavlink\r\n# Auth : AIR-obots Ai-Robots\r\n#\r\n# ===============================================================================================================================\r\nimport time\r\n \r\nclass memoryValue():\r\n\r\n # multi process thread status of object\r\n STATE_READY = 1\r\n STATE_CAM_WRITING = 2\r\n STATE_MAV_READING = 3\r\n STATE_MAV_WRITING = 4\r\n STATE_CAM_READING = 5\r\n # number of objects created\r\n numberOfVals = 0\r\n\r\n def __init__ (self, name = 'value_name_not_set', signal = 0, prev = 0, state = STATE_READY):\r\n self.signal = multiprocessing.Value('i', signal) # signal.value value\r\n self.prev = multiprocessing.Value('i', prev) # previous signal value\r\n self.state = multiprocessing.Value('i', state) # state of the value\r\n self.nextpointer = None # pointer for chain if needed\r\n self.name = name # name as a string\r\n self.timestamp = multiprocessing.Value('l', 0) # timestamp\r\n self.updateNeeded = multiprocessing.Value('b', False) # update required\r\n self.ack_send = multiprocessing.Value('b', False) # param_ext_ack needed\r\n self.index = 0 # index number used for ack send\r\n memoryValue.numberOfVals += 1 # global counter of the number of values\r\n \r\n def __del__(self): \r\n class_name = self.__class__.__name__ \r\n memoryValue.numberOfVals -= 1 # global counter of the number of values\r\n print('{} Deleted'.format(class_name))\r\n\r\n def get_value_counter(self): \r\n print('%s: %d' % (self.name,memoryValue.numberOfVals))\r\n return memoryValue.numberOfVals\t\t\r\n \r\n def get_value_data(self,YourID,timeout=100): \r\n timeCnt = 0\r\n while (not (self.state.value == memoryValue.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = YourID\r\n print('Description: {}. 
value: {} previous: {}'.format(self.name, self.signal.value,self.prev.value))\r\n with self.state.get_lock():\r\n self.state.value = memoryValue.STATE_READY\r\n return self.name,self.signal.value,self.prev.value,True\r\n else:\r\n return self.name,self.signal.value,self.prev.value,False\r\n\r\n def set_value(self,value,myId,timeout=100):\r\n timeCnt = 0\r\n while (not (self.state.value == memoryValue.STATE_READY)) and (timeCnt < timeout):\r\n time.sleep(0.1)\r\n timeCnt += 1\r\n\r\n if (timeCnt < timeout):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.prev.get_lock():\r\n self.prev.value = self.signal.value\r\n with self.signal.get_lock():\r\n self.signal.value = value\r\n with self.updateNeeded.get_lock():\r\n self.updateNeeded.value = True\r\n with self.state.get_lock():\r\n self.state.value = memoryValue.STATE_READY\r\n return True\r\n else:\r\n return False\r\n\r\n def get_value_data_if_avail(self,YourID): \r\n if (self.state.value == memoryValue.STATE_READY):\r\n with self.state.get_lock():\r\n self.state.value = YourID\r\n print('Description: {}. value: {}'.format(self.name, self.signal.value))\r\n with self.state.get_lock():\r\n self.state.value = memoryValue.STATE_READY\r\n return self.name,self.signal.value,self.prev.value,True\r\n else:\r\n return self.name,self.signal.value,self.prev.value,False\r\n\r\n def set_update_flag( self, stateSent, myId ):\r\n if (self.state.value == memoryValue.STATE_READY):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.updateNeeded.get_lock():\r\n self.updateNeeded.value = stateSent\r\n with self.state.get_lock():\r\n self.state.value = memoryValue.STATE_READY\r\n return True\r\n else:\r\n return False\r\n\r\n def get_update_flag( self, myId ):\r\n v = 0\r\n if (self.state.value == memoryValue.STATE_READY):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n v = self.updateNeeded.value \r\n with self.state.get_lock():\r\n self.state.value = memoryValue.STATE_READY\r\n return v,True\r\n else:\r\n return v,False\r\n \r\n def set_ack_send( self, stateSent, myId ):\r\n if (self.state.value == memoryValue.STATE_READY):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n with self.ack_send.get_lock():\r\n self.ack_send.value = stateSent\r\n with self.state.get_lock():\r\n self.state.value = memoryValue.STATE_READY\r\n return True\r\n else:\r\n return False\t\t\t\r\n\r\n def get_ack_send( self, myId ):\r\n v = 0\r\n if (self.state.value == memoryValue.STATE_READY):\r\n with self.state.get_lock():\r\n self.state.value = myId\r\n v = self.ack_send.value \r\n with self.state.get_lock():\r\n self.state.value = memoryValue.STATE_READY\r\n return v,True\r\n else:\r\n return v,False\r\n \r\nif __name__ == '__main__':\r\n\r\n #\r\n # Test that the library is okay\r\n #\r\n initVal = 23\r\n getName = \"noName\"\r\n getValueforMAVSending = 0\r\n getPrev = 0\r\n \r\n SonyWhiteBalance = memoryValue('sonyWhiteBal',initVal)\r\n FocusSetpoint = memoryValue('FocusSetpoint',initVal+6)\r\n\r\n #\r\n # example of use in a mavlink sender\r\n\t#\r\n mavSetPointVal = 99 #we_got_from_mavlink\r\n Timeout = 20\r\n if (FocusSetpoint.set_value(mavSetPointVal, memoryValue.STATE_MAV_WRITING, Timeout) == True):\r\n # { value has been successfully set }\r\n print(\"set the setpoint value focus\")\r\n\r\n #\r\n # example to get the white balance setting from the cam to send over mavlink\r\n #\r\n getName, getValueforMAVSending, getPrev, myState = SonyWhiteBalance.get_value_data(memoryValue.STATE_MAV_READING, Timeout) \r\n if 
(myState == True):\r\n # now pack the data\r\n print(\"got data ok\")\r\n else:\r\n # you got an error or timeout\r\n print(\"data error\")\r\n\r\n #\r\n # example of use in a mavlink sender\r\n #\r\n mavSetPointVal = 199 #we_got_from_mavlink\r\n Timeout = 20\r\n if (SonyWhiteBalance.set_value(mavSetPointVal, memoryValue.STATE_MAV_WRITING, Timeout) == True):\r\n # { value has been successfully set }\r\n print(\"set the setpoint value white balance\")\r\n\r\n #\r\n # example to get the white balance setting from the cam to send over mavlink\r\n #\r\n getName, getValueforMAVSending, getPrev, myState = SonyWhiteBalance.get_value_data(memoryValue.STATE_MAV_READING, Timeout) \r\n if (myState == True):\r\n # now pack the data\r\n print(\"got data ok\")\r\n else:\r\n # you got an error or timeout\r\n print(\"data error\")\r\n \r\n #\r\n # example to iterate without waiting for completion on the write to the value from elsewhere\r\n #\r\n myState = False\r\n while not myState == True:\r\n getName, getVal, getPrev, myState = FocusSetpoint.get_value_data_if_avail( memoryValue.STATE_CAM_READING )\r\n if myState == True:\r\n # now use this value and send to the camera\r\n print(\"setpoint available\")\r\n else:\r\n # do something else while waiting\r\n print(\"setpoint being written by other task\")\r\n # what you do until it has arrived\r\n time.sleep(0.1)\r\n \r\n # what you do after\r\n print(\"using the setpoint to change the camera\")\r\n\r\n #\r\n # print the number of memory values\r\n #\r\n print(FocusSetpoint.get_value_counter())\r\n print(SonyWhiteBalance.get_value_counter())\r\n \r\n #\r\n # Release the shared memory\r\n #\t\r\n del FocusSetpoint\r\n del SonyWhiteBalance\r\n\t\r\n# ===============================================================================================================================\r\n#\r\n# Name : NewSonyAlphaClass.py\r\n# Desc : Communicate with the new Sony Alpha series of cameras\r\n# Auth : AIR-obots Ai-Robots\r\n#\r\n# ===============================================================================================================================\r\nimport shlex, subprocess, pprint\r\nimport os, sys # used by the platform checks and shell-command helpers below\r\n\r\nclass sonyAlphaNewCamera():\r\n\r\n def __init__ (self, name = 'sonyAlphaCamClass'):\r\n self.name = name # name as a string\r\n self.error_counts = multiprocessing.Value('i', 0)\r\n \r\n def __del__(self): \r\n class_name = self.__class__.__name__ \r\n print('{} Deleted'.format(class_name))\r\n \r\n def check_my_os( self ):\r\n if ((sys.platform=='linux2') or (sys.platform=='linux')): return 1\r\n elif sys.platform=='win32': return 2\r\n else: return 3\r\n\r\n def my_timestamp( self ):\r\n if (self.check_my_os() == 1):\r\n cmd = \"date +%s\"\r\n return( int(os.popen(cmd).read()) )\r\n\r\n def take_a_picture_now( self,flag ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n if (flag == 1):\r\n cmd='/home/pi/cams/SonyTEST32/take_picture/RemoteCli ' \r\n c = os.popen(cmd)\r\n print(c.read())\r\n flag = 2\r\n # fastGlobals.take_picture = 2\r\n print(f\"\\033[36m Took the picture {flag}\")\r\n return 2\r\n return flag\r\n \r\n def set_sony_iso_orig( self, isoVal ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n isoValArg=str(isoVal)\r\n cmd='/home/pi/cams/SonyTEST32/set_iso/RemoteCli ' + isoValArg\r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"ISO_Mode\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this 
string in the output\r\n output = p2.communicate()[0]\r\n print(f\"{output} \\n returned to shell {p2.returncode}\")\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('ISO_Format') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0)\r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\t\r\n\r\n def set_sony_iso( self, isoVal ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n isoValArg=str(isoVal)\r\n cmd='/home/pi/cams/SonyTEST32/set_iso/RemoteCli ' + isoValArg\r\n args = shlex.split(cmd)\r\n\r\n # CONSIDER :: because we have to trap various errors and act on them we had to parse the output for other things i might modify CameraDevice.cpp to\r\n # cover it all and exit as quick as possible using return codes but i couldnt succeed to read them all successfully using .returncode property\r\n # with the pipe attached as above and we dont want too many delays\r\n #\r\n s=subprocess.run( args, stdout=subprocess.PIPE )\r\n output=s.stdout\r\n #s.stdout.close()\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n\r\n # alternative using file........\r\n #\r\n # with open('out.txt','w+') as fout:\r\n # s=subprocess.call(args, stdout=fout)\r\n # fout.seek(0)\r\n # output=fout.read()\r\n # a = shlex.split(output)\r\n # fout.close()\r\n \r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n\r\n # new error handler sends a unique reply which means count these errors and then force a reset of the usb\r\n #\r\n if ( not(z.find(\"No cameras detected\") == -1) or not(z.find(\"Failed to get\") == -1)): \r\n self.error_counts.value += 1\r\n print(f\"\\033[31m Error Reading from Camera USB Link {self.error_counts.value} \\033[0m\")\r\n return answers\r\n\r\n # look for the not writable option\r\n #\r\n if not(z.find(\"not writable\") == -1):\r\n print(\"\\033[31m This option is not writable \\033[0m \")\r\n answers.append(-1) \r\n answers.append(-1) \r\n answers.append(0) \r\n answers.append(\"CAN_NOT_WRITE\") \r\n return answers\r\n \r\n for xx in a:\r\n if xx.find('ISO_Format') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0)\r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\t\r\n \r\n def set_sony_aperture_orig( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_aperture/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Aperture_Val\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Aperture_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\t\r\n\r\n def set_sony_aperture( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_aperture/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n\r\n # CONSIDER :: because we have to trap various errors and act on them we had to parse the output for other things i might modify CameraDevice.cpp to\r\n # cover it all and exit as quick as possible using return codes but i couldnt succeed to read them all successfully using .returncode property\r\n # with the pipe attached as above and we dont want too many delays\r\n #\r\n s=subprocess.run( args, stdout=subprocess.PIPE )\r\n output=s.stdout\r\n #s.stdout.close()\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n \r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n\r\n # new error handler sends a unique reply which means count these errors and then force a reset of the usb\r\n #\r\n if ( not(z.find(\"No cameras detected\") == -1) or not(z.find(\"Failed to get\") == -1)): \r\n self.error_counts.value += 1\r\n print(f\"\\033[31m Error Reading from Camera USB Link {self.error_counts.value} \\033[0m\")\r\n return answers\r\n\r\n # look for the not writable option\r\n #\r\n if not(z.find(\"not writable\") == -1):\r\n print(\"\\033[31m This option is not writable \\033[0m \")\r\n answers.append(-1) \r\n answers.append(-1) \r\n answers.append(0) \r\n answers.append(\"CAN_NOT_WRITE\") \r\n return answers\r\n \r\n for xx in a:\r\n if xx.find('Aperture_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0)\r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n \r\n def set_sony_ex_pro_orig( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_ex_pro/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Exposure_Program_Value\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, 
stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Exposure_Program_Value') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\t\r\n\r\n def set_sony_ex_pro( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_ex_pro/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n\r\n # CONSIDER :: because we have to trap various errors and act on them we had to parse the output for other things i might modify CameraDevice.cpp to\r\n # cover it all and exit as quick as possible using return codes but i couldnt succeed to read them all successfully using .returncode property\r\n # with the pipe attached as above and we dont want too many delays\r\n #\r\n s=subprocess.run( args, stdout=subprocess.PIPE )\r\n output=s.stdout\r\n #s.stdout.close()\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n \r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n\r\n # new error handler sends a unique reply which means count these errors and then force a reset of the usb\r\n #\r\n if ( not(z.find(\"No cameras detected\") == -1) or not(z.find(\"Failed to get\") == -1)): \r\n self.error_counts.value += 1\r\n print(f\"\\033[31m Error Reading from Camera USB Link {self.error_counts.value} \\033[0m\")\r\n return answers\r\n\r\n # look for the not writable option\r\n #\r\n if not(z.find(\"not writable\") == -1):\r\n print(\"\\033[31m This option is not writable \\033[0m \")\r\n answers.append(-1) \r\n answers.append(-1) \r\n answers.append(0) \r\n answers.append(\"CAN_NOT_WRITE\") \r\n return answers\r\n \r\n for xx in a:\r\n if xx.find('Exposure_Program_Value') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0)\r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n \r\n def set_sony_focus_orig( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_focus/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Focus_Val\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Focus_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def set_sony_focus( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_focus/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n\r\n # CONSIDER :: because we have to trap various errors and act on them we had to parse the output for other things i might modify CameraDevice.cpp to\r\n # cover it all and exit as quick as possible using return codes but i couldnt succeed to read them all successfully using .returncode property\r\n # with the pipe attached as above and we dont want too many delays\r\n #\r\n s=subprocess.run( args, stdout=subprocess.PIPE )\r\n output=s.stdout\r\n #s.stdout.close()\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n \r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n\r\n # new error handler sends a unique reply which means count these errors and then force a reset of the usb\r\n #\r\n if ( not(z.find(\"No cameras detected\") == -1) or not(z.find(\"Failed to get\") == -1)): \r\n self.error_counts.value += 1\r\n print(f\"\\033[31m Error Reading from Camera USB Link {self.error_counts.value} \\033[0m\")\r\n return answers\r\n\r\n # look for the not writable option\r\n #\r\n if not(z.find(\"not writable\") == -1):\r\n print(\"\\033[31m This option is not writable \\033[0m \")\r\n answers.append(-1) \r\n answers.append(-1) \r\n answers.append(0) \r\n answers.append(\"CAN_NOT_WRITE\") \r\n return answers\r\n \r\n for xx in a:\r\n if xx.find('Focus_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0)\r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n \r\n def set_sony_focus_area_orig( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_fa/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Focus_Area_Val\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # 
output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Focus_Area_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def set_sony_focus_area( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_fa/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n\r\n # CONSIDER :: because we have to trap various errors and act on them we had to parse the output for other things i might modify CameraDevice.cpp to\r\n # cover it all and exit as quick as possible using return codes but i couldnt succeed to read them all successfully using .returncode property\r\n # with the pipe attached as above and we dont want too many delays\r\n #\r\n s=subprocess.run( args, stdout=subprocess.PIPE )\r\n output=s.stdout\r\n #s.stdout.close()\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n \r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n\r\n # new error handler sends a unique reply which means count these errors and then force a reset of the usb\r\n #\r\n if ( not(z.find(\"No cameras detected\") == -1) or not(z.find(\"Failed to get\") == -1)): \r\n self.error_counts.value += 1\r\n print(f\"\\033[31m Error Reading from Camera USB Link {self.error_counts.value} \\033[0m\")\r\n return answers\r\n\r\n # look for the not writable option\r\n #\r\n if not(z.find(\"not writable\") == -1):\r\n print(\"\\033[31m This option is not writable \\033[0m \")\r\n answers.append(-1) \r\n answers.append(-1) \r\n answers.append(0) \r\n answers.append(\"CAN_NOT_WRITE\") \r\n return answers\r\n \r\n for xx in a:\r\n if xx.find('Focus_Area_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0)\r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n \r\n def set_sony_shutter_orig( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_shutter/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Shutter_Value\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n zz = z.replace(\"\\\"\",\"\") # get rid of the inch symbol it will crash us\r\n a = shlex.split(zz) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
\r\n def set_sony_shutter_orig( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_shutter/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Shutter_Value\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if the check of the set value isn't working, look for \"cancelled\" in the program output)\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert the bytes output to an ascii string \r\n zz = z.replace(\"\\\"\",\"\") # remove the double-quote (seconds) symbol or shlex.split will choke on it\r\n a = shlex.split(zz) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser because the popen output sometimes missed the bracket at the start (odd??)\r\n # we get the value fields before and after the keyword and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Shutter_Value') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0)\r\n xx = xx.replace(\",\",\"\")\r\n vv = xx.strip(\"}\") # caters for a case seen in testing where the value carries a closing bracket e.g. 34}\r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def set_sony_shutter( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_shutter/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n\r\n # CONSIDER :: because we have to trap various errors and act on them we had to parse the output for other things. I might modify CameraDevice.cpp to\r\n # cover it all and exit as quickly as possible using return codes, but I couldn't read them all reliably via the .returncode property\r\n # with the pipe attached as above, and we don't want too many delays\r\n #\r\n s=subprocess.run( args, stdout=subprocess.PIPE )\r\n output=s.stdout\r\n #s.stdout.close()\r\n \r\n z = output.decode('ascii') # convert the bytes output to an ascii string \r\n zz = z.replace(\"\\\"\",\"\") # remove the double-quote (seconds) symbol or shlex.split will choke on it\r\n a = shlex.split(zz) # split this unique output into fields separated by commas\r\n \r\n #\r\n # Using this parser because the popen output sometimes missed the bracket at the start (odd??)\r\n # we get the value fields before and after the keyword and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n\r\n # the new error handler sends a unique reply, so count these errors and then force a reset of the usb\r\n #\r\n if ( not(z.find(\"No cameras detected\") == -1) or not(z.find(\"Failed to get\") == -1)): \r\n self.error_counts.value += 1\r\n print(f\"\\033[31m Error Reading from Camera USB Link {self.error_counts.value} \\033[0m\")\r\n return answers\r\n\r\n # look for the not writable option\r\n #\r\n if not(z.find(\"not writable\") == -1):\r\n print(\"\\033[31m This option is not writable \\033[0m \")\r\n answers.append(-1)\r\n answers.append(-1)\r\n answers.append(0)\r\n answers.append(\"CAN_NOT_WRITE\")\r\n return answers\r\n \r\n for xx in a:\r\n if xx.find('Shutter_Value') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0)\r\n xx = xx.replace(\",\",\"\")\r\n vv = xx.strip(\"}\") # caters for a case seen in testing where the value carries a closing bracket e.g. 34}\r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n \r\n def set_sony_white_bal_orig( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_wb/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"White_Bal_Val\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if the check of the set value isn't working, look for \"cancelled\" in the program output)\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = 
subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('White_Bal_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def set_sony_white_bal( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_wb/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n\r\n # CONSIDER :: because we have to trap various errors and act on them we had to parse the output for other things i might modify CameraDevice.cpp to\r\n # cover it all and exit as quick as possible using return codes but i couldnt succeed to read them all successfully using .returncode property\r\n # with the pipe attached as above and we dont want too many delays\r\n #\r\n s=subprocess.run( args, stdout=subprocess.PIPE )\r\n output=s.stdout\r\n #s.stdout.close()\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n \r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n\r\n # new error handler sends a unique reply which means count these errors and then force a reset of the usb\r\n #\r\n if ( not(z.find(\"No cameras detected\") == -1) or not(z.find(\"Failed to get\") == -1)): \r\n self.error_counts.value += 1\r\n print(f\"\\033[31m Error Reading from Camera USB Link {self.error_counts.value} \\033[0m\")\r\n return answers\r\n\r\n # look for the not writable option\r\n #\r\n if not(z.find(\"not writable\") == -1):\r\n print(\"\\033[31m This option is not writable \\033[0m \")\r\n answers.append(-1) \r\n answers.append(-1) \r\n answers.append(0) \r\n answers.append(\"CAN_NOT_WRITE\") \r\n return answers\r\n \r\n for xx in a:\r\n if xx.find('White_Bal_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0)\r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def set_sony_still_cap_orig( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_still_cap/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Still_Capture_Val\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Still_Capture_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def set_sony_still_cap( self, Val ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n ValArg=str(Val)\r\n cmd='/home/pi/cams/SonyTEST32/set_still_cap/RemoteCli ' + ValArg\r\n args = shlex.split(cmd)\r\n\r\n # CONSIDER :: because we have to trap various errors and act on them we had to parse the output for other things i might modify CameraDevice.cpp to\r\n # cover it all and exit as quick as possible using return codes but i couldnt succeed to read them all successfully using .returncode property\r\n # with the pipe attached as above and we dont want too many delays\r\n #\r\n s=subprocess.run( args, stdout=subprocess.PIPE )\r\n output=s.stdout\r\n #s.stdout.close()\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n \r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n\r\n # new error handler sends a unique reply which means count these errors and then force a reset of the usb\r\n #\r\n if ( not(z.find(\"No cameras detected\") == -1) or not(z.find(\"Failed to get\") == -1)): \r\n self.error_counts.value += 1\r\n print(f\"\\033[31m Error Reading from Camera USB Link {self.error_counts.value} \\033[0m\")\r\n return answers\r\n\r\n # look for the not writable option\r\n #\r\n if not(z.find(\"not writable\") == -1):\r\n print(\"\\033[31m This option is not writable \\033[0m \")\r\n answers.append(-1) \r\n answers.append(-1) \r\n answers.append(0) \r\n answers.append(\"CAN_NOT_WRITE\") \r\n return answers\r\n \r\n for xx in a:\r\n if xx.find('Still_Capture_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0)\r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n \r\n def get_sony_still_cap_mode( self ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n cmd='/home/pi/cams/SonyTEST32/still_cap_mode/RemoteCli ' \r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Still_Capture_Val\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # 
output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Still_Capture_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n \r\n def get_sony_white_balance( self ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n cmd='/home/pi/cams/SonyTEST32/white_bal/RemoteCli ' \r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"White_Bal_Val\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('White_Bal_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def get_sony_ex_pro( self ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n cmd='/home/pi/cams/SonyTEST32/exp_pro_mode/RemoteCli ' \r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Exposure_Program_Value\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Exposure_Program_Value') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def get_sony_aperture( self ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n cmd='/home/pi/cams/SonyTEST32/get_aperture/RemoteCli ' \r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Aperture_Val\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Aperture_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def get_sony_focus( self ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n cmd='/home/pi/cams/SonyTEST32/get_focus/RemoteCli ' \r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Focus_Val\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Focus_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def get_sony_focus_area( self ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n cmd='/home/pi/cams/SonyTEST32/get_focus_dist/RemoteCli ' \r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Focus_Area_Val\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Focus_Area_Val') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def get_sony_iso( self ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n cmd='/home/pi/cams/SonyTEST32/get_iso/RemoteCli ' \r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"ISO_Format\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n a = shlex.split(z) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) 
in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('ISO_Format') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n\r\n def get_sony_shut_spd( self ):\r\n\r\n # run the API command in the shell and look for the descriptor for the field\r\n #\r\n cmd='/home/pi/cams/SonyTEST32/get_shutter/RemoteCli ' \r\n args = shlex.split(cmd)\r\n s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n p2 = subprocess.Popen([\"grep\", \"Shutter_Value\"], stdin=s.stdout, stdout=subprocess.PIPE)\t # look for only this string in the output\r\n output = p2.communicate()[0]\r\n print(output)\r\n s.stdout.close()\r\n # consider if needed (if check of setval isnt working look for \"cancelled\" in the program output\r\n # \r\n # s=subprocess.Popen(args, stdout=subprocess.PIPE)\r\n # p3 = subprocess.Popen([\"grep\", \"cancelled\"], stdin=s.stdout, stdout=subprocess.PIPE)\r\n # output2 = p3.communicate()[0]\r\n \r\n z = output.decode('ascii') # convert bytes array output to ascii string \r\n zz = z.replace(\"\\\"\",\"\") # remove quoate mark meaning seconds\r\n a = shlex.split(zz) # split this unique output into fields separated by commas\r\n #\r\n # Using this parser as it sometimes missed the bracket at the start (odd??) in the popen output\r\n # we get the value fields before and after and return that list\r\n #\r\n itemNo = 0\r\n idx = 99999\r\n answers = []\r\n for xx in a:\r\n if xx.find('Shutter_Value') > -1:\r\n idx = itemNo\r\n else:\r\n if (idx != 99999):\r\n if xx.find(':') > -1:\r\n idx = itemNo\r\n else:\r\n if not (xx.isdigit()):\r\n if xx.find(\"AUTO\") > -1:\r\n xx = str(0) \r\n xx = xx.replace(\",\",\"\") \r\n vv = xx.strip(\"}\") # caters for a case in testing where i have closing bracket 34} \r\n answers.append(vv)\r\n idx = 99999\r\n itemNo += 1\r\n return answers\r\n \r\n # ======================= new additions to the class ================================================\r\n\r\n def setSonyObjData( self, mem, camDataPointVal, Timeout = 100 ):\r\n\r\n if not (mem.set_value(camDataPointVal, mem.STATE_CAM_WRITING, Timeout) == True):\r\n print(\"\\033[31m value has not been successfully set \\033[0m\") \r\n return False\r\n else:\r\n return True\r\n \r\n def initSonyCamExProData( self ):\r\n \r\n ans = self.get_sony_ex_pro( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" Exposure Prog Mode = {ans}\")\r\n try:\r\n SonyObject = memoryValue('S_EX_PRO_MODE',int(ans[0]))\r\n with SonyObject.updateNeeded.get_lock():\r\n SonyObject.updateNeeded.value = True \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the object to initial value : %s \\033[0m\" % (err_msg))\r\n SonyObject = memoryValue('S_EX_PRO_MODE',0) \r\n else:\r\n print(\"\\033[31m Failed get the camera ExPro \\033[0m\")\r\n SonyObject = memoryValue('S_EX_PRO_MODE',0) \r\n else:\r\n print(\"\\033[31m Cant get Exposure Prog Mode \\033[0m\")\r\n SonyObject = memoryValue('S_EX_PRO_MODE',0)\r\n\r\n SonyObject.index = memoryValue.numberOfVals \r\n print(f\"Expro : created object number : {SonyObject.index}\")\r\n return SonyObject\r\n\r\n def getSonyCamExProData( self, 
mem ):\r\n \r\n ret = False\r\n ans = self.get_sony_ex_pro( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" exposure program mode = {ans}\")\r\n try:\r\n ret = self.setSonyObjData( mem, int(ans[0]) ) \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the object to initial value : %s \\033[0m\" % (err_msg))\r\n else:\r\n print(\"\\033[31m Failed get the exposure program mode\\033[0m \")\r\n else:\r\n print(\"\\033[31m Cant get Exposure Prog Mode \\033[0m\") \r\n return ret\r\n \r\n def initSonyApertureData( self ):\r\n \r\n ans = self.get_sony_aperture( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" Aperture = {ans}\")\r\n try:\r\n SonyObject = memoryValue('S_APERTURE',int(ans[0]))\r\n with SonyObject.updateNeeded.get_lock():\r\n SonyObject.updateNeeded.value = True \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the object to initial value : %s \\033[0m\" % (err_msg))\r\n SonyObject = memoryValue('S_APERTURE',0) \r\n else:\r\n print(\"\\033[31m Failed get the camera aperture \\033[0m\")\r\n SonyObject = memoryValue('S_APERTURE',0) \r\n else:\r\n print(\"\\033[31m Cant get Aperture \\033[0m\")\r\n SonyObject = memoryValue('S_APERTURE',0) \r\n\r\n SonyObject.index = memoryValue.numberOfVals \r\n print(f\"Aperture : created object number : {SonyObject.index}\") \r\n return SonyObject\r\n\r\n def getSonyApertureData( self, mem ):\r\n \r\n ret = False\r\n ans = self.get_sony_aperture( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" aperture = {ans}\")\r\n try:\r\n ret = self.setSonyObjData( mem, int(ans[0]) ) \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the object to initial value : %s \\033[0m\" % (err_msg))\r\n else:\r\n print(\"\\033[31m Failed get the aperture \\033[0m\")\r\n else:\r\n print(\"\\033[31m Cant get aperture \\033[0m\") \r\n return ret\r\n \r\n def initSonyCamFocusData( self ): ###### @@11\r\n \r\n ans = self.get_sony_focus( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" Focus Mode = {ans}\")\r\n try:\r\n SonyObject = memoryValue('S_FOCUS_MODE',int(ans[0]))\r\n with SonyObject.updateNeeded.get_lock():\r\n SonyObject.updateNeeded.value = True \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the object to initial value : %s \\033[0m\" % (err_msg))\r\n SonyObject = memoryValue('S_FOCUS_MODE',0) \r\n else:\r\n print(\"\\033[31m Failed get the camera focus mode \\033[0m\")\r\n SonyObject = memoryValue('S_FOCUS_MODE',0) \r\n else:\r\n print(\"\\033[31m Cant get Focus Mode \\033[0m\")\r\n SonyObject = memoryValue('S_FOCUS_MODE',0) \r\n\r\n SonyObject.index = memoryValue.numberOfVals \r\n print(f\"FocusData : created object number : {SonyObject.index}\") \r\n return SonyObject\r\n\r\n def getSonyCamFocusData( self, mem ):\r\n \r\n ret = False\r\n ans = self.get_sony_focus( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" focus = {ans}\")\r\n try:\r\n ret = self.setSonyObjData( mem, int(ans[0]) ) \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the focus mode object to initial value : %s \\033[0m\" % (err_msg))\r\n else:\r\n print(\"\\033[31m Failed get the focus mode \\033[0m\")\r\n else:\r\n print(\"\\033[31m Cant get focus mode \\033[0m\") \r\n return ret\r\n \r\n def initSonyCamFocusAreaData( self ):\r\n \r\n ans = self.get_sony_focus_area( )\r\n\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" Focus Area = {ans}\")\r\n try:\r\n SonyObject = memoryValue('S_FOCUS_AREA',int(ans[0]))\r\n with 
SonyObject.updateNeeded.get_lock():\r\n SonyObject.updateNeeded.value = True \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the focus area object to initial value : %s \\033[0m\" % (err_msg))\r\n SonyObject = memoryValue('S_FOCUS_AREA',0) \r\n else:\r\n print(\"\\033[31m Failed get the camera focus area \\033[0m\")\r\n SonyObject = memoryValue('S_FOCUS_AREA',0) \r\n else:\r\n print(\"\\033[31m Cant get Focus Mode \\033[0m\")\r\n SonyObject = memoryValue('S_FOCUS_AREA',0) \r\n\r\n SonyObject.index = memoryValue.numberOfVals \r\n print(f\"Focus Area : created object number : {SonyObject.index}\") \r\n return SonyObject \r\n\r\n def getSonyCamFocusAreaData( self, mem ):\r\n \r\n ret = False\r\n ans = self.get_sony_focus_area( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\"\\033[33m FOCUS AREA = {ans} \\033[0m\")\r\n try:\r\n ret = self.setSonyObjData( mem, int(ans[0]) ) \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the focus area object to initial value : %s \\033[0m\" % (err_msg))\r\n else:\r\n print(\"\\033[31m Failed get the focus area \\033[0m\")\r\n else:\r\n print(\"\\033[31m Cant get focus area \") \r\n return ret\r\n \r\n def initSonyCamISOData( self ):\r\n \r\n ans = self.get_sony_iso( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" ISO = {ans}\")\r\n try:\r\n SonyObject = memoryValue('S_ISO',int(ans[0]))\r\n with SonyObject.updateNeeded.get_lock():\r\n SonyObject.updateNeeded.value = True \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the iso object to initial value : %s \\033[0m\" % (err_msg))\r\n SonyObject = memoryValue('S_ISO',0) \r\n else:\r\n print(\"\\033[31m Failed get the camera iso \\033[0m\")\r\n SonyObject = memoryValue('S_ISO',0) \r\n else:\r\n print(\"\\033[31m Cant get ISO \\033[0m\")\r\n SonyObject = memoryValue('S_ISO',0) \r\n\r\n SonyObject.index = memoryValue.numberOfVals \r\n print(f\"ISO : created object number : {SonyObject.index}\") \r\n return SonyObject \r\n\r\n def getSonyCamISOData( self, mem ):\r\n \r\n ret = False\r\n ans = self.get_sony_iso( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" ISO = {ans}\")\r\n try:\r\n ret = self.setSonyObjData( mem, int(ans[0]) ) \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the iso object to initial value : %s \\033[0m\" % (err_msg))\r\n else:\r\n print(\"\\033[31m Failed get the iso \\033[0m\")\r\n else:\r\n print(\"\\033[31m Cant get iso \\033[0m\") \r\n return ret\r\n \r\n def initSonyCamShutSpdData( self ):\r\n \r\n ans = self.get_sony_shut_spd( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" Shutter Speed = {ans}\")\r\n try:\r\n SonyObject = memoryValue('S_SHUT_SPD',int(ans[0]))\r\n with SonyObject.updateNeeded.get_lock():\r\n SonyObject.updateNeeded.value = True \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the shut spd object to initial value : %s \\033[0m\" % (err_msg))\r\n SonyObject = memoryValue('S_SHUT_SPD',0) \r\n else:\r\n print(\"\\033[31m Failed get the camera shutter speed \\033[0m\")\r\n SonyObject = memoryValue('S_SHUT_SPD',0) \r\n else:\r\n print(\"\\033[31m Cant get Shutter Speed \\033[0m\")\r\n SonyObject = memoryValue('S_SHUT_SPD',0)\r\n \r\n SonyObject.index = memoryValue.numberOfVals \r\n print(f\"Shut Speed : created object number : {SonyObject.index}\") \r\n return SonyObject \r\n\r\n def getSonyCamShutSpdData( self, mem ):\r\n \r\n ret = False\r\n ans = self.get_sony_shut_spd( )\r\n if not (ans is None):\r\n if (len(ans) > 
0):\r\n print(f\" Shutter Speed = {ans}\")\r\n try:\r\n ret = self.setSonyObjData( mem, int(ans[0]) ) \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the object to initial value : %s \\033[0m\" % (err_msg))\r\n else:\r\n print(\"\\033[31m Failed get the shutter speed \\033[0m\")\r\n else:\r\n print(\"\\033[31m Cant get shutter speed \\033[0m\") \r\n return ret\r\n \r\n def initSonyCamWhiteBalaData( self ):\r\n \r\n ans = self.get_sony_white_balance( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" White Balance = {ans}\")\r\n try:\r\n SonyObject = memoryValue('S_WHITE_BAL',int(ans[0]))\r\n with SonyObject.updateNeeded.get_lock():\r\n SonyObject.updateNeeded.value = True \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the object to initial value : %s \\033[0m\" % (err_msg))\r\n SonyObject = memoryValue('S_WHITE_BAL',0) \r\n else:\r\n print(\"\\033[31m Failed get the camera white balance \\033[0m\")\r\n SonyObject = memoryValue('S_WHITE_BAL',0) \r\n else:\r\n print(\"\\033[31m Cant get Shutter Speed \\033[0m\")\r\n SonyObject = memoryValue('S_WHITE_BAL',0) \r\n\r\n SonyObject.index = memoryValue.numberOfVals \r\n print(f\"White Balance : created object number : {SonyObject.index}\") \r\n return SonyObject \r\n\r\n def getSonyCamWhiteBalaData( self, mem ):\r\n \r\n ret = False\r\n ans = self.get_sony_white_balance( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" White Balance = {ans}\")\r\n try:\r\n ret = self.setSonyObjData( mem, int(ans[0]) ) \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the white balance object to initial value : %s \\033[0m\" % (err_msg))\r\n else:\r\n print(\"\\033[31m Failed get the camera white balance \\033[0m\")\r\n else:\r\n print(\"\\033[31m Cant get white balance \\033[0m\") \r\n return ret\r\n \r\n def initSonyCamStillCapModeData( self ):\r\n \r\n ans = self.get_sony_still_cap_mode( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" Still Cap Mode = {ans}\")\r\n try:\r\n SonyObject = memoryValue('S_STILL_CAP',int(ans[0]))\r\n with SonyObject.updateNeeded.get_lock():\r\n SonyObject.updateNeeded.value = True \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the object to initial value : %s \\033[0m\" % (err_msg))\r\n SonyObject = memoryValue('S_STILL_CAP',0) \r\n else:\r\n print(\"\\033[31m Failed get the camera still capture mode \\033[0m\")\r\n SonyObject = memoryValue('S_STILL_CAP',0) \r\n else:\r\n print(\"\\033[31m Cant get Still Capture Mode \\033[0m\")\r\n SonyObject = memoryValue('S_STILL_CAP',0) \r\n\r\n SonyObject.index = memoryValue.numberOfVals \r\n print(f\"Still Cap Mode : created object number : {SonyObject.index}\") \r\n return SonyObject \r\n\r\n def getSonyCamStillCapModeData( self, mem ):\r\n \r\n ret = False\r\n ans = self.get_sony_still_cap_mode( )\r\n if not (ans is None):\r\n if (len(ans) > 0):\r\n print(f\" Still Cap Mode = {ans}\")\r\n try:\r\n ret = self.setSonyObjData( mem, int(ans[0]) ) \r\n except Exception as err_msg:\r\n print(\"\\033[31m Failed set the object to initial value : %s \\033[0m\" % (err_msg))\r\n else:\r\n print(\"\\033[31m Failed get the camera still capture mode \\033[0m\")\r\n else:\r\n print(\"\\033[31m Cant get still cap mode \\033[0m\") \r\n return ret \r\n\r\n def enumerate_still_cap_sony_a7( self, num ):\r\n\r\n enum_num = 0\r\n enum_num_state = True\r\n if num == 65543:\r\n enum_num = 2\r\n elif num == 1:\r\n enum_num = 0\r\n elif num == 65540:\r\n enum_num = 1\r\n elif num == 65537:\r\n 
enum_num = 3\r\n elif num == 65538:\r\n enum_num = 4\r\n elif num == 196611:\r\n enum_num = 5\r\n elif num == 196610:\r\n enum_num = 6\r\n elif num == 196609:\r\n enum_num = 7\r\n elif num == 524289:\r\n enum_num = 8\r\n elif num == 524290:\r\n enum_num = 9\r\n elif num == 524293:\r\n enum_num = 10\r\n elif num == 524294:\r\n enum_num = 11\r\n elif num == 524251:\r\n enum_num = 12\r\n elif num == 524292:\r\n enum_num = 13\r\n elif ((num >= 262913) and (num <= 262928)):\r\n enum_num = 14 + (num-262913)\r\n elif ((num >= 327681) and (num <= 327696)):\r\n enum_num = 30 + (num-327681)\r\n elif num == 393218:\r\n enum_num = 46\r\n elif num == 393217:\r\n enum_num = 47\r\n elif num == 458754:\r\n enum_num = 48\r\n elif num == 458753:\r\n enum_num = 49\r\n else:\r\n enum_num_state = False\r\n return enum_num_state, enum_num\r\n\r\n def enumerate_aperture_sony_a7( self, num ):\r\n\r\n enum_num = 0\r\n enum_num_state = True\r\n if num == 280:\r\n enum_num = 0\r\n elif num == 320:\r\n enum_num = 1\r\n elif num == 350:\r\n enum_num = 2\r\n elif num == 400:\r\n enum_num = 3\r\n elif num == 450:\r\n enum_num = 4 \r\n elif num == 500:\r\n enum_num = 5 \r\n elif num == 560:\r\n enum_num = 6 \r\n elif num == 630:\r\n enum_num = 7 \r\n elif num == 710:\r\n enum_num = 8 \r\n elif num == 800:\r\n enum_num = 9 \r\n elif num == 900:\r\n enum_num = 10 \r\n elif num == 1000:\r\n enum_num = 11 \r\n elif num == 1100:\r\n enum_num = 12 \r\n elif num == 1300:\r\n enum_num = 13 \r\n elif num == 1400:\r\n enum_num = 14 \r\n elif num == 1600:\r\n enum_num = 15 \r\n elif num == 1800:\r\n enum_num = 16 \r\n elif num == 2000:\r\n enum_num = 17 \r\n elif num == 2200:\r\n enum_num = 18 \r\n else:\r\n enum_num_state = False\r\n return enum_num_state, enum_num\r\n\r\n\r\n def enumerate_iso_sony_a7( self, num ):\r\n\r\n enum_num = 0\r\n enum_num_state = True\r\n if num == 0:\r\n enum_num = 0\r\n elif num == 50:\r\n enum_num = 1\r\n elif num == 64:\r\n enum_num = 2\r\n elif num == 80:\r\n enum_num = 3\r\n elif num == 100:\r\n enum_num = 4\r\n elif num == 125:\r\n enum_num = 5\r\n elif num == 160:\r\n enum_num = 6\r\n elif num == 200:\r\n enum_num = 7\r\n elif num == 250:\r\n enum_num = 8\r\n elif num == 320:\r\n enum_num = 9\r\n elif num == 400:\r\n enum_num = 10\r\n elif num == 500:\r\n enum_num = 11\r\n elif num == 640:\r\n enum_num = 12\r\n elif num == 800:\r\n enum_num = 13\r\n elif num == 1000:\r\n enum_num = 14\r\n elif num == 1250:\r\n enum_num = 15\r\n elif num == 1600:\r\n enum_num = 16\r\n elif num == 2000:\r\n enum_num = 17\r\n elif num == 2500:\r\n enum_num = 18\r\n elif num == 3200:\r\n enum_num = 19\r\n elif num == 4000:\r\n enum_num = 20\r\n elif num == 5000:\r\n enum_num = 21\r\n elif num == 6400:\r\n enum_num = 22\r\n elif num == 8000:\r\n enum_num = 23 \r\n elif num == 10000:\r\n enum_num = 24 \r\n elif num == 12800:\r\n enum_num = 25 \r\n elif num == 16000:\r\n enum_num = 26 \r\n elif num == 20000:\r\n enum_num = 27 \r\n elif num == 25600:\r\n enum_num = 28 \r\n elif num == 32000:\r\n enum_num = 29 \r\n elif num == 40000:\r\n enum_num = 30\r\n elif num == 51200:\r\n enum_num = 31 \r\n elif num == 64000:\r\n enum_num = 32 \r\n elif num == 80000:\r\n enum_num = 33 \r\n elif num == 102400:\r\n enum_num = 34 \r\n else:\r\n enum_num_state = False\r\n return enum_num_state, enum_num\r\n\r\n # Only works in movie mode\r\n #\r\n def enumerate_ex_pro_sony_a7( self, num ):\r\n\r\n enum_num = 0\r\n enum_num_state = True\r\n if num == 32850:\r\n enum_num = 2\r\n elif num == 32848:\r\n enum_num = 0\r\n elif num == 
32849:\r\n enum_num = 1\r\n elif num == 32851:\r\n enum_num = 3\r\n else:\r\n enum_num_state = False\r\n return enum_num_state, enum_num\r\n\r\n def enumerate_focus_area_sony_a7( self, num ):\r\n\r\n enum_num = 0\r\n enum_num_state = True\r\n if ((num >= 1) and (num <= 7)):\r\n enum_num = num - 1\r\n else:\r\n enum_num_state = False\r\n return enum_num_state, enum_num\r\n\r\n # To enable this ensure the physical switch on the lens has been set to AUTO\r\n # it seemed to give a different subset of the options list (4 or 5), solved in the camera \"c++\"\r\n #\r\n def enumerate_focus_sony_a7( self, num ):\r\n\r\n enum_num = 0\r\n enum_num_state = True\r\n if num == 2:\r\n enum_num = 0\r\n elif num == 4:\r\n enum_num = 1\r\n elif num == 3:\r\n enum_num = 2\r\n elif num == 6:\r\n enum_num = 3\r\n elif num == 1:\r\n enum_num = 4\r\n else:\r\n enum_num_state = False\r\n return enum_num_state, enum_num\r\n \r\n # after testing :: can this sometimes, under certain circumstances, shift by one ??\r\n # Bulb 0 = 0 - This has been done in the camera \"C++\"\r\n #\r\n def enumerate_shutter_sony_a7( self, num ):\r\n\r\n enum_num = 0\r\n enum_num_state = True\r\n # this occurs under certain conditions and\r\n # then they all shift down by one\r\n #\r\n if num == 0:\r\n enum_num = 0\r\n elif num == 19660810: # elif (not a second if) so that Bulb (0) does not fall through to the else below\r\n enum_num = 1\r\n elif num == 16384010:\r\n enum_num = 2\r\n elif num == 13107210:\r\n enum_num = 3\r\n elif num == 9830410:\r\n enum_num = 4\r\n elif num == 8519690:\r\n enum_num = 5\r\n elif num == 6553610:\r\n enum_num = 6\r\n elif num == 5242890:\r\n enum_num = 7\r\n elif num == 3932170:\r\n enum_num = 8\r\n elif num == 3276810:\r\n enum_num = 9\r\n elif num == 2621450:\r\n enum_num = 10\r\n elif num == 2097162:\r\n enum_num = 11\r\n elif num == 1638410:\r\n enum_num = 12\r\n elif num == 1310730:\r\n enum_num = 13\r\n elif num == 1048586:\r\n enum_num = 14\r\n elif num == 851978:\r\n enum_num = 15\r\n elif num == 655370:\r\n enum_num = 16\r\n elif num == 524298:\r\n enum_num = 17\r\n elif num == 393226:\r\n enum_num = 18\r\n elif num == 327690:\r\n enum_num = 19\r\n elif num == 262154:\r\n enum_num = 20\r\n elif num == 65539:\r\n enum_num = 21\r\n elif num == 65540:\r\n enum_num = 22\r\n elif num == 65541:\r\n enum_num = 23\r\n elif num == 65542:\r\n enum_num = 24\r\n elif num == 65544:\r\n enum_num = 25\r\n elif num == 65546:\r\n enum_num = 26\r\n elif num == 65549:\r\n enum_num = 27\r\n elif num == 65551:\r\n enum_num = 28\r\n elif num == 65556:\r\n enum_num = 29\r\n elif num == 65561:\r\n enum_num = 30\r\n elif num == 65566:\r\n enum_num = 31\r\n elif num == 65576:\r\n enum_num = 32\r\n elif num == 65586:\r\n enum_num = 33\r\n elif num == 65596:\r\n enum_num = 34\r\n elif num == 65616:\r\n enum_num = 35\r\n elif num == 65636:\r\n enum_num = 36\r\n elif num == 65661:\r\n enum_num = 37\r\n elif num == 65696:\r\n enum_num = 38\r\n elif num == 65736:\r\n enum_num = 39\r\n elif num == 65786:\r\n enum_num = 40\r\n elif num == 65856:\r\n enum_num = 41\r\n elif num == 65936:\r\n enum_num = 42\r\n elif num == 66036:\r\n enum_num = 43\r\n elif num == 66176:\r\n enum_num = 44\r\n elif num == 66336:\r\n enum_num = 45\r\n elif num == 66536:\r\n enum_num = 46\r\n elif num == 66786:\r\n enum_num = 47\r\n elif num == 67136:\r\n enum_num = 48\r\n elif num == 67536:\r\n enum_num = 49\r\n elif num == 68036:\r\n enum_num = 50\r\n elif num == 68736:\r\n enum_num = 51\r\n elif num == 69536:\r\n enum_num = 52\r\n elif num == 70536:\r\n enum_num = 53\r\n elif num == 71936:\r\n enum_num = 54\r\n elif num == 73536:\r\n enum_num = 55\r\n else:\r\n enum_num_state = False\r\n return enum_num_state, enum_num\r\n
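\r\n # NOTE (sketch) :: each enumerate_*_sony_a7 ladder above is a pure lookup table from the raw Sony property value to a small index. The same mapping reads more directly as a dict; the table below only restates the exposure-program values already used above. _SKETCH_EX_PRO_ENUM and _sketch_enumerate_ex_pro are hypothetical names and nothing in this class calls them.\r\n #\r\n _SKETCH_EX_PRO_ENUM = { 32848: 0, 32849: 1, 32850: 2, 32851: 3 }\r\n\r\n def _sketch_enumerate_ex_pro( self, num ):\r\n if num in self._SKETCH_EX_PRO_ENUM:\r\n return True, self._SKETCH_EX_PRO_ENUM[num]\r\n return False, 0\r\n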
\r\n def enumerate_white_bal_sony_a7( self, num ):\r\n\r\n enum_num = 0\r\n enum_num_state = True\r\n if num == 0:\r\n enum_num = 0\r\n elif num == 17:\r\n enum_num = 1\r\n elif num == 18:\r\n enum_num = 2\r\n elif num == 19:\r\n enum_num = 3\r\n elif num == 20:\r\n enum_num = 4\r\n elif num == 33:\r\n enum_num = 5\r\n elif num == 34:\r\n enum_num = 6\r\n elif num == 35:\r\n enum_num = 7\r\n elif num == 36:\r\n enum_num = 8\r\n elif num == 48:\r\n enum_num = 9\r\n elif num == 1:\r\n enum_num = 10\r\n elif num == 256:\r\n enum_num = 11\r\n elif num == 257:\r\n enum_num = 12\r\n elif num == 258:\r\n enum_num = 13\r\n elif num == 259:\r\n enum_num = 14\r\n else:\r\n enum_num_state = False\r\n return enum_num_state, enum_num\r\n
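\r\n # NOTE (sketch) :: the setSonyCam*Data methods below all follow the same state machine: (0) read the requested/previous value pair from the shared mavlink object, (1) if a new request arrived, validate it through the matching enumerate_*_sony_a7 table (an invalid value restores the previous data), then drive RemoteCli, and finally write the result back and update the shared-memory object. Their timeout arguments form a simple retry budget; a stand-alone illustration with the hypothetical name _sketch_retry_budget (not called anywhere in this class):\r\n #\r\n def _sketch_retry_budget( self, attempt, timeout=100, retries=1 ):\r\n # call attempt() until it succeeds or the budget (timeout * retries, consumed timeout per call) runs out\r\n budget = timeout * retries\r\n ok = False\r\n while (ok == False) and (budget > 0):\r\n ok = attempt(timeout)\r\n budget -= timeout # one attempt consumes one timeout slice\r\n return ok\r\n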
\r\n def setSonyCamISOData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):\r\n \r\n ret = False\r\n readSuccess = False\r\n print(\" =========== set sony cam iso ================ \")\r\n \r\n timeoutS1 = timeout1 * no_timeout1_retry\r\n\r\n # READ_TRIGGER (0)\r\n while (readSuccess == False) and (timeoutS1 > 0):\r\n reqDat, prevDat, readSuccess = mavObj.getVal_sony_iso(mavObj.STATE_CAM_READING,timeout1)\r\n timeoutS1 -= timeout1 # no retries\r\n print(f\"In iterator {readSuccess} {reqDat} {prevDat}\")\r\n \r\n print(f\"set to ISO r={reqDat} p={prevDat} time={timeout1} state={mavObj.state.value}\")\r\n \r\n # LOOK FOR CHANGE (1)\r\n if ((not (int(reqDat) == mavlinkSonyCamWriteVals.STATE_INIT) and not (int(reqDat) == int(prevDat))) and (readSuccess == True)):\r\n timeoutS2 = timeout2 * no_timeout2_retry\r\n ret,ee = self.enumerate_iso_sony_a7(int(reqDat))\r\n # INVALID CHANGE MADE (return to state == 0)\r\n if (ret == False):\r\n print(f\"\\033[31m Error Invalid parameter Iso {reqDat}\\033[0m\")\r\n writeSuccess = False\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_iso( int(prevDat),mavObj.STATE_CAM_WRITING,mavObj.DONT_WRITE_PREV_DATA,timeout2 ) \r\n timeoutS2 -= timeout2 # no retries \r\n return writeSuccess\r\n print(f\"enumeration value for iso {ee} req {reqDat}\")\r\n # MAKE CHANGE CAMERA RANK1 \r\n ans = self.set_sony_iso( ee ) ### MPI_Modification ::: will have to set a GLOBAL_STATE which is used as a state machine\r\n ### this has to wait for MPI on the other rank at this point\r\n ### when the rank replies we do the steps below. \r\n # SEND_MPI_TO_RANK1 (5)\r\n # DO ACTION IN RANK1 (6)\r\n # READ MPI_REPLY FROM RANK1 (7)\r\n \r\n # PROCESS RANK1_REPLY (8)\r\n if not (ans is None): \r\n writeSuccess = False \r\n wpWrite = False \r\n if (len(ans)==0):\r\n print(\"setting iso : the length of the command return was zero\")\r\n return ret \r\n if (len(ans)==4): # that's what we return for a non-writable value\r\n if not (ans[3].find(\"CAN_NOT_WRITE\") == -1): # if we are told we can't write it, we reset the request\r\n writeSuccess = mavObj.clearReq_sony_iso( mavObj.STATE_CAM_WRITING )\r\n while wpWrite == False:\r\n wpWrite = mavObj.set_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_ISO )\r\n return writeSuccess\r\n print(f\" \\033[32m set the ISO from/to :: {ans} \\033[0m\") \r\n try:\r\n if ( int(ans[1]) == int(reqDat) ) :\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_iso( int(ans[1]), mavObj.STATE_CAM_WRITING, mavObj.WRITE_PREV_DATA,timeout2 ) \r\n timeoutS2 -= timeout2 # no retries \r\n print(f\" write {writeSuccess}\")\r\n else:\r\n print(f\" value mismatch : camera returned {ans[1]} but requested {reqDat}\")\r\n except Exception as err_msg: \r\n print(\"\\033[31m write sony iso failed to set iso \\033[0m\") \r\n if ( writeSuccess == True ):\r\n while wpWrite == False:\r\n wpWrite = mavObj.clear_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_ISO )\r\n ret = self.setSonyObjData( mem, int(ans[1]) ) \r\n else:\r\n ret = ( int(prevDat) == int(reqDat) )\r\n if ret == False:\r\n print(f\"\\033[32m timeout error trying to set iso to \\033[4;42;31m {reqDat} \\033[0m\")\r\n return ret\r\n
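\r\n # NOTE (sketch) :: when RemoteCli answers with the four element CAN_NOT_WRITE sentinel, the writers clear the pending request and raise the matching write-protect flag (WriPro_*) so the value is not re-sent; the flag is cleared again on the next successful write. An assumed helper for the sentinel test, with the hypothetical name _sketch_is_not_writable (not called anywhere in this class):\r\n #\r\n def _sketch_is_not_writable( self, ans ):\r\n # True when a set_sony_* reply is the sentinel [-1, -1, 0, \"CAN_NOT_WRITE\"] built by the error handler\r\n return (len(ans) == 4) and (str(ans[3]).find(\"CAN_NOT_WRITE\") > -1)\r\n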
\r\n def setSonyCamApertureData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):\r\n \r\n ret = False\r\n readSuccess = False\r\n print(\" =========== set sony cam aperture ================ \")\r\n \r\n timeoutS1 = timeout1 * no_timeout1_retry\r\n\r\n #\r\n while (readSuccess == False) and (timeoutS1 > 0):\r\n reqDat, prevDat, readSuccess = mavObj.getVal_sony_aperture(mavObj.STATE_CAM_READING,timeout1)\r\n timeoutS1 -= timeout1 # no retries\r\n \r\n if ((not (int(reqDat) == mavObj.STATE_INIT) and not (int(reqDat) == int(prevDat))) and (readSuccess == True)):\r\n timeoutS2 = timeout2 * no_timeout2_retry\r\n ret,e = self.enumerate_aperture_sony_a7(int(reqDat))\r\n if (ret == False):\r\n print(f\"\\033[31m Error Invalid parameter aperture {reqDat}\\033[0m\")\r\n writeSuccess = False\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_aperture(int(prevDat),mavObj.STATE_CAM_WRITING,mavObj.DONT_WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n return writeSuccess\r\n ans = self.set_sony_aperture( e ) \r\n if not (ans is None):\r\n writeSuccess = False\r\n wpWrite = False \r\n if (len(ans)==0):\r\n print(\"length of the command return was zero\")\r\n return ret \r\n if (len(ans)==4): # that's what we return for a non-writable value\r\n if not (ans[3].find(\"CAN_NOT_WRITE\") == -1): # if we are told we can't write it, we reset the request\r\n writeSuccess = mavObj.clearReq_sony_aperture( mavObj.STATE_CAM_WRITING )\r\n while wpWrite == False:\r\n wpWrite = mavObj.set_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_APER )\r\n return writeSuccess\r\n print(f\" \\033[32m set the Aperture from/to :: {ans} \\033[0m\") \r\n try:\r\n if ( int(ans[1]) == int(reqDat) ) :\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_aperture(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n except Exception as err_msg: \r\n print(\"\\033[31m write sony aperture failed to set aperture \\033[0m\") \r\n if ( writeSuccess == True ):\r\n while wpWrite == False:\r\n wpWrite = mavObj.clear_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_APER )\r\n ret = self.setSonyObjData( mem, int(ans[1]) ) \r\n else:\r\n ret = ( int(prevDat) == int(reqDat) )\r\n if ret == False:\r\n print(f\"\\033[32m timeout error trying to set aperture to \\033[4;42;31m {reqDat}\\033[0m\") \r\n return ret \r\n\r\n def setSonyCamExProData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):\r\n \r\n ret = False\r\n readSuccess = False\r\n print(\" =========== set sony ex pro ================ \")\r\n \r\n timeoutS1 = timeout1 * no_timeout1_retry\r\n\r\n #\r\n while (readSuccess == False) and (timeoutS1 > 0):\r\n reqDat, prevDat, readSuccess = mavObj.getVal_sony_ex_pro(mavObj.STATE_CAM_READING,timeout1)\r\n timeoutS1 -= timeout1\r\n \r\n if ((not (int(reqDat) == mavObj.STATE_INIT) and not (int(reqDat) == int(prevDat))) and (readSuccess == True)):\r\n timeoutS2 = timeout2 * no_timeout2_retry\r\n ret,ee = self.enumerate_ex_pro_sony_a7(int(reqDat))\r\n if (ret == False):\r\n print(f\"\\033[31m Error Invalid parameter exposure program mode {reqDat}\\033[0m\")\r\n writeSuccess = False\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_ex_pro(int(prevDat),mavObj.STATE_CAM_WRITING,mavObj.DONT_WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n return writeSuccess \r\n ans = self.set_sony_ex_pro( ee ) \r\n if not (ans is None): \r\n writeSuccess = False \r\n wpWrite = False \r\n if (len(ans)==0):\r\n print(\"\\033[31m length of the command return was zero \\033[0m\")\r\n return ret \r\n if (len(ans)==4): # that's what we return for a non-writable value\r\n if not (ans[3].find(\"CAN_NOT_WRITE\") == -1): # if we are told we can't write it, we reset the request\r\n writeSuccess = mavObj.clearReq_sony_ex_pro( mavObj.STATE_CAM_WRITING )\r\n while wpWrite == False:\r\n wpWrite = mavObj.set_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_EX_PRO ) \r\n return writeSuccess \r\n print(f\" \\033[32m set the exPro from/to :: {ans} \\033[0m\") \r\n try:\r\n if ( int(ans[1]) == int(reqDat) ) :\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_ex_pro(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n except Exception as err_msg: \r\n print(\"\\033[31m write sony expro failed to set expro \\033[0m\") \r\n if ( writeSuccess == True ):\r\n while wpWrite == False:\r\n wpWrite = mavObj.clear_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_EX_PRO )\r\n ret = self.setSonyObjData( mem, int(ans[1]) ) \r\n else:\r\n ret = ( int(prevDat) == int(reqDat) )\r\n if ret == False:\r\n print(f\"\\033[32m timeout error trying to set 
expro to \\033[4;42;31m {reqDat} \\033[0m\") \r\n return ret\r\n\r\n def setSonyCamFocusData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):\r\n \r\n ret = False\r\n readSuccess = False\r\n print(\" =========== set sony focus mode ================ \")\r\n # \r\n timeoutS1 = timeout1 * no_timeout1_retry\r\n\r\n #\r\n while (readSuccess == False) and (timeoutS1 > 0):\r\n reqDat, prevDat, readSuccess = mavObj.getVal_sony_focus(mavObj.STATE_CAM_READING,timeout1)\r\n timeoutS1 -= timeout1\r\n \r\n if ((not (int(reqDat) == mavObj.STATE_INIT) and not (int(reqDat) == int(prevDat))) and (readSuccess == True)):\r\n timeoutS2 = timeout2 * no_timeout2_retry\r\n ret,ee = self.enumerate_focus_sony_a7(int(reqDat))\r\n if (ret == False):\r\n print(f\"\\033[31m Error Invalid parameter focus mode {reqDat}\\033[0m\")\r\n writeSuccess = False\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_focus(int(prevDat),mavObj.STATE_CAM_WRITING,mavObj.DONT_WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n return writeSuccess \r\n ans = self.set_sony_focus( ee ) \r\n if not (ans is None): \r\n writeSuccess = False\r\n wpWrite = False \r\n if (len(ans)==0):\r\n print(\"\\033[31m length of command return was zero \\033[0m\")\r\n return ret # \r\n if (len(ans)==4): # thats what we return for a non-writable value\r\n if not (ans[3].find(\"CAN_NOT_WRITE\") == -1): # if we get that we cant write it, we reset the request\r\n writeSuccess = mavObj.clearReq_sony_focus( mavObj.STATE_CAM_WRITING )\r\n while wpWrite == False:\r\n wpWrite = mavObj.set_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_FOCUS ) \r\n return writeSuccess \r\n print(f\" \\033[32m set the focus mode from/to :: {ans} \\033[0m\") \r\n try:\r\n if ( int(ans[1]) == int(reqDat) ) :\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_focus(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n except Exception as err_msg: \r\n print(\"\\033[31m write sony focus mode failed to set focus mode \\033[0m\") \r\n if ( writeSuccess == True ):\r\n while wpWrite == False:\r\n wpWrite = mavObj.clear_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_FOCUS )\r\n ret = self.setSonyObjData( mem, int(ans[1]) ) \r\n else:\r\n ret = ( int(prevDat) == int(reqDat) )\r\n if ret == False:\r\n print(f\"\\033[32m timeout error trying to set focus mode to \\033[4;42;31m {reqDat} \\033[0m\") \r\n return ret\r\n\r\n def setSonyCamFocusAreaData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):\r\n \r\n ret = False\r\n readSuccess = False\r\n print(\" =========== set sony focus area ================ \")\r\n # \r\n timeoutS1 = timeout1 * no_timeout1_retry\r\n \r\n #\r\n while (readSuccess == False) and (timeoutS1 > 0):\r\n reqDat, prevDat, readSuccess = mavObj.getVal_sony_focus_area(mavObj.STATE_CAM_READING,timeout1)\r\n timeoutS1 -= timeout1\r\n \r\n if ((not (int(reqDat) == mavObj.STATE_INIT) and not (int(reqDat) == prevDat)) and (readSuccess == True)):\r\n timeoutS2 = timeout2 * no_timeout2_retry\r\n ret,ee = self.enumerate_focus_area_sony_a7(int(reqDat))\r\n if (ret == False):\r\n print(f\"\\033[31m Error Invalid parameter focus area {reqDat}\\033[0m\")\r\n writeSuccess = False\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = 
mavObj.setVal_sony_focus_area(int(prevDat),mavObj.STATE_CAM_WRITING,mavObj.DONT_WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n return writeSuccess \r\n ans = self.set_sony_focus_area( ee ) \r\n if not (ans is None): \r\n writeSuccess = False \r\n wpWrite = False \r\n if (len(ans)==0):\r\n print(\"\\033[31m length of command return was zero \\033[0m\")\r\n return ret # \r\n if (len(ans)==4): # thats what we return for a non-writable value\r\n if not (ans[3].find(\"CAN_NOT_WRITE\") == -1): # if we get that we cant write it, we reset the request\r\n writeSuccess = mavObj.clearReq_sony_focus_area( mavObj.STATE_CAM_WRITING )\r\n while wpWrite == False:\r\n wpWrite = mavObj.set_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_FOCUSA ) \r\n return writeSuccess \r\n print(f\" \\033[32m set the focus area from/to :: {ans} \\033[0m\") \r\n try:\r\n if ( int(ans[1]) == int(reqDat) ) :\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_focus_area(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n except Exception as err_msg: \r\n print(\"\\033[31m write sony focus area failed to set focus area \\033[0m\") \r\n if ( writeSuccess == True ):\r\n while wpWrite == False:\r\n wpWrite = mavObj.clear_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_FOCUSA )\r\n ret = self.setSonyObjData( mem, int(ans[1]) ) \r\n else:\r\n ret = ( int(prevDat) == int(reqDat) )\r\n if ret == False:\r\n print(f\"\\033[32m timeout error trying to set focus area to \\033[4;42;31m {reqDat} \\033[0m\") \r\n return ret\r\n\r\n def setSonyCamShutSpdData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):\r\n \r\n ret = False\r\n readSuccess = False\r\n print(\" =========== set sony shutter speed ================ \")\r\n # \r\n timeoutS1 = timeout1 * no_timeout1_retry\r\n \r\n #\r\n while (readSuccess == False) and (timeoutS1 > 0):\r\n reqDat, prevDat, readSuccess = mavObj.getVal_sony_shutter(mavObj.STATE_CAM_READING,timeout1)\r\n timeoutS1 -= timeout1\r\n \r\n print(f\"set to Shutter Speed r={reqDat} p={prevDat} time={timeout1} state={mavObj.state.value}\")\r\n \r\n if ((not (int(reqDat) == mavObj.STATE_INIT) and not (int(reqDat) == int(prevDat))) and (readSuccess == True)):\r\n timeoutS2 = timeout2 * no_timeout2_retry\r\n ret,ee = self.enumerate_shutter_sony_a7(int(reqDat))\r\n if (ret == False):\r\n print(f\"\\033[31m Error Invalid parameter shutter speed {reqDat}\\033[0m\")\r\n writeSuccess = False\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_shutter(int(prevDat),mavObj.STATE_CAM_WRITING,mavObj.DONT_WRITE_PREV_DATA,timeout2) \r\n #while wpWrite == False:\r\n # wpWrite = set_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_SS ) \r\n timeoutS2 -= timeout2 # no retries \r\n return writeSuccess \r\n ans = self.set_sony_shutter( ee ) \r\n if not (ans is None): \r\n writeSuccess = False\r\n wpWrite = False \r\n if (len(ans)==0):\r\n print(\"\\033[31m length of command return was zero \\033[0m\")\r\n return ret # \r\n if (len(ans)==4): # thats what we return for a non-writable value\r\n if not (ans[3].find(\"CAN_NOT_WRITE\") == -1): # if we get that we cant write it, we reset the request\r\n writeSuccess = mavObj.clearReq_sony_shutter( mavObj.STATE_CAM_WRITING )\r\n while wpWrite == False:\r\n wpWrite = mavObj.set_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_SS ) \r\n return writeSuccess \r\n print(f\" \\033[32m set the 
shutter speed from/to :: {ans} \\033[0m\") \r\n try:\r\n if ( int(ans[1]) == int(reqDat) ) :\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_shutter(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2) \r\n print(f\"written {ans[1]} {writeSuccess}\")\r\n timeoutS2 -= timeout2 # no retries \r\n except Exception as err_msg: \r\n print(\"\\033[31m write sony shutter speed failed to set shutter speed \\033[0m\") \r\n if ( writeSuccess == True ):\r\n print(f\"saving..... {ans[1]} {writeSuccess}\") \r\n while wpWrite == False:\r\n wpWrite = mavObj.clear_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_SS ) \r\n ret = self.setSonyObjData( mem, int(ans[1]) ) \r\n else:\r\n ret = ( int(prevDat) == int(reqDat) )\r\n if ret == False:\r\n print(f\"\\033[32m timeout error trying to set shutter speed to \\033[4;42;31m {reqDat} \\033[0m\") \r\n return ret\r\n\r\n def setSonyCamWhiteBalaData( self, mem, mavObj, timeout1=100, timeout2=50, no_timeout1_retry=1, no_timeout2_retry=1 ):\r\n \r\n ret = False\r\n readSuccess = False\r\n print(\" =========== set sony white balance ================ \")\r\n # \r\n timeoutS1 = timeout1 * no_timeout1_retry\r\n \r\n #\r\n while (readSuccess == False) and (timeoutS1 > 0):\r\n reqDat, prevDat, readSuccess = mavObj.getVal_sony_white_bal(mavObj.STATE_CAM_READING,timeout1)\r\n timeoutS1 -= timeout1\r\n \r\n if ((not (int(reqDat) == mavObj.STATE_INIT) and not (int(reqDat) == int(prevDat))) and (readSuccess == True)):\r\n timeoutS2 = timeout2 * no_timeout2_retry\r\n ret,ee = self.enumerate_white_bal_sony_a7(int(reqDat))\r\n if (ret == False):\r\n print(f\"\\033[31m Error Invalid parameter white balance {reqDat}\\033[0m\")\r\n writeSuccess = False\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_white_bal(int(prevDat),mavObj.STATE_CAM_WRITING,mavObj.DONT_WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n return writeSuccess \r\n ans = self.set_sony_white_bal( ee ) \r\n if not (ans is None): \r\n writeSuccess = False \r\n wpWrite = False \r\n if (len(ans)==0):\r\n print(\"\\033[31m length of command return was zero \\033[0m\")\r\n return ret # \r\n if (len(ans)==4): # thats what we return for a non-writable value\r\n if not (ans[3].find(\"CAN_NOT_WRITE\") == -1): # if we get that we cant write it, we reset the request\r\n writeSuccess = mavObj.clearReq_sony_white_bal( mavObj.STATE_CAM_WRITING )\r\n while wpWrite == False:\r\n wpWrite = mavObj.set_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_WB ) \r\n return writeSuccess \r\n print(f\" \\033[32m set the white balance from/to :: {ans} \\033[0m\") \r\n try:\r\n if ( int(ans[1]) == int(reqDat) ) :\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_white_bal(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n except Exception as err_msg: \r\n print(\"\\033[31m write sony white balance failed to set white balance \\033[0m\") \r\n if ( writeSuccess == True ):\r\n while wpWrite == False:\r\n wpWrite = mavObj.clear_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_WB ) \r\n ret = self.setSonyObjData( mem, int(ans[1]) ) \r\n else:\r\n ret = ( int(prevDat) == int(reqDat) )\r\n if ret == False:\r\n print(f\"\\033[32m timeout error trying to set white balance to \\033[4;42;31m {reqDat} \\033[0m\") \r\n return ret\r\n \r\n def setSonyCamStillCapModeData( self, mem, mavObj, timeout1=100, timeout2=50, 
no_timeout1_retry=1, no_timeout2_retry=1 ):\r\n \r\n ret = False\r\n readSuccess = False\r\n print(\" =========== set sony still capture mode ================ \")\r\n # \r\n timeoutS1 = timeout1 * no_timeout1_retry\r\n \r\n #\r\n while (readSuccess == False) and (timeoutS1 > 0):\r\n reqDat, prevDat, readSuccess = mavObj.getVal_sony_still_cap_mode(mavObj.STATE_CAM_READING,timeout1)\r\n timeoutS1 -= timeout1\r\n \r\n if ((not (int(reqDat) == mavObj.STATE_INIT) and not (int(reqDat) == int(prevDat))) and (readSuccess == True)):\r\n timeoutS2 = timeout2 * no_timeout2_retry\r\n ret,ee = self.enumerate_still_cap_sony_a7(int(reqDat))\r\n if (ret == False):\r\n print(f\"\\033[31m Error Invalid parameter still capture {reqDat}\\033[0m\")\r\n writeSuccess = False\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_still_cap_mode(int(prevDat),mavObj.STATE_CAM_WRITING,mavObj.DONT_WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n return writeSuccess \r\n ans = self.set_sony_still_cap( ee ) \r\n if not (ans is None): \r\n writeSuccess = False\r\n wpWrite = False \r\n if (len(ans)==0):\r\n print(\"\\033[31m length of command return was zero \\033[0m\")\r\n return ret # \r\n if (len(ans)==4): # thats what we return for a non-writable value\r\n if not (ans[3].find(\"CAN_NOT_WRITE\") == -1): # if we get that we cant write it, we reset the request\r\n writeSuccess = mavObj.clearReq_sony_still_cap_mode( mavObj.STATE_CAM_WRITING )\r\n while wpWrite == False:\r\n wpWrite = mavObj.set_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_SC ) \r\n return writeSuccess \r\n print(f\" \\033[32m set the still capture mode from/to :: {ans} \\033[0m\") \r\n try:\r\n if ( int(ans[1]) == int(reqDat) ) :\r\n while (writeSuccess == False) and (timeoutS2 > 0): \r\n writeSuccess = mavObj.setVal_sony_still_cap_mode(ans[1],mavObj.STATE_CAM_WRITING,mavObj.WRITE_PREV_DATA,timeout2) \r\n timeoutS2 -= timeout2 # no retries \r\n except Exception as err_msg: \r\n print(\"\\033[31m write sony still capture mode failed to set still capture mode \\033[0m\") \r\n if ( writeSuccess == True ):\r\n while wpWrite == False:\r\n wpWrite = mavObj.clear_WritePro( mavObj.STATE_CAM_WRITING, mavObj.WriPro_SC ) \r\n ret = self.setSonyObjData( mem, int(ans[1]) ) \r\n else:\r\n ret = ( int(prevDat) == int(reqDat) )\r\n if ret == False:\r\n print(f\"\\033[32m timeout error trying to set still capture mode to \\033[4;42;31m {reqDat} \\033[0m\") \r\n return ret \r\n\r\n #\r\n # would go into mavlink class if it was in multi-tasking mode\r\n #\r\n def sendMavlinkMessageForParamObject( self, obj, the_connection, Timeout=5 ):\r\n\r\n if (obj.updateNeeded.value == True):\r\n \r\n # send mavlink message obj.name obj.signal.value obj.numberOfVals \r\n #\r\n getName, getValueforMAVSending, getPrev, myState = obj.get_value_data(obj.STATE_MAV_READING, Timeout) \r\n print(f\"-------------- obj update found for param_value {getName} {getValueforMAVSending} {getPrev} {myState}\")\r\n sendVal = struct.unpack('f', struct.pack('I', getValueforMAVSending))[0] \r\n if (myState == True):\r\n try:\r\n the_connection.mav.param_value_send(\r\n getName.encode('ascii'),\r\n sendVal,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n obj.numberOfVals,\r\n obj.index)\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message : %s\" % (err_msg))\r\n ret = False\r\n if (ret == True):\r\n writeSuccess = False\r\n TimeCount = 0\r\n while (writeSuccess == False) and (Timeout > TimeCount):\r\n 
# obj.updateNeeded.value = False\r\n writeSuccess = obj.set_update_flag( False, obj.STATE_MAV_WRITING )\r\n TimeCount += 1\r\n return ret\r\n\r\n #\r\n # would go into mavlink class if it was in multi-tasking mode\r\n #\r\n def sendMavlinkMessageForParamExtObject( self, obj, the_connection, Timeout=5 ):\r\n\r\n v, r = obj.get_ack_send( obj.STATE_MAV_READING )\r\n if ((v == True) and (r == True)):\r\n # if (obj.ack_send.value == True):\r\n \r\n # send mavlink message obj.name obj.signal.value obj.numberOfVals \r\n #\r\n getName, getValueforMAVSending, getPrev, myState = obj.get_value_data(obj.STATE_MAV_READING, Timeout) \r\n print(f\"-------------- obj update found for param_value {getName} {getValueforMAVSending} {getPrev} {myState}\")\r\n if (myState == True):\r\n try:\r\n the_connection.mav.param_ext_value_send(\r\n getName.encode('ascii'),\r\n str(getValueforMAVSending).encode('ascii'),\r\n mavdefs.MAV_PARAM_EXT_TYPE_UINT32,\r\n obj.numberOfVals,\r\n obj.index)\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message : %s\" % (err_msg))\r\n ret = False\r\n if (ret == True):\r\n writeSuccess = False\r\n TimeCount = 0\r\n while (writeSuccess == False) and (Timeout > TimeCount):\r\n # obj.updateNeeded.value = False\r\n writeSuccess = obj.set_ack_send( False, obj.STATE_MAV_WRITING )\r\n TimeCount += 1\r\n return ret\r\n \r\n#\r\n# Pymavlink Library\r\n\r\n# Acknowledgements:\r\n# Thank you to Andrew Tridgell, the mastermind behind pymavlink and MAVProxy\r\n# Thread code from http://stackoverflow.com/questions/730645/python-wxpython-doing-work-continuously-in-the-background\r\n# Serial port code taken from http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python\r\n# UDP http://snakeproject.ru/rubric/article.php?art=python_udp_network_server_client\r\n\r\n# AirCamPro :- 21/10/21 support android kivy serial driver\r\n#\r\n# when you install pymavlink you also need to use mavgen to generate the libraries\r\n# instructions are shown here\r\n# https://mavlink.io/en/mavgen_python/\r\n# https://github.com/ArduPilot/pymavlink/blob/master/mavutil.py\r\n\r\n# multi-tasking info\r\n# https://ja.pymotw.com/2/multiprocessing/basics.html\r\n# https://techacademy.jp/magazine/20607 \r\n\r\n# sudo apt-get install python3-dev python3-opencv python3-wxgtk4.0 python3-pip python3-matplotlib python3-lxml\r\n# sudo apt-get install libxml++2.6-dev\r\n# sudo pip install dronekit\r\n\r\n# ================== Compatible Joysticks =========================================\r\n# X-Box 360 Controller (name: \"Xbox 360 Controller\")\r\n# Playstation 4 Controller (name: \"PS4 Controller\")\r\n# X-Box 360 Controller (name: \"Controller (XBOX 360 For Windows)\")\r\n# \r\nfrom pymavlink import mavutil # ref:- https://www.ardusub.com/developers/pymavlink.html\r\n#import wx\r\nimport sys, serial, glob, threading\r\n# for serial message out packing\r\nimport struct\r\n\r\n# this is included for android serial and to detect the android platform using kivy\r\n# ref:- https://github.com/frmdstryr/kivy-android-serial\r\n# install kivy with the following in your conda environment\r\n# conda install kivy -c conda-forge\r\n#`from kivy.utils import platform\r\n# from kvserial.driver import CdcAcmSerialPort\r\n\r\n# to list ports using the serial library\r\nfrom serial.tools import list_ports\r\n\r\nBUTTON_CONNECT = 10\r\nBUTTON_ARM = 20\r\n\r\n# ethernet UDP communication and joystick\r\n#\r\n# python3 -m pip install -U pygame --user\r\nimport socket\r\nimport 
pygame\r\nJOYSTICK_UDP_PORT = 14556\r\nJOY_SCALE = 1000\r\nMAX_SCALE = 32767\r\nX_MAX = MAX_SCALE\r\nY_MAX = MAX_SCALE\r\n\r\n\r\nMAV_TARGET = 110\r\nMAV_SOURCE = 30\r\n\r\n# import pymavlink.dialects.v10.lapwing as mavlink\r\n# this is a custom dialect which i cant find\r\n# this chooses version 1 you would need to change the ACK function TODO\r\n#\r\n# from mavlink_python_libs import com1 as commonV1\r\n# import com1 as mavdefs\r\n#\r\nfrom mavlink_python_libs import com2 as commonV1\r\n#from my_python_libs import com2 as commonV1\r\nimport com2 as mavdefs\r\n\r\nimport math\r\nimport time\r\nimport array as arr\r\n\r\n#from mypymavlink import mavutilcust as custommav\r\n\r\n#\r\n# multithreading control via asyncio\r\n#\r\nimport asyncio\r\nimport time\r\n\r\nimport numpy as np\r\nimport os\r\n\r\n# ============== control Raspberry Pi IO ===============\r\n# sudo apt-get install rpi.gpio\r\n#\r\n#import RPi.GPIO as GPIO\r\n \r\n# to use Raspberry Pi board pin numbers\r\n#GPIO.setmode(GPIO.BOARD)\r\n \r\n# set up the GPIO channels - one input and one output here\r\n#GPIO.setup(11, GPIO.IN)\r\n#GPIO.setup(12, GPIO.OUT)\r\n\r\n#---------------------------------------------------------------------------\r\n\r\nclass fifo(object):\r\n def __init__(self):\r\n self.buf = []\r\n def write(self, data):\r\n self.buf += data\r\n return len(data)\r\n def read(self):\r\n return self.buf.pop(0)\r\n\r\nimport re\r\n \r\n# mavlink communicator class (without GUI)\r\n#\r\n\r\nclass MAVFrame():\r\n\r\n RCV_COMMAND = mavutil.mavlink.MAV_CMD_REQUEST_MESSAGE\r\n RPM2 = 0\r\n ACK_RESULT = mavutil.mavlink.MAV_RESULT_UNSUPPORTED\r\n DEFAULT_SYS_ID = 1\r\n ACK_ALL_DATA_COMPLETE = 99\r\n \r\n CAMERA_INFORMATION = 259 #camera_information\r\n CAMERA_SETTINGS = 260\r\n STORAGE_INFORMATION = 261\r\n CAMERA_CAPTURE_STATUS = 262\r\n CAMERA_IMAGE_CAPTURED = 263\r\n VIDEO_STREAM = 269\r\n\r\n # camera informations (default camera routines will retrieve this)\r\n time_boot_ms = 1\r\n firmware_version = 12\r\n focal_length = 1.1\r\n sensor_size_h = 3.0\r\n sensor_size_v = 4.0\r\n flags = 4\r\n resolution_h = 300\r\n resolution_v = 400\r\n cam_definition_version = 2\r\n #vendor_name_nd = np.dtype([('A',np.uint8)])\r\n #model_name_nd = np.dtype([('B',np.uint8)])\r\n #vendor_name_list = [65] \r\n #model_name_list = [67]\r\n #vendor_name = \"A\" \r\n #model_name = \"B\"\r\n lens_id = 1\r\n cam_definition_uri = \"http://10.0.2.51/cam_defs\"\r\n \r\n # camera settings\r\n mode_id = 3 # Camera mode\r\n zoomLevel = 7 # Current zoom level (0.0 to 100.0, NaN if not known)*/\r\n focusLevel = 9 \r\n\r\n # storage informations\r\n total_capacity = 1.2 # [MiB] Total capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.\r\n used_capacity = 1.1 # [MiB] Used capacity. If storage is not ready (STORAGE_STATUS_READY) value will be ignored.\r\n available_capacity = 0.1 # [MiB] Available storage capacity. 
If storage is not ready (STORAGE_STATUS_READY) value will be ignored.\r\n read_speed = 0.67 # [MiB/s] Read speed.\r\n write_speed = 0.76 # [MiB/s] Write speed.\r\n storage_id = 1 # Storage ID (1 for first, 2 for second, etc.)\r\n storage_count = 2 # Number of storage devices\r\n status = 0 \r\n #status = mavutil.mavlink.STORAGE_STATUS_READY \r\n\r\n # camera capture status\r\n image_interval = 3.3 # [s] Image capture interval\r\n recording_time_ms = 10000 # [ms] Time since recording started\r\n available_capacity = 0.34 # [MiB] Available storage capacity.\r\n image_status = 1 # Current status of image capturing (0: idle, 1: capture in progress, 2: interval set but idle, 3: interval set and capture in progress)\r\n video_status = 1 # Current status of video capturing (0: idle, 1: capture in progress)\r\n image_count = 11\r\n\r\n # video stream\r\n framerate = 30.0 # [Hz] Frame rate.\r\n bitrate = 3000 # [bits/s] Bit rate.\r\n Vflags = 3 # Bitmap of stream status flags.\r\n Vresolution_h = 300 # [pix] Horizontal resolution.\r\n Vresolution_v = 400 # [pix] Vertical resolution.\r\n rotation = 90 # [deg] Video image rotation clockwise.\r\n hfov = 45 # [deg] Horizontal Field of view.\r\n stream_id = 2 # Video Stream ID (1 for first, 2 for second, etc.)\r\n count = 4 # Number of streams available.\r\n stream_type = mavutil.mavlink.VIDEO_STREAM_TYPE_MPEG_TS_H264 # Type of stream.\r\n videoname = \"vid_001\"\r\n video_uri = \"http://10.0.0.56/vids/001.mov\"\r\n\r\n # camera image captured\r\n time_utc = 667700 # [us] Timestamp (time since UNIX epoch) in UTC. 0 for unknown.\r\n lat = 30 # [degE7] Latitude where image was taken\r\n lon = 40 # [degE7] Longitude where capture was taken\r\n alt = 11 # [mm] Altitude (MSL) where image was taken\r\n relative_alt = 12 # [mm] Altitude above ground\r\n q = [1,0,0,0] # Quaternion of camera orientation (w, x, y, z order, zero-rotation is 0, 0, 0, 0)\r\n image_index = 4 # Zero based index of this image (image count since armed -1)\r\n camera_id = 1 # Camera ID (1 for first, 2 for second, etc.)\r\n capture_result = 1 # Boolean indicating success (1) or failure (0) while capturing this image.\r\n file_url = \"http://10.1.2.3/img/1.jpg\"\r\n\r\n # camera feedback\r\n time_usec = 10000 \r\n cam_idx = 1 \r\n img_idx = 1 \r\n # already lat, \r\n lng = lon \r\n alt_msl = 2 \r\n alt_rel = 4 \r\n roll = 6\r\n pitch = 1 \r\n yaw = 2 \r\n foc_len = 7 \r\n CFflags = 3\r\n \r\n ACK_ERROR = 0\r\n errRCV_COMMAND = 0\r\n errRPM2 = 0\r\n\r\n # task control flag\r\n #\r\n task_control_1 = 0\r\n\r\n # global constants\r\n #\r\n GOT_ERROR = 1\r\n GOT_SUCCESS = 2\r\n GOT_BAD = 3\r\n GOT_UNFORMAT = 4 \r\n \r\n # used to decide what is being requested from the calling (GCS) station\r\n #\r\n type_of_msg = 0\r\n \r\n g_count = 0\r\n pin_no = 0\r\n\r\n # defines for camera ID file\r\n #\r\n CAM_XML_FILE = \"alpha_cam_new.xml\"\r\n NETWORK_ID = 1\r\n\r\n def __init__(self, pinNum=26):\r\n #self.setUPPiRelayNumBCM()\r\n #self.setPinINput(pinNum)\r\n MAVFrame.pin_no=pinNum\r\n\r\n def __del__(self): \r\n class_name = self.__class__.__name__ \r\n print('{} Deleted'.format(class_name)) \r\n\r\n #\r\n # check our operating system we mostly at present only support linux\r\n #\r\n def check_os( self ):\r\n if ((sys.platform=='linux2') or (sys.platform=='linux')): return 1\r\n elif sys.platform=='win32': return 2\r\n else: return 3\r\n \r\n def update_utc_label( self ):\r\n if (self.check_os() == 1):\r\n cmd = \"date +%s\"\r\n self.time_utc = os.popen(cmd).read()\r\n\r\n def update_uptime_label( 
self ):\r\n
        if (self.check_os() == 1):\r\n
            # parse the default `uptime` output; this assumes the usual\r\n
            # \"HH:MM:SS up N days, ...\" layout and is fragile on other formats\r\n
            cmd = \"uptime\"\r\n
            upTimStr = os.popen(cmd).read().split(\",\")\r\n
            dd = upTimStr[0].split()\r\n
            days = int(dd[2])\r\n
            xx = dd[0].split(\":\")\r\n
            hours = int(xx[0])\r\n
            mins = int(xx[1])\r\n
            secs = int(xx[2])\r\n
            self.time_boot_ms = (days*60*60*24) + (hours*60*60) + (mins*60) + secs\r\n
            #print(f\"boot tim {self.time_boot_ms} { (days*60*60*24) + (hours*60*60) + (mins*60) + secs }\")\r\n
\r\n
    def on_click_connect(self,e):\r\n
        #\"\"\"\r\n
        #Process a click on the CONNECT button\r\n
        #Attempt to connect to the MAV using the specified port and baud rate,\r\n
        #then subscribe a function called check_heartbeat that will listen for\r\n
        #a heartbeat message, as well as a function that will print all incoming\r\n
        #MAVLink messages to the console.\r\n
        #\"\"\"\r\n
\r\n
        port = self.cb_port.GetValue()\r\n
        baud = int(self.cb_baud.GetValue())\r\n
        self.textOutput.AppendText(\"Connecting to \" + port + \" at \" + str(baud) + \" baud\\n\")\r\n
\r\n
        self.master = mavutil.mavlink_connection(port, baud=baud)\r\n
        self.thread = threading.Thread(target=self.process_messages)\r\n
        self.thread.daemon = True    # setDaemon() is deprecated in python 3\r\n
        self.thread.start()\r\n
\r\n
        self.master.message_hooks.append(self.check_heartbeat)\r\n
        self.master.message_hooks.append(self.check_rcv_data_msg)\r\n
        self.master.message_hooks.append(self.log_message)\r\n
\r\n
        print(\"Connecting to \" + port + \" at \" + str(baud) + \" baud\")\r\n
        self.textOutput.AppendText(\"Waiting for APM heartbeat\\n\")\r\n
        return\r\n
\r\n
    def on_click_arm(self,e):\r\n
        #\"\"\"\r\n
        #Process a click on the ARM button\r\n
        #Send an arm message to the MAV, then subscribe a function called\r\n
        #check_arm_ack that will listen for a positive confirmation of arming.\r\n
        #\"\"\"\r\n
        self.textOutput.AppendText(\"Arming motor\\n\")\r\n
        print(\"******arming motor*********\")\r\n
        self.master.arducopter_arm()\r\n
\r\n
        self.master.message_hooks.append(self.check_arm_ack)\r\n
\r\n
    def log_message(self,caller,msg):\r\n
        if msg.get_type() != 'BAD_DATA':\r\n
            print(str(msg))\r\n
        return\r\n
\r\n
    def process_messages(self):\r\n
        #\"\"\"\r\n
        #This runs continuously. The mavutil.recv_match() function will call mavutil.post_message()\r\n
        #any time a new message is received, and will notify all functions in the master.message_hooks list.\r\n
        #\"\"\"\r\n
        while True:\r\n
            msg = self.master.recv_match(blocking=True)\r\n
            if not msg:\r\n
                return\r\n
            if msg.get_type() == \"BAD_DATA\":\r\n
                if mavutil.all_printable(msg.data):\r\n
                    sys.stdout.write(msg.data)\r\n
                    sys.stdout.flush()\r\n
\r\n
    def check_heartbeat(self,caller,msg):\r\n
        #\"\"\"\r\n
        #Listens for a heartbeat message\r\n
        #Once this function is subscribed to the dispatcher, it listens to every\r\n
        #incoming MAVLINK message and watches for a 'HEARTBEAT' message. Once\r\n
        #that message is received, the function updates the GUI and then\r\n
        #unsubscribes itself.\r\n
        #\"\"\"\r\n
\r\n
        if msg.get_type() == 'HEARTBEAT':\r\n
            self.textOutput.AppendText(\"Heartbeat received from APM (system %u component %u)\\n\" % (self.master.target_system, self.master.target_component))\r\n
            self.master.message_hooks.remove(self.check_heartbeat)\r\n
\r\n
    def check_arm_ack(self, caller, msg):\r\n
        #\"\"\"\r\n
        #Listens for confirmation of motor arming\r\n
        #Once this function is subscribed to the dispatcher, it listens to every\r\n
        #incoming MAVLINK message and watches for the \"Motor armed!\" confirmation.\r\n
        #Once the message is received, the function updates the GUI and then\r\n
        #unsubscribes itself.\r\n
        #\"\"\"\r\n
\r\n
        if msg.get_type() == 'STATUSTEXT':\r\n
            if \"Throttle armed\" in msg.text:\r\n
                self.textOutput.AppendText(\"Motor armed!\")\r\n
                self.master.message_hooks.remove(self.check_arm_ack)\r\n
\r\n
    def check_rcv_data_msg(self, caller, msg):\r\n
        # message hooks are invoked as hook(caller, msg), so this handler takes\r\n
        # both arguments; the payload fields live on the message, not on the link\r\n
        if msg.get_type() == 'RC_CHANNELS':\r\n
            self.textOutput.AppendText(\"RC Channel message (system %u component %u)\\n\" % (self.master.target_system, self.master.target_component))\r\n
            self.textOutput.AppendText(\"chan1 %u chan2 %u\\n\" % (msg.chan1_raw, msg.chan2_raw))\r\n
            self.master.message_hooks.remove(self.check_rcv_data_msg)\r\n
        elif msg.get_type() == 'COMMAND_LONG':\r\n
            self.textOutput.AppendText(\"Long message received (system %u component %u)\\n\" % (self.master.target_system, self.master.target_component))\r\n
            self.textOutput.AppendText(\"Command %u p1 %u p2 %u p3 %u p4 %u \\n\" % (msg.command, msg.param1, msg.param2, msg.param3, msg.param4))\r\n
            self.textOutput.AppendText(\"p5 %u p6 %u p7 %u \\n\" % (msg.param5, msg.param6, msg.param7))\r\n
            self.master.message_hooks.remove(self.check_rcv_data_msg)\r\n
        elif msg.get_type() == 'CAMERA_IMAGE_CAPTURED':\r\n
            self.textOutput.AppendText(\"Cam Cap message received (system %u component %u)\\n\" % (self.master.target_system, self.master.target_component))\r\n
            self.textOutput.AppendText(\"lat %u lon %u alt %u\\n\" % (msg.lat, msg.lon, msg.alt))\r\n
            self.textOutput.AppendText(\"URL %s\\n\" % (msg.file_url))\r\n
            self.master.message_hooks.remove(self.check_rcv_data_msg)\r\n
\r\n
    def OnClose(self, e):\r\n
        self._mgr.UnInit()\r\n
        self.Close()\r\n
\r\n
    # if you want this then uncomment these lines for the android kivy libraries as well (instructions to install are above)\r\n
\r\n
    # from kivy.utils import platform\r\n
    # from kvserial.driver import CdcAcmSerialPort\r\n
\r\n
    def serial_ports(self):\r\n
        # Lists all available serial ports\r\n
        #:raises EnvironmentError:\r\n
        #    On unsupported or unknown platforms\r\n
        #:returns:\r\n
        #    A list of available serial ports\r\n
        #\r\n
        if 'ANDROID_BOOTLOGO' in os.environ:    # detect android first, as sys.platform alone reports linux\r\n
            #if platform == 'android': using kivy instead\r\n
            ports = ['/dev/ttyACM0']            # keep this a list so the probe loop below sees whole port names\r\n
        else:\r\n
            if sys.platform.startswith('win'):\r\n
                ports = ['COM' + str(i + 1) for i in range(256)]\r\n
            elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):    # also shows /dev/ttyAMA0 on raspberry pi\r\n
                # this is to exclude your current terminal \"/dev/tty\"\r\n
                ports = glob.glob('/dev/tty[A-Za-z]*')\r\n
            elif sys.platform.startswith('darwin'):    # apple mac support if using darwin\r\n
                ports = glob.glob('/dev/tty.*')\r\n
            else:\r\n
                ports = list_ports.comports()   # the serial library list should do the rest e.g. riscos atheos os2 freebsd aix etc\r\n
\r\n
        if len(ports) == 0:\r\n
            raise EnvironmentError('Unsupported platform')\r\n
\r\n
        result = []\r\n
        for port in ports:\r\n
            if 'ANDROID_BOOTLOGO' in os.environ:    # device android\r\n
                s = CdcAcmSerialPort(port)\r\n
                s.close()\r\n
                result.append(port)\r\n
            else:\r\n
                try:\r\n
                    s = serial.Serial(port)\r\n
                    s.close()\r\n
                    result.append(port)\r\n
                except (OSError, serial.SerialException):\r\n
                    pass\r\n
        return result\r\n
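\r\n
    # A minimal usage sketch added for illustration (demo_open_first_port is a\r\n
    # hypothetical name, not part of the original class): it takes the first\r\n
    # port reported by serial_ports() above and opens it as a pymavlink serial\r\n
    # link. The 57600 baud default is an assumption, not mandated by this file.\r\n
    def demo_open_first_port(self, baud=57600):\r\n
        found = self.serial_ports()\r\n
        if len(found) == 0:\r\n
            return None, False\r\n
        try:\r\n
            conn = mavutil.mavlink_connection(found[0], baud=baud)\r\n
            return conn, True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to open %s : %s\" % (found[0], err_msg))\r\n
            return None, False\r\n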
\r\n
    def print_red(self,text,value):\r\n
        print(\"\\033[31m %s : %6.3f\"%(text,value))\r\n
\r\n
    def print_yellow(self,text,value):\r\n
        print(\"\\033[33m %s : %6.3f\"%(text,value))\r\n
\r\n
    def print_2_yellow(self,text,value1,value2):\r\n
        print(\"\\033[33m %s : %6.3f %6.3f\"%(text,value1,value2))\r\n
\r\n
    def print_3_yellow(self,text,value1,value2,value3):\r\n
        print(\"\\033[33m %s : %6.3f %6.3f %6.3f\"%(text,value1,value2,value3))\r\n
\r\n
    def print_3_blue(self,text,value1,value2,value3):\r\n
        print(\"\\033[34m %s %6.3f %6.3f %6.3f\"%(text,value1,value2,value3))\r\n
\r\n
    def print_blue(self,text,value):\r\n
        print(\"\\033[34m %s : %6.3f\"%(text,value))\r\n
\r\n
    def joystickInit(self):\r\n
        # Set the width and height of the screen [width,height]\r\n
        # the objects created here are stored on self so that\r\n
        # processJoystickSendMavlink() below can see them\r\n
        size = [500, 700]\r\n
        self.screen = pygame.display.set_mode(size)\r\n
        pygame.display.set_caption(\"----- My test of mavlink and joystick -----\")\r\n
\r\n
        pygame.init()\r\n
        # Used to manage how fast the screen updates\r\n
        self.clock = pygame.time.Clock()\r\n
        # Initialize the joysticks\r\n
        pygame.joystick.init()\r\n
        self.joystick = pygame.joystick.Joystick(0)\r\n
        self.joystick.init()\r\n
        # Get ready to print (TextPrint is the helper class from the pygame\r\n
        # joystick example and is assumed to be available)\r\n
        self.textPrint = TextPrint()\r\n
\r\n
    def initUDPSocket(self,bind):\r\n
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n
        # bind the socket if this is a server (pass bind==1)\r\n
        if bind == 1:\r\n
            host = 'localhost'\r\n
            port = JOYSTICK_UDP_PORT\r\n
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n
            addr = (host,port)\r\n
            sock.bind(addr)\r\n
            sock.setblocking(0)\r\n
        return sock\r\n
\r\n
    def closeUDPSocket(self,udp_socket):\r\n
        udp_socket.close()\r\n
\r\n
    def serverReadUDPSocket(self,udp_socket,port):\r\n
        # the second argument is the receive buffer size passed to recvfrom()\r\n
        conn, addr = udp_socket.recvfrom(port)\r\n
        return conn,addr\r\n
\r\n
    def clientReadUDPSocket(self,udp_socket,port):\r\n
        dataV = udp_socket.recvfrom(port)\r\n
        return dataV\r\n
\r\n
    def joyMavlinkInit(self):\r\n
        # kept on self so the joystick loop can pack messages with it\r\n
        self.mav = mavutil.mavlink.MAVLink(fifo())\r\n
        self.mav.srcSystem = MAV_SOURCE # set to master\r\n
\r\n
    def blockMouseDown(self,block_flag):\r\n
        if block_flag:\r\n
            pygame.event.set_blocked(pygame.MOUSEBUTTONDOWN)\r\n
        else:\r\n
            pygame.event.set_allowed(pygame.MOUSEBUTTONDOWN)\r\n
\r\n
    def blockMouseUp(self,block_flag):\r\n
        if block_flag:\r\n
            pygame.event.set_blocked(pygame.MOUSEBUTTONUP)\r\n
        else:\r\n
            pygame.event.set_allowed(pygame.MOUSEBUTTONUP)\r\n
\r\n
    def checkMouseDwnBlock(self):\r\n
        print ('MOUSEBUTTONDOWN is block: ', pygame.event.get_blocked(pygame.MOUSEBUTTONDOWN))\r\n
\r\n
    def checkMouseUpBlock(self):\r\n
        print ('MOUSEBUTTONUP is block: ', pygame.event.get_blocked(pygame.MOUSEBUTTONUP))\r\n
\r\n
    def write_mav_serial_data(self, serial, x ):\r\n
        # x is an already packed byte buffer (e.g. the result of msg.pack(mav))\r\n
        serial.write(x)\r\n
\r\n
    def write_pack_serial_data(self, serial, x, y, z, roll, pitch, yaw):\r\n
        # struct.pack 'c' requires a bytes object in python 3, hence b'S'\r\n
        serial.write(struct.pack('<chhhhhh', b'S', x, y, z, roll, pitch, yaw))\r\n
\r\n
    def test_linear(self, serial, length=200, times=1000, delta=0.05):\r\n
        for angle in range(1, times, 5):\r\n
            a = angle * math.pi / 180\r\n
            self.write_pack_serial_data(serial, int(length * math.cos(a)), int(length * math.sin(a)), 0, 0, 0, 0)\r\n
            time.sleep(delta)\r\n
        self.write_pack_serial_data(serial, 0, 0, 0, 0, 0, 0)\r\n
\r\n
    def test_angles(self, serial, length=200, times=1000, delta=0.05):\r\n
        for angle in range(1, times, 5):\r\n
            a = angle * math.pi / 180\r\n
            # yaw is held at zero while roll and pitch sweep\r\n
            self.write_pack_serial_data(serial, 0, 0, 0, int(30 * math.cos(a)), int(30 * math.sin(-a)), 0)\r\n
            time.sleep(delta)\r\n
        self.write_pack_serial_data(serial, 0, 0, 0, 0, 0, 0)\r\n
\r\n
    def test_yaw(self, serial, length=200, times=1000, delta=0.05):\r\n
        for angle in range(1, times, 5):\r\n
            a = angle * math.pi / 180\r\n
            self.write_pack_serial_data(serial, int(length * math.cos(a)), 0, 0, int(30 * math.sin(a)), 0, 0)\r\n
            time.sleep(delta)\r\n
        self.write_pack_serial_data(serial, 0, 0, 0, 0, 0, 0)\r\n
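\r\n
    # A minimal sketch added for illustration (demo_pack_manual_control is a\r\n
    # hypothetical helper, not original API): it shows, outside the pygame loop\r\n
    # below, how stick values become a packed MANUAL_CONTROL byte buffer using\r\n
    # the same fifo() link object and scaling constants this file defines.\r\n
    def demo_pack_manual_control(self, thrust=0.5, rudder=0.0, buttons=0):\r\n
        mav = mavutil.mavlink.MAVLink(fifo())\r\n
        mav.srcSystem = MAV_SOURCE\r\n
        msg = mavutil.mavlink.MAVLink_manual_control_message(\r\n
            target = MAV_TARGET,\r\n
            x = X_MAX,\r\n
            y = Y_MAX,\r\n
            z = round(thrust * JOY_SCALE),\r\n
            r = round(rudder * JOY_SCALE),\r\n
            buttons = buttons)\r\n
        # the packed buffer can go to a UDP socket or a serial port\r\n
        return msg.pack(mav)\r\n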
\r\n
    def processJoystickSendMavlink(self,sock):\r\n
\r\n
        # these objects are created by joystickInit() and joyMavlinkInit(),\r\n
        # which must both be called before this loop is started\r\n
        joystick = self.joystick\r\n
        clock = self.clock\r\n
        screen = self.screen\r\n
        textPrint = self.textPrint\r\n
        mav = self.mav\r\n
        WHITE = (255, 255, 255)\r\n
        done = False\r\n
        msgbuf = None\r\n
        # -------- Main Program Loop -----------\r\n
        while done == False:\r\n
            btns = 0\r\n
            thrust = 0.0\r\n
            rudder = 0.0\r\n
\r\n
            # EVENT PROCESSING STEP\r\n
            for event in pygame.event.get(): # User did something\r\n
\r\n
                screen.fill(WHITE)\r\n
                textPrint.reset()\r\n
\r\n
                # Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION\r\n
                # QUIT - none\r\n
                # ACTIVEEVENT - gain, state\r\n
                # KEYDOWN - unicode, key, mod\r\n
                # KEYUP - key, mod\r\n
                # MOUSEMOTION - pos, rel, buttons\r\n
                # MOUSEBUTTONUP - pos, button\r\n
                # MOUSEBUTTONDOWN - pos, button\r\n
                # JOYAXISMOTION - joy, axis, value\r\n
                # JOYBALLMOTION - joy, ball, rel\r\n
                # JOYHATMOTION - joy, hat, value\r\n
                # JOYBUTTONUP - joy, button\r\n
                # JOYBUTTONDOWN - joy, button\r\n
                # VIDEORESIZE - size, w, h\r\n
                # VIDEOEXPOSE - none\r\n
                # USEREVENT - code\r\n
                if event.type == pygame.QUIT:\r\n
                    done=True\r\n
                elif event.type == pygame.MOUSEBUTTONDOWN:\r\n
                    self.print_2_yellow(\"Mouse button down pressed.\",event.button,event.pos)\r\n
                elif event.type == pygame.MOUSEBUTTONUP:\r\n
                    self.print_2_yellow(\"Mouse button up pressed.\",event.button,event.pos)\r\n
                elif event.type == pygame.JOYBUTTONDOWN:\r\n
                    self.print_2_yellow(\"Joystick button down pressed.\",event.button,event.joy)\r\n
                elif event.type == pygame.JOYBUTTONUP:\r\n
                    self.print_2_yellow(\"Joystick button up released.\",event.button,event.joy)\r\n
                elif event.type == pygame.JOYAXISMOTION:\r\n
                    self.print_3_yellow(\"Joystick axis motion.\",event.joy,event.axis,event.value)\r\n
                elif event.type == pygame.JOYBALLMOTION:\r\n
                    self.print_3_yellow(\"Joystick ball motion.\",event.joy,event.ball,event.rel)\r\n
                elif event.type == pygame.JOYHATMOTION:\r\n
                    self.print_3_yellow(\"Joystick hat motion\",event.joy,event.hat,event.value)\r\n
                elif event.type == pygame.VIDEORESIZE:\r\n
                    self.print_3_blue(\"video re-size.\",event.size,event.w,event.h)\r\n
                elif event.type == pygame.KEYDOWN:\r\n
                    self.print_3_yellow(\"key down \",event.unicode,event.key,event.mod)\r\n
                elif event.type == pygame.KEYUP:\r\n
                    self.print_2_yellow(\"key up \",event.key,event.mod)\r\n
\r\n
            # Get the name from the OS for the controller/joystick\r\n
            name = joystick.get_name()\r\n
            print(\"Joystick name: {}\".format(name) )\r\n
\r\n
            # get the buttons\r\n
            buttons = joystick.get_numbuttons()\r\n
            for i in range( buttons ):\r\n
                button = joystick.get_button( i )\r\n
                print( \"Button {:>2} value: {}\".format(i,button) )\r\n
\r\n
            # get the hats\r\n
            # Hat switch. All or nothing for direction, not like joysticks.\r\n
            # Value comes back in an array.\r\n
            hats = joystick.get_numhats()\r\n
            print( \"Number of hats: {}\".format(hats) )\r\n
            textPrint.indent()\r\n
\r\n
            for i in range( hats ):\r\n
                hat = joystick.get_hat( i )\r\n
                print( \"Hat {} value: {}\".format(i, str(hat)) )\r\n
\r\n
            # Getting available devices\r\n
            for id in range(pygame.joystick.get_count()):\r\n
                print( \"devices list : %u %s\" % (id, pygame.joystick.Joystick(id).get_name()))\r\n
\r\n
            # Get thrust and brake first\r\n
            # mix 2 shifts in single channels\r\n
            thr = (joystick.get_axis(5) + 1) / 2\r\n
            brk = -(joystick.get_axis(2) + 1) / 2\r\n
            thrust = thr + brk\r\n
            self.print_yellow(\"Thrust value \",thrust)\r\n
\r\n
            # this is the x axis\r\n
            rudder = joystick.get_axis(0)\r\n
            self.print_blue(\"Rudder value \",rudder)\r\n
\r\n
            # now collect all buttons\r\n
            btns = 0\r\n
            for i in range(joystick.get_numbuttons()):\r\n
                btns |= joystick.get_button(i) << i\r\n
\r\n
            # Usually axes run in pairs, up/down for one, and left/right for\r\n
            # the other.\r\n
            axes = joystick.get_numaxes()\r\n
            print( \"Number of axes: {}\".format(axes) )\r\n
            textPrint.indent()\r\n
\r\n
            for i in range( axes ):\r\n
                axis = joystick.get_axis( i )\r\n
                print( \"Axis {} value: {:>6.3f}\".format(i, axis) )\r\n
            textPrint.unindent()\r\n
\r\n
            # Update events in pygame\r\n
            pygame.event.pump()\r\n
\r\n
            # pack acquired data and throw it to socket\r\n
            msg = mavutil.mavlink.MAVLink_manual_control_message( target = MAV_TARGET, x = X_MAX, y = Y_MAX, z = round(thrust*JOY_SCALE), r = round(rudder*JOY_SCALE), buttons = btns)\r\n
            msgbuf = msg.pack(mav)\r\n
\r\n
            try:\r\n
                jid = joystick.get_instance_id()\r\n
            except AttributeError:\r\n
                # get_instance_id() is an SDL2 method\r\n
                jid = joystick.get_id()\r\n
\r\n
            print( \"Joystick {}\".format(jid))\r\n
\r\n
            try:\r\n
                guid = joystick.get_guid()\r\n
            except AttributeError:\r\n
                # get_guid() is an SDL2 method\r\n
                pass\r\n
            else:\r\n
                print(\"GUID: {}\".format(guid))\r\n
\r\n
            # Limit to 25 frames per second\r\n
            clock.tick(25)\r\n
            if msgbuf:\r\n
                # send the message on the UDP Port; 'localhost' matches the\r\n
                # server bind in initUDPSocket()\r\n
                sock.sendto(msgbuf, ('localhost', JOYSTICK_UDP_PORT))\r\n
                # send the message on serial\r\n
                # write_mav_serial_data(serial, msgbuf)\r\n
\r\n
        # Close the window and quit.\r\n
        # If you forget this line, the program will 'hang'\r\n
        # on exit if running from IDLE.\r\n
        pygame.joystick.quit()\r\n
        pygame.quit()\r\n
\r\n
    # make a mavlink connection using mavutil like ardusub does....\r\n
    #\r\n
    # Create the connection and return it for use with the other functions\r\n
    #\r\n
    # TODO::: change the port and see if this can run entirely parallel with camera\r\n
    # take picture on another port\r\n
    #\r\n
    def makeMAVlinkConn(self):\r\n
        the_connection = None\r\n
        try:\r\n
            #the_connection = mavutil.mavlink_connection('udpin:127.0.0.1:14551',autoreconnect=True)\r\n
            the_connection = mavutil.mavlink_connection('udpin:0.0.0.0:14550',autoreconnect=True, source_system=1, source_component=100)\r\n
            return the_connection,True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to connect : %s\" % (err_msg))\r\n
            return the_connection,False\r\n
\r\n
    def makeNewMAVlinkConn(self,id):\r\n
        the_connection = None\r\n
        try:\r\n
            #the_connection = mavutil.mavlink_connection('udpin:127.0.0.1:14551',autoreconnect=True, source_system=id)\r\n
            the_connection = mavutil.mavlink_connection('udpin:0.0.0.0:14550',autoreconnect=True, source_system=id, source_component=100)\r\n
            return the_connection,True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to connect : %s\" % (err_msg))\r\n
            return the_connection,False\r\n
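\r\n
    # A hedged usage sketch (demo_heartbeat_roundtrip is an illustrative name,\r\n
    # not original API): open the UDP link created by makeMAVlinkConn() above,\r\n
    # block until a GCS heartbeat arrives via mavlink_rcv_heartbeat(), then\r\n
    # answer with the camera heartbeat defined below.\r\n
    def demo_heartbeat_roundtrip(self):\r\n
        conn, ok = self.makeMAVlinkConn()\r\n
        if not ok:\r\n
            return False\r\n
        if not self.mavlink_rcv_heartbeat(conn):\r\n
            return False\r\n
        return self.mavlink_send_GCS_heartbeat(conn)\r\n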
\r\n
    # Send heartbeat from camera to GCS (types are defined as enums in the dialect file).\r\n
    #\r\n
    def mavlink_send_GCS_heartbeat(self, the_connection):\r\n
        print(\" heartbeat.............................. %s\\n\"%(mavutil.mavlink.MAV_TYPE_CAMERA))\r\n
        try:\r\n
            the_connection.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_CAMERA, mavutil.mavlink.MAV_AUTOPILOT_INVALID, 0, 0, mavutil.mavlink.MAV_STATE_ACTIVE)\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to send GCS heartbeat : %s\" % (err_msg))\r\n
            ret = False\r\n
        print(\" heartbeat.............................. %s\\n\"%(ret))\r\n
        return ret\r\n
\r\n
    # Send heartbeat from a MAVLink application.\r\n
    #\r\n
    def mavlink_send_OBC_heartbeat2(self, the_connection):\r\n
        try:\r\n
            # the heartbeat must be sent on this connection's link object\r\n
            the_connection.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_CAMERA, mavutil.mavlink.MAV_AUTOPILOT_GENERIC, 0, 0, 0)\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to send OBC heartbeat : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    # Receive heartbeat from a MAVLink application.\r\n
    #\r\n
    def mavlink_rcv_heartbeat(self, the_connection):\r\n
        try:\r\n
            the_connection.wait_heartbeat()\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to wait for heartbeat : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    # Sets a value to the rc channel\r\n
    #\r\n
    def mavlink_set_rc_channel_pwm(self, the_connection, channel_id, pwm=1500):\r\n
        #\"\"\" Set RC channel pwm value\r\n
        #Args:\r\n
        #    channel_id (TYPE): Channel ID\r\n
        #    pwm (int, optional): Channel pwm value 1100-1900\r\n
        #\"\"\"\r\n
        if channel_id < 1 or channel_id > 18:\r\n
            print(\"Channel does not exist.\")\r\n
            return\r\n
\r\n
        # Mavlink 2 supports up to 18 channels:\r\n
        # https://mavlink.io/en/messages/common.html#RC_CHANNELS_OVERRIDE\r\n
        rc_channel_values = [65535 for _ in range(18)]\r\n
        rc_channel_values[channel_id - 1] = pwm\r\n
\r\n
        try:\r\n
            the_connection.mav.rc_channels_override_send( the_connection.target_system, the_connection.target_component, *rc_channel_values )\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to set RC Chan PWM : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    # drives a gimbal axis controller to the pitch roll yaw specified\r\n
    #\r\n
    def gimbal_move_to( self, the_connection, tilt=0, roll=0, pan=0):\r\n
        #\"\"\"\r\n
        #Moves gimbal to given position\r\n
        #\"\"\"\r\n
        try:\r\n
            the_connection.mav.command_long_send(the_connection.target_system, the_connection.target_component, mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL, 1, tilt, roll, pan, 0, 0, 0, mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING)\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to move gimbal using command long : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    def mavlink10(self,connID):\r\n
        # '''return True if using MAVLink 1.0 or later'''\r\n
        return float(connID.WIRE_PROTOCOL_VERSION) >= 1\r\n
\r\n
    def mavlink20(self,connID):\r\n
        # '''return True if using MAVLink 2.0 or later'''\r\n
        return float(connID.WIRE_PROTOCOL_VERSION) >= 2\r\n
\r\n
    # Set relay_pin to value of state\r\n
    def mavlink_set_relay(self, the_connection, relay_pin=0, state=True):\r\n
\r\n
        #if self.mavlink10():\r\n
        try:\r\n
            the_connection.mav.command_long_send(\r\n
                the_connection.target_system,                 # target_system\r\n
                the_connection.target_component,              # target_component\r\n
                mavutil.mavlink.MAV_CMD_DO_SET_RELAY,         # command\r\n
                0,                                            # Confirmation\r\n
                relay_pin,                                    # Relay Number\r\n
                int(state),                                   # state (1 to indicate arm)\r\n
                0,                                            # param3 (all other params meaningless)\r\n
                0,                                            # param4\r\n
                0,                                            # param5\r\n
                0,                                            # 
param6\r\n 0) # param7\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to set relay using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n\r\n # ref:- https://mavlink.io/en/messages/common.html#MAV_CMD\r\n \r\n def mavlink_video_stop_capture(self, the_connection, streamNo):\r\n #if self.mavlink10():\r\n try:\r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_VIDEO_STOP_CAPTURE, # command\r\n 0, # Confirmation\r\n streamNo, # stream number\r\n 0, # param2\r\n 0, # param3 \r\n 0, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to stop video capture using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret \r\n \r\n\r\n def mavlink_video_start_capture(self, the_connection, streamNo, freq):\r\n #if self.mavlink10():\r\n try:\r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_VIDEO_START_CAPTURE, # command\r\n 0, # Confirmation\r\n streamNo, # stream number\r\n freq, # param2\r\n 0, # param3 \r\n 0, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to start video capture using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret \r\n\r\n def mavlink_image_stop_capture(self, the_connection):\r\n #if self.mavlink10():\r\n try:\r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_IMAGE_STOP_CAPTURE, # command\r\n 0, # Confirmation\r\n 0, # param1\r\n 0, # param2\r\n 0, # param3 \r\n 0, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to stop image capture using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret \r\n \r\n def mavlink_image_start_capture(self, the_connection, interval, totalImages, seqNo):\r\n #if self.mavlink10():\r\n try:\r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_IMAGE_START_CAPTURE, # command\r\n 0, # Confirmation\r\n 0, # param1\r\n interval, # Desired elapsed time between two consecutive pictures (in seconds)\r\n totalImages, # Total number of images to capture. 0 to capture forever/until MAV_CMD_IMAGE_STOP_CAPTURE.\r\n seqNo, # Capture sequence number starting from 1. This is only valid for single-capture (param3 == 1), otherwise set to 0. 
Increment the capture ID for each capture command to prevent double captures when a command is re-transmitted\r\n
                0,                 # param5\r\n
                0,                 # param6\r\n
                0)                 # param7\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to start image capture using command long : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    def mavlink_video_stop_streaming(self, the_connection, streamNo):\r\n
        #if self.mavlink10():\r\n
        try:\r\n
            the_connection.mav.command_long_send(\r\n
                the_connection.target_system,                 # target_system\r\n
                the_connection.target_component,              # target_component\r\n
                mavutil.mavlink.MAV_CMD_VIDEO_STOP_STREAMING, # command\r\n
                0,                 # Confirmation\r\n
                streamNo,          # stream number\r\n
                0,                 # param2\r\n
                0,                 # param3\r\n
                0,                 # param4\r\n
                0,                 # param5\r\n
                0,                 # param6\r\n
                0)                 # param7\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to send stop streaming using command long : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    def mavlink_do_ftp_send(self, the_connection, network, payload):\r\n
        MAX_CHUNK_BYTES = 251    # a FILE_TRANSFER_PROTOCOL payload is 251 bytes\r\n
        # ceiling division so the final partial chunk is not dropped\r\n
        numOfchunk = math.ceil(len(payload) / MAX_CHUNK_BYTES)\r\n
        for i in range(numOfchunk):\r\n
            #print(f\"ftp send chunk {i} offset {i*MAX_CHUNK_BYTES}\")\r\n
            msgpay = []\r\n
            for b in range(MAX_CHUNK_BYTES):\r\n
                try:\r\n
                    msgpay.append(payload[b+(i*MAX_CHUNK_BYTES)])\r\n
                except Exception as e:\r\n
                    msgpay.append(0)    # zero-pad the last chunk\r\n
            try:\r\n
                the_connection.mav.file_transfer_protocol_send (\r\n
                    network,\r\n
                    the_connection.target_system,             # target_system\r\n
                    the_connection.target_component,          # target_component\r\n
                    msgpay )\r\n
            except Exception as e:\r\n
                print(f\" ftp send exception {e} \\nchunk {i} @ offset {i*MAX_CHUNK_BYTES}\")\r\n
\r\n
    def mavlink_video_start_streaming(self, the_connection, streamNo):\r\n
        #if self.mavlink10():\r\n
        try:\r\n
            the_connection.mav.command_long_send(\r\n
                the_connection.target_system,                  # target_system\r\n
                the_connection.target_component,               # target_component\r\n
                mavutil.mavlink.MAV_CMD_VIDEO_START_STREAMING, # command\r\n
                0,                 # Confirmation\r\n
                streamNo,          # stream number\r\n
                0,                 # param2\r\n
                0,                 # param3\r\n
                0,                 # param4\r\n
                0,                 # param5\r\n
                0,                 # param6\r\n
                0)                 # param7\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to send start streaming using command long : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    # suitable variables to drive CamMode\r\n
    #\r\n
    MAV_CAMERA_MODE_IMAGE = 0\r\n
    MAV_CAMERA_MODE_VIDEO = 1\r\n
    MAV_CAMERA_MODE_IMAGE_SURVEY = 2\r\n
\r\n
    def mavlink_video_set_camera_mode(self, the_connection, camMode):\r\n
        #if self.mavlink10():\r\n
        try:\r\n
            the_connection.mav.command_long_send(\r\n
                the_connection.target_system,             # target_system\r\n
                the_connection.target_component,          # target_component\r\n
                mavutil.mavlink.MAV_CMD_SET_CAMERA_MODE,  # command\r\n
                0,                 # Confirmation\r\n
                0,                 # param1\r\n
                camMode,           # param2\r\n
                0,                 # param3\r\n
                0,                 # param4\r\n
                0,                 # param5\r\n
                0,                 # param6\r\n
                0)                 # param7\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to send video set camera mode using command long : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    # suitable variables to drive CamZoomType\r\n
    #\r\n
    MAV_ZOOM_TYPE_STEP = 0          # Zoom one step increment (-1 for wide, 1 for tele)\r\n
    MAV_ZOOM_TYPE_CONTINUOUS = 1    # Continuous zoom up/down until stopped (-1 for wide, 1 for tele, 0 to stop zooming)\r\n
    MAV_ZOOM_TYPE_RANGE = 2         # Zoom value as proportion of full camera range (a value between 0.0 and 100.0)\r\n
    MAV_ZOOM_TYPE_FOCAL_LENGTH = 3  # Zoom value/variable focal length in millimetres\r\n
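\r\n
    # A short usage sketch (demo_zoom_in_for is a name added for illustration,\r\n
    # not original API): with MAV_ZOOM_TYPE_CONTINUOUS the camera zooms until\r\n
    # told to stop, so a timed zoom-in is a start command, a delay, then a stop\r\n
    # (value 0), following the zoom semantics described in the comments above.\r\n
    def demo_zoom_in_for(self, the_connection, seconds=1.0):\r\n
        if not self.mavlink_video_set_camera_zoom(the_connection, self.MAV_ZOOM_TYPE_CONTINUOUS, 1):\r\n
            return False\r\n
        time.sleep(seconds)\r\n
        return self.mavlink_video_set_camera_zoom(the_connection, self.MAV_ZOOM_TYPE_CONTINUOUS, 0)\r\n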
\r\n
    def mavlink_video_set_camera_zoom(self, the_connection, camZoomType, camZoomValue):\r\n
        #if self.mavlink10():\r\n
        try:\r\n
            the_connection.mav.command_long_send(\r\n
                the_connection.target_system,             # target_system\r\n
                the_connection.target_component,          # target_component\r\n
                mavutil.mavlink.MAV_CMD_SET_CAMERA_ZOOM,  # command\r\n
                0,                 # Confirmation\r\n
                camZoomType,       # param1\r\n
                camZoomValue,      # param2\r\n
                0,                 # param3\r\n
                0,                 # param4\r\n
                0,                 # param5\r\n
                0,                 # param6\r\n
                0)                 # param7\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to send camera zoom using command long : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    MAV_FOCUS_TYPE_STEP = 0         # Focus one step increment (-1 for focusing in, 1 for focusing out towards infinity).\r\n
    MAV_FOCUS_TYPE_CONTINUOUS = 1   # Continuous focus up/down until stopped (-1 for focusing in, 1 for focusing out towards infinity, 0 to stop focusing)\r\n
    MAV_FOCUS_TYPE_RANGE = 2        # Focus value as proportion of full camera focus range (a value between 0.0 and 100.0)\r\n
    MAV_FOCUS_TYPE_METERS = 3       # Focus value in metres\r\n
\r\n
    def mavlink_video_set_camera_focus(self, the_connection, camFocusType, camFocusValue):\r\n
        #if self.mavlink10():\r\n
        try:\r\n
            the_connection.mav.command_long_send(\r\n
                the_connection.target_system,             # target_system\r\n
                the_connection.target_component,          # target_component\r\n
                mavdefs.MAV_CMD_SET_CAMERA_FOCUS,         # command\r\n
                0,                 # Confirmation\r\n
                camFocusType,      # param1\r\n
                camFocusValue,     # param2\r\n
                0,                 # param3\r\n
                0,                 # param4\r\n
                0,                 # param5\r\n
                0,                 # param6\r\n
                0)                 # param7\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to send camera focus using command long : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    def mavlink_do_digicam_configure(self, the_connection, camMode, camShutterSpeed, camAperture, camISO, camExposure, camCommandIdentity, camEngineCutOff):\r\n
        #if self.mavlink10():\r\n
        try:\r\n
            the_connection.mav.command_long_send(\r\n
                the_connection.target_system,                   # target_system\r\n
                the_connection.target_component,                # target_component\r\n
                mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONFIGURE,   # command\r\n
                0,                   # Confirmation\r\n
                camMode,             # param1\r\n
                camShutterSpeed,     # param2\r\n
                camAperture,         # param3\r\n
                camISO,              # param4\r\n
                camExposure,         # param5\r\n
                camCommandIdentity,  # param6\r\n
                camEngineCutOff)     # param7\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to send digicam configure using command long : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    def mavlink_do_digicam_control(self, the_connection, camSessionControl, camZoomAbsolute, camZoomRelative, camFocus, camShootCommand, camCommandIdentity, camShotID):\r\n
        #if self.mavlink10():\r\n
        try:\r\n
            the_connection.mav.command_long_send(\r\n
                the_connection.target_system,                 # target_system\r\n
                the_connection.target_component,              # target_component\r\n
                mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL,   # command\r\n
                0,                   # Confirmation\r\n
                camSessionControl,   # param1\r\n
                camZoomAbsolute,     # param2\r\n
                camZoomRelative,     # param3\r\n
                camFocus,            # param4\r\n
                camShootCommand,     # param5\r\n
                camCommandIdentity,  # param6\r\n
                camShotID)           # param7\r\n
            ret = True\r\n
        except Exception as err_msg:\r\n
            print(\"Failed to send digicam control using command long : %s\" % (err_msg))\r\n
            ret = False\r\n
        return ret\r\n
\r\n
    def mavlink_do_video_control(self, the_connection, camID, camTransmission, camInterval, camRecording):\r\n
        #if self.mavlink10():\r\n
        try:\r\n
            the_connection.mav.command_long_send(\r\n
                the_connection.target_system,                 # target_system\r\n
                the_connection.target_component,              # 
target_component\r\n mavutil.mavlink.MAV_CMD_DO_CONTROL_VIDEO, # command\r\n 0, # Confirmation\r\n camID, # param1\r\n camTransmission, # param2\r\n camInterval, # param3 \r\n camRecording, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7 \r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to send do video control using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n def mavlink_get_camera_settings(self, the_connection):\r\n #if self.mavlink10():\r\n try:\r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_SETTINGS, # command\r\n 0, # Confirmation\r\n 1, # param1\r\n 0, # param2\r\n 0, # param3 \r\n 0, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7 \r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to get cam settings using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n def mavlink_get_storage_info(self, the_connection, StoId):\r\n #if self.mavlink10():\r\n try:\r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_REQUEST_STORAGE_INFORMATION, # command\r\n 0, # Confirmation\r\n StoId, # param1\r\n 1, # param2\r\n 0, # param3 \r\n 0, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7 \r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to get storage info using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n\r\n def mavlink_get_capture_status(self, the_connection):\r\n #if self.mavlink10():\r\n try:\r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_CAPTURE_STATUS, # command\r\n 0, # Confirmation\r\n 1, # param1\r\n 0, # param2\r\n 0, # param3 \r\n 0, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7 \r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to get capture status using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n def mavlink_get_stream_info(self, the_connection):\r\n #if self.mavlink10():\r\n try:\r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_REQUEST_VIDEO_STREAM_INFORMATION, # command\r\n 0, # Confirmation\r\n 1, # param1\r\n 0, # param2\r\n 0, # param3 \r\n 0, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7 \r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to get stream info using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n def mavlink_reset_camera(self, the_connection):\r\n #if self.mavlink10():\r\n try:\r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_RESET_CAMERA_SETTINGS, # command\r\n 0, # Confirmation\r\n 1, # param1\r\n 0, # param2\r\n 0, # param3 \r\n 0, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7 \r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to reset camera using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n def mavlink_set_camera_trig_interval(self, the_connection, camTriggerCycle, camShutterIntegration):\r\n 
#if self.mavlink10():\r\n try: \r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_DO_SET_CAM_TRIGG_INTERVAL, # command\r\n 0, # Confirmation\r\n camTriggerCycle, # param1\r\n camShutterIntegration, # param2\r\n 0, # param3 \r\n 0, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7 \r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to set camera trip interval using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n def mavlink_set_camera_to_quaternion(self, the_connection, q1, q2, q3, q4):\r\n #if self.mavlink10():\r\n try: \r\n the_connection.mav.command_long_send(\r\n the_connection.target_system, # target_system\r\n the_connection.target_component, # target_component\r\n mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL_QUAT, # command\r\n 0, # Confirmation\r\n q1, # param1\r\n q2, # param2\r\n q3, # param3 \r\n q4, # param4\r\n 0, # param5\r\n 0, # param6\r\n 0) # param7 \r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to set camera to quartenion using command long : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n\r\n # convert to integer param_value from mavlink\r\n #\r\n def mav_param_type_conv( self, typ, value ):\r\n\r\n if ( int(mavutil.mavlink.MAV_PARAM_TYPE_INT64) >= int(typ) ):\r\n return int(struct.unpack('I', struct.pack('f', value))[0]) \r\n else:\r\n return value\r\n\r\n # convert an integer param_value to be sent on mavlink\r\n #\r\n def param_to_mav_msg_conv( self, typ, value ):\r\n\r\n if ( int(mavutil.mavlink.MAV_PARAM_TYPE_INT64) >= int(typ) ):\r\n return float(struct.unpack('f', struct.pack('I', value))[0]) \r\n else:\r\n return value\r\n\r\n # param_value handlers : seperate ones to allow changes in data type etc\r\n #\r\n def mavlink_send_param_value_iso(self, the_connection, val ):\r\n \r\n print(\"\\033[36m sending a parameter : iso \") \r\n d = struct.unpack('f', struct.pack('I', val))[0]\r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_ISO\".encode('ascii'),\r\n d,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 1)\r\n return True\r\n except Exception as err_msg: \r\n print(\"Failed to send param value message 1: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_value_aper(self, the_connection, val ):\r\n\r\n print(\"\\033[31m sending a parameter : aperture \") \r\n d = struct.unpack('f', struct.pack('I', val))[0]\r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_APERTURE\".encode('ascii'),\r\n d,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 2)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 2: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_value_expro(self, the_connection, val ):\r\n\r\n print(\"\\033[32m sending a parameter : expro \") \r\n d = struct.unpack('f', struct.pack('I', val))[0]\r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_EX_PRO_MODE\".encode('ascii'),\r\n d,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 3)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 3: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_value_focus(self, the_connection, val ):\r\n\r\n print(\"\\033[33m sending a parameter : focus mode \") \r\n d = struct.unpack('f', struct.pack('I', val))[0] \r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_FOCUS_MODE\".encode('ascii'),\r\n d,\r\n 
mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 4)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 4: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_value_focus_area(self, the_connection, val ):\r\n\r\n print(\"\\033[34m sending a parameter : focus area \") \r\n p = struct.unpack('f', struct.pack('I', val))[0] \r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_FOCUS_AREA\".encode('ascii'),\r\n p,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 5)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 5: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_value_shut_spd(self, the_connection, val ):\r\n\r\n print(\"\\033[35m sending a parameter : shutter speed \") \r\n d = struct.unpack('f', struct.pack('I', val))[0]\r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_SHUT_SPD\".encode('ascii'),\r\n d,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 6)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 6: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_value_white_bal(self, the_connection, val ):\r\n\r\n print(\"\\033[37m sending a parameter : white balance \") \r\n d = struct.unpack('f', struct.pack('I', val))[0]\r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_WHITE_BAL\".encode('ascii'),\r\n d,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 7)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 7: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_value_still_cap(self, the_connection, val ):\r\n\r\n print(\"\\033[38m sending a parameter : still capture mode \") \r\n d = struct.unpack('f', struct.pack('I', val))[0]\r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_STILL_CAP\".encode('ascii'),\r\n d,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 8)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 8: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_ext_value_iso(self, the_connection, val ):\r\n \r\n print(\"\\033[31m sending an ext parameter iso\") \r\n d = str(val)\r\n try:\r\n the_connection.mav.param_ext_value_send(\r\n \"S_ISO\".encode('ascii'),\r\n d.encode('ascii'),\r\n mavdefs.MAV_PARAM_EXT_TYPE_UINT32,\r\n 8,\r\n 1)\r\n return True\r\n except Exception as err_msg: \r\n print(\"Failed to send param value message 1: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_ext_value_aper(self, the_connection, val ):\r\n\r\n print(\"\\033[32m sending an ext parameter aper\") \r\n d = str(val)\r\n try:\r\n the_connection.mav.param_ext_value_send(\r\n \"S_APERTURE\".encode('ascii'),\r\n d.encode('ascii'),\r\n mavdefs.MAV_PARAM_EXT_TYPE_UINT32,\r\n 8,\r\n 2)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 2: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_ext_value_expro(self, the_connection, val ):\r\n\r\n print(\"\\033[33m sending an ext parameter expro\") \r\n d = str(val)\r\n try:\r\n the_connection.mav.param_ext_value_send(\r\n \"S_EX_PRO_MODE\".encode('ascii'),\r\n d.encode('ascii'),\r\n mavdefs.MAV_PARAM_EXT_TYPE_UINT32,\r\n 8,\r\n 3)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 3: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_ext_value_focus(self, the_connection, val ):\r\n\r\n 
print(\"\\033[34m sending an ext parameter focus\") \r\n d = str(val) \r\n try:\r\n the_connection.mav.param_ext_value_send(\r\n \"S_FOCUS_MODE\".encode('ascii'),\r\n d.encode('ascii'),\r\n mavdefs.MAV_PARAM_EXT_TYPE_UINT32,\r\n 8,\r\n 4)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 4: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_ext_value_focus_area(self, the_connection, val ):\r\n\r\n print(\"\\033[35m sending an ext parameter focus area\") \r\n p = str(val) \r\n try:\r\n the_connection.mav.param_ext_value_send(\r\n \"S_FOCUS_AREA\".encode('ascii'),\r\n p.encode('ascii'),\r\n mavdefs.MAV_PARAM_EXT_TYPE_UINT32,\r\n 8,\r\n 5)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 5: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_ext_value_shut_spd(self, the_connection, val ):\r\n\r\n print(\"\\033[36m sending an ext parameter shutter speed\") \r\n d = str(val)\r\n try:\r\n the_connection.mav.param_ext_value_send(\r\n \"S_SHUT_SPD\".encode('ascii'),\r\n d.encode('ascii'),\r\n mavdefs.MAV_PARAM_EXT_TYPE_UINT32,\r\n 8,\r\n 6)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 6: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_ext_value_white_bal(self, the_connection, val ):\r\n\r\n print(\"\\033[37m sending an ext parameter white balance\") \r\n d = str(val)\r\n try:\r\n the_connection.mav.param_ext_value_send(\r\n \"S_WHITE_BAL\".encode('ascii'),\r\n d.encode('ascii'),\r\n mavdefs.MAV_PARAM_EXT_TYPE_UINT32,\r\n 8,\r\n 7)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 7: %s\" % (err_msg))\r\n return False\r\n\r\n def mavlink_send_param_ext_value_still_cap(self, the_connection, val ):\r\n\r\n print(\"\\033[37m sending an ext parameter still capture\") \r\n d = str(val)\r\n try:\r\n the_connection.mav.param_ext_value_send(\r\n \"S_STILL_CAP\".encode('ascii'),\r\n d.encode('ascii'),\r\n mavdefs.MAV_PARAM_EXT_TYPE_UINT32,\r\n 8,\r\n 8)\r\n return True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 8: %s\" % (err_msg))\r\n return False\r\n \r\n def mavlink_send_param_value(self, the_connection):\r\n \r\n print(\"\\033[36m sending a parameter\") \r\n d = struct.unpack('f', struct.pack('I', 1))[0]\r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_ISO\".encode('ascii'),\r\n d,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 1)\r\n ret = True\r\n except Exception as err_msg: \r\n print(\"Failed to send param value message 1: %s\" % (err_msg))\r\n ret = False\r\n d = struct.unpack('f', struct.pack('I', 10))[0]\r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_APERTURE\".encode('ascii'),\r\n d,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 2)\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 2: %s\" % (err_msg))\r\n ret = False\r\n d = struct.unpack('f', struct.pack('I', 30))[0]\r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_EX_PRO_MODE\".encode('ascii'),\r\n d,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 3)\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to send param value message 3: %s\" % (err_msg))\r\n ret = False\r\n d = struct.unpack('f', struct.pack('I', 5))[0] \r\n try:\r\n the_connection.mav.param_value_send(\r\n \"S_FOCUS_MODE\".encode('ascii'),\r\n d,\r\n mavutil.mavlink.MAV_PARAM_TYPE_UINT32,\r\n 8,\r\n 
    def mavlink_send_camera_information(self, the_connection):
        # vendor and model names must be sent as fixed 32-byte arrays,
        # so build them from the readable strings
        vendor_name = "ABB"
        model_name = "BAC"
        arr_vendor = [0] * 32
        arr_model = [0] * 32
        for i, ch in enumerate(vendor_name):
            arr_vendor[i] = ord(ch)
        for i, ch in enumerate(model_name):
            arr_model[i] = ord(ch)

        # "http://10.0.2.51/cam_defs/alpha_cam_new.xml".encode('ascii'))
        print("\033[33m Sending camera information")
        try:
            the_connection.mav.camera_information_send(
                100,
                arr_vendor,
                arr_model,
                0,
                0,
                0,
                0,
                0,
                0,
                0,
                391,
                1,
                "http://10.0.2.51/cam_defs".encode('ascii'))
            ret = True
        except Exception as err_msg:
            print("Failed to send camera information message : %s" % (err_msg))
            ret = False
        return ret

    def mavlink_send_camera_settings(self, the_connection):
        try:
            the_connection.mav.camera_settings_send(
                self.time_boot_ms,
                self.mode_id,      # Camera mode
                self.zoomLevel,    # Current zoom level (0.0 to 100.0, NaN if not known)
                self.focusLevel)   # Current focus level (0.0 to 100.0, NaN if not known)
            ret = True
        except Exception as err_msg:
            print("Failed to send camera settings message : %s" % (err_msg))
            ret = False
        return ret

    def mavlink_send_storage_information(self, the_connection):
        #
        # byte array of the storage vendor string, forced to uint8 with numpy
        #
        b = bytearray(b'ABB')
        b8_numpy = np.array(b,
np.uint8)\r\n #\r\n # ascii string encoded\r\n #\r\n nm = \"storenm\"\r\n try:\r\n u8_model_name = (nm).encode(\"ascii\")\r\n except Exception as err_msg:\r\n print(\"\\033[32m Failed to SET storage information message : %s \" % (err_msg))\r\n print(f\" sending storage info {u8_model_name} type {type(u8_model_name)}\")\r\n try: \r\n the_connection.mav.storage_information_send(\r\n self.time_boot_ms, \r\n self.storage_id, \r\n self.storage_count, \r\n self.status, \r\n self.total_capacity, \r\n self.used_capacity, \r\n self.available_capacity, \r\n self.read_speed, \r\n self.write_speed,\r\n 1,\r\n np.array(u8_model_name,np.uint8),\r\n 2)\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"\\033[32m Failed to send storage information message : %s type is %s\" % (err_msg,type(u8_model_name)))\r\n ret = False\r\n return ret\r\n \r\n def mavlink_send_camera_capture_status(self, the_connection):\r\n \r\n try: \r\n the_connection.mav.camera_capture_status_send(\r\n self.time_boot_ms, \r\n self.image_status, \r\n self.video_status, \r\n self.image_interval, \r\n self.recording_time_ms, \r\n self.available_capacity,\r\n self.image_count) \r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to send camera capture status message : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n def mavlink_send_video_stream_information(self, the_connection):\r\n #if self.mavlink10():\r\n print(\" !!! sending the video stream information !!! \\n\")\r\n try: \r\n the_connection.mav.video_stream_information_send(\r\n self.stream_id, \r\n self.count, \r\n self.stream_type, \r\n self.Vflags, \r\n self.framerate, \r\n self.Vresolution_h, \r\n self.Vresolution_v, \r\n self.bitrate, \r\n self.rotation, \r\n self.hfov, \r\n #self.videoname, \r\n (self.videoname).encode('ascii'),\r\n (self.video_uri).encode('ascii'))\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to send video stream information message : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n def mavlink_send_camera_image_captured(self, the_connection):\r\n #if self.mavlink10():\r\n b = bytearray(b'[2,3,4,5]')\r\n print(f\"sending cam image cap {self.time_boot_ms}\")\r\n try: \r\n the_connection.mav.camera_image_captured_send(\r\n self.time_boot_ms, \r\n self.time_utc, \r\n self.camera_id, \r\n self.lat, \r\n self.lon, \r\n self.alt, \r\n self.relative_alt, \r\n b, \r\n self.image_index, \r\n self.capture_result, \r\n self.file_url)\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed to send camera image captured message : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n \r\n def mavlink_send_camera_feedback(self, the_connection):\r\n #if self.mavlink10():\r\n print(\"\\033[32m sending camera feedback\")\r\n try: \r\n the_connection.mav.camera_feedback_send( \r\n self.time_usec, \r\n the_connection.target_system, \r\n self.cam_idx, \r\n self.img_idx, \r\n self.lat, \r\n self.lng, \r\n self.alt_msl, \r\n self.alt_rel, \r\n self.roll,\r\n self.pitch, \r\n self.yaw, \r\n self.foc_len, \r\n self.CFflags)\r\n ret = True\r\n print(\"\\033[36m success sending camera feedback\")\r\n except Exception as err_msg:\r\n print(\"Failed to send camera feedback message : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n\r\n # check with this function the order in python !!!!!!!!\r\n #\r\n def mavlink_send_param_ext_ack(self, the_connection, val, status_code, tag ):\r\n \r\n print(\"\\033[38m acking the ext parameter iso\") \r\n d = str(val)\r\n try:\r\n the_connection.mav.param_ext_ack_send(\r\n 
tag.encode('ascii'),
                d.encode('ascii'),
                mavdefs.MAV_PARAM_EXT_TYPE_UINT32,
                status_code)
            ret = True
        except Exception as err_msg:
            print("Failed to send param ext ack message: %s" % (err_msg))
            ret = False
        return ret

    def writeParamSetFromMavLink(self, msgString, mavObj, dataRcv, the_connection):
        #
        # tag -> (camera setter, PARAM_VALUE sender); param ids can arrive
        # padded, so match with find() in a fixed order (S_FOCUS_AREA is
        # checked before S_FOCUS_MODE, mirroring the original chain)
        #
        handlers = [
            ("S_ISO",         mavObj.setMavIsoModeData,      self.mavlink_send_param_value_iso),
            ("S_APERTURE",    mavObj.setMavApertureData,     self.mavlink_send_param_value_aper),
            ("S_EX_PRO_MODE", mavObj.setMavExProData,        self.mavlink_send_param_value_expro),
            ("S_FOCUS_AREA",  mavObj.setMavFocusAreaData,    self.mavlink_send_param_value_focus_area),
            ("S_FOCUS_MODE",  mavObj.setMavFocusData,        self.mavlink_send_param_value_focus),
            ("S_SHUT_SPD",    mavObj.setMavShutterData,      self.mavlink_send_param_value_shut_spd),
            ("S_WHITE_BAL",   mavObj.setMavWhiteBalData,     self.mavlink_send_param_value_white_bal),
            ("S_STILL_CAP",   mavObj.setMavStillCapModeData, self.mavlink_send_param_value_still_cap),
        ]

        if (len(msgString) == 0):
            print("zero length tag passed")
            return False

        for tag, setter, sender in handlers:
            if msgString.find(tag) != -1:
                # write the value to the camera first, then echo it back
                # to the GCS as a PARAM_VALUE message
                if setter(dataRcv) == True:
                    return sender(the_connection, dataRcv)
                return False

        print("unsupported variable name %s to val=%d :: NOT SET" % (msgString, dataRcv))
        return False
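    # A minimal sketch (ours): MAVLink param ids are fixed-width and may
    # arrive padded, which is why the dispatch above matches tags with
    # find() in a fixed order rather than comparing with ==.
    @staticmethod
    def _match_tag_demo(param_id="S_FOCUS_AREA\x00\x00\x00\x00"):
        for tag in ("S_ISO", "S_APERTURE", "S_EX_PRO_MODE", "S_FOCUS_AREA",
                    "S_FOCUS_MODE", "S_SHUT_SPD", "S_WHITE_BAL", "S_STILL_CAP"):
            if param_id.find(tag) != -1:
                return tag              # -> "S_FOCUS_AREA" for the padded id above
        return None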
    def readParamSetFromMavLink(self, msgString, mavObj, the_connection):
        #
        # tag -> (request-all bit, camera getter, PARAM_VALUE sender)
        #
        handlers = [
            ("S_ISO",         mavlinkSonyCamWriteVals.ParamIso,       mavObj.getMavIsoModeData,      self.mavlink_send_param_value_iso),
            ("S_APERTURE",    mavlinkSonyCamWriteVals.ParamAperture,  mavObj.getMavApertureData,     self.mavlink_send_param_value_aper),
            ("S_EX_PRO_MODE", mavlinkSonyCamWriteVals.ParamExPro,     mavObj.getMavExProData,        self.mavlink_send_param_value_expro),
            ("S_FOCUS_AREA",  mavlinkSonyCamWriteVals.ParamFocusArea, mavObj.getMavFocusAreaData,    self.mavlink_send_param_value_focus_area),
            ("S_FOCUS_MODE",  mavlinkSonyCamWriteVals.ParamFocus,     mavObj.getMavFocusData,        self.mavlink_send_param_value_focus),
            ("S_SHUT_SPD",    mavlinkSonyCamWriteVals.ParamShutSpd,   mavObj.getMavShutterData,      self.mavlink_send_param_value_shut_spd),
            ("S_WHITE_BAL",   mavlinkSonyCamWriteVals.ParamWhiteBala, mavObj.getMavWhiteBalData,     self.mavlink_send_param_value_white_bal),
            ("S_STILL_CAP",   mavlinkSonyCamWriteVals.ParamStillCap,  mavObj.getMavStillCapModeData, self.mavlink_send_param_value_still_cap),
        ]

        if (len(msgString) == 0):
            print("zero length tag passed")
            return False

        for tag, bit, getter, sender in handlers:
            if msgString.find(tag) != -1:
                # set the bit to enable a full read of the parameter
                with mavObj.mav_req_all_param.get_lock():
                    mavObj.mav_req_all_param.value |= bit
                v, p, r = getter()
                if (r == True):
                    return sender(the_connection, v)
                return False

        print("unsupported variable name %s :: NOT SET" % (msgString))
        return False
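    # A minimal sketch (ours; the real bit values live on mavlinkSonyCamWriteVals):
    # mav_req_all_param is a shared bitmask, and each read request ORs in the
    # parameter's bit so the polling task knows which values to refresh.
    @staticmethod
    def _req_bitmask_demo(iso_bit=0x01, aper_bit=0x02):
        pending = 0
        pending |= iso_bit              # S_ISO read requested
        pending |= aper_bit             # S_APERTURE read requested
        # the poller would test and clear each bit as it services the read
        return (pending & iso_bit) != 0 and (pending & aper_bit) != 0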
    def writeParamExtSetFromMavLink(self, msgString, mavObj, dataRcv, the_connection):
        #
        # tag -> camera setter; every PARAM_EXT_SET is acknowledged with
        # PARAM_EXT_ACK, ACCEPTED on a successful camera write, FAILED otherwise
        #
        handlers = [
            ("S_ISO",         mavObj.setMavIsoModeData),
            ("S_APERTURE",    mavObj.setMavApertureData),
            ("S_EX_PRO_MODE", mavObj.setMavExProData),
            ("S_FOCUS_AREA",  mavObj.setMavFocusAreaData),
            ("S_FOCUS_MODE",  mavObj.setMavFocusData),
            ("S_SHUT_SPD",    mavObj.setMavShutterData),
            ("S_WHITE_BAL",   mavObj.setMavWhiteBalData),
            ("S_STILL_CAP",   mavObj.setMavStillCapModeData),
        ]

        if (len(msgString) == 0):
            print("zero length tag passed")
            return False

        for tag, setter in handlers:
            if msgString.find(tag) != -1:
                if setter(dataRcv) == True:
                    return self.mavlink_send_param_ext_ack(the_connection, dataRcv,
                                                           mavutil.mavlink.PARAM_ACK_ACCEPTED, tag)
                self.mavlink_send_param_ext_ack(the_connection, dataRcv,
                                                mavutil.mavlink.PARAM_ACK_FAILED, tag)
                return False

        print("unsupported variable name %s to val=%d :: NOT SET" % (msgString, dataRcv))
        return False
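    # A minimal sketch (ours, hypothetical helper): the extended parameter
    # protocol expects every PARAM_EXT_SET to be answered with a PARAM_EXT_ACK,
    # which is the result mapping the write path above implements:
    @staticmethod
    def _ext_ack_code_demo(write_ok):
        return (mavutil.mavlink.PARAM_ACK_ACCEPTED if write_ok
                else mavutil.mavlink.PARAM_ACK_FAILED)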
(msgString.find(\"S_WHITE_BAL\") == -1):\r\n #elif (re.search(patternWhiBal, msgString.upper())==True): \r\n if ((mavObj.setMavWhiteBalData( dataRcv )) == True):\r\n ret = self.mavlink_send_param_ext_ack( the_connection, dataRcv, mavutil.mavlink.PARAM_ACK_ACCEPTED, \"S_WHITE_BAL\" )\r\n else:\r\n self.mavlink_send_param_ext_ack( the_connection, dataRcv, mavutil.mavlink.PARAM_ACK_FAILED, \"S_WHITE_BAL\" ) \r\n elif not (msgString.find(\"S_STILL_CAP\") == -1):\r\n #elif (re.search(patternStCa, msgString.upper())==True): \r\n if ((mavObj.setMavStillCapModeData( dataRcv )) == True):\r\n ret = self.mavlink_send_param_ext_ack( the_connection, dataRcv, mavutil.mavlink.PARAM_ACK_ACCEPTED, \"S_STILL_CAP\" )\r\n else:\r\n self.mavlink_send_param_ext_ack( the_connection, dataRcv, mavutil.mavlink.PARAM_ACK_FAILED, \"S_STILL_CAP\" ) \r\n else:\r\n print(\"unsupported variable name %s to val=%d :: NOT SET \"%(msgString,dataRcv))\r\n return False\r\n\r\n return ret\r\n \r\n def readParamExtSetFromMavLink( self, msgString, mavObj, the_connection ):\r\n\r\n # must be EXACT tag match\r\n #patternISO = re.compile(r\"\\bS_ISO\\b\")\r\n #patternAper = re.compile(r\"\\bS_APERTURE\\b\")\r\n #patternExPro = re.compile(r\"\\bS_EX_PRO_MODE\\b\")\r\n #patternFocus = re.compile(r\"\\bS_FOCUS_MODE\\b\")\r\n #patternFocA = re.compile(r\"\\bS_FOCUS_AREA\\b\")\r\n #patternShSp = re.compile(r\"\\bS_SHUT_SPD\\b\") \r\n #patternWhiBal = re.compile(r\"S_WHITE_BAL\") \r\n #patternStCa = re.compile(r\"\\bS_STILL_CAP\\b\") \r\n\r\n v = 0\r\n p = 0\r\n r = False\r\n\r\n if (len(msgString) == 0):\r\n print(\"zero length tag passed\")\r\n return False\r\n \r\n if not (msgString.find(\"S_ISO\") == -1):\r\n #if (re.search(patternISO, msgString.upper())==True): \r\n v, p, r = mavObj.getMavIsoModeData( )\r\n if (r == True):\r\n if ( self.mavlink_send_param_ext_value_iso( the_connection, v ) == True ):\r\n with mavObj.mav_ext_req_all_param.get_lock():\r\n mavObj.mav_ext_req_all_param.value |= mavObj.ParamIso # >>> set the bit to enable full read of parameter\r\n return r\r\n return False \r\n elif not (msgString.find(\"S_APERTURE\") == -1):\r\n #elif (re.search(patternAper, msgString.upper())==True): \r\n v, p, r = mavObj.getMavApertureData( )\r\n if (r == True):\r\n if ( self.mavlink_send_param_ext_value_aper( the_connection, v ) == True ):\r\n with mavObj.mav_ext_req_all_param.get_lock(): \r\n mavObj.mav_ext_req_all_param.value |= mavObj.ParamAperture # >>> set the bit to enable full read of parameter \r\n return r\r\n return False \r\n elif not (msgString.find(\"S_EX_PRO_MODE\") == -1):\r\n #elif (re.search(patternExPro, msgString.upper())==True): \r\n v, p, r = mavObj.getMavExProData( )\r\n if (r == True):\r\n if ( self.mavlink_send_param_ext_value_expro( the_connection, v ) == True ):\r\n with mavObj.mav_ext_req_all_param.get_lock(): \r\n mavObj.mav_ext_req_all_param.value |= mavObj.ParamExPro # >>> set the bit to enable full read of parameter\r\n return r\r\n return False \r\n elif not (msgString.find(\"S_FOCUS_AREA\") == -1):\r\n #elif (re.search(patternFocA, msgString.upper())==True): \r\n v, p, r = mavObj.getMavFocusAreaData( )\r\n if (r == True):\r\n if ( self.mavlink_send_param_ext_value_focus_area( the_connection, v ) == True ):\r\n with mavObj.mav_ext_req_all_param.get_lock(): \r\n mavObj.mav_ext_req_all_param.value |= mavObj.ParamFocusArea # >>> set the bit to enable full read of parameter \r\n return r\r\n return False \r\n elif not (msgString.find(\"S_FOCUS_MODE\") == -1):\r\n #elif (re.search(patternFocus, 
    #
    # process the incoming messages received
    #
    def process_messages_from_connection(self, the_connection, sharedObj):
        #"""
        #This runs continuously. 
The mavutil.recv_match() function will call mavutil.post_message()\r\n #any time a new message is received, and will notify all functions in the master.message_hooks list.\r\n #\"\"\"\r\n loop = 5\r\n while loop >= 1:\r\n #while True:\r\n print(\"im receiving.............\")\r\n #time.sleep(0.05)\r\n #self.update_uptime_label( )\r\n #self.update_utc_label( )\r\n #\r\n # wait heartbeat (only the GCS does this )\r\n # m = the_connection.recv_match(type=\"HEARTBEAT\", blocking=True, timeout=5)\r\n #\r\n # you can also use type lists like this \r\n # type=['COMMAND_LONG,RC_CHANNELS']\r\n #\r\n #msg = the_connection.recv_match(blocking=True, timeout=5)\r\n msg = the_connection.recv_match(blocking=True, timeout=1)\r\n if (msg is None): # timeout with nothing just return \r\n return\r\n if ( the_connection.target_system == msg.get_srcSystem() ): # check this and eliminate spurious messages if needed\r\n print(f\"data read {msg.get_type()}\")\r\n print(f\"connection {the_connection.target_system} == {msg.get_srcSystem()}\")\r\n last_timestamp = msg._timestamp\r\n #\r\n # These are test messages to check the receive end !!!!\r\n #\r\n #self.mavlink_send_camera_feedback( the_connection )\r\n #self.mavlink_send_camera_information(the_connection)\r\n #self.mavlink_send_storage_information(the_connection)\r\n #self.mavlink_send_camera_capture_status(the_connection)\r\n #print(f\" video stream returned {self.mavlink_send_video_stream_information(the_connection)}\")\r\n #self.mavlink_send_camera_image_captured(the_connection)\r\n #the_connection.mav.camera_feedback_send( 1000, 1, 1, 22, 21, 10, 30, 21, 2, 3, 5, 2, 3)\r\n #the_connection.mav.gps_raw_int_send( 1000, self.g_count, 77, 66, 76, 3, 1, 2, 3, 5)\r\n the_connection.mav.vibration_send( 1000, 1, 1, 22, 21, 10, 30 )\r\n #self.mavlink_send_param_value(the_connection)\r\n #print(\"FTP request for XML file .... I'm sending it as my payload\")\r\n #try:\r\n # f = open(self.CAM_XML_FILE,'r')\r\n # #payload = f.read() not raw read but as bytes below\r\n # lab = np.fromfile(f, dtype=np.uint8)\r\n # f.close()\r\n #except Exception as e:\r\n # print(f\" XML file read exception {e}\") \r\n #self.mavlink_do_ftp_send( the_connection, self.NETWORK_ID, lab)\r\n self.g_count = self.g_count + 1\r\n if not msg:\r\n print(\"\\033[31m no msg ! \\033[0m\")\r\n return\r\n if msg.get_type() == \"BAD_DATA\":\r\n self.ACK_ERROR = self.GOT_BAD\r\n self.errRCV_COMMAND = 0\r\n self.errRPM2 = 0\r\n if mavutil.all_printable(msg.data):\r\n sys.stdout.write(msg.data)\r\n sys.stdout.flush()\r\n elif msg.get_type() == 'PARAM_REQUEST_LIST':\r\n # this is a dummy message to reply immediately for now\r\n #\r\n # out for now update sent right away self.mavlink_send_param_value(the_connection)\r\n #\r\n with sharedObj.mav_req_all_param.get_lock():\r\n sharedObj.mav_req_all_param.value = mavlinkSonyCamWriteVals.MAV_REQ_ALL_PARAM\r\n print(\"\\033[35m PARAM_REQUEST_LIST was sent - shared object set to %d\" % (sharedObj.mav_req_all_param.value))\r\n # ===== TRAP ======\r\n #exit(99)\r\n elif msg.get_type() == 'PARAM_EXT_REQUEST_LIST':\r\n #\r\n #\r\n with sharedObj.mav_ext_req_all_param.get_lock(): \r\n sharedObj.mav_ext_req_all_param.value = mavlinkSonyCamWriteVals.MAV_REQ_ALL_PARAM\r\n print(\"\\033[35m PARAM_EXT_REQUEST_LIST was sent - shared object set to %d\" % (sharedObj.mav_ext_req_all_param.value)) \r\n # ===== TRAP ======\r\n #exit(99) \r\n elif msg.get_type() == 'PARAM_SET':\r\n #\r\n # for testing...... 
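                # Worked example (ours): if the GCS sets S_ISO to 800, the
                # PARAM_SET float field carries the bit pattern of uint32 800:
                #   struct.pack('f', msg.param_value) -> b'\x20\x03\x00\x00'
                #   struct.unpack('I', ...)[0]        -> 800  (0x320)
                # i.e. the inverse of the bit-cast used by the param senders.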
# self.mavlink_send_ext_param_value(the_connection)
                #
                # the value is sent as a float but is really an integer,
                # so unpack the float's bit pattern as a uint32
                # ref:- https://gist.github.com/AlexEshoo/d3edc53129ed010b0a5b693b88c7e0b5
                #
                m = struct.unpack('I', struct.pack('f', msg.param_value))[0]
                ee = self.mav_param_type_conv(msg.param_type, msg.param_value)
                print(f"\033[36m Values mavlink :: {m} from func {ee}")
                if (self.writeParamSetFromMavLink(msg.param_id, sharedObj, ee, the_connection) == True):
                    print(f"\033[33m PARAM_SET was sent for {msg.param_id} val {ee} type {msg.param_type} really sent {m} \033[0m")
                else:
                    print("\033[31m PARAM_SET write fail for %s :: %d type %d \033[0m" % (msg.param_id, ee, msg.param_type))
            elif msg.get_type() == 'PARAM_REQUEST_READ':
                if (self.readParamSetFromMavLink(msg.param_id, sharedObj, the_connection) == True):
                    print(f"\033[34m Success reading param {msg.param_id} \033[0m")
                else:
                    print(f"\033[31;46m Error reading param {msg.param_id} \033[0m")
            elif msg.get_type() == 'PARAM_EXT_REQUEST_READ':
                if (self.readParamExtSetFromMavLink(msg.param_id, sharedObj, the_connection) == True):
                    print(f"\033[32m Success reading EXT param {msg.param_id} \033[0m")
                else:
                    print(f"\033[31;43m Error sending param {msg.param_id} \033[0m")
            elif msg.get_type() == 'PARAM_EXT_SET':
                #
                # senders differ: param_value may arrive as a number, a str,
                # or ascii bytes, so normalise it to an int before writing
                #
                converted = True
                valueSet = 0
                try:
                    valueSet = int(msg.param_value)
                except (TypeError, ValueError):
                    try:
                        valueSet = int(msg.param_value.decode('ascii'))
                    except Exception as err_msg:
                        print(f"\033[31m PARAM_EXT_SET :: Error converting type {err_msg} it says {msg.param_value} \033[0m")
                        converted = False
                if ((converted == True) and (self.writeParamExtSetFromMavLink(msg.param_id, sharedObj, valueSet, the_connection) == True)):
                    print("\033[35m PARAM_EXT_SET :: was sent for %s :: %d \033[0m" % (msg.param_id, valueSet))
                else:
                    print("\033[31m PARAM_EXT_SET :: write fail for %s :: %d \033[0m" % (msg.param_id, valueSet))
            elif msg.get_type() == 'PARAM_VALUE':
                print(f"Received a param value for :- {msg.param_id} = {msg.param_value}")
            elif msg.get_type() == 'RC_CHANNELS':
                print("RC Channel message (system %u component %u)\n" % (the_connection.target_system, the_connection.target_component))
            elif msg.get_type() == 'COMMAND_LONG':
                print("!!!!!! Long message received (system %u component %u)\n" % (the_connection.target_system, the_connection.target_component))
                print("in cmd long ... 
ACK RES %s %u \\n\" % (self.ACK_RESULT,mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_INFORMATION))\r\n print(\"Command %u p1 %u p2 %u p3 %u p4 %u \\n\" % (msg.command, msg.param1, msg.param2, msg.param3, msg.param4))\r\n print(\"p5 %u p6 %u p7 %u \\n\" % (msg.param5, msg.param6, msg.param7)) \r\n\r\n # print(msg.get_payload())\r\n # print(msg.get_msgbuf())\r\n # print(msg.get_fieldnames())\r\n # print(msg.get_type())\r\n #\r\n # print the message recieved in json\r\n #\r\n print(msg.to_dict())\r\n\r\n if not (self.ACK_RESULT == mavutil.mavlink.MAV_RESULT_ACCEPTED):\r\n self.RCV_COMMAND = int(msg.command)\r\n print(f\"\\033[35m IN LOOP :: self ACK RES {self.ACK_RESULT} RCV {self.RCV_COMMAND} == {mavutil.mavlink.MAV_CMD_REQUEST_MESSAGE}\")\r\n \r\n if (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_REQUEST_MESSAGE):\r\n self.RPM2 = int(msg.param1) \r\n print(f\"Is it here {self.RPM2} == {self.CAMERA_INFORMATION}\")\r\n if (self.RPM2 == self.CAMERA_INFORMATION): #camera_information\r\n self.type_of_msg = 6500\r\n print(\"\\033[34m >>>>>> camera information \\033[36m >>>>>>>>>>>>>>>>>>>>>>\")\r\n self.mavlink_send_camera_information(the_connection)\r\n elif (self.RPM2 == self.CAMERA_SETTINGS): #camera_settings\r\n self.type_of_msg = 6501 \r\n elif (self.RPM2 == self.STORAGE_INFORMATION): #storage information\r\n self.type_of_msg = 6502\r\n elif (self.RPM2 == self.CAMERA_CAPTURE_STATUS): #camera capture status\r\n self.type_of_msg = 6503 \r\n elif (self.RPM2 == mavutil.mavlink.MAVLINK_MSG_ID_CAMERA_IMAGE_CAPTURED): #retrieve lost images\r\n self.type_of_msg = 6504 \r\n self.Got_Param1 = int(msg.param2)\r\n elif (self.RPM2 == 269): #video stream\r\n self.type_of_msg = 6505 \r\n else:\r\n self.type_of_msg = 0\r\n print(f\"camera info received {self.RPM2}\")\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_INFORMATION):\r\n print(\"request camera Info OLD MESSAGE.....\")\r\n #\r\n # Send camera information\r\n #\r\n self.mavlink_send_camera_information(the_connection)\r\n if (msg.param1 == 1):\r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_INFORMATION\r\n print(\"=========== !! send to QGround Camera Information !! ==========\")\r\n self.mavlink_send_camera_information(the_connection)\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_REQUEST_VIDEO_STREAM_INFORMATION):\r\n print(\"request video stream Info OLD MESSAGE.....\")\r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_REQUEST_VIDEO_STREAM_INFORMATION\r\n print(\"=========== !! send to QGround VideoStream !! ==========\")\r\n self.mavlink_send_video_stream_information(the_connection)\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_SETTINGS):\r\n print(\"request camera settings Info OLD MESSAGE.....\")\r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_SETTINGS\r\n print(\"\\033[35m =========== !! send to QGround Camera settings !! ========== \\033[0m\")\r\n self.mavlink_send_camera_settings(the_connection)\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_CAPTURE_STATUS):\r\n print(\"request camera capture status Info OLD MESSAGE.....\")\r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_REQUEST_CAMERA_CAPTURE_STATUS\r\n print(\"\\033[36m =========== !! send to QGround Camera capture status !! 
========== \033[0m")
                        self.mavlink_send_camera_capture_status(the_connection)
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_REQUEST_STORAGE_INFORMATION):
                        print("request storage info Info OLD MESSAGE.....")
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_REQUEST_STORAGE_INFORMATION
                        print("\033[34m =========== !! send to QGround Camera storage_info !! ========== \033[0m")
                        self.mavlink_send_storage_information(the_connection)
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_SET_RELAY):
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_SET_RELAY
                        print(f"\033[31m >>>>> Got a message to set the RelayNo {msg.param1} to state {msg.param2}")
                        self.raspberry_pi3_set_relay(msg.param1, msg.param2)
                        self.Got_Param1 = msg.param1
                        self.Got_Param2 = msg.param2
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_START_CAPTURE):
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_START_CAPTURE
                        self.Got_Param1 = msg.param1
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_STOP_CAPTURE):
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_STOP_CAPTURE
                        self.Got_Param1 = msg.param1
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_IMAGE_START_CAPTURE):
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_IMAGE_START_CAPTURE
                        self.Got_Param1 = msg.param2
                        self.Got_Param2 = msg.param3
                        self.Got_Param3 = msg.param4
                        sharedObj.take_continuos = True
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_IMAGE_STOP_CAPTURE):
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_IMAGE_STOP_CAPTURE
                        self.Got_Param1 = msg.param3
                        self.Got_Param2 = msg.param4
                        sharedObj.take_continuos = False
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_START_STREAMING):
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_START_STREAMING
                        self.Got_Param1 = msg.param1
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_VIDEO_STOP_STREAMING):
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_VIDEO_STOP_STREAMING
                        self.Got_Param1 = msg.param1
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_SET_CAMERA_MODE):
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_SET_CAMERA_MODE
                        self.Got_Param1 = msg.param2
                    elif (self.RCV_COMMAND == mavdefs.MAV_CMD_SET_CAMERA_ZOOM):
                        self.type_of_msg = mavdefs.MAV_CMD_SET_CAMERA_ZOOM
                        self.Got_Param1 = msg.param1
                        self.Got_Param2 = msg.param2
                        self.Got_Param3 = msg.param3
                    elif (self.RCV_COMMAND == mavdefs.MAV_CMD_SET_CAMERA_FOCUS):
                        self.type_of_msg = mavdefs.MAV_CMD_SET_CAMERA_FOCUS
                        self.Got_Param1 = msg.param1
                        self.Got_Param2 = msg.param2
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONFIGURE):
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONFIGURE
                        self.Got_Param1 = msg.param1
                        self.Got_Param2 = msg.param2
                        self.Got_Param3 = msg.param3
                        self.Got_Param4 = msg.param4
                        self.Got_Param5 = msg.param5
                        self.Got_Param6 = msg.param6
                        self.Got_Param7 = msg.param7
                    elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL):
                        #
                        # Taking a picture is hard coded here as it needs no delay
                        #
                        self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_DIGICAM_CONTROL
                        print(f"\033[33m DO DIGICAM CONTROL {msg.param5} {msg.param7}")
                        if ((int(msg.param5) == 1) and (int(msg.param7) == 0)):
                            sharedObj.take_photo.value = True
                            print("asked to take a photo ....... 
set it to 1\")\r\n #time.sleep(10)\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n self.Got_Param3 = msg.param3\r\n self.Got_Param4 = msg.param4\r\n self.Got_Param5 = msg.param5\r\n self.Got_Param6 = msg.param6\r\n self.Got_Param7 = msg.param7\r\n print(f\"\\033[36m DO DIGICAM CONTROL COMPLETED \\033[0m {msg.param5} {msg.param7} \")\r\n #time.sleep(10)\r\n #exit(100)\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_CONTROL_VIDEO):\r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_CONTROL_VIDEO\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n self.Got_Param3 = msg.param3\r\n self.Got_Param4 = msg.param4\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_SET_CAM_TRIGG_INTERVAL):\r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_SET_CAM_TRIGG_INTERVAL\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_RESET_CAMERA_SETTINGS):\r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_RESET_CAMERA_SETTINGS\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n sharedObj.reset_cam = True\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL_QUAT):\r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_MOUNT_CONTROL_QUAT\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n self.Got_Param3 = msg.param3\r\n self.Got_Param4 = msg.param4\r\n #elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_GIMBAL_MANAGER_PITCHYAW):\r\n # self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_GIMBAL_MANAGER_PITCHYAW\r\n # self.Got_Param1 = msg.param1\r\n # self.Got_Param2 = msg.param2\r\n # self.Got_Param3 = msg.param3\r\n # self.Got_Param4 = msg.param4\r\n # self.Got_Param5 = msg.param5\r\n # self.Got_Param6 = msg.param6\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_TRIGGER_CONTROL):\r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_TRIGGER_CONTROL\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n self.Got_Param3 = msg.param3\r\n elif (self.RCV_COMMAND == 2004): # MAV_CMD_CAMERA_TRACK_POINT=2004\r\n self.type_of_msg = 2004;\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n self.Got_Param3 = msg.param3\r\n elif (self.RCV_COMMAND == 2005): # MAV_CMD_CAMERA_TRACK_RECTANGLE=2005\r\n self.type_of_msg = 2005;\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n self.Got_Param3 = msg.param3\r\n elif (self.RCV_COMMAND == 2010): # MAV_CMD_CAMERA_STOP_TRACKING=2010\r\n self.type_of_msg = 2010;\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_STORAGE_FORMAT): \r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_STORAGE_FORMAT\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_DO_SET_SERVO):\r\n self.type_of_msg = mavutil.mavlink.MAV_CMD_DO_SET_SERVO\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n print(\"\\033[32m saw the relay command come in\")\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAVLINK_MSG_ID_BATTERY_STATUS):\r\n self.type_of_msg = mavutil.mavlink.MAVLINK_MSG_ID_BATTERY_STATUS\r\n self.Got_Param1 = msg.param1\r\n self.Got_Param2 = msg.param2\r\n print(\"\\033[32m saw the battery status request come in\")\r\n elif (self.RCV_COMMAND == mavutil.mavlink.MAV_CMD_PREFLIGHT_STORAGE):\r\n print(f\"\\033[33m Asks for storage params paramStorage={msg.param1} missionStorage={msg.param2} \\033[0m\")\r\n elif (self.RCV_COMMAND == 42428):\r\n print(f\"\\033[37m Command 42428 was sent not sure what im meant to do..... 
\\033[0m\")\t\r\n else:\r\n print(f\"got this id {self.RCV_COMMAND} {msg.command}\")\r\n self.RPM2 = 0\r\n self.type_of_msg = self.RCV_COMMAND\r\n self.ACK_RESULT = mavutil.mavlink.MAV_RESULT_ACCEPTED\r\n print(\"\\033[36m >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> ACK RES %d %d\"%(self.ACK_RESULT,mavutil.mavlink.MAV_RESULT_ACCEPTED))\r\n print(\"\\033[31m\")\r\n else:\r\n self.ACK_ERROR = self.GOT_ERROR\r\n self.errRCV_COMMAND = msg.command\r\n self.errRPM2 = msg.param1\r\n print(f\"Error ACK message send for multiple request @ cmd :: {self.errRCV_COMMAND} rpm :: {self.errRPM2}\")\r\n \r\n elif msg.get_type() == 'CAMERA_IMAGE_CAPTURED':\r\n print(\"Cam Cap message received (system %u component %u)\\n\" % (the_connection.target_system, the_connection.target_component)) \r\n print(\"lat %u lon %u alt %u\\n\" % (msg.lat, msg.lon, msg.alt)) \r\n print(\"URL %u)\\n\" % (msg.file_url)) \r\n elif msg.get_type() == 'GPS_RAW_INT':\r\n the_connection.mav.gps_raw_int_send( 1000, 1, 22, 21, 1, 3, 1, 2, 3, 5)\r\n elif msg.get_type() == 'CAMERA_FEEDBACK':\r\n print(\"Camera Feedback request was made\")\r\n #the_connection.mav.camera_feedback_send( 1000, 1, 1, 22, 21, 10, 30, 21, 2, 3, 5, 2, 3)\r\n elif msg.get_type() == 'FILE_TRANSFER_PROTOCOL':\r\n print(\"FTP request for XML file .... I'm sending it as my payloads in chunks of 251 bytes\")\r\n lab = []\r\n try:\r\n f = open(self.CAM_XML_FILE,'r')\r\n #payload = f.read() not raw read but as bytes below\r\n lab = np.fromfile(f, dtype=np.uint8)\r\n f.close()\r\n except Exception as e:\r\n print(f\" XML file read exception {e}\") \r\n self.mavlink_do_ftp_send( the_connection, self.NETWORK_ID, lab)\r\n elif msg.get_type() == 'REQUEST_DATA_STREAM':\r\n print(\"REQUEST DATA STREAM :: start %u id %u req_rte %u\\n\" % (msg.start_stop, msg.req_stream_id, msg.req_message_rate))\r\n elif msg.get_type() == 'STATUSTEXT':\r\n print(\"STATUSTEXT :: text %s \" % (msg.text)) \r\n elif msg.get_type() == 'HEARTBEAT':\r\n print(\"HEARTBEAT :: src %s type %s auto %s sys %s\" % (msg.get_srcSystem(), msg.type,msg.autopilot,msg.system_status)) \r\n else:\r\n print(f\"unsupported command :: {msg.get_type()}\") \r\n #time.sleep(0.05)\r\n loop = loop - 1\r\n\r\n def mavlink_send_ack_command(self, the_connection, cmd, rpm2, pro, res):\r\n if (self.mavlink20(the_connection) == True):\r\n print(f\"\\033[31m sending an ACK {pro}\")\r\n try:\r\n the_connection.mav.command_ack_send(\r\n int(cmd), # command\r\n int(res), # result\r\n int(pro), # progress\r\n int(rpm2), # result_param2\r\n the_connection.target_system, # target_system\r\n the_connection.target_component) # target_component\r\n print(f\"ACK sent {rpm2} {res}\")\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed 1st ACK message : %s\" % (err_msg))\r\n try:\r\n the_connection.mav.command_ack_send(\r\n int(cmd), # command\r\n int(res)) # result\r\n print(f\"ACK sent {rpm2} {res}\")\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed 2nd ACK message : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n elif (self.mavlink10(the_connection) == True):\r\n print(f\"\\033[31m sending an ACK {pro}\")\r\n try:\r\n the_connection.mav.command_ack_send(\r\n int(cmd), # command\r\n int(res)) # result\r\n print(f\"ACK sent {rpm2} {res}\")\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed 1st ACK message : %s\" % (err_msg))\r\n try:\r\n the_connection.mav.command_ack_send(\r\n int(cmd), # command\r\n int(res), # result\r\n int(pro), # progress\r\n int(rpm2), # result_param2\r\n 
the_connection.target_system, # target_system\r\n the_connection.target_component) # target_component\r\n print(f\"ACK sent {rpm2} {res}\")\r\n ret = True\r\n except Exception as err_msg:\r\n print(\"Failed 2nd ACK message : %s\" % (err_msg))\r\n ret = False\r\n return ret\r\n\r\n#\r\n# ============================================================= multi-process threads =====================================================================\r\n# Camera Action Routines\r\n# Mavlink Response Signals\r\n#\r\n\r\n \r\ndef doAlphaCameraExpro( mySonyCam, mav2SonyVals, expro, tm_upd_disable=False, time_delta = 1000 ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting Exposure Program :', p.name, p.pid)\r\n #\r\n # initialise general program control flags\r\n #\r\n success = False\r\n timenow = 0\r\n \r\n #\r\n # check to see if mavlink wrote something if so write to cam\r\n # and update the update flag to get the mavlink send\r\n # \r\n success = mySonyCam.setSonyCamExProData( expro, mav2SonyVals )\r\n\r\n #\r\n # Time enabled reading to poll on time_delta\r\n # when this data is written the mavlink task \r\n # should send it to the GCS via mavlink messages\r\n # \r\n if not (tm_upd_disable == True): \r\n timenow = mySonyCam.my_timestamp() \r\n # \r\n if ((timenow - expro.timestamp.value) > time_delta):\r\n if (mySonyCam.getSonyCamExProData( expro )==True):\r\n with expro.timestamp.get_lock():\r\n expro.timestamp.value = timenow\r\n #print(f\"\\033[36m Time Delta occurred {timenow} {expro.timestamp.value}\")\r\n #else:\r\n #print(f\"\\033[34m No time diff {timenow} {expro.timestamp.value}\")\r\n print ('Exiting Exposure Program :', multiprocessing.current_process().name)\r\n \r\ndef sendMavExpro( mySonyCam, expro, ConnID ):\r\n p = multiprocessing.current_process()\r\n print ('Starting Exposure Program:', p.name, p.pid)\r\n success = mySonyCam.sendMavlinkMessageForParamObject( expro, ConnID )\r\n success = mySonyCam.sendMavlinkMessageForParamExtObject( expro, ConnID )\r\n print ('Exiting Exposure Program :', multiprocessing.current_process().name) \r\n\r\ndef doAlphaCameraAperture( mySonyCam, mav2SonyVals, aper, tm_upd_disable=False, time_delta = 1000 ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting Aperture :', p.name, p.pid)\r\n #\r\n # initialise general program control flags\r\n #\r\n success = False\r\n timenow = 0\r\n\r\n #\r\n # check to see if mavlink wrote something if so write to cam\r\n # and update the update flag to get the mavlink send\r\n # \r\n success = mySonyCam.setSonyCamApertureData( aper, mav2SonyVals )\r\n\r\n #\r\n # Time enabled reading to poll on time_delta\r\n # when this data is written the mavlink task \r\n # should send it to the GCS via mavlink messages\r\n # \r\n if not (tm_upd_disable == True): \r\n timenow = mySonyCam.my_timestamp() \r\n # \r\n\r\n if ((timenow - aper.timestamp.value) > time_delta):\r\n if (mySonyCam.getSonyApertureData( aper )==True):\r\n with aper.timestamp.get_lock():\r\n aper.timestamp.value = timenow\r\n print ('Exiting Aperture :', multiprocessing.current_process().name)\r\n \r\ndef sendMavAper( mySonyCam, aper, ConnID ):\r\n p = multiprocessing.current_process()\r\n print ('Starting Mavlink Aperture :', p.name, p.pid)\r\n success = mySonyCam.sendMavlinkMessageForParamObject( aper, ConnID )\r\n success = mySonyCam.sendMavlinkMessageForParamExtObject( aper, ConnID )\r\n print ('Exiting Mavlink Aperture :', multiprocessing.current_process().name)\r\n \r\ndef doAlphaCameraFocusData( mySonyCam, mav2SonyVals, 
focusdata, focusarea, tm_upd_disable=False, time_delta = 1000 ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting Focus :', p.name, p.pid)\r\n #\r\n # initialise general program control flags\r\n #\r\n success = False\r\n timenow = 0\r\n\r\n #\r\n # check to see if mavlink wrote something if so write to cam\r\n # and update the update flag to get the mavlink send\r\n # \r\n success = mySonyCam.setSonyCamFocusData( focusdata, mav2SonyVals )\r\n success = mySonyCam.setSonyCamFocusAreaData( focusarea, mav2SonyVals ) \r\n\r\n #\r\n # Time enabled reading to poll on time_delta\r\n # when this data is written the mavlink task \r\n # should send it to the GCS via mavlink messages\r\n # \r\n if not (tm_upd_disable == True): \r\n timenow = mySonyCam.my_timestamp() \r\n # \r\n if ((timenow - focusdata.timestamp.value) > time_delta):\r\n if (mySonyCam.getSonyCamFocusData( focusdata )==True):\r\n with focusdata.timestamp.get_lock():\r\n focusdata.timestamp.value = timenow\r\n\r\n if ((timenow - focusarea.timestamp.value) > time_delta):\r\n if (mySonyCam.getSonyCamFocusAreaData( focusarea )==True):\r\n with focusarea.timestamp.get_lock():\r\n focusarea.timestamp.value = timenow\r\n print ('Exiting Focus :', multiprocessing.current_process().name)\r\n \r\ndef sendMavFocusData( mySonyCam, focusdata, focusarea, ConnID ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting Mavlink Focus Data :', p.name, p.pid)\r\n #\r\n # check to see if mavlink wrote something if so write to cam\r\n # and update the update flag to get the mavlink send\r\n # \r\n success = mySonyCam.sendMavlinkMessageForParamObject( focusdata, ConnID )\r\n success = mySonyCam.sendMavlinkMessageForParamObject( focusarea, ConnID )\r\n success = mySonyCam.sendMavlinkMessageForParamExtObject( focusdata, ConnID )\r\n success = mySonyCam.sendMavlinkMessageForParamExtObject( focusarea, ConnID )\r\n print ('Exiting Mavlink Focus Data :', multiprocessing.current_process().name)\r\n\r\ndef doAlphaCameraIso( mySonyCam, mav2SonyVals, iso, retries=3, tm_upd_disable=False, time_delta = 1000 ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting ISO set :', p.name, p.pid)\r\n #\r\n # initialise general program control flags\r\n #\r\n success = False\r\n timenow = 0\r\n\r\n print(\"\\033[33m ================== ISO :: in manage function ======================== \\033[0m\")\r\n #\r\n # check to see if mavlink wrote something if so write to cam\r\n # and update the update flag to get the mavlink send\r\n # \r\n retry = 0\r\n while (retry < retries): \r\n if (mySonyCam.setSonyCamISOData( iso, mav2SonyVals ) == True):\r\n print(\"\\033[36m sony cam iso data success \\033[0m\")\r\n break\r\n else:\r\n print(\"\\033[31m sony cam iso data write failure \\033[0m\")\r\n time.sleep(1)\r\n retry += 1\r\n else:\r\n print(\"\\033[31;43m having probs !!! 
gonna reset it \\033[0m\")\r\n reset_usb_camlink() \r\n\r\n #\r\n # Time enabled reading to poll on time_delta\r\n # when this data is written the mavlink task \r\n # should send it to the GCS via mavlink messages\r\n # \r\n if not (tm_upd_disable == True): \r\n timenow = mySonyCam.my_timestamp() \r\n # \r\n if ((timenow - iso.timestamp.value) > time_delta):\r\n if (mySonyCam.getSonyCamISOData( iso )==True):\r\n print(f\"\\033[36;45m ISO timeupdate required @ {iso.timestamp.value} {timenow}\")\r\n with iso.timestamp.get_lock():\r\n iso.timestamp.value = timenow\r\n print ('Exiting ISO Set :', multiprocessing.current_process().name)\r\n \r\ndef sendMavIso( mySonyCam, iso, ConnID ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting ISO :', p.name, p.pid)\r\n #\r\n # check to see if mavlink wrote something if so write to cam\r\n # and update the update flag to get the mavlink send\r\n # \r\n success = mySonyCam.sendMavlinkMessageForParamObject( iso, ConnID )\r\n success = mySonyCam.sendMavlinkMessageForParamExtObject( iso, ConnID )\r\n print ('Exiting ISO :', multiprocessing.current_process().name) \r\n \r\ndef doAlphaCameraShutSpd( mySonyCam, mav2SonyVals, shut_sp, tm_upd_disable=False, time_delta = 1000 ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting Shutter Speed :', p.name, p.pid)\r\n \r\n #\r\n # initialise general program control flags\r\n #\r\n success = False\r\n timenow = 0\r\n\r\n #\r\n # check to see if mavlink wrote something if so write to cam\r\n # and update the update flag to get the mavlink send\r\n # \r\n success = mySonyCam.setSonyCamShutSpdData( shut_sp, mav2SonyVals )\r\n \r\n #\r\n # Time enabled reading to poll on time_delta\r\n # when this data is written the mavlink task \r\n # should send it to the GCS via mavlink messages\r\n # \r\n if not (tm_upd_disable == True): \r\n timenow = mySonyCam.my_timestamp() \r\n # \r\n if ((timenow - shut_sp.timestamp.value) > time_delta):\r\n if (mySonyCam.getSonyCamShutSpdData( shut_sp )==True):\r\n with shut_sp.timestamp.get_lock():\r\n shut_sp.timestamp.value = timenow \r\n print ('Exiting Shutter Speed :', multiprocessing.current_process().name)\r\n \r\ndef sendMavShutSpd( mySonyCam, shut_sp, ConnID ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting Mavlink Shutter Speed :', p.name, p.pid)\r\n #\r\n # check to see if mavlink wrote something if so write to cam\r\n # and update the update flag to get the mavlink send\r\n # \r\n success = mySonyCam.sendMavlinkMessageForParamObject( shut_sp, ConnID )\r\n success = mySonyCam.sendMavlinkMessageForParamExtObject( shut_sp, ConnID )\r\n print ('Exiting Mavlink Shutter Speed :', multiprocessing.current_process().name)\r\n \r\ndef doAlphaWhiteBala( mySonyCam, mav2SonyVals, whitebal, tm_upd_disable=False, time_delta = 1000 ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting White Balance :', p.name, p.pid)\r\n \r\n #\r\n # initialise general program control flags\r\n #\r\n success = False\r\n timenow = 0\r\n\r\n #\r\n # check to see if mavlink wrote something if so write to cam\r\n # and update the update flag to get the mavlink send\r\n # \r\n success = mySonyCam.setSonyCamWhiteBalaData( whitebal, mav2SonyVals )\r\n \r\n #\r\n # Time enabled reading to poll on time_delta\r\n # when this data is written the mavlink task \r\n # should send it to the GCS via mavlink messages\r\n # \r\n if not (tm_upd_disable == True): \r\n timenow = mySonyCam.my_timestamp() \r\n # \r\n if ((timenow - whitebal.timestamp.value) > 
time_delta):\r\n            if (mySonyCam.getSonyCamWhiteBalaData( whitebal )==True):\r\n                with whitebal.timestamp.get_lock():\r\n                    whitebal.timestamp.value = timenow \r\n\r\n
    print ('Exiting White Balance :', multiprocessing.current_process().name)\r\n    \r\n
def sendMavWhiteBala( mySonyCam, whitebal, ConnID ):\r\n    p = multiprocessing.current_process()\r\n    print ('Starting Mavlink White Balance :', p.name, p.pid)\r\n    success = mySonyCam.sendMavlinkMessageForParamObject( whitebal, ConnID ) \r\n    success = mySonyCam.sendMavlinkMessageForParamExtObject( whitebal, ConnID ) \r\n    print ('Exiting Mavlink White Balance :', multiprocessing.current_process().name) \r\n    \r\n
def doAlphaCameraStillCap( mySonyCam, mav2SonyVals, stillcap, tm_upd_disable=False, time_delta = 1000 ):\r\n\r\n    #\r\n    # initialise general program control flags\r\n    #\r\n    success = False\r\n    timenow = 0\r\n\r\n
    # use this if you want to make a daemon proc\r\n    p = multiprocessing.current_process()\r\n    print ('Starting Still Capture :', p.name, p.pid)\r\n    #\r\n\r\n    #\r\n    # check to see if mavlink wrote something if so write to cam\r\n    # and update the update flag to get the mavlink send\r\n    # \r\n    success = mySonyCam.setSonyCamStillCapModeData( stillcap, mav2SonyVals ) \r\n    \r\n
    #\r\n    # Time enabled reading to poll on time_delta\r\n    # when this data is written the mavlink task \r\n    # should send it to the GCS via mavlink messages\r\n    # \r\n    if not (tm_upd_disable == True): \r\n        timenow = mySonyCam.my_timestamp() \r\n        # \r\n        if ((timenow - stillcap.timestamp.value) > time_delta):\r\n            if (mySonyCam.getSonyCamStillCapModeData( stillcap )==True):\r\n                with stillcap.timestamp.get_lock():\r\n                    stillcap.timestamp.value = timenow \r\n\r\n
    print ('Exiting Still Capture :', multiprocessing.current_process().name)\r\n\r\n\r\n
# -------------------------------- SEQUENTIAL ------------------------------------------------------------------------------\r\n#\r\n# These tasks run the camera actions sequentially and interface back with the scheduler \r\n# \r\n
def manageAlphaCameraExpro( cam, classObj, pvar, mpc, state_of_task ):\r\n\r\n    state_of_task.value = mavlinkSonyCamWriteVals.STATE_CAM_WRITING\r\n\r\n    doAlphaCameraExpro( cam, classObj, pvar )\r\n\r\n    with mpc.get_lock():\r\n        mpc.value = mavlinkSonyCamWriteVals.FUNC_APER\r\n    print(f\"Task1:: Expro\")\r\n    # advance to the next routine in the queued sequence\r\n    with state_of_task.get_lock():\r\n        state_of_task.value = mavlinkSonyCamWriteVals.STATE_READY \r\n\r\n
#\r\n# this task runs sequentially and doesn't set the variables (attributes) of the class passed to it.\r\n# \r\n
def manageAlphaCameraAperture( cam, classObj, pvar, mpc, state_of_task ):\r\n\r\n    state_of_task.value = mavlinkSonyCamWriteVals.STATE_CAM_WRITING\r\n\r\n    doAlphaCameraAperture( cam, classObj, pvar )\r\n\r\n    with mpc.get_lock():\r\n        mpc.value = mavlinkSonyCamWriteVals.FUNC_FOCUS\r\n    print(f\"Task2:: Aperture\")\r\n    # advance to the next routine in the queued sequence\r\n    with state_of_task.get_lock():\r\n        state_of_task.value = mavlinkSonyCamWriteVals.STATE_READY \r\n\r\n
def manageAlphaCameraFocusData( cam, classObj, pvar, c, mpc, state_of_task ):\r\n\r\n    state_of_task.value = mavlinkSonyCamWriteVals.STATE_CAM_WRITING\r\n\r\n    doAlphaCameraFocusData( cam, classObj, pvar, c )\r\n    \r\n    with mpc.get_lock():\r\n        mpc.value = mavlinkSonyCamWriteVals.FUNC_ISO\r\n    print(f\"Task3:: Focus parameters\")\r\n    # advance to the next routine in the queued sequence\r\n    with state_of_task.get_lock():\r\n        state_of_task.value = mavlinkSonyCamWriteVals.STATE_READY
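\r\n\r\n#\r\n# note: each manage* wrapper above and below follows the same pattern - flag the shared\r\n# task state as CAM_WRITING, run the camera action, advance mpc to the next FUNC_* stage\r\n# in the queued sequence, then flag READY so the scheduler can start the next task\r\n#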
\r\n\r\ndef manageAlphaCameraIso( cam, classObj, pvar, mpc, state_of_task ):\r\n\r\n state_of_task.value = mavlinkSonyCamWriteVals.STATE_CAM_WRITING\r\n\r\n doAlphaCameraIso( cam, classObj, pvar )\r\n \r\n with mpc.get_lock():\r\n mpc.value = mavlinkSonyCamWriteVals.FUNC_SS\r\n print(f\"Task4:: Iso\")\r\n # advance to the next routine in the queued sequence\r\n with state_of_task.get_lock():\r\n state_of_task.value = mavlinkSonyCamWriteVals.STATE_READY \r\n\r\ndef manageAlphaCameraShutSpd( cam, classObj, pvar, mpc, state_of_task ):\r\n\r\n state_of_task.value = mavlinkSonyCamWriteVals.STATE_CAM_WRITING\r\n\r\n doAlphaCameraShutSpd( cam, classObj, pvar )\r\n\r\n with mpc.get_lock():\r\n mpc.value = mavlinkSonyCamWriteVals.FUNC_WB\r\n print(f\"Task5:: Shutter Speed\")\r\n # advance to the next routine in the queued sequence\r\n with state_of_task.get_lock():\r\n state_of_task.value = mavlinkSonyCamWriteVals.STATE_READY \r\n\r\ndef manageAlphaWhiteBala( cam, classObj, pvar, mpc, state_of_task ):\r\n\r\n state_of_task.value = mavlinkSonyCamWriteVals.STATE_CAM_WRITING\r\n\r\n doAlphaWhiteBala( cam, classObj, pvar )\r\n\r\n with mpc.get_lock():\r\n mpc.value = mavlinkSonyCamWriteVals.FUNC_SC\r\n print(f\"Task6:: White Balance\")\r\n # advance to the next routine in the queued sequence\r\n with state_of_task.get_lock():\r\n state_of_task.value = mavlinkSonyCamWriteVals.STATE_READY \r\n\r\ndef manageAlphaCameraStillCap( cam, classObj, pvar, mpc, state_of_task ):\r\n\r\n state_of_task.value = mavlinkSonyCamWriteVals.STATE_CAM_WRITING\r\n\r\n doAlphaCameraStillCap( cam, classObj, pvar )\r\n\r\n with mpc.get_lock():\r\n mpc.value = mavlinkSonyCamWriteVals.FUNC_EX_PRO\r\n print(f\"Task7:: Still Capture\")\r\n # advance to the next routine in the queued sequence\r\n with state_of_task.get_lock():\r\n state_of_task.value = mavlinkSonyCamWriteVals.STATE_READY \r\n \r\ndef sendMavStillCap( mySonyCam, stillcap, ConnID ): \r\n p = multiprocessing.current_process()\r\n print ('Starting Mavlink Still Capture :', p.name, p.pid) \r\n success = mySonyCam.sendMavlinkMessageForParamObject( stillcap, ConnID ) \r\n success = mySonyCam.sendMavlinkMessageForParamExtObject( stillcap, ConnID ) \r\n print ('Exiting Mavlink Still Capture :', multiprocessing.current_process().name) #\r\n\r\ndef mavlinkReqGetParamStillCap( mySonyCam, obj ):\r\n if (mySonyCam.getSonyCamStillCapModeData( obj )==True):\r\n with obj.timestamp.get_lock():\r\n obj.timestamp.value = mySonyCam.my_timestamp()\r\n return True\r\n else:\r\n return False\r\n \r\ndef mavlinkReqGetParamWhiteBala( mySonyCam, obj ):\r\n if (mySonyCam.getSonyCamWhiteBalaData( obj )==True):\r\n with obj.timestamp.get_lock(): \r\n obj.timestamp.value = mySonyCam.my_timestamp()\r\n return True\r\n else:\r\n return False\r\n \r\ndef mavlinkReqGetParamShutSpd( mySonyCam, obj ):\r\n if (mySonyCam.getSonyCamShutSpdData( obj )==True):\r\n with obj.timestamp.get_lock(): \r\n obj.timestamp.value = mySonyCam.my_timestamp()\r\n return True\r\n else:\r\n return False\r\n \r\ndef mavlinkReqGetParamIso( mySonyCam, obj ):\r\n if (mySonyCam.getSonyCamISOData( obj )==True):\r\n with obj.timestamp.get_lock(): \r\n obj.timestamp.value = mySonyCam.my_timestamp()\r\n return True\r\n else:\r\n return False\r\n \r\ndef mavlinkReqGetParamFocus( mySonyCam, obj ):\r\n if (mySonyCam.getSonyCamFocusData( obj )==True):\r\n with obj.timestamp.get_lock(): \r\n obj.timestamp.value = mySonyCam.my_timestamp()\r\n return True\r\n else:\r\n return False\r\n \r\ndef mavlinkReqGetParamFocusArea( mySonyCam, obj 
):\r\n if (mySonyCam.getSonyCamFocusAreaData( obj )==True):\r\n with obj.timestamp.get_lock(): \r\n obj.timestamp.value = mySonyCam.my_timestamp()\r\n return True\r\n else:\r\n return False\r\n \r\ndef mavlinkReqGetParamAperture( mySonyCam, obj ):\r\n if (mySonyCam.getSonyApertureData( obj )==True):\r\n with obj.timestamp.get_lock(): \r\n obj.timestamp.value = mySonyCam.my_timestamp()\r\n return True\r\n else:\r\n return False\r\n \r\ndef mavlinkReqGetParamExPro( mySonyCam, obj ):\r\n if (mySonyCam.getSonyCamExProData( obj )==True):\r\n with obj.timestamp.get_lock(): \r\n obj.timestamp.value = mySonyCam.my_timestamp()\r\n return True\r\n else:\r\n return False\r\n\r\ndef mavlinkTakePhoto( mySonyCam, flg ):\r\n return (mySonyCam.take_a_picture_now(flg)) \r\n \r\ndef get_cam_enum( nameS ):\r\n for s in sorted(camStateClass):\r\n if not s.name.find(nameS) == -1:\r\n return s.value\r\n return -1\r\n \r\ndef serviceParamRequestsOneAtATime( mySonyCam, mav2SonyVals, stcap, wb, ss, iso, pf, pfa, pa, expro ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting Service Mavlink incoming request packets :', p.name, p.pid)\r\n\r\n if not (mav2SonyVals.mav_req_all_param.value == 0):\r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamStillCap)) == 0): \r\n stcap.set_update_flag( True, memoryValue.STATE_MAV_WRITING )\r\n if (mavlinkReqGetParamStillCap( mySonyCam, stcap ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamStillCap \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamWhiteBala)) == 0): \r\n wb.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n if (mavlinkReqGetParamWhiteBala( mySonyCam, wb ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamWhiteBala \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamShutSpd)) == 0): \r\n ss.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n if (mavlinkReqGetParamShutSpd( mySonyCam, ss ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamShutSpd \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamIso)) == 0): \r\n iso.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n if (mavlinkReqGetParamIso( mySonyCam, iso ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamIso \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamFocus)) == 0): \r\n pf.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n if (mavlinkReqGetParamFocus( mySonyCam, pf ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamFocus \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamFocusArea)) == 0): \r\n pfa.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n if (mavlinkReqGetParamFocusArea( mySonyCam, pfa ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamFocusArea \r\n if not 
((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamAperture)) == 0): \r\n pa.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n if (mavlinkReqGetParamAperture( mySonyCam, pa ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamAperture \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamExPro)) == 0): \r\n expro.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n if (mavlinkReqGetParamExPro( mySonyCam, expro ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamExPro\r\n\r\n if not (mav2SonyVals.mav_ext_req_all_param.value == 0):\r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamStillCap)) == 0): \r\n stcap.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n if ( mavlinkReqGetParamStillCap( mySonyCam, stcap ) == True ):\r\n if ( stcap.set_ack_send( True, stcap.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested\r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamStillCap \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamWhiteBala)) == 0): \r\n wb.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n if ( mavlinkReqGetParamWhiteBala( mySonyCam, wb ) == True ):\r\n if ( wb.set_ack_send( True, wb.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamWhiteBala \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamShutSpd)) == 0): \r\n ss.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n if ( mavlinkReqGetParamShutSpd( mySonyCam, ss ) == True ):\r\n if ( ss.set_ack_send( True, ss.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamShutSpd \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamIso)) == 0): \r\n iso.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n if ( mavlinkReqGetParamIso( mySonyCam, iso ) == True ):\r\n if ( iso.set_ack_send( True, iso.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamIso \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamFocus)) == 0): \r\n pf.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n if ( mavlinkReqGetParamFocus( mySonyCam, pf ) == True ):\r\n if ( pf.set_ack_send( True, pf.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamFocus \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamFocusArea)) == 0): \r\n pfa.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n 
if ( mavlinkReqGetParamFocusArea( mySonyCam, pfa ) == True ):\r\n if ( pfa.set_ack_send( True, pfa.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamFocusArea \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamAperture)) == 0): \r\n pa.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n if ( mavlinkReqGetParamAperture( mySonyCam, pa ) == True ):\r\n if ( pa.set_ack_send( True, pa.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamAperture \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamExPro)) == 0): \r\n expro.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n if ( mavlinkReqGetParamExPro( mySonyCam, expro ) == True ):\r\n if ( expro.set_ack_send( True, expro.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamExPro\r\n\r\n print ('Exiting Service Mavlink incoming packet requests :', multiprocessing.current_process().name) #\r\n\r\ndef serviceParamRequests( mySonyCam, mav2SonyVals, stcap, wb, ss, iso, pf, pfa, pa, expro ):\r\n\r\n p = multiprocessing.current_process()\r\n print ('Starting Service Mavlink incoming request packets :', p.name, p.pid)\r\n\r\n if not (mav2SonyVals.mav_req_all_param.value == 0):\r\n \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamStillCap)) == 0): \r\n stcap.set_update_flag( True, memoryValue.STATE_MAV_WRITING )\r\n \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamWhiteBala)) == 0): \r\n wb.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamShutSpd)) == 0): \r\n ss.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamIso)) == 0): \r\n iso.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n\r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamFocus)) == 0): \r\n pf.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n\r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamFocusArea)) == 0): \r\n pfa.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n\r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamAperture)) == 0): \r\n pa.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n\r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamExPro)) == 0): \r\n expro.set_update_flag( True, memoryValue.STATE_MAV_WRITING ) \r\n\r\n if not (mav2SonyVals.mav_ext_req_all_param.value == 0):\r\n \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamStillCap)) == 0): \r\n stcap.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamWhiteBala)) == 0): \r\n wb.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n \r\n if not 
((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamShutSpd)) == 0): \r\n ss.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamIso)) == 0): \r\n iso.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n\r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamFocus)) == 0): \r\n pf.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n\r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamFocusArea)) == 0): \r\n pfa.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n\r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamAperture)) == 0): \r\n pa.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n\r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamExPro)) == 0): \r\n expro.set_ack_send( True, memoryValue.STATE_MAV_WRITING ) \r\n \r\n if not (mav2SonyVals.mav_req_all_param.value == 0):\r\n \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamStillCap)) == 0): \r\n if (mavlinkReqGetParamStillCap( mySonyCam, stcap ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamStillCap \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamWhiteBala)) == 0): \r\n if (mavlinkReqGetParamWhiteBala( mySonyCam, wb ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamWhiteBala \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamShutSpd)) == 0): \r\n if (mavlinkReqGetParamShutSpd( mySonyCam, ss ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamShutSpd \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamIso)) == 0): \r\n if (mavlinkReqGetParamIso( mySonyCam, iso ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamIso \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamFocus)) == 0): \r\n if (mavlinkReqGetParamFocus( mySonyCam, pf ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamFocus \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamFocusArea)) == 0): \r\n if (mavlinkReqGetParamFocusArea( mySonyCam, pfa ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamFocusArea \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamAperture)) == 0): \r\n if (mavlinkReqGetParamAperture( mySonyCam, pa ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamAperture \r\n if not ((int(mav2SonyVals.mav_req_all_param.value) & int(mav2SonyVals.ParamExPro)) == 0): \r\n if (mavlinkReqGetParamExPro( mySonyCam, expro ) == True):\r\n with mav2SonyVals.mav_req_all_param.get_lock():\r\n mav2SonyVals.mav_req_all_param.value = 
mav2SonyVals.mav_req_all_param.value & ~mav2SonyVals.ParamExPro \r\n\r\n if not (mav2SonyVals.mav_ext_req_all_param.value == 0):\r\n \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamStillCap)) == 0): \r\n if ( mavlinkReqGetParamStillCap( mySonyCam, stcap ) == True ):\r\n if ( stcap.set_ack_send( True, stcap.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested\r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamStillCap \r\n\r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamWhiteBala)) == 0): \r\n if ( mavlinkReqGetParamWhiteBala( mySonyCam, wb ) == True ):\r\n if ( wb.set_ack_send( True, wb.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamWhiteBala \r\n \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamShutSpd)) == 0): \r\n if ( mavlinkReqGetParamShutSpd( mySonyCam, ss ) == True ):\r\n if ( ss.set_ack_send( True, ss.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamShutSpd \r\n\r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamIso)) == 0): \r\n if ( mavlinkReqGetParamIso( mySonyCam, iso ) == True ):\r\n if ( iso.set_ack_send( True, iso.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamIso \r\n\r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamFocus)) == 0): \r\n if ( mavlinkReqGetParamFocus( mySonyCam, pf ) == True ):\r\n if ( pf.set_ack_send( True, pf.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamFocus \r\n\r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamFocusArea)) == 0): \r\n if ( mavlinkReqGetParamFocusArea( mySonyCam, pfa ) == True ):\r\n if ( pfa.set_ack_send( True, pfa.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamFocusArea \r\n\r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamAperture)) == 0): \r\n if ( mavlinkReqGetParamAperture( mySonyCam, pa ) == True ):\r\n if ( pa.set_ack_send( True, pa.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamAperture \r\n \r\n if not ((int(mav2SonyVals.mav_ext_req_all_param.value) & int(mav2SonyVals.ParamExPro)) == 0): \r\n if ( mavlinkReqGetParamExPro( mySonyCam, expro ) == True ):\r\n if ( expro.set_ack_send( True, 
expro.STATE_CAM_READING ) == True ): # additional PARAM_EXT_VALUE message requested \r\n                    with mav2SonyVals.mav_ext_req_all_param.get_lock():\r\n                        mav2SonyVals.mav_ext_req_all_param.value = mav2SonyVals.mav_ext_req_all_param.value & ~mav2SonyVals.ParamExPro\r\n\r\n
    print ('Exiting Service Mavlink incoming packet requests :', multiprocessing.current_process().name) #\r\n    \r\n
def run_process_messages_from_connection_single(fra, the_connect, sharedObj):\r\n    p = multiprocessing.current_process()\r\n    print ('Starting: MavReader ', p.name, p.pid) \r\n    fra.process_messages_from_connection( the_connect, sharedObj )\r\n    print ('Exiting MavReader :', multiprocessing.current_process().name)\r\n\r\n
#\r\n# @#= delete above after daemon fully tested\r\n#\r\n
def run_process_messages_from_connection(fra, the_connect, sharedObj):\r\n    while True:\r\n        p = multiprocessing.current_process()\r\n        print ('Starting: MavReader ', p.name, p.pid) \r\n        fra.process_messages_from_connection( the_connect, sharedObj )\r\n        time.sleep(0.2)\r\n    print ('Exiting MavReader :', multiprocessing.current_process().name)\r\n    \r\n
# ================ error handler if the camera fails (powers link on off) ============\r\n\r\n# uses https://github.com/mvp/uhubctl\r\n#\r\n
def reset_usb_camlink():\r\n    print(\"\\033[31;43m executing reset usb camera link \\033[0m\")\r\n    #\r\n    p = os.popen('sudo /home/pi/cams/SonyTEST32/uhubctl/uhubctl -l 1-1 -a 0')\r\n    print(p.read())\r\n    #cmd='sudo /home/pi/cams/SonyTEST32/uhubctl/uhubctl -l 1-1 -a 0' \r\n    #args = shlex.split(cmd)\r\n    #s=subprocess.run( args, stdout=subprocess.PIPE )\r\n    #output=s.stdout\r\n    #print(output)\r\n    time.sleep(2)\r\n    p = os.popen('sudo /home/pi/cams/SonyTEST32/uhubctl/uhubctl -l 1-1 -a 1')\r\n    print(p.read())\r\n    #cmd='sudo /home/pi/cams/SonyTEST32/uhubctl/uhubctl -l 1-1 -a 1' \r\n    #args = shlex.split(cmd)\r\n    #s=subprocess.run( args, stdout=subprocess.PIPE )\r\n    #output=s.stdout\r\n    #print(output)\r\n    #\r\n    # had to add this to prevent a Killed occurring - no idea why?\r\n    #\r\n    time.sleep(50)\r\n    print(\"\\033[31m completed reset usb camera link \\033[0m\")\r\n\r\n
def perform_usb_reset( mySonyCam ):\r\n    if (mySonyCam.error_counts.value >= 5): \r\n        reset_usb_camlink()\r\n        with mySonyCam.error_counts.get_lock():\r\n            mySonyCam.error_counts.value = 0 \r\n
#\r\n# The heartbeat task\r\n#\r\n
def sendMavlinkHeartBeat_single(fm, cID, sleepTm=1):\r\n    fm.mavlink_send_GCS_heartbeat(cID)\r\n    while sleepTm > 0:\r\n        time.sleep(1)\r\n        print(f'{sleepTm} seconds')\r\n        sleepTm -= 1\r\n\r\n
#\r\n# @#= delete above after daemon tested\r\n#\r\n
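# note: continuous heartbeat task - sends a GCS heartbeat every sleepTm seconds and is\r\n# intended to run forever as a daemon process\r\n#\r\n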
def sendMavlinkHeartBeat(fm, cID, sleepTm=1):\r\n    while True:\r\n        fm.mavlink_send_GCS_heartbeat(cID)\r\n        time.sleep(sleepTm)\r\n        print(f\"\\033[36;44m HeartBEAT !!!!! ============= {sleepTm} seconds ================= \\033[0m\")\r\n        # sleepTm -= 1\r\n    \r\n
def sendMavlinkAckData(fm, cID, sleep, cmd, rpm2, pro, res):\r\n    ret = fm.mavlink_send_ack_command(cID, cmd, rpm2, pro, res)\r\n    while sleep > 0:\r\n        #await asyncio.sleep(1)\r\n        print(f'{sleep} seconds')\r\n        sleep -= 1\r\n    return ret\r\n\r\n
# ================ mpi writer and reader functions ==============\r\n#\r\nfrom mpi4py import MPI\r\nimport numpy as np\r\n\r\n
# communicator and rank used as globals by the MPI helpers below (they were referenced\r\n# but never defined in this module)\r\ncomm = MPI.COMM_WORLD\r\nrank = comm.Get_rank()\r\n\r\n
def sendTagOverMPILink( tag_name ):\r\n    #\r\n    # convert string to ascii list and make numpy array from it.\r\n    #\r\n    vn = []\r\n    k = 0\r\n    for k in range(len(tag_name)):\r\n        vn.append(ord(tag_name[k]))\r\n    u8_tag_name = np.array(vn, np.uint8)\r\n    data = np.array(u8_tag_name, dtype=\"float64\")\r\n    comm.Send(data, dest=1, tag=0)\r\n    print('\\033[33m MPI Process {} sent string data: \\033[0m'.format(rank), data) \r\n\r\n
def sendValueListOverMPILink( tagValues ):\r\n\r\n    loop = 0\r\n    for loop in range(0,len(tagValues)):\r\n        tagValue = tagValues[loop]\r\n        list1 = []\r\n        list1.append(tagValue) \r\n        data = np.array(list1, dtype=\"float64\")\r\n        comm.Send(data, dest=1, tag=0)\r\n        print('\\033[35m MPI Process {} sent data values list: \\033[0m'.format(rank), data) \r\n\r\n
def readTagOverMPILink():\r\n    # initialize the receiving buffer to 20 elements max char size for tag\r\n    #\r\n    data = np.zeros(20)\r\n    \r\n    # receive data from master process (first it's the tag as a string)\r\n    #\r\n    comm.Recv(data, source=0, tag=0)\r\n    \r\n    # convert the object received back to a list and then convert it to a string\r\n    #\r\n    list1 = data.tolist()\r\n    tagName = \"\"\r\n    for s in range(0,len(list1)):\r\n        tagName = tagName + chr(int(list1[s]))\r\n    print(f\"\\033[32m MPI Process {rank} received the tagname \\033[31;46m {tagName} \\033[0m\") \r\n    return tagName \r\n\r\n
def readValueListOverMPILink():\r\n    # initialize the receiving buffer to 20 elements max char size for tag\r\n    #\r\n    data = np.zeros(20)\r\n    \r\n    # receive data from master process (second it's the values associated as floating point numbers) \r\n    # \r\n    comm.Recv(data, source=0, tag=0)\r\n    \r\n    # convert it back to a list and parse its values to the variables\r\n    #\r\n    list1 = []\r\n    list1 = data.tolist()\r\n    if (len(list1) >= 2):\r\n        print('Process {} received data index number :'.format(rank), int(list1[1]))\r\n        print('Process {} received data value :'.format(rank), list1[0]) \r\n    return list1\r\n    \r\n
#\r\n# ================ signal handlers ==============================\r\n#\r\n\r\n#\r\n# from a signal.alarm\r\n#\r\n
def raised_signal_handler(a,b):\r\n    print(\"\\033[32m ============ Take Picture & heartbeat ==================\")\r\n    fastGlobals.take_picture = 1\r\n    # do the action here \r\n\r\n
#\r\n# CTL-C\r\n#\r\n
def ctlc_handler(signum, frame): \r\n    print(\"Signal Number:\", signum, \" Frame: \", frame) \r\n\r\n
#\r\n# CTL-Z\r\n# \r\n
def exit_handler(signum, frame): \r\n    print('Exiting....') \r\n    exit(0)\r\n\r\n
#\r\n# on getting kill -SIGUSR1 \r\n#\r\n
def sigusr1_handler(signum, frame):\r\n    print(\"signal handler with a kill -SIGUSR1 (signal.SIGUSR1)\")\r\n    # what we want to do on that external signal\r\n    print(\"\\033[32m ============ Take Picture ==================\")\r\n    fastGlobals.take_picture = 1\r\n\r\n
# ===============================================================================================\r\n#\r\n# run program as :- mpirun -np 2 python3 <progname>.py\r\n# or on Raspberry Pi :- mpirun.openmpi -np 2 python3 <progname>.py\r\n#\r\nfrom mpi4py import MPI\r\nimport numpy as np\r\n\r\nimport 
time\r\n\r\n    \r\n
if __name__ == '__main__':\r\n\r\n
    # ========================== SIGNAL HANDLERS =================================================\r\n    # Register the alarm signal with our handler: signal.signal(signal.SIGALRM, raised_signal_handler)\r\n    signal.signal(signal.SIGALRM, raised_signal_handler)\r\n    # to raise this, insert the following anywhere in the code\r\n    # signal.alarm(1)\r\n\r\n
    # Register our signal handler with `SIGINT`(CTRL + C) \r\n    signal.signal(signal.SIGINT, ctlc_handler) \r\n    # Register the exit handler with `SIGTSTP` (Ctrl + Z) \r\n    signal.signal(signal.SIGTSTP, exit_handler)\r\n    # external signal handler\r\n    signal.signal(signal.SIGUSR1, sigusr1_handler)\r\n\r\n
    # ========================== MULTI_TASKING ====================================================\r\n    # create the sequential state variable for the scheduler working on\r\n    # the camera operations that must be in series\r\n    # and the choice of operation (camera feature) that will execute at each state \r\n    #\r\n    mp_choice = multiprocessing.Value('i', mavlinkSonyCamWriteVals.FUNC_EX_PRO) # define the initial (starting) operation\r\n    mp_state = multiprocessing.Value('i', mavlinkSonyCamWriteVals.STATE_INIT)\r\n    wd_timer = multiprocessing.Value('i', 0)\r\n    \r\n
    # initialise pool data - this is the maximum number of operations for the parallel operations group\r\n    #\r\n    max_number_processes = 7 \r\n\r\n
    # ========================= MAVLINK ============================================================= \r\n    frame = MAVFrame()\r\n    \r\n    state = False\r\n    while (state == False):\r\n        try:\r\n            cID,state = frame.makeMAVlinkConn()\r\n        except Exception as e:\r\n            print(\"Error Trap :: \", e.__class__, \" occurred.\")\r\n\r\n
    print(\"\\033[31m connected to mavlink \\033[0m\")\r\n    \r\n
    # wait heartbeat \r\n    # if it sends another sys id we need to change it\r\n    #\r\n    state = False\r\n    xx = 1\r\n    while xx == 1:\r\n        print(f\"\\033[31m receive ? {xx} \\033[0m\")\r\n        try:\r\n            m = cID.recv_match(type=\"HEARTBEAT\", blocking=True, timeout=10)\r\n        except Exception as e:\r\n            print(\"Error Trap :: \", e.__class__, \" occurred.\") \r\n        if not (m == None):\r\n            if not ( m.autopilot == mavutil.mavlink.MAV_AUTOPILOT_INVALID ):\r\n                xx = 2\r\n                print(f\"\\033[33m receive ? {xx} \\033[0m\")\r\n    \r\n    print(f\"\\033[32m receive complete ? 
{xx} \\033[0m\") \r\n    id = m.get_srcSystem() \r\n    print(\"\\033[31m heartbeat \\033[0m\")\r\n    if not ( m.get_srcSystem() == frame.DEFAULT_SYS_ID ) :\r\n        print(f\"-------- new id found -------- {id}\")\r\n        while (state == False):\r\n            try:\r\n                cID,state = frame.makeNewMAVlinkConn(id)\r\n            except Exception as e:\r\n                print(\"Error Trap :: \", e.__class__, \" occurred.\")\r\n\r\n
    print(\"\\033[31m to logger \\033[0m\") \r\n    \r\n
    # default logger\r\n    #\r\n    # multiprocessing.log_to_stderr(logging.DEBUG)\r\n    #\r\n    # for extra logging use this \r\n    # instead\r\n    #\r\n    multiprocessing.log_to_stderr()\r\n    #logger = multiprocessing.get_logger()\r\n    #logger.setLevel(logging.INFO)\r\n    \r\n
    #\r\n    # create instance of sony alpha cam (new API)\r\n    # \r\n    mySonyCamNo1 = sonyAlphaNewCamera()\r\n    \r\n
    #\r\n    # create an instance of common write structure \r\n    # from mavlink reader task to the camera\r\n    #\r\n    gcsWrites2Sony = mavlinkSonyCamWriteVals()\r\n\r\n
    print(\"\\033[31m getting data \\033[0m\")\r\n    #\r\n    # init the objects with camera data \r\n    # & set the data to be written back to gcs via mavlink\r\n    #\r\n    #\r\n    # Initialise all shared object data between\r\n    # camera and mavlink processes\r\n    # \r\n    expro = mySonyCamNo1.initSonyCamExProData( )\r\n    aper = mySonyCamNo1.initSonyApertureData( ) \r\n    focusdata = mySonyCamNo1.initSonyCamFocusData( ) \r\n    focusarea = mySonyCamNo1.initSonyCamFocusAreaData( ) \r\n    iso = mySonyCamNo1.initSonyCamISOData( ) \r\n    shut_sp = mySonyCamNo1.initSonyCamShutSpdData( ) \r\n    whitebal = mySonyCamNo1.initSonyCamWhiteBalaData( ) \r\n    stillcap = mySonyCamNo1.initSonyCamStillCapModeData( )\r\n\r\n
    print(\"\\033[31m got data \\033[0m\")\r\n    \r\n
    # this is a rough trap for now: if it's all zeros it's not getting correct data from the camera, so try resetting the link first\r\n    # no need to acquire as this is working in single task mode at this point\r\n    if ((((((((expro.signal.value == 0) and (aper.signal.value == 0)) and (focusdata.signal.value == 0)) and (focusarea.signal.value == 0)) and (iso.signal.value == 0)) and (shut_sp.signal.value == 0)) and (whitebal.signal.value == 0)) and (stillcap.signal.value == 0)):\r\n        reset_usb_camlink()\r\n        retCode = mySonyCamNo1.getSonyCamExProData( expro )\r\n        retCode = mySonyCamNo1.getSonyApertureData( aper ) \r\n        retCode = mySonyCamNo1.getSonyCamFocusData( focusdata ) \r\n        retCode = mySonyCamNo1.getSonyCamFocusAreaData( focusarea ) \r\n        retCode = mySonyCamNo1.getSonyCamISOData( iso ) \r\n        retCode = mySonyCamNo1.getSonyCamShutSpdData( shut_sp ) \r\n        retCode = mySonyCamNo1.getSonyCamWhiteBalaData( whitebal ) \r\n        retCode = mySonyCamNo1.getSonyCamStillCapModeData( stillcap )\r\n    \r\n
    # uncomment these if you don't want immediate update message\r\n    # \r\n    #expro.timestamp = mySonyCamNo1.my_timestamp()\r\n    #aper.timestamp = mySonyCamNo1.my_timestamp() \r\n    #focusdata.timestamp = mySonyCamNo1.my_timestamp() \r\n    #focusarea.timestamp = mySonyCamNo1.my_timestamp() \r\n    #iso.timestamp = mySonyCamNo1.my_timestamp() \r\n    #shut_sp.timestamp = mySonyCamNo1.my_timestamp() \r\n    #whitebal.timestamp = mySonyCamNo1.my_timestamp() \r\n    #stillcap.timestamp = mySonyCamNo1.my_timestamp() \r\n\r\n
    # ========== send back to GCS via mavlink if a new change of state has been detected or polled if requested \r\n    # \r\n    #p2 = multiprocessing.Process(name='sendMavExpro', target=sendMavExpro, args=(mySonyCamNo1, expro, cID,)).start()\r\n    #p4 = multiprocessing.Process(name='sendMavAper', target=sendMavAper, args=(mySonyCamNo1, aper, cID,)).start()\r\n    #p6 = 
multiprocessing.Process(name='sendMavFocusData', target=sendMavFocusData, args=(mySonyCamNo1, focusdata, focusarea, cID, )).start()\r\n    #p8 = multiprocessing.Process(name='sendMavIso', target=sendMavIso, args=(mySonyCamNo1, iso, cID, )).start()\r\n    #p10 = multiprocessing.Process(name='sendMavShutSpd', target=sendMavShutSpd, args=(mySonyCamNo1, shut_sp, cID, )).start()\r\n    #p12 = multiprocessing.Process(name='sendMavWhiteBala', target=sendMavWhiteBala, args=(mySonyCamNo1, whitebal, cID, )).start()\r\n    #p14 = multiprocessing.Process(name='sendMavStillCap', target=sendMavStillCap, args=(mySonyCamNo1, stillcap, cID, )).start() \r\n    #if p2 is not None: \r\n    #    p2.join()\r\n    #if p4 is not None: \r\n    #    p4.join()\r\n    #if p6 is not None: \r\n    #    p6.join()\r\n    #if p8 is not None: \r\n    #    p8.join()\r\n    #if p10 is not None: \r\n    #    p10.join()\r\n    #if p12 is not None: \r\n    #    p12.join() \r\n    #if p14 is not None: \r\n    #    p14.join() \r\n    \r\n
    #\r\n    # now set the class to be initialised\r\n    #\r\n    gcsWrites2Sony.init_class_state()\r\n    \r\n
    #\r\n    # test iso write (single task mode) \r\n    #\r\n    # ### If you want this in single task mode ###\r\n    #\r\n    active = True\r\n    #\r\n    # ### If you want this in multi mode ###\r\n    #\r\n    # active = False\r\n\r\n
    #comm = MPI.COMM_WORLD\r\n    #rank = comm.Get_rank()\r\n    rank = 0\r\n    \r\n
    if (rank == 0): # ====================== master rank 0 ==========================================================\r\n    \r\n        print(\"rank 0........\") \r\n        while active==True:\r\n            run_process_messages_from_connection_single(frame, cID, gcsWrites2Sony)\r\n            serviceParamRequests( mySonyCamNo1, gcsWrites2Sony, stillcap, whitebal, shut_sp, iso, focusdata, focusarea, aper, expro )\r\n
            #\r\n            # ===================== Parallel Pool Scheduler 8 Tasks ==========================\r\n            #\r\n            # runs each task in parallel, waiting for all to finish \r\n            #\r\n            wd_timer.value = wd_timer.value + 1\r\n            if wd_timer.value > 10:\r\n                sendMavlinkHeartBeat_single(frame, cID, 0)\r\n                wd_timer.value = 0 \r\n
            #print(\"\\033[31;42m Started mavlink PARAM sender \\033[0m\") \r\n            #pool = multiprocessing.Pool(max_number_processes) # start the batch\r\n\r\n            #pool.apply_async(sendMavExpro, args=(mySonyCamNo1, expro, cID,)) \r\n            #pool.apply_async(sendMavAper, args=(mySonyCamNo1, aper, cID,)) \r\n            #pool.apply_async(sendMavFocusData, args=(mySonyCamNo1, focusdata, focusarea, cID,)) \r\n            #pool.apply_async(sendMavIso, args=(mySonyCamNo1, iso, cID,)) \r\n            #pool.apply_async(sendMavShutSpd, args=(mySonyCamNo1, shut_sp, cID,)) \r\n            #pool.apply_async(sendMavWhiteBala, args=(mySonyCamNo1, whitebal, cID,)) \r\n            #pool.apply_async(sendMavStillCap, args=(mySonyCamNo1, stillcap, cID,)) \r\n            #pool.close() # After all threads started we close the pool\r\n
            sendMavExpro(mySonyCamNo1, expro, cID)\r\n            sendMavAper(mySonyCamNo1, aper, cID)\r\n            sendMavFocusData(mySonyCamNo1, focusdata, focusarea, cID)\r\n            sendMavIso(mySonyCamNo1, iso, cID)\r\n            sendMavShutSpd(mySonyCamNo1, shut_sp, cID)\r\n            sendMavWhiteBala(mySonyCamNo1, whitebal, cID)\r\n            sendMavStillCap(mySonyCamNo1, stillcap, cID)\r\n            \r\n
            # =============== look for change and do camera action =====================================\r\n            #\r\n            if not (gcsWrites2Sony.set_sony_iso.value == gcsWrites2Sony.STATE_INIT):\r\n                print(f\"on TOP LEVEL saw {gcsWrites2Sony.set_sony_iso.value} {gcsWrites2Sony.prev_sony_iso.value} {gcsWrites2Sony.mav_req_all_param.value}\")\r\n                doAlphaCameraIso(mySonyCamNo1, gcsWrites2Sony, iso)\r\n
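            # note: each gcsWrites2Sony.set_sony_* shared value starts at STATE_INIT; the mavlink\r\n            # reader task overwrites it when the GCS sends a new setting, and any changed value\r\n            # is pushed to the camera by the matching doAlpha* call - the same pattern repeats below\r\n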
            if not (gcsWrites2Sony.set_sony_aperture.value == gcsWrites2Sony.STATE_INIT):\r\n                print(f\"on TOP LEVEL saw aperture {gcsWrites2Sony.set_sony_aperture.value} {gcsWrites2Sony.prev_sony_aperture.value} {gcsWrites2Sony.mav_req_all_param.value}\")\r\n                doAlphaCameraAperture(mySonyCamNo1, gcsWrites2Sony, aper)\r\n
            if not (gcsWrites2Sony.set_sony_white_bal.value == gcsWrites2Sony.STATE_INIT):\r\n                print(f\"on TOP LEVEL saw wb {gcsWrites2Sony.set_sony_white_bal.value} {gcsWrites2Sony.prev_sony_white_bal.value} {gcsWrites2Sony.mav_req_all_param.value}\")\r\n                doAlphaWhiteBala(mySonyCamNo1, gcsWrites2Sony, whitebal)\r\n
            if not (gcsWrites2Sony.set_sony_ex_pro.value == gcsWrites2Sony.STATE_INIT):\r\n                print(f\"on TOP LEVEL saw expro {gcsWrites2Sony.set_sony_ex_pro.value} {gcsWrites2Sony.prev_sony_ex_pro.value} {gcsWrites2Sony.mav_req_all_param.value}\")\r\n                doAlphaCameraExpro(mySonyCamNo1, gcsWrites2Sony, expro)\r\n
            if not (gcsWrites2Sony.set_sony_still_cap_mode.value == gcsWrites2Sony.STATE_INIT):\r\n                print(f\"on TOP LEVEL saw still cap {gcsWrites2Sony.set_sony_still_cap_mode.value} {gcsWrites2Sony.prev_sony_still_cap_mode.value} {gcsWrites2Sony.mav_req_all_param.value}\")\r\n                doAlphaCameraStillCap(mySonyCamNo1, gcsWrites2Sony, stillcap) \r\n
            if not (gcsWrites2Sony.set_sony_shutter.value == gcsWrites2Sony.STATE_INIT):\r\n                print(f\"on TOP LEVEL saw shutter speed {gcsWrites2Sony.set_sony_shutter.value} {gcsWrites2Sony.prev_sony_shutter.value} {gcsWrites2Sony.mav_req_all_param.value}\")\r\n                doAlphaCameraShutSpd(mySonyCamNo1, gcsWrites2Sony, shut_sp) \r\n
            if not (gcsWrites2Sony.set_sony_focus.value == gcsWrites2Sony.STATE_INIT) or not (gcsWrites2Sony.set_sony_focus_area.value == gcsWrites2Sony.STATE_INIT):\r\n                print(f\"on TOP LEVEL saw focus {gcsWrites2Sony.set_sony_focus.value} {gcsWrites2Sony.prev_sony_focus.value} {gcsWrites2Sony.set_sony_focus_area.value} {gcsWrites2Sony.prev_sony_focus_area.value} {gcsWrites2Sony.mav_req_all_param.value}\")\r\n                doAlphaCameraFocusData(mySonyCamNo1, gcsWrites2Sony, focusdata, focusarea) \r\n\r\n
            # =============== photo handler ============================================================\r\n            #\r\n            print(f\"checking if i need to take a photo ..... {fastGlobals.take_picture}\")\r\n            if gcsWrites2Sony.take_photo.value == True:\r\n                fastGlobals.take_picture = get_cam_enum(\"taking_photo\")\r\n                gcsWrites2Sony.take_photo.value = False\r\n            \r\n
            fastGlobals.take_picture = mavlinkTakePhoto( mySonyCamNo1, fastGlobals.take_picture )\r\n            if (fastGlobals.take_picture == get_cam_enum(\"photo_ack\")):\r\n                if ( sendMavlinkAckData(frame, cID, 1, frame.RCV_COMMAND, frame.RPM2, 0, frame.ACK_RESULT ) == True):\r\n                    fastGlobals.take_picture = get_cam_enum(\"photo_complete\")\r\n                    frame.RCV_COMMAND = 0\r\n                    frame.ACK_RESULT = MAVFrame.ACK_ALL_DATA_COMPLETE \r\n\r\n
            #pool.join() # And wait until all pool threads are done \r\n\r\n
            # on clocked errors perform usb reset\r\n            # \r\n            if (mySonyCamNo1.error_counts.value >= 5): \r\n                reset_usb_camlink()\r\n                with mySonyCamNo1.error_counts.get_lock():\r\n                    mySonyCamNo1.error_counts.value = 0 \r\n\r\n
    elif (rank == 1): # ============================= heart beat ranking ================================================\r\n\r\n        print(\"rank 1........\") \r\n        while active==True:\r\n            sendMavlinkHeartBeat_single(frame, cID, 0)\r\n            time.sleep(0.9)\r\n    \r\n    \r\n
    #\r\n    # Release the shared memory\r\n    #\r\n    del expro\r\n    del stillcap \r\n    del aper\r\n    del focusdata\r\n    del focusarea\r\n    del shut_sp\r\n    del whitebal\r\n    del iso"
]
| [
[
"numpy.fromfile",
"numpy.array",
"numpy.dtype",
"numpy.zeros"
]
]
|
wjsutton/tableau_public_api | [
"db7f2c32c460aefaf54e58734b1de59e417df56e"
]
| [
"Python/example_profile_call.py"
]
| [
"# TO DO \n# Parse out profile address data fields\n# can take options: country, state, city but each can also be empty but not quoted\n\n\nimport json\nimport urllib3\nimport pandas as pd\n\nhttp = urllib3.PoolManager()\n\nyour_username = 'wjsutton'\nprofile_call = \"https://public.tableau.com/profile/api/\" + your_username\n\n\ntableau_profile = json.loads(http.request('GET',profile_call).data)\nprofile = pd.json_normalize(tableau_profile, max_level=0)\n#address = pd.json_normalize(tableau_profile['address'], max_level=0)\nwebsites = pd.json_normalize(tableau_profile['websites'], max_level=0)\nworkbooks = pd.json_normalize(tableau_profile['workbooks'], max_level=0)\n\n\n# Finding attributions and merging to workbooks\nattributions_df = []\nfor i in workbooks.index:\n attributions = pd.json_normalize(workbooks['attributions'][i])\n if len(attributions) > 0:\n attributions.columns = 'attribution_' + attributions.columns\n attributions['workbookRepoUrl'] = workbooks['workbookRepoUrl'][i]\n attributions_df.append(attributions)\n\nif len(attributions_df) > 0:\n attributions_df = pd.concat(attributions_df)\n workbooks = pd.merge(workbooks,attributions_df, on='workbookRepoUrl', how='left')\n\ndel profile['websites']\ndel profile['workbooks']\n\nprint(workbooks)\n\nprint(profile)\nprint(tableau_profile['address'])\n#address = pd.json_normalize(tableau_profile['address'])\n#print(address)\n#print(tableau_profile['address']['state'])\n#print(tableau_profile['address']['city'])\n"
]
| [
[
"pandas.concat",
"pandas.merge",
"pandas.json_normalize"
]
]
|
josemarioqv/FS0741-DynamicalSystems-FractalGeometry | [
"e59f7df0fad6c3aeac8c9e654dcbc13e7985ca9b"
]
| [
"Second_Exam/fern.py"
]
| [
"import numpy as np\nimport random\nimport pyglet\n\n\nclass Fern():\n\n def __init__(self):\n self.W = np.array([[[0., 0.], [0., 0.4]],\n [[0.85, 0.04], [-0.04, 0.85]],\n [[0.2, -0.26], [0.23, 0.22]],\n [[-0.15, 0.28], [0.25, 0.24]]])\n self.B = np.array([[0., 0.01],\n [1.6, 0.85],\n [1.6, 0.07],\n [0.44, 0.07]])\n self.X = np.array([0.5, 0.6])\n\n def update(self):\n i = random.choices(population=[0, 1, 2, 3],\n weights=[0.01, 0.85, 0.07, 0.07])[0]\n self.X = np.dot(self.W[i], self.X) + self.B[i]\n\n def draw(self):\n point = self.X*35\n point = tuple(point.astype(int))\n print(point)\n pyglet.graphics.draw(1, pyglet.gl.GL_POINTS, ('v2i', point),\n ('c3B', (40, 200, 40)))\n\n\nclass Window(pyglet.window.Window):\n\n def __init__(self):\n # pyglet window initialization\n super().__init__()\n self.set_size(400, 400)\n pyglet.clock.schedule_interval(self.update, 0.001)\n # initialization\n self.fern = Fern()\n\n def on_draw(self):\n self.fern.draw()\n\n def update(self, dt):\n self.fern.update()\n pass\n\n\nif __name__ == '__main__':\n window = Window()\n pyglet.app.run()\n"
]
| [
[
"numpy.array",
"numpy.dot"
]
]
|
liujiawen-jpg/shifu | [
"b018b07b3c8a04722b7761b0aceeaed261d19e5f"
]
| [
"dispose.py"
]
| [
"# -*- coding: utf-8 -*-\r\n\r\nimport glob\r\nimport numpy as np\r\n\r\n\r\npath=glob.glob('nose_vector_45_nor')\r\ndir_list = glob.glob(f'{path[0]}/*')\r\nfor d in dir_list:\r\n file_list=glob.glob(f'{d}/*.txt')\r\n for file in file_list:\r\n name=file.split('\\\\')[-1]\r\n point_arr=[]\r\n point_list=open(file,'r').read().split('\\n')\r\n for idx,point in enumerate(point_list):\r\n print(point)\r\n if idx==len(point_list)-1:\r\n continue\r\n #row_data=list(map(eval,point.split(' '))) \r\n list=[]\r\n a,b,c,d,e,f=map(eval,point.split())\r\n list.append(a)\r\n list.append(b)\r\n list.append(c)\r\n list.append(d)\r\n list.append(e)\r\n list.append(f)\r\n row_data=list\r\n print(row_data)\r\n #row_data=list(eval(input()))\r\n point_arr.append(row_data)\r\n #######以下为数据处理过程\r\n point_arr=np.array(point_arr)\r\n data_scale=0.006\r\n max_z=np.max(point_arr[:,2])\r\n min_z=np.min(point_arr[:,2])\r\n moveZ=(max_z+min_z)/2\r\n point_arr[:,0]=point_arr[:,0]*data_scale\r\n point_arr[:,1]=point_arr[:,1]*data_scale\r\n point_arr[:,2]=(point_arr[:,2]-moveZ)*data_scale\r\n \r\n #######将处理完的数据写入文件\r\n f_output=open('{name}','w')\r\n for points in point_arr:\r\n i = 0 \r\n for point in points:\r\n if i<3:\r\n point=float('%.6f' %point)\r\n f_output.write(str(point))\r\n f_output.write(' ')\r\n i += 1\r\n f_output.write('\\n')\r\n f_output.close()\r\n \r\n"
]
| [
[
"numpy.max",
"numpy.array",
"numpy.min"
]
]
|
mathematiguy/stylized-neural-painting | [
"97de8a22c8146fbef0bc7b9e75bb54b937521a45"
]
| [
"imitator.py"
]
| [
"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport os\r\n\r\nimport utils\r\nimport loss\r\nfrom networks import *\r\n\r\nimport torch\r\nimport torch.optim as optim\r\nfrom torch.optim import lr_scheduler\r\nimport torch.nn as nn\r\n\r\nimport renderer\r\n\r\n\r\n# Decide which device we want to run on\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\n\r\nclass Imitator():\r\n\r\n def __init__(self, args, dataloaders):\r\n\r\n self.dataloaders = dataloaders\r\n\r\n self.rderr = renderer.Renderer(renderer=args.renderer)\r\n\r\n # define G\r\n self.net_G = define_G(rdrr=self.rderr, netG=args.net_G).to(device)\r\n\r\n # Learning rate\r\n self.lr = args.lr\r\n\r\n # define optimizers\r\n self.optimizer_G = optim.Adam(\r\n self.net_G.parameters(), lr=self.lr, betas=(0.9, 0.999))\r\n\r\n # define lr schedulers\r\n self.exp_lr_scheduler_G = lr_scheduler.StepLR(\r\n self.optimizer_G, step_size=100, gamma=0.1)\r\n\r\n # define some other vars to record the training states\r\n self.running_acc = []\r\n self.epoch_acc = 0\r\n self.best_val_acc = 0.0\r\n self.best_epoch_id = 0\r\n self.epoch_to_start = 0\r\n self.max_num_epochs = args.max_num_epochs\r\n self.G_pred_foreground = None\r\n self.G_pred_alpha = None\r\n self.batch = None\r\n self.G_loss = None\r\n self.is_training = False\r\n self.batch_id = 0\r\n self.epoch_id = 0\r\n self.checkpoint_dir = args.checkpoint_dir\r\n self.vis_dir = args.vis_dir\r\n\r\n # define the loss functions\r\n self._pxl_loss = loss.PixelLoss(p=2)\r\n\r\n self.VAL_ACC = np.array([], np.float32)\r\n if os.path.exists(os.path.join(self.checkpoint_dir, 'val_acc.npy')):\r\n self.VAL_ACC = np.load(os.path.join(self.checkpoint_dir, 'val_acc.npy'))\r\n\r\n # check and create model dir\r\n if os.path.exists(self.checkpoint_dir) is False:\r\n os.mkdir(self.checkpoint_dir)\r\n if os.path.exists(self.vis_dir) is False:\r\n os.mkdir(self.vis_dir)\r\n\r\n # visualize model\r\n if args.print_models:\r\n self._visualize_models()\r\n\r\n\r\n def _visualize_models(self):\r\n\r\n from torchviz import make_dot\r\n\r\n # visualize models with the package torchviz\r\n data = next(iter(self.dataloaders['train']))\r\n y = self.net_G(data['A'].to(device))\r\n mygraph = make_dot(y.mean(), params=dict(self.net_G.named_parameters()))\r\n mygraph.render('G')\r\n\r\n\r\n def _load_checkpoint(self):\r\n\r\n if os.path.exists(os.path.join(self.checkpoint_dir, 'last_ckpt.pt')):\r\n print('loading last checkpoint...')\r\n # load the entire checkpoint\r\n checkpoint = torch.load(os.path.join(self.checkpoint_dir, 'last_ckpt.pt'))\r\n\r\n # update net_G states\r\n self.net_G.load_state_dict(checkpoint['model_G_state_dict'])\r\n self.optimizer_G.load_state_dict(checkpoint['optimizer_G_state_dict'])\r\n self.exp_lr_scheduler_G.load_state_dict(\r\n checkpoint['exp_lr_scheduler_G_state_dict'])\r\n self.net_G.to(device)\r\n\r\n # update some other states\r\n self.epoch_to_start = checkpoint['epoch_id'] + 1\r\n self.best_val_acc = checkpoint['best_val_acc']\r\n self.best_epoch_id = checkpoint['best_epoch_id']\r\n\r\n print('Epoch_to_start = %d, Historical_best_acc = %.4f (at epoch %d)' %\r\n (self.epoch_to_start, self.best_val_acc, self.best_epoch_id))\r\n print()\r\n\r\n else:\r\n print('training from scratch...')\r\n\r\n\r\n def _save_checkpoint(self, ckpt_name):\r\n torch.save({\r\n 'epoch_id': self.epoch_id,\r\n 'best_val_acc': self.best_val_acc,\r\n 'best_epoch_id': self.best_epoch_id,\r\n 'model_G_state_dict': self.net_G.state_dict(),\r\n 
'optimizer_G_state_dict': self.optimizer_G.state_dict(),\r\n            'exp_lr_scheduler_G_state_dict': self.exp_lr_scheduler_G.state_dict()\r\n        }, os.path.join(self.checkpoint_dir, ckpt_name))\r\n\r\n\r\n
    def _update_lr_schedulers(self):\r\n        self.exp_lr_scheduler_G.step()\r\n\r\n\r\n
    def _compute_acc(self):\r\n\r\n        target_foreground = self.gt_foreground.to(device).detach()\r\n        target_alpha_map = self.gt_alpha.to(device).detach()\r\n        foreground = self.G_pred_foreground.detach()\r\n        alpha_map = self.G_pred_alpha.detach()\r\n\r\n        psnr1 = utils.cpt_batch_psnr(foreground, target_foreground, PIXEL_MAX=1.0)\r\n        psnr2 = utils.cpt_batch_psnr(alpha_map, target_alpha_map, PIXEL_MAX=1.0)\r\n        return (psnr1 + psnr2)/2.0\r\n\r\n\r\n
    def _collect_running_batch_states(self):\r\n        self.running_acc.append(self._compute_acc().item())\r\n\r\n        m = len(self.dataloaders['train'])\r\n        if self.is_training is False:\r\n            m = len(self.dataloaders['val'])\r\n\r\n        if np.mod(self.batch_id, 100) == 1:\r\n            print('Is_training: %s. [%d,%d][%d,%d], G_loss: %.5f, running_acc: %.5f'\r\n                  % (self.is_training, self.epoch_id, self.max_num_epochs-1, self.batch_id, m,\r\n                     self.G_loss.item(), np.mean(self.running_acc)))\r\n\r\n        if np.mod(self.batch_id, 1000) == 1:\r\n            vis_pred_foreground = utils.make_numpy_grid(self.G_pred_foreground)\r\n            vis_gt_foreground = utils.make_numpy_grid(self.gt_foreground)\r\n            vis_pred_alpha = utils.make_numpy_grid(self.G_pred_alpha)\r\n            vis_gt_alpha = utils.make_numpy_grid(self.gt_alpha)\r\n\r\n            vis = np.concatenate([vis_pred_foreground, vis_gt_foreground,\r\n                                  vis_pred_alpha, vis_gt_alpha], axis=0)\r\n            vis = np.clip(vis, a_min=0.0, a_max=1.0)\r\n            file_name = os.path.join(\r\n                self.vis_dir, 'istrain_'+str(self.is_training)+'_'+\r\n                str(self.epoch_id)+'_'+str(self.batch_id)+'.png')\r\n            plt.imsave(file_name, vis)\r\n\r\n\r\n\r\n
    def _collect_epoch_states(self):\r\n\r\n        self.epoch_acc = np.mean(self.running_acc)\r\n        print('Is_training: %s. Epoch %d / %d, epoch_acc= %.5f' %\r\n              (self.is_training, self.epoch_id, self.max_num_epochs-1, self.epoch_acc))\r\n        print()\r\n\r\n\r\n
    def _update_checkpoints(self):\r\n\r\n        # save current model\r\n        self._save_checkpoint(ckpt_name='last_ckpt.pt')\r\n        print('Latest model updated. 
Epoch_acc=%.4f, Historical_best_acc=%.4f (at epoch %d)'\r\n % (self.epoch_acc, self.best_val_acc, self.best_epoch_id))\r\n print()\r\n\r\n self.VAL_ACC = np.append(self.VAL_ACC, [self.epoch_acc])\r\n np.save(os.path.join(self.checkpoint_dir, 'val_acc.npy'), self.VAL_ACC)\r\n\r\n # update the best model (based on eval acc)\r\n if self.epoch_acc > self.best_val_acc:\r\n self.best_val_acc = self.epoch_acc\r\n self.best_epoch_id = self.epoch_id\r\n self._save_checkpoint(ckpt_name='best_ckpt.pt')\r\n print('*' * 10 + 'Best model updated!')\r\n print()\r\n\r\n\r\n def _clear_cache(self):\r\n self.running_acc = []\r\n\r\n\r\n def _forward_pass(self, batch):\r\n self.batch = batch\r\n z_in = batch['A'].to(device)\r\n self.G_pred_foreground, self.G_pred_alpha = self.net_G(z_in)\r\n\r\n\r\n def _backward_G(self):\r\n\r\n self.gt_foreground = self.batch['B'].to(device)\r\n self.gt_alpha = self.batch['ALPHA'].to(device)\r\n\r\n _, _, h, w = self.G_pred_alpha.shape\r\n self.gt_foreground = torch.nn.functional.interpolate(self.gt_foreground, (h, w), mode='area')\r\n self.gt_alpha = torch.nn.functional.interpolate(self.gt_alpha, (h, w), mode='area')\r\n\r\n pixel_loss1 = self._pxl_loss(self.G_pred_foreground, self.gt_foreground)\r\n pixel_loss2 = self._pxl_loss(self.G_pred_alpha, self.gt_alpha)\r\n self.G_loss = 100 * (pixel_loss1 + pixel_loss2) / 2.0\r\n self.G_loss.backward()\r\n\r\n\r\n def train_models(self):\r\n\r\n self._load_checkpoint()\r\n\r\n # loop over the dataset multiple times\r\n for self.epoch_id in range(self.epoch_to_start, self.max_num_epochs):\r\n\r\n ################## train #################\r\n ##########################################\r\n self._clear_cache()\r\n self.is_training = True\r\n self.net_G.train() # Set model to training mode\r\n # Iterate over data.\r\n for self.batch_id, batch in enumerate(self.dataloaders['train'], 0):\r\n self._forward_pass(batch)\r\n # update G\r\n self.optimizer_G.zero_grad()\r\n self._backward_G()\r\n self.optimizer_G.step()\r\n self._collect_running_batch_states()\r\n self._collect_epoch_states()\r\n self._update_lr_schedulers()\r\n\r\n ########### Update_Checkpoints ###########\r\n ##########################################\r\n self._update_checkpoints()\r\n\r\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.clip",
"torch.optim.lr_scheduler.StepLR",
"matplotlib.pyplot.imsave",
"torch.nn.functional.interpolate",
"numpy.mean",
"torch.cuda.is_available",
"numpy.append",
"numpy.mod"
]
]
|
lukovnikov/transformer_generalization | [
"a538bfbba6877cd7a21e710f2535df2e9236ba52"
]
| [
"layers/transformer/multi_head_attention.py"
]
| [
"import torch\nimport torch.nn\nimport torch.nn.functional as F\nimport math\nfrom typing import Optional, Callable, List, Union, Tuple\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass AttentionMask:\n src_length_mask: Optional[torch.Tensor]\n position_mask: Optional[torch.Tensor]\n\n\nclass MultiHeadAttentionBase(torch.nn.Module):\n def __init__(self, state_size: int, n_heads: int, dropout: float = 0.1):\n assert state_size % n_heads == 0\n super().__init__()\n self.state_size = state_size\n self.projection_size = state_size // n_heads\n self.n_heads = n_heads\n self.scale = 1.0 / math.sqrt(self.projection_size)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.multi_head_merge = torch.nn.Linear(n_heads * self.projection_size, state_size, bias=False)\n\n def _masked_softmax(self, logits: torch.Tensor, mask: Optional[AttentionMask]) -> torch.Tensor:\n if mask is None or (mask.src_length_mask is None and mask.position_mask is None):\n return F.softmax(logits, -1)\n\n # Output shape: [n_batch * n_heads, n_time_dest, n_time_src]\n bb, n_time_dest, n_time_src = logits.shape\n\n logits = logits.view(bb // self.n_heads, self.n_heads, n_time_dest, n_time_src)\n\n if mask.position_mask is not None:\n logits = logits.masked_fill(mask.position_mask.unsqueeze(0).unsqueeze(0), float(\"-inf\"))\n\n if mask.src_length_mask is not None:\n logits = logits.masked_fill(mask.src_length_mask.unsqueeze(1).unsqueeze(1), float(\"-inf\"))\n\n logits = F.softmax(logits, -1)\n return logits.view(bb, n_time_dest, n_time_src)\n\n def _attention_read(self, mask: Optional[AttentionMask], logits: torch.Tensor, v: torch.Tensor) -> \\\n Tuple[torch.Tensor, torch.Tensor]:\n # logits: [n_batch * n_heads, n_out, n_in]\n # v: [n_nbatch * n_heads, n_in]\n # Output data shape [n_batch * n_heads, n_time_dest, data_size]\n # Out attention score shape: [n_batch, n_heads, n_time_dest, n_time_src]\n scores = self._masked_softmax(logits * self.scale, mask)\n scores = self.dropout(scores)\n return torch.bmm(scores, v), scores.view(-1, self.n_heads, *scores.shape[1:])\n\n def merged_attention(self, n_batch: int, n_out_steps: int, *args, need_weights: bool = False, **kwargs) -> \\\n Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:\n \n data, scores = self._attention(*args, **kwargs)\n \n data = data.view(n_batch, self.n_heads, n_out_steps, -1).permute(0, 2, 1, 3).contiguous().\\\n view(n_batch, n_out_steps, -1)\n\n return self.multi_head_merge(data), scores\n\n def transform_data(self, input: torch.Tensor, proj: Callable[[torch.Tensor], torch.Tensor],\n n_projs: int) -> List[torch.Tensor]:\n # Input shape: [n_batch, n_steps, n_channels]\n # Output: Tuple of n_projs tensors of dimension: [n_batch * n_heads, n_steps, projection_size]\n n_batch, n_steps, _ = input.shape\n transformed = proj(input).view(n_batch, n_steps, self.n_heads, n_projs, self.projection_size). 
\\\n permute(0, 2, 1, 3, 4).contiguous().view(n_batch * self.n_heads, n_steps, n_projs, self.projection_size)\n return transformed.unbind(dim=2)\n\n def reset_parameters(self):\n torch.nn.init.xavier_uniform_(self.multi_head_merge.weight)\n\n\nclass AbsPosAttentionBase(MultiHeadAttentionBase):\n def _attention(self, mask: Optional[torch.Tensor], q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> \\\n torch.Tensor:\n # all inputs should have a shape of [n_batch, n_steps, data_size]\n # Output shape [n_batch * n_heads, n_time_dest, data_size]\n return self._attention_read(mask, torch.bmm(q, k.transpose(1,2)), v)\n\n\nclass MultiHeadAttention(AbsPosAttentionBase):\n def __init__(self, state_size: int, n_heads: int, dropout: float=0.1, input_size: Optional[torch.Tensor]=None):\n super().__init__(state_size, n_heads, dropout)\n self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)\n self.data_to_q = torch.nn.Linear(state_size if input_size is None else input_size,\n n_heads * self.projection_size, bias=False)\n self.reset_parameters()\n\n def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],\n need_weights: bool = False):\n # Input and output shape: [n_batch, n_steps, data_size]\n k, v = self.transform_data(attend_to, self.data_to_kv, 2)\n q, = self.transform_data(curr_state, self.data_to_q, 1)\n\n data, scores = self.merged_attention(curr_state.shape[0], q.shape[1], mask, q, k, v)\n if need_weights:\n # Calculate the mean over the heads\n return data, scores.mean(1)\n else:\n return data\n\n def reset_parameters(self):\n super().reset_parameters()\n\n torch.nn.init.xavier_uniform_(self.data_to_q.weight)\n torch.nn.init.xavier_uniform_(self.data_to_kv.weight[:self.data_to_kv.weight.shape[0]//2])\n torch.nn.init.xavier_uniform_(self.data_to_kv.weight[self.data_to_kv.weight.shape[0]//2:])\n"
]
| [
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.init.xavier_uniform_",
"torch.bmm",
"torch.nn.functional.softmax"
]
]
|
Daedalos/statsmodels | [
"f67fc16d581767c030b6f191cf650c3a69d9ed5d"
]
| [
"statsmodels/discrete/tests/test_discrete.py"
]
| [
"\"\"\"\nTests for discrete models\n\nNotes\n-----\nDECIMAL_3 is used because it seems that there is a loss of precision\nin the Stata *.dta -> *.csv output, NOT the estimator for the Poisson\ntests.\n\"\"\"\n# pylint: disable-msg=E1101\nfrom statsmodels.compat.pandas import assert_index_equal\n\nimport os\nimport warnings\n\nimport numpy as np\nfrom numpy.testing import (\n assert_,\n assert_allclose,\n assert_almost_equal,\n assert_array_equal,\n assert_array_less,\n assert_equal,\n assert_raises,\n)\nimport pandas as pd\nimport pytest\nfrom scipy import stats\nfrom scipy.stats import nbinom\n\nimport statsmodels.api as sm\nfrom statsmodels.discrete.discrete_margins import _iscount, _isdummy\nfrom statsmodels.discrete.discrete_model import (\n CountModel,\n GeneralizedPoisson,\n Logit,\n MNLogit,\n NegativeBinomial,\n NegativeBinomialP,\n Poisson,\n Probit,\n)\nimport statsmodels.formula.api as smf\nfrom statsmodels.tools.sm_exceptions import (\n ConvergenceWarning,\n PerfectSeparationError,\n SpecificationWarning,\n ValueWarning,\n)\n\nfrom .results.results_discrete import Anes, DiscreteL1, RandHIE, Spector\n\ntry:\n import cvxopt # noqa:F401\n has_cvxopt = True\nexcept ImportError:\n has_cvxopt = False\n\n\nDECIMAL_14 = 14\nDECIMAL_10 = 10\nDECIMAL_9 = 9\nDECIMAL_4 = 4\nDECIMAL_3 = 3\nDECIMAL_2 = 2\nDECIMAL_1 = 1\nDECIMAL_0 = 0\n\ndef load_anes96():\n data = sm.datasets.anes96.load()\n data.endog = np.asarray(data.endog)\n data.exog = np.asarray(data.exog)\n return data\n\ndef load_spector():\n data = sm.datasets.spector.load()\n data.endog = np.asarray(data.endog)\n data.exog = np.asarray(data.exog)\n return data\n\n\ndef load_randhie():\n data = sm.datasets.randhie.load()\n data.endog = np.asarray(data.endog)\n data.exog = np.asarray(data.exog, dtype=float)\n return data\n\nclass CheckModelMixin(object):\n # Assertions about the Model object, as opposed to the Results\n # Assumes that mixed-in class implements:\n # res1\n\n def test_fit_regularized_invalid_method(self):\n # GH#5224 check we get ValueError when passing invalid \"method\" arg\n model = self.res1.model\n\n with pytest.raises(ValueError, match=r'is not supported, use either'):\n model.fit_regularized(method=\"foo\")\n\n\nclass CheckModelResults(CheckModelMixin):\n \"\"\"\n res2 should be the test results from RModelWrap\n or the results as defined in model_results_data\n \"\"\"\n\n def test_params(self):\n assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)\n\n def test_conf_int(self):\n assert_allclose(self.res1.conf_int(), self.res2.conf_int, rtol=8e-5)\n\n def test_zstat(self):\n assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_4)\n\n def test_pvalues(self):\n assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)\n\n def test_cov_params(self):\n if not hasattr(self.res2, \"cov_params\"):\n raise pytest.skip(\"TODO: implement res2.cov_params\")\n assert_almost_equal(self.res1.cov_params(),\n self.res2.cov_params,\n DECIMAL_4)\n\n def test_llf(self):\n assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)\n\n def test_llnull(self):\n assert_almost_equal(self.res1.llnull, self.res2.llnull, DECIMAL_4)\n\n def test_llr(self):\n assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_3)\n\n def test_llr_pvalue(self):\n assert_almost_equal(self.res1.llr_pvalue,\n self.res2.llr_pvalue,\n DECIMAL_4)\n\n @pytest.mark.xfail(reason=\"Test has not been implemented for this class.\",\n strict=True, raises=NotImplementedError)\n def test_normalized_cov_params(self):\n raise 
NotImplementedError\n\n def test_bse(self):\n assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)\n\n def test_dof(self):\n assert_equal(self.res1.df_model, self.res2.df_model)\n assert_equal(self.res1.df_resid, self.res2.df_resid)\n\n def test_aic(self):\n assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)\n\n def test_bic(self):\n assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)\n\n def test_predict(self):\n assert_almost_equal(self.res1.model.predict(self.res1.params),\n self.res2.phat, DECIMAL_4)\n\n def test_predict_xb(self):\n assert_almost_equal(self.res1.model.predict(self.res1.params,\n linear=True),\n self.res2.yhat, DECIMAL_4)\n\n def test_loglikeobs(self):\n #basic cross check\n llobssum = self.res1.model.loglikeobs(self.res1.params).sum()\n assert_almost_equal(llobssum, self.res1.llf, DECIMAL_14)\n\n def test_jac(self):\n #basic cross check\n jacsum = self.res1.model.score_obs(self.res1.params).sum(0)\n score = self.res1.model.score(self.res1.params)\n assert_almost_equal(jacsum, score, DECIMAL_9) #Poisson has low precision ?\n\n def test_summary_latex(self):\n # see #7747, last line of top table was dropped\n summ = self.res1.summary()\n ltx = summ.as_latex()\n n_lines = len(ltx.splitlines())\n if not isinstance(self.res1.model, MNLogit):\n # skip MNLogit which creates several params tables\n assert n_lines == 19 + np.size(self.res1.params)\n assert \"Covariance Type:\" in ltx\n\n\nclass CheckBinaryResults(CheckModelResults):\n def test_pred_table(self):\n assert_array_equal(self.res1.pred_table(), self.res2.pred_table)\n\n def test_resid_dev(self):\n assert_almost_equal(self.res1.resid_dev, self.res2.resid_dev,\n DECIMAL_4)\n\n def test_resid_generalized(self):\n assert_almost_equal(self.res1.resid_generalized,\n self.res2.resid_generalized, DECIMAL_4)\n\n @pytest.mark.smoke\n def test_resid_response(self):\n self.res1.resid_response\n\n\nclass CheckMargEff(object):\n \"\"\"\n Test marginal effects (margeff) and its options\n \"\"\"\n\n def test_nodummy_dydxoverall(self):\n me = self.res1.get_margeff()\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_dydx, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_dydx_se, DECIMAL_4)\n\n me_frame = me.summary_frame()\n eff = me_frame[\"dy/dx\"].values\n assert_allclose(eff, me.margeff, rtol=1e-13)\n assert_equal(me_frame.shape, (me.margeff.size, 6))\n\n\n def test_nodummy_dydxmean(self):\n me = self.res1.get_margeff(at='mean')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_dydxmean, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_dydxmean_se, DECIMAL_4)\n\n def test_nodummy_dydxmedian(self):\n me = self.res1.get_margeff(at='median')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_dydxmedian, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_dydxmedian_se, DECIMAL_4)\n\n def test_nodummy_dydxzero(self):\n me = self.res1.get_margeff(at='zero')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_dydxzero, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_dydxzero, DECIMAL_4)\n\n def test_nodummy_dyexoverall(self):\n me = self.res1.get_margeff(method='dyex')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_dyex, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_dyex_se, DECIMAL_4)\n\n def test_nodummy_dyexmean(self):\n me = self.res1.get_margeff(at='mean', method='dyex')\n assert_almost_equal(me.margeff,\n 
self.res2.margeff_nodummy_dyexmean, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_dyexmean_se, DECIMAL_4)\n\n def test_nodummy_dyexmedian(self):\n me = self.res1.get_margeff(at='median', method='dyex')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_dyexmedian, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_dyexmedian_se, DECIMAL_4)\n\n def test_nodummy_dyexzero(self):\n me = self.res1.get_margeff(at='zero', method='dyex')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_dyexzero, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_dyexzero_se, DECIMAL_4)\n\n def test_nodummy_eydxoverall(self):\n me = self.res1.get_margeff(method='eydx')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_eydx, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_eydx_se, DECIMAL_4)\n\n def test_nodummy_eydxmean(self):\n me = self.res1.get_margeff(at='mean', method='eydx')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_eydxmean, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_eydxmean_se, DECIMAL_4)\n\n def test_nodummy_eydxmedian(self):\n me = self.res1.get_margeff(at='median', method='eydx')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_eydxmedian, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_eydxmedian_se, DECIMAL_4)\n\n def test_nodummy_eydxzero(self):\n me = self.res1.get_margeff(at='zero', method='eydx')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_eydxzero, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_eydxzero_se, DECIMAL_4)\n\n def test_nodummy_eyexoverall(self):\n me = self.res1.get_margeff(method='eyex')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_eyex, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_eyex_se, DECIMAL_4)\n\n def test_nodummy_eyexmean(self):\n me = self.res1.get_margeff(at='mean', method='eyex')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_eyexmean, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_eyexmean_se, DECIMAL_4)\n\n def test_nodummy_eyexmedian(self):\n me = self.res1.get_margeff(at='median', method='eyex')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_eyexmedian, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_eyexmedian_se, DECIMAL_4)\n\n def test_nodummy_eyexzero(self):\n me = self.res1.get_margeff(at='zero', method='eyex')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_eyexzero, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_eyexzero_se, DECIMAL_4)\n\n def test_dummy_dydxoverall(self):\n me = self.res1.get_margeff(dummy=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_dummy_dydx, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_dummy_dydx_se, DECIMAL_4)\n\n def test_dummy_dydxmean(self):\n me = self.res1.get_margeff(at='mean', dummy=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_dummy_dydxmean, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_dummy_dydxmean_se, DECIMAL_4)\n\n def test_dummy_eydxoverall(self):\n me = self.res1.get_margeff(method='eydx', dummy=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_dummy_eydx, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_dummy_eydx_se, DECIMAL_4)\n\n def test_dummy_eydxmean(self):\n 
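# eydx gives semi-elasticities d(ln y)/dx; at='mean' evaluates them at the sample means and dummy=True uses discrete differences for dummy regressors\n        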
me = self.res1.get_margeff(at='mean', method='eydx', dummy=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_dummy_eydxmean, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_dummy_eydxmean_se, DECIMAL_4)\n\n def test_count_dydxoverall(self):\n me = self.res1.get_margeff(count=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_count_dydx, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_count_dydx_se, DECIMAL_4)\n\n def test_count_dydxmean(self):\n me = self.res1.get_margeff(count=True, at='mean')\n assert_almost_equal(me.margeff,\n self.res2.margeff_count_dydxmean, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_count_dydxmean_se, DECIMAL_4)\n\n def test_count_dummy_dydxoverall(self):\n me = self.res1.get_margeff(count=True, dummy=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_count_dummy_dydxoverall, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_count_dummy_dydxoverall_se, DECIMAL_4)\n\n def test_count_dummy_dydxmean(self):\n me = self.res1.get_margeff(count=True, dummy=True, at='mean')\n assert_almost_equal(me.margeff,\n self.res2.margeff_count_dummy_dydxmean, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_count_dummy_dydxmean_se, DECIMAL_4)\n\n\nclass TestProbitNewton(CheckBinaryResults):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n cls.res1 = Probit(data.endog, data.exog).fit(method=\"newton\", disp=0)\n res2 = Spector.probit\n cls.res2 = res2\n\n def test_init_kwargs(self):\n endog = self.res1.model.endog\n exog = self.res1.model.exog\n z = np.ones(len(endog))\n with pytest.warns(ValueWarning, match=\"unknown kwargs\"):\n # unsupported keyword\n Probit(endog, exog, weights=z)\n\n\nclass TestProbitBFGS(CheckBinaryResults):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n cls.res1 = Probit(data.endog, data.exog).fit(method=\"bfgs\",\n disp=0)\n res2 = Spector.probit\n cls.res2 = res2\n\n\nclass TestProbitNM(CheckBinaryResults):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n res2 = Spector.probit\n cls.res2 = res2\n cls.res1 = Probit(data.endog, data.exog).fit(method=\"nm\",\n disp=0, maxiter=500)\n\n\nclass TestProbitPowell(CheckBinaryResults):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n res2 = Spector.probit\n cls.res2 = res2\n cls.res1 = Probit(data.endog, data.exog).fit(method=\"powell\",\n disp=0, ftol=1e-8)\n\n\nclass TestProbitCG(CheckBinaryResults):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n res2 = Spector.probit\n cls.res2 = res2\n\n # fmin_cg fails to converge on some machines - reparameterize\n from statsmodels.tools.transform_model import StandardizeTransform\n transf = StandardizeTransform(data.exog)\n exog_st = transf(data.exog)\n res1_st = Probit(data.endog,\n exog_st).fit(method=\"cg\", disp=0, maxiter=1000,\n gtol=1e-08)\n start_params = transf.transform_params(res1_st.params)\n assert_allclose(start_params, res2.params, rtol=1e-5, atol=1e-6)\n\n cls.res1 = Probit(data.endog,\n data.exog).fit(start_params=start_params,\n method=\"cg\", maxiter=1000,\n gtol=1e-05, disp=0)\n\n assert_array_less(cls.res1.mle_retvals['fcalls'], 100)\n\n\nclass 
TestProbitNCG(CheckBinaryResults):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n res2 = Spector.probit\n cls.res2 = res2\n cls.res1 = Probit(data.endog, data.exog).fit(method=\"ncg\",\n disp=0, avextol=1e-8,\n warn_convergence=False)\n # converges close enough but warnflag is 2 for precision loss\n\n\nclass TestProbitBasinhopping(CheckBinaryResults):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n res2 = Spector.probit\n cls.res2 = res2\n fit = Probit(data.endog, data.exog).fit\n np.random.seed(1)\n cls.res1 = fit(method=\"basinhopping\", disp=0, niter=5,\n minimizer={'method' : 'L-BFGS-B', 'tol' : 1e-8})\n\n\nclass TestProbitMinimizeDefault(CheckBinaryResults):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n res2 = Spector.probit\n cls.res2 = res2\n fit = Probit(data.endog, data.exog).fit\n cls.res1 = fit(method=\"minimize\", disp=0, niter=5, tol = 1e-8)\n\n\nclass TestProbitMinimizeDogleg(CheckBinaryResults):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n res2 = Spector.probit\n cls.res2 = res2\n fit = Probit(data.endog, data.exog).fit\n cls.res1 = fit(method=\"minimize\", disp=0, niter=5, tol = 1e-8,\n min_method = 'dogleg')\n\n\nclass TestProbitMinimizeAdditionalOptions(CheckBinaryResults):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n res2 = Spector.probit\n cls.res2 = res2\n cls.res1 = Probit(data.endog, data.exog).fit(method=\"minimize\", disp=0,\n maxiter=500,\n min_method='Nelder-Mead',\n xatol=1e-4, fatol=1e-4)\n\nclass CheckLikelihoodModelL1(object):\n \"\"\"\n For testing results generated with L1 regularization\n \"\"\"\n def test_params(self):\n assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)\n\n def test_conf_int(self):\n assert_almost_equal(\n self.res1.conf_int(), self.res2.conf_int, DECIMAL_4)\n\n def test_bse(self):\n assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)\n\n def test_nnz_params(self):\n assert_almost_equal(\n self.res1.nnz_params, self.res2.nnz_params, DECIMAL_4)\n\n def test_aic(self):\n assert_almost_equal(\n self.res1.aic, self.res2.aic, DECIMAL_3)\n\n def test_bic(self):\n assert_almost_equal(\n self.res1.bic, self.res2.bic, DECIMAL_3)\n\n\nclass TestProbitL1(CheckLikelihoodModelL1):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=True)\n alpha = np.array([0.1, 0.2, 0.3, 10]) #/ data.exog.shape[0]\n cls.res1 = Probit(data.endog, data.exog).fit_regularized(\n method=\"l1\", alpha=alpha, disp=0, trim_mode='auto',\n auto_trim_tol=0.02, acc=1e-10, maxiter=1000)\n res2 = DiscreteL1.probit\n cls.res2 = res2\n\n def test_cov_params(self):\n assert_almost_equal(\n self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)\n\n\nclass TestMNLogitL1(CheckLikelihoodModelL1):\n\n @classmethod\n def setup_class(cls):\n anes_data = load_anes96()\n anes_exog = anes_data.exog\n anes_exog = sm.add_constant(anes_exog, prepend=False)\n mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)\n alpha = 10. 
* np.ones((mlogit_mod.J - 1, mlogit_mod.K)) #/ anes_exog.shape[0]\n alpha[-1,:] = 0\n cls.res1 = mlogit_mod.fit_regularized(\n method='l1', alpha=alpha, trim_mode='auto', auto_trim_tol=0.02,\n acc=1e-10, disp=0)\n res2 = DiscreteL1.mnlogit\n cls.res2 = res2\n\n\nclass TestLogitL1(CheckLikelihoodModelL1):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=True)\n cls.alpha = 3 * np.array([0., 1., 1., 1.]) #/ data.exog.shape[0]\n cls.res1 = Logit(data.endog, data.exog).fit_regularized(\n method=\"l1\", alpha=cls.alpha, disp=0, trim_mode='size',\n size_trim_tol=1e-5, acc=1e-10, maxiter=1000)\n res2 = DiscreteL1.logit\n cls.res2 = res2\n\n def test_cov_params(self):\n assert_almost_equal(\n self.res1.cov_params(), self.res2.cov_params, DECIMAL_4)\n\n\[email protected](not has_cvxopt, reason='Skipped test_cvxopt since cvxopt '\n 'is not available')\nclass TestCVXOPT(object):\n\n @classmethod\n def setup_class(cls):\n if not has_cvxopt:\n pytest.skip('Skipped test_cvxopt since cvxopt is not available')\n cls.data = sm.datasets.spector.load()\n cls.data.endog = np.asarray(cls.data.endog)\n cls.data.exog = np.asarray(cls.data.exog)\n cls.data.exog = sm.add_constant(cls.data.exog, prepend=True)\n\n def test_cvxopt_versus_slsqp(self):\n # Compares results from cvxopt to the standard slsqp\n self.alpha = 3. * np.array([0, 1, 1, 1.]) #/ self.data.endog.shape[0]\n res_slsqp = Logit(self.data.endog, self.data.exog).fit_regularized(\n method=\"l1\", alpha=self.alpha, disp=0, acc=1e-10, maxiter=1000,\n trim_mode='auto')\n res_cvxopt = Logit(self.data.endog, self.data.exog).fit_regularized(\n method=\"l1_cvxopt_cp\", alpha=self.alpha, disp=0, abstol=1e-10,\n trim_mode='auto', auto_trim_tol=0.01, maxiter=1000)\n assert_almost_equal(res_slsqp.params, res_cvxopt.params, DECIMAL_4)\n\n\nclass TestSweepAlphaL1(object):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=True)\n cls.model = Logit(data.endog, data.exog)\n cls.alphas = np.array(\n [[0.1, 0.1, 0.1, 0.1],\n [0.4, 0.4, 0.5, 0.5],\n [0.5, 0.5, 1, 1]]) #/ data.exog.shape[0]\n cls.res1 = DiscreteL1.sweep\n\n def test_sweep_alpha(self):\n for i in range(3):\n alpha = self.alphas[i, :]\n res2 = self.model.fit_regularized(\n method=\"l1\", alpha=alpha, disp=0, acc=1e-10,\n trim_mode='off', maxiter=1000)\n assert_almost_equal(res2.params, self.res1.params[i], DECIMAL_4)\n\n\nclass CheckL1Compatability(object):\n \"\"\"\n Tests compatability between l1 and unregularized by setting alpha such\n that certain parameters should be effectively unregularized, and others\n should be ignored by the model.\n \"\"\"\n def test_params(self):\n m = self.m\n assert_almost_equal(\n self.res_unreg.params[:m], self.res_reg.params[:m], DECIMAL_4)\n # The last entry should be close to zero\n # handle extra parameter of NegativeBinomial\n kvars = self.res_reg.model.exog.shape[1]\n assert_almost_equal(0, self.res_reg.params[m:kvars], DECIMAL_4)\n\n def test_cov_params(self):\n m = self.m\n # The restricted cov_params should be equal\n assert_almost_equal(\n self.res_unreg.cov_params()[:m, :m],\n self.res_reg.cov_params()[:m, :m],\n DECIMAL_1)\n\n def test_df(self):\n assert_equal(self.res_unreg.df_model, self.res_reg.df_model)\n assert_equal(self.res_unreg.df_resid, self.res_reg.df_resid)\n\n def test_t_test(self):\n m = self.m\n kvars = self.kvars\n # handle extra parameter of NegativeBinomial\n extra = getattr(self, 'k_extra', 0)\n t_unreg = 
self.res_unreg.t_test(np.eye(len(self.res_unreg.params)))\n t_reg = self.res_reg.t_test(np.eye(kvars + extra))\n assert_almost_equal(t_unreg.effect[:m], t_reg.effect[:m], DECIMAL_3)\n assert_almost_equal(t_unreg.sd[:m], t_reg.sd[:m], DECIMAL_3)\n assert_almost_equal(np.nan, t_reg.sd[m])\n assert_allclose(t_unreg.tvalue[:m], t_reg.tvalue[:m], atol=3e-3)\n assert_almost_equal(np.nan, t_reg.tvalue[m])\n\n def test_f_test(self):\n m = self.m\n kvars = self.kvars\n # handle extra parameter of NegativeBinomial\n extra = getattr(self, 'k_extra', 0)\n f_unreg = self.res_unreg.f_test(np.eye(len(self.res_unreg.params))[:m])\n f_reg = self.res_reg.f_test(np.eye(kvars + extra)[:m])\n assert_allclose(f_unreg.fvalue, f_reg.fvalue, rtol=3e-5, atol=1e-3)\n assert_almost_equal(f_unreg.pvalue, f_reg.pvalue, DECIMAL_3)\n\n def test_bad_r_matrix(self):\n kvars = self.kvars\n assert_raises(ValueError, self.res_reg.f_test, np.eye(kvars) )\n\n\nclass TestPoissonL1Compatability(CheckL1Compatability):\n\n @classmethod\n def setup_class(cls):\n cls.kvars = 10 # Number of variables\n cls.m = 7 # Number of unregularized parameters\n rand_data = load_randhie()\n rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)\n rand_exog = sm.add_constant(rand_exog, prepend=True)\n # Drop some columns and do an unregularized fit\n exog_no_PSI = rand_exog[:, :cls.m]\n mod_unreg = sm.Poisson(rand_data.endog, exog_no_PSI)\n cls.res_unreg = mod_unreg.fit(method=\"newton\", disp=False)\n # Do a regularized fit with alpha, effectively dropping the last column\n alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars)\n alpha[:cls.m] = 0\n cls.res_reg = sm.Poisson(rand_data.endog, rand_exog).fit_regularized(\n method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,\n trim_mode='auto')\n\n\nclass TestNegativeBinomialL1Compatability(CheckL1Compatability):\n\n @classmethod\n def setup_class(cls):\n cls.kvars = 10 # Number of variables\n cls.m = 7 # Number of unregularized parameters\n rand_data = load_randhie()\n rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)\n rand_exog_st = (rand_exog - rand_exog.mean(0)) / rand_exog.std(0)\n rand_exog = sm.add_constant(rand_exog_st, prepend=True)\n # Drop some columns and do an unregularized fit\n exog_no_PSI = rand_exog[:, :cls.m]\n mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI)\n cls.res_unreg = mod_unreg.fit(method=\"newton\", disp=False)\n # Do a regularized fit with alpha, effectively dropping the last column\n alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars + 1)\n alpha[:cls.m] = 0\n alpha[-1] = 0 # do not penalize alpha\n\n mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog)\n cls.res_reg = mod_reg.fit_regularized(\n method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,\n trim_mode='auto')\n cls.k_extra = 1 # 1 extra parameter in nb2\n\n\nclass TestNegativeBinomialGeoL1Compatability(CheckL1Compatability):\n\n @classmethod\n def setup_class(cls):\n cls.kvars = 10 # Number of variables\n cls.m = 7 # Number of unregularized parameters\n rand_data = load_randhie()\n rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)\n rand_exog = sm.add_constant(rand_exog, prepend=True)\n # Drop some columns and do an unregularized fit\n exog_no_PSI = rand_exog[:, :cls.m]\n mod_unreg = sm.NegativeBinomial(rand_data.endog, exog_no_PSI,\n loglike_method='geometric')\n cls.res_unreg = mod_unreg.fit(method=\"newton\", disp=False)\n # Do a regularized fit with alpha, effectively dropping the last columns\n alpha = 10 
* len(rand_data.endog) * np.ones(cls.kvars)\n        alpha[:cls.m] = 0\n        mod_reg = sm.NegativeBinomial(rand_data.endog, rand_exog,\n                                      loglike_method='geometric')\n        cls.res_reg = mod_reg.fit_regularized(\n            method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,\n            trim_mode='auto')\n\n        assert_equal(mod_reg.loglike_method, 'geometric')\n\n\nclass TestLogitL1Compatability(CheckL1Compatability):\n\n    @classmethod\n    def setup_class(cls):\n        cls.kvars = 4 # Number of variables\n        cls.m = 3 # Number of unregularized parameters\n        data = load_spector()\n        data.exog = sm.add_constant(data.exog, prepend=True)\n        # Do a regularized fit with alpha, effectively dropping the last column\n        alpha = np.array([0, 0, 0, 10])\n        cls.res_reg = Logit(data.endog, data.exog).fit_regularized(\n            method=\"l1\", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,\n            trim_mode='auto')\n        # Actually drop the last column and do an unregularized fit\n        exog_no_PSI = data.exog[:, :cls.m]\n        cls.res_unreg = Logit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)\n\n\nclass TestMNLogitL1Compatability(CheckL1Compatability):\n\n    @classmethod\n    def setup_class(cls):\n        cls.kvars = 4 # Number of variables\n        cls.m = 3 # Number of unregularized parameters\n        data = load_spector()\n        data.exog = sm.add_constant(data.exog, prepend=True)\n        alpha = np.array([0, 0, 0, 10])\n        cls.res_reg = MNLogit(data.endog, data.exog).fit_regularized(\n            method=\"l1\", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,\n            trim_mode='auto')\n        # Actually drop the last column and do an unregularized fit\n        exog_no_PSI = data.exog[:, :cls.m]\n        cls.res_unreg = MNLogit(data.endog, exog_no_PSI).fit(\n            disp=0, gtol=1e-15, method='bfgs', maxiter=1000)\n\n    def test_t_test(self):\n        m = self.m\n        kvars = self.kvars\n        t_unreg = self.res_unreg.t_test(np.eye(m))\n        t_reg = self.res_reg.t_test(np.eye(kvars))\n        assert_almost_equal(t_unreg.effect, t_reg.effect[:m], DECIMAL_3)\n        assert_almost_equal(t_unreg.sd, t_reg.sd[:m], DECIMAL_3)\n        assert_almost_equal(np.nan, t_reg.sd[m])\n        assert_almost_equal(t_unreg.tvalue, t_reg.tvalue[:m], DECIMAL_3)\n\n    @pytest.mark.skip(\"Skipped test_f_test for MNLogit\")\n    def test_f_test(self):\n        pass\n\n\nclass TestProbitL1Compatability(CheckL1Compatability):\n\n    @classmethod\n    def setup_class(cls):\n        cls.kvars = 4 # Number of variables\n        cls.m = 3 # Number of unregularized parameters\n        data = load_spector()\n        data.exog = sm.add_constant(data.exog, prepend=True)\n        alpha = np.array([0, 0, 0, 10])\n        cls.res_reg = Probit(data.endog, data.exog).fit_regularized(\n            method=\"l1\", alpha=alpha, disp=0, acc=1e-15, maxiter=2000,\n            trim_mode='auto')\n        # Actually drop the last column and do an unregularized fit\n        exog_no_PSI = data.exog[:, :cls.m]\n        cls.res_unreg = Probit(data.endog, exog_no_PSI).fit(disp=0, tol=1e-15)\n\n\nclass CompareL1(object):\n    \"\"\"\n    For checking results for l1 regularization.\n    Assumes self.res1 and self.res2 are two legitimate models to be compared.\n    \"\"\"\n    def test_basic_results(self):\n        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)\n        assert_almost_equal(self.res1.cov_params(), self.res2.cov_params(),\n                            DECIMAL_4)\n        assert_almost_equal(self.res1.conf_int(), self.res2.conf_int(),\n                            DECIMAL_4)\n        assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)\n        assert_almost_equal(self.res1.pred_table(), self.res2.pred_table(),\n                            DECIMAL_4)\n        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)\n        assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_4)\n        assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_4)\n        
assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_4)\n assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)\n\n assert_(self.res1.mle_retvals['converged'] is True)\n\n\nclass CompareL11D(CompareL1):\n \"\"\"\n Check t and f tests. This only works for 1-d results\n \"\"\"\n def test_tests(self):\n restrictmat = np.eye(len(self.res1.params.ravel()))\n assert_almost_equal(self.res1.t_test(restrictmat).pvalue,\n self.res2.t_test(restrictmat).pvalue, DECIMAL_4)\n assert_almost_equal(self.res1.f_test(restrictmat).pvalue,\n self.res2.f_test(restrictmat).pvalue, DECIMAL_4)\n\n\nclass TestL1AlphaZeroLogit(CompareL11D):\n # Compares l1 model with alpha = 0 to the unregularized model.\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=True)\n cls.res1 = Logit(data.endog, data.exog).fit_regularized(\n method=\"l1\", alpha=0, disp=0, acc=1e-15, maxiter=1000,\n trim_mode='auto', auto_trim_tol=0.01)\n cls.res2 = Logit(data.endog, data.exog).fit(disp=0, tol=1e-15)\n\n def test_converged(self):\n res = self.res1.model.fit_regularized(\n method=\"l1\", alpha=0, disp=0, acc=1e-15, maxiter=1,\n trim_mode='auto', auto_trim_tol=0.01)\n\n # see #2857\n assert_(res.mle_retvals['converged'] is False)\n\n\nclass TestL1AlphaZeroProbit(CompareL11D):\n # Compares l1 model with alpha = 0 to the unregularized model.\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=True)\n cls.res1 = Probit(data.endog, data.exog).fit_regularized(\n method=\"l1\", alpha=0, disp=0, acc=1e-15, maxiter=1000,\n trim_mode='auto', auto_trim_tol=0.01)\n cls.res2 = Probit(data.endog, data.exog).fit(disp=0, tol=1e-15)\n\n\nclass TestL1AlphaZeroMNLogit(CompareL1):\n\n @classmethod\n def setup_class(cls):\n data = load_anes96()\n data.exog = sm.add_constant(data.exog, prepend=False)\n cls.res1 = MNLogit(data.endog, data.exog).fit_regularized(\n method=\"l1\", alpha=0, disp=0, acc=1e-15, maxiter=1000,\n trim_mode='auto', auto_trim_tol=0.01)\n cls.res2 = MNLogit(data.endog, data.exog).fit(disp=0, gtol=1e-15,\n method='bfgs',\n maxiter=1000)\n\n\nclass TestLogitNewton(CheckBinaryResults, CheckMargEff):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n cls.res1 = Logit(data.endog, data.exog).fit(method=\"newton\", disp=0)\n res2 = Spector.logit\n cls.res2 = res2\n\n def test_resid_pearson(self):\n assert_almost_equal(self.res1.resid_pearson,\n self.res2.resid_pearson, 5)\n\n def test_nodummy_exog1(self):\n me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.})\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_atexog1, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_atexog1_se, DECIMAL_4)\n\n def test_nodummy_exog2(self):\n me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_atexog2, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_atexog2_se, DECIMAL_4)\n\n def test_dummy_exog1(self):\n me = self.res1.get_margeff(atexog={0 : 2.0, 2 : 1.}, dummy=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_dummy_atexog1, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_dummy_atexog1_se, DECIMAL_4)\n\n def test_dummy_exog2(self):\n me = self.res1.get_margeff(atexog={1 : 21., 2 : 0}, at='mean',\n dummy=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_dummy_atexog2, 
DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_dummy_atexog2_se, DECIMAL_4)\n\n def test_diagnostic(self):\n # Hosmer-Lemeshow\n # Stata 14: `estat gof, group(5) table`\n n_groups = 5\n chi2 = 1.630883318257913\n pvalue = 0.6524\n df = 3\n\n import statsmodels.stats.diagnostic_gen as dia\n\n fitted = self.res1.predict()\n en = self.res1.model.endog\n counts = np.column_stack((en, 1 - en))\n expected = np.column_stack((fitted, 1 - fitted))\n # replicate splits in Stata estat gof\n group_sizes = [7, 6, 7, 6, 6]\n indices = np.cumsum(group_sizes)[:-1]\n res = dia.test_chisquare_binning(counts, expected, sort_var=fitted,\n bins=indices, df=None)\n assert_allclose(res.statistic, chi2, rtol=1e-11)\n assert_equal(res.df, df)\n assert_allclose(res.pvalue, pvalue, atol=6e-5)\n assert_equal(res.freqs.shape, (n_groups, 2))\n assert_equal(res.freqs.sum(1), group_sizes)\n\n\nclass TestLogitNewtonPrepend(CheckMargEff):\n # same as previous version but adjusted for add_constant prepend=True\n # bug #3695\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=True)\n cls.res1 = Logit(data.endog, data.exog).fit(method=\"newton\", disp=0)\n res2 = Spector.logit\n cls.res2 = res2\n cls.slice = np.roll(np.arange(len(cls.res1.params)), 1) #.astype(int)\n\n def test_resid_pearson(self):\n assert_almost_equal(self.res1.resid_pearson,\n self.res2.resid_pearson, 5)\n\n def test_nodummy_exog1(self):\n me = self.res1.get_margeff(atexog={1 : 2.0, 3 : 1.})\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_atexog1, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_atexog1_se, DECIMAL_4)\n\n def test_nodummy_exog2(self):\n me = self.res1.get_margeff(atexog={2 : 21., 3 : 0}, at='mean')\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_atexog2, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_atexog2_se, DECIMAL_4)\n\n def test_dummy_exog1(self):\n me = self.res1.get_margeff(atexog={1 : 2.0, 3 : 1.}, dummy=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_dummy_atexog1, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_dummy_atexog1_se, DECIMAL_4)\n\n def test_dummy_exog2(self):\n me = self.res1.get_margeff(atexog={2 : 21., 3 : 0}, at='mean',\n dummy=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_dummy_atexog2, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_dummy_atexog2_se, DECIMAL_4)\n\n\nclass TestLogitBFGS(CheckBinaryResults, CheckMargEff):\n\n @classmethod\n def setup_class(cls):\n data = load_spector()\n data.exog = sm.add_constant(data.exog, prepend=False)\n res2 = Spector.logit\n cls.res2 = res2\n cls.res1 = Logit(data.endog, data.exog).fit(method=\"bfgs\", disp=0)\n\n\nclass TestPoissonNewton(CheckModelResults):\n\n @classmethod\n def setup_class(cls):\n data = load_randhie()\n exog = sm.add_constant(data.exog, prepend=False)\n cls.res1 = Poisson(data.endog, exog).fit(method='newton', disp=0)\n res2 = RandHIE.poisson\n cls.res2 = res2\n\n def test_margeff_overall(self):\n me = self.res1.get_margeff()\n assert_almost_equal(me.margeff,\n self.res2.margeff_nodummy_overall, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_nodummy_overall_se, DECIMAL_4)\n\n def test_margeff_dummy_overall(self):\n me = self.res1.get_margeff(dummy=True)\n assert_almost_equal(me.margeff,\n self.res2.margeff_dummy_overall, DECIMAL_4)\n assert_almost_equal(me.margeff_se,\n 
self.res2.margeff_dummy_overall_se, DECIMAL_4)\n\n    def test_resid(self):\n        assert_almost_equal(self.res1.resid, self.res2.resid, 2)\n\n    def test_predict_prob(self):\n        cur_dir = os.path.dirname(os.path.abspath(__file__))\n        path = os.path.join(cur_dir, \"results\", \"predict_prob_poisson.csv\")\n        probs_res = np.loadtxt(path, delimiter=\",\")\n\n        # just check the first 100 obs. vs R to save memory\n        probs = self.res1.predict_prob()[:100]\n        assert_almost_equal(probs, probs_res, 8)\n\n    @pytest.mark.xfail(reason=\"res2.cov_params is a zero-dim array of None\",\n                       strict=True)\n    def test_cov_params(self):\n        super(TestPoissonNewton, self).test_cov_params()\n\n\nclass CheckNegBinMixin(object):\n    # Test methods shared by TestNegativeBinomialXYZ classes\n\n    @pytest.mark.xfail(reason=\"pvalues do not match, in some cases wrong size\",\n                       strict=True, raises=AssertionError)\n    def test_pvalues(self):\n        assert_almost_equal(self.res1.pvalues,\n                            self.res2.pvalues,\n                            DECIMAL_4)\n\n\nclass TestNegativeBinomialNB2Newton(CheckNegBinMixin, CheckModelResults):\n\n    @classmethod\n    def setup_class(cls):\n        data = load_randhie()\n        exog = sm.add_constant(data.exog, prepend=False)\n        cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(method='newton', disp=0)\n        res2 = RandHIE.negativebinomial_nb2_bfgs\n        cls.res2 = res2\n\n    #NOTE: The bse is much closer in precision to stata\n    def test_bse(self):\n        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)\n\n    def test_params(self):\n        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)\n\n    def test_alpha(self):\n        self.res1.bse # attaches alpha_std_err\n        assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,\n                            DECIMAL_4)\n        assert_almost_equal(self.res1.lnalpha_std_err,\n                            self.res2.lnalpha_std_err, DECIMAL_4)\n\n    def test_conf_int(self):\n        assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,\n                            DECIMAL_3)\n\n    def test_zstat(self): # Low precision because Z vs. 
t\n        assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,\n                            DECIMAL_2)\n\n    def test_fittedvalues(self):\n        assert_almost_equal(self.res1.fittedvalues[:10],\n                            self.res2.fittedvalues[:10], DECIMAL_3)\n\n    def test_predict(self):\n        assert_almost_equal(self.res1.predict()[:10],\n                            np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)\n\n    def test_predict_xb(self):\n        assert_almost_equal(self.res1.predict(linear=True)[:10],\n                            self.res2.fittedvalues[:10], DECIMAL_3)\n\n\nclass TestNegativeBinomialNB1Newton(CheckNegBinMixin, CheckModelResults):\n\n    @classmethod\n    def setup_class(cls):\n        data = load_randhie()\n        exog = sm.add_constant(data.exog, prepend=False)\n        model = NegativeBinomial(data.endog, exog, 'nb1')\n        cls.res1 = model.fit(method=\"newton\", maxiter=100, disp=0)\n        res2 = RandHIE.negativebinomial_nb1_bfgs\n        cls.res2 = res2\n\n    def test_zstat(self):\n        assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)\n\n    def test_lnalpha(self):\n        self.res1.bse # attaches alpha_std_err\n        assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)\n        assert_almost_equal(self.res1.lnalpha_std_err,\n                            self.res2.lnalpha_std_err, DECIMAL_4)\n\n    def test_params(self):\n        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)\n\n    def test_conf_int(self):\n        # the bse for alpha is not high precision from the hessian\n        # approximation\n        assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,\n                            DECIMAL_2)\n\n    @pytest.mark.xfail(reason=\"Test has not been implemented for this class.\",\n                       strict=True, raises=NotImplementedError)\n    def test_predict(self):\n        raise NotImplementedError\n\n    @pytest.mark.xfail(reason=\"Test has not been implemented for this class.\",\n                       strict=True, raises=NotImplementedError)\n    def test_predict_xb(self):\n        raise NotImplementedError\n\n\nclass TestNegativeBinomialNB2BFGS(CheckNegBinMixin, CheckModelResults):\n\n    @classmethod\n    def setup_class(cls):\n        data = load_randhie()\n        exog = sm.add_constant(data.exog, prepend=False)\n        cls.res1 = NegativeBinomial(data.endog, exog, 'nb2').fit(\n                                                method='bfgs', disp=0,\n                                                maxiter=1000)\n        res2 = RandHIE.negativebinomial_nb2_bfgs\n        cls.res2 = res2\n\n    #NOTE: The bse is much closer in precision to stata\n    def test_bse(self):\n        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)\n\n    def test_params(self):\n        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)\n\n    def test_alpha(self):\n        self.res1.bse # attaches alpha_std_err\n        assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha,\n                            DECIMAL_4)\n        assert_almost_equal(self.res1.lnalpha_std_err,\n                            self.res2.lnalpha_std_err, DECIMAL_4)\n\n    def test_conf_int(self):\n        assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,\n                            DECIMAL_3)\n\n    def test_zstat(self): # Low precision because Z vs. 
t\n assert_almost_equal(self.res1.pvalues[:-1], self.res2.pvalues,\n DECIMAL_2)\n\n def test_fittedvalues(self):\n assert_almost_equal(self.res1.fittedvalues[:10],\n self.res2.fittedvalues[:10], DECIMAL_3)\n\n def test_predict(self):\n assert_almost_equal(self.res1.predict()[:10],\n np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)\n\n def test_predict_xb(self):\n assert_almost_equal(self.res1.predict(linear=True)[:10],\n self.res2.fittedvalues[:10], DECIMAL_3)\n\n\nclass TestNegativeBinomialNB1BFGS(CheckNegBinMixin, CheckModelResults):\n\n @classmethod\n def setup_class(cls):\n data = load_randhie()\n exog = sm.add_constant(data.exog, prepend=False)\n cls.res1 = NegativeBinomial(data.endog, exog, 'nb1').fit(method=\"bfgs\",\n maxiter=100,\n disp=0)\n res2 = RandHIE.negativebinomial_nb1_bfgs\n cls.res2 = res2\n\n def test_zstat(self):\n assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)\n\n def test_lnalpha(self):\n self.res1.bse # attaches alpha_std_err\n assert_almost_equal(self.res1.lnalpha, self.res2.lnalpha, 3)\n assert_almost_equal(self.res1.lnalpha_std_err,\n self.res2.lnalpha_std_err, DECIMAL_4)\n\n def test_params(self):\n assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_4)\n\n def test_conf_int(self):\n # the bse for alpha is not high precision from the hessian\n # approximation\n assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,\n DECIMAL_2)\n\n @pytest.mark.xfail(reason=\"Test has not been implemented for this class.\",\n strict=True, raises=NotImplementedError)\n def test_predict(self):\n raise NotImplementedError\n\n @pytest.mark.xfail(reason=\"Test has not been implemented for this class.\",\n strict=True, raises=NotImplementedError)\n def test_predict_xb(self):\n raise NotImplementedError\n\n\nclass TestNegativeBinomialGeometricBFGS(CheckNegBinMixin, CheckModelResults):\n # Cannot find another implementation of the geometric to cross-check results\n # we only test fitted values because geometric has fewer parameters\n # than nb1 and nb2\n # and we want to make sure that predict() np.dot(exog, params) works\n\n @classmethod\n def setup_class(cls):\n data = load_randhie()\n exog = sm.add_constant(data.exog, prepend=False)\n mod = NegativeBinomial(data.endog, exog, 'geometric')\n cls.res1 = mod.fit(method='bfgs', disp=0)\n res2 = RandHIE.negativebinomial_geometric_bfgs\n cls.res2 = res2\n\n # the following are regression tests, could be inherited instead\n\n def test_aic(self):\n assert_almost_equal(self.res1.aic, self.res2.aic, DECIMAL_3)\n\n def test_bic(self):\n assert_almost_equal(self.res1.bic, self.res2.bic, DECIMAL_3)\n\n def test_conf_int(self):\n assert_almost_equal(self.res1.conf_int(), self.res2.conf_int,\n DECIMAL_3)\n\n def test_fittedvalues(self):\n assert_almost_equal(self.res1.fittedvalues[:10],\n self.res2.fittedvalues[:10], DECIMAL_3)\n\n def test_predict(self):\n assert_almost_equal(self.res1.predict()[:10],\n np.exp(self.res2.fittedvalues[:10]), DECIMAL_3)\n\n def test_params(self):\n assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)\n\n def test_predict_xb(self):\n assert_almost_equal(self.res1.predict(linear=True)[:10],\n self.res2.fittedvalues[:10], DECIMAL_3)\n\n def test_zstat(self): # Low precision because Z vs. 
t\n assert_almost_equal(self.res1.tvalues, self.res2.z, DECIMAL_1)\n\n def test_llf(self):\n assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_1)\n\n def test_llr(self):\n assert_almost_equal(self.res1.llr, self.res2.llr, DECIMAL_2)\n\n def test_bse(self):\n assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_3)\n\n\nclass CheckMNLogitBaseZero(CheckModelResults):\n\n def test_margeff_overall(self):\n me = self.res1.get_margeff()\n assert_almost_equal(me.margeff, self.res2.margeff_dydx_overall, 6)\n assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_overall_se, 6)\n me_frame = me.summary_frame()\n eff = me_frame[\"dy/dx\"].values.reshape(me.margeff.shape, order=\"F\")\n assert_allclose(eff, me.margeff, rtol=1e-13)\n assert_equal(me_frame.shape, (np.size(me.margeff), 6))\n\n def test_margeff_mean(self):\n me = self.res1.get_margeff(at='mean')\n assert_almost_equal(me.margeff, self.res2.margeff_dydx_mean, 7)\n assert_almost_equal(me.margeff_se, self.res2.margeff_dydx_mean_se, 7)\n\n def test_margeff_dummy(self):\n data = self.data\n vote = data.data['vote']\n exog = np.column_stack((data.exog, vote))\n exog = sm.add_constant(exog, prepend=False)\n res = MNLogit(data.endog, exog).fit(method=\"newton\", disp=0)\n me = res.get_margeff(dummy=True)\n assert_almost_equal(me.margeff, self.res2.margeff_dydx_dummy_overall,\n 6)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_dydx_dummy_overall_se, 6)\n me = res.get_margeff(dummy=True, method=\"eydx\")\n assert_almost_equal(me.margeff, self.res2.margeff_eydx_dummy_overall,\n 5)\n assert_almost_equal(me.margeff_se,\n self.res2.margeff_eydx_dummy_overall_se, 6)\n\n def test_j(self):\n assert_equal(self.res1.model.J, self.res2.J)\n\n def test_k(self):\n assert_equal(self.res1.model.K, self.res2.K)\n\n def test_endog_names(self):\n assert_equal(self.res1._get_endog_name(None,None)[1],\n ['y=1', 'y=2', 'y=3', 'y=4', 'y=5', 'y=6'])\n\n def test_pred_table(self):\n # fitted results taken from gretl\n pred = [6, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 6, 0, 1, 6, 0, 0,\n 1, 1, 6, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 6, 0, 0, 6, 6, 0, 0, 1,\n 1, 6, 1, 6, 0, 0, 0, 1, 0, 1, 0, 0, 0, 6, 0, 0, 6, 0, 0, 0, 1,\n 1, 0, 0, 6, 6, 6, 6, 1, 0, 5, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,\n 6, 0, 6, 6, 1, 0, 1, 1, 6, 5, 1, 0, 0, 0, 5, 0, 0, 6, 0, 1, 0,\n 0, 0, 0, 0, 1, 1, 0, 6, 6, 6, 6, 5, 0, 1, 1, 0, 1, 0, 6, 6, 0,\n 0, 0, 6, 0, 0, 0, 6, 6, 0, 5, 1, 0, 0, 0, 0, 6, 0, 5, 6, 6, 0,\n 0, 0, 0, 6, 1, 0, 0, 1, 0, 1, 6, 1, 1, 1, 1, 1, 0, 0, 0, 6, 0,\n 5, 1, 0, 6, 6, 6, 0, 0, 0, 0, 1, 6, 6, 0, 0, 0, 1, 1, 5, 6, 0,\n 6, 1, 0, 0, 1, 6, 0, 0, 1, 0, 6, 6, 0, 5, 6, 6, 0, 0, 6, 1, 0,\n 6, 0, 1, 0, 1, 6, 0, 1, 1, 1, 6, 0, 5, 0, 0, 6, 1, 0, 6, 5, 5,\n 0, 6, 1, 1, 1, 0, 0, 6, 0, 0, 5, 0, 0, 6, 6, 6, 6, 6, 0, 1, 0,\n 0, 6, 6, 0, 0, 1, 6, 0, 0, 6, 1, 6, 1, 1, 1, 0, 1, 6, 5, 0, 0,\n 1, 5, 0, 1, 6, 6, 1, 0, 0, 1, 6, 1, 5, 6, 1, 0, 0, 1, 1, 0, 6,\n 1, 6, 0, 1, 1, 5, 6, 6, 5, 1, 1, 1, 0, 6, 1, 6, 1, 0, 1, 0, 0,\n 1, 5, 0, 1, 1, 0, 5, 6, 0, 5, 1, 1, 6, 5, 0, 6, 0, 0, 0, 0, 0,\n 0, 1, 6, 1, 0, 5, 1, 0, 0, 1, 6, 0, 0, 6, 6, 6, 0, 2, 1, 6, 5,\n 6, 1, 1, 0, 5, 1, 1, 1, 6, 1, 6, 6, 5, 6, 0, 1, 0, 1, 6, 0, 6,\n 1, 6, 0, 0, 6, 1, 0, 6, 1, 0, 0, 0, 0, 6, 6, 6, 6, 5, 6, 6, 0,\n 0, 6, 1, 1, 6, 0, 0, 6, 6, 0, 6, 6, 0, 0, 6, 0, 0, 6, 6, 6, 1,\n 0, 6, 0, 0, 0, 6, 1, 1, 0, 1, 5, 0, 0, 5, 0, 0, 0, 1, 1, 6, 1,\n 0, 0, 0, 6, 6, 1, 1, 6, 5, 5, 0, 6, 6, 0, 1, 1, 0, 6, 6, 0, 6,\n 5, 5, 6, 5, 1, 0, 6, 0, 6, 1, 0, 1, 6, 6, 6, 1, 0, 6, 0, 5, 6,\n 6, 5, 0, 5, 1, 0, 6, 0, 6, 1, 5, 5, 0, 1, 5, 5, 2, 6, 6, 
6, 5,\n 0, 0, 1, 6, 1, 0, 1, 6, 1, 0, 0, 1, 5, 6, 6, 0, 0, 0, 5, 6, 6,\n 6, 1, 5, 6, 1, 0, 0, 6, 5, 0, 1, 1, 1, 6, 6, 0, 1, 0, 0, 0, 5,\n 0, 0, 6, 1, 6, 0, 6, 1, 5, 5, 6, 5, 0, 0, 0, 0, 1, 1, 0, 5, 5,\n 0, 0, 0, 0, 1, 0, 6, 6, 1, 1, 6, 6, 0, 5, 5, 0, 0, 0, 6, 6, 1,\n 6, 0, 0, 5, 0, 1, 6, 5, 6, 6, 5, 5, 6, 6, 1, 0, 1, 6, 6, 1, 6,\n 0, 6, 0, 6, 5, 0, 6, 6, 0, 5, 6, 0, 6, 6, 5, 0, 1, 6, 6, 1, 0,\n 1, 0, 6, 6, 1, 0, 6, 6, 6, 0, 1, 6, 0, 1, 5, 1, 1, 5, 6, 6, 0,\n 1, 6, 6, 1, 5, 0, 5, 0, 6, 0, 1, 6, 1, 0, 6, 1, 6, 0, 6, 1, 0,\n 0, 0, 6, 6, 0, 1, 1, 6, 6, 6, 1, 6, 0, 5, 6, 0, 5, 6, 6, 5, 5,\n 5, 6, 0, 6, 0, 0, 0, 5, 0, 6, 1, 2, 6, 6, 6, 5, 1, 6, 0, 6, 0,\n 0, 0, 0, 6, 5, 0, 5, 1, 6, 5, 1, 6, 5, 1, 1, 0, 0, 6, 1, 1, 5,\n 6, 6, 0, 5, 2, 5, 5, 0, 5, 5, 5, 6, 5, 6, 6, 5, 2, 6, 5, 6, 0,\n 0, 6, 5, 0, 6, 0, 0, 6, 6, 6, 0, 5, 1, 1, 6, 6, 5, 2, 1, 6, 5,\n 6, 0, 6, 6, 1, 1, 5, 1, 6, 6, 6, 0, 0, 6, 1, 0, 5, 5, 1, 5, 6,\n 1, 6, 0, 1, 6, 5, 0, 0, 6, 1, 5, 1, 0, 6, 0, 6, 6, 5, 5, 6, 6,\n 6, 6, 2, 6, 6, 6, 5, 5, 5, 0, 1, 0, 0, 0, 6, 6, 1, 0, 6, 6, 6,\n 6, 6, 1, 0, 6, 1, 5, 5, 6, 6, 6, 6, 6, 5, 6, 1, 6, 2, 5, 5, 6,\n 5, 6, 6, 5, 6, 6, 5, 5, 6, 1, 5, 1, 6, 0, 2, 5, 0, 5, 0, 2, 1,\n 6, 0, 0, 6, 6, 1, 6, 0, 5, 5, 6, 6, 1, 6, 6, 6, 5, 6, 6, 1, 6,\n 5, 6, 1, 1, 0, 6, 6, 5, 1, 0, 0, 6, 6, 5, 6, 0, 1, 6, 0, 5, 6,\n 5, 2, 5, 2, 0, 0, 1, 6, 6, 1, 5, 6, 6, 0, 6, 6, 6, 6, 6, 5]\n assert_array_equal(self.res1.predict().argmax(1), pred)\n\n # the rows should add up for pred table\n assert_array_equal(self.res1.pred_table().sum(0), np.bincount(pred))\n\n # note this is just a regression test, gretl does not have a prediction\n # table\n pred = [[ 126., 41., 2., 0., 0., 12., 19.],\n [ 77., 73., 3., 0., 0., 15., 12.],\n [ 37., 43., 2., 0., 0., 19., 7.],\n [ 12., 9., 1., 0., 0., 9., 6.],\n [ 19., 10., 2., 0., 0., 20., 43.],\n [ 22., 25., 1., 0., 0., 31., 71.],\n [ 9., 7., 1., 0., 0., 18., 140.]]\n assert_array_equal(self.res1.pred_table(), pred)\n\n def test_resid(self):\n assert_array_equal(self.res1.resid_misclassified, self.res2.resid)\n\n @pytest.mark.xfail(reason=\"res2.cov_params is a zero-dim array of None\",\n strict=True)\n def test_cov_params(self):\n super(CheckMNLogitBaseZero, self).test_cov_params()\n\n\nclass TestMNLogitNewtonBaseZero(CheckMNLogitBaseZero):\n @classmethod\n def setup_class(cls):\n cls.data = data = load_anes96()\n exog = data.exog\n exog = sm.add_constant(exog, prepend=False)\n cls.res1 = MNLogit(data.endog, exog).fit(method=\"newton\", disp=0)\n res2 = Anes.mnlogit_basezero\n cls.res2 = res2\n\n\nclass TestMNLogitLBFGSBaseZero(CheckMNLogitBaseZero):\n @classmethod\n def setup_class(cls):\n cls.data = data = load_anes96()\n exog = data.exog\n exog = sm.add_constant(exog, prepend=False)\n mymodel = MNLogit(data.endog, exog)\n cls.res1 = mymodel.fit(method=\"lbfgs\", disp=0, maxiter=50000,\n #m=12, pgtol=1e-7, factr=1e3, # 5 failures\n #m=20, pgtol=1e-8, factr=1e2, # 3 failures\n #m=30, pgtol=1e-9, factr=1e1, # 1 failure\n m=40, pgtol=1e-10, factr=5e0,\n loglike_and_score=mymodel.loglike_and_score)\n res2 = Anes.mnlogit_basezero\n cls.res2 = res2\n\n\ndef test_perfect_prediction():\n cur_dir = os.path.dirname(os.path.abspath(__file__))\n iris_dir = os.path.join(cur_dir, '..', '..', 'genmod', 'tests', 'results')\n iris_dir = os.path.abspath(iris_dir)\n iris = np.genfromtxt(os.path.join(iris_dir, 'iris.csv'), delimiter=\",\",\n skip_header=1)\n y = iris[:,-1]\n X = iris[:,:-1]\n X = X[y != 2]\n y = y[y != 2]\n X = sm.add_constant(X, prepend=True)\n mod = Logit(y,X)\n 
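# classes 0 and 1 of the iris data are linearly separable, so the default fit raises PerfectSeparationError\n    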
assert_raises(PerfectSeparationError, mod.fit, maxiter=1000)\n    #turn off raise PerfectSeparationError\n    mod.raise_on_perfect_prediction = False\n    # this will raise if you set maxiter high enough with a singular matrix\n    with pytest.warns(ConvergenceWarning):\n        res = mod.fit(disp=False, maxiter=50) # should not raise but does warn\n    assert_(not res.mle_retvals['converged'])\n\n\ndef test_poisson_predict():\n    #GH: 175, make sure poisson predict works without offset and exposure\n    data = load_randhie()\n    exog = sm.add_constant(data.exog, prepend=True)\n    res = sm.Poisson(data.endog, exog).fit(method='newton', disp=0)\n    pred1 = res.predict()\n    pred2 = res.predict(exog)\n    assert_almost_equal(pred1, pred2)\n    #extra options\n    pred3 = res.predict(exog, offset=0, exposure=1)\n    assert_almost_equal(pred1, pred3)\n    pred3 = res.predict(exog, offset=0, exposure=2)\n    assert_almost_equal(2*pred1, pred3)\n    pred3 = res.predict(exog, offset=np.log(2), exposure=1)\n    assert_almost_equal(2*pred1, pred3)\n\n\ndef test_poisson_newton():\n    #GH: 24, Newton does not work well sometimes\n    nobs = 10000\n    np.random.seed(987689)\n    x = np.random.randn(nobs, 3)\n    x = sm.add_constant(x, prepend=True)\n    y_count = np.random.poisson(np.exp(x.sum(1)))\n    mod = sm.Poisson(y_count, x)\n    # this is not thread-safe\n    with pytest.warns(ConvergenceWarning):\n        res = mod.fit(start_params=-np.ones(4), method='newton', disp=0)\n\n    assert_(not res.mle_retvals['converged'])\n\n\ndef test_issue_339():\n    # make sure MNLogit summary works for J != K.\n    data = load_anes96()\n    exog = data.exog\n    # leave out last exog column\n    exog = exog[:,:-1]\n    exog = sm.add_constant(exog, prepend=True)\n    res1 = sm.MNLogit(data.endog, exog).fit(method=\"newton\", disp=0)\n    # strip the header from the test\n    smry = \"\\n\".join(res1.summary().as_text().split('\\n')[9:])\n    cur_dir = os.path.dirname(os.path.abspath(__file__))\n    test_case_file = os.path.join(cur_dir, 'results', 'mn_logit_summary.txt')\n    with open(test_case_file, 'r') as fd:\n        test_case = fd.read()\n    np.testing.assert_equal(smry, test_case[:-1])\n    # smoke test for summary2\n    res1.summary2() # see #3651\n\n\ndef test_issue_341():\n    data = load_anes96()\n    exog = data.exog\n    # leave out last exog column\n    exog = exog[:,:-1]\n    exog = sm.add_constant(exog, prepend=True)\n    res1 = sm.MNLogit(data.endog, exog).fit(method=\"newton\", disp=0)\n    x = exog[0]\n    np.testing.assert_equal(res1.predict(x).shape, (1,7))\n    np.testing.assert_equal(res1.predict(x[None]).shape, (1,7))\n\n\ndef test_iscount():\n    X = np.random.random((50, 10))\n    X[:,2] = np.random.randint(1, 10, size=50)\n    X[:,6] = np.random.randint(1, 10, size=50)\n    X[:,4] = np.random.randint(0, 2, size=50)\n    X[:,1] = np.random.randint(-10, 10, size=50) # not integers\n    count_ind = _iscount(X)\n    assert_equal(count_ind, [2, 6])\n\n\ndef test_isdummy():\n    X = np.random.random((50, 10))\n    X[:,2] = np.random.randint(1, 10, size=50)\n    X[:,6] = np.random.randint(0, 2, size=50)\n    X[:,4] = np.random.randint(0, 2, size=50)\n    X[:,1] = np.random.randint(-10, 10, size=50) # not integers\n    count_ind = _isdummy(X)\n    assert_equal(count_ind, [4, 6])\n\n\ndef test_non_binary():\n    y = [1, 2, 1, 2, 1, 2]\n    X = np.random.randn(6, 2)\n    assert_raises(ValueError, Logit, y, X)\n    y = [0, 1, 0, 0, 1, 0.5]\n    assert_raises(ValueError, Probit, y, X)\n\n\ndef test_mnlogit_factor():\n    dta = sm.datasets.anes96.load_pandas()\n    dta['endog'] = dta.endog.replace(dict(zip(range(7), 'ABCDEFG')))\n    exog = sm.add_constant(dta.exog, prepend=True)\n    mod = sm.MNLogit(dta.endog, exog)\n    res = 
mod.fit(disp=0)\n # smoke tests\n params = res.params\n summary = res.summary()\n predicted = res.predict(exog.iloc[:5, :])\n\n # with patsy\n mod = smf.mnlogit('PID ~ ' + ' + '.join(dta.exog.columns), dta.data)\n res2 = mod.fit(disp=0)\n params_f = res2.params\n summary = res2.summary()\n assert_allclose(params_f, params, rtol=1e-10)\n predicted_f = res2.predict(dta.exog.iloc[:5, :])\n assert_allclose(predicted_f, predicted, rtol=1e-10)\n\n\ndef test_mnlogit_factor_categorical():\n dta = sm.datasets.anes96.load_pandas()\n dta['endog'] = dta.endog.replace(dict(zip(range(7), 'ABCDEFG')))\n exog = sm.add_constant(dta.exog, prepend=True)\n mod = sm.MNLogit(dta.endog, exog)\n res = mod.fit(disp=0)\n dta['endog'] = dta['endog'].astype('category')\n mod = sm.MNLogit(dta.endog, exog)\n res_cat = mod.fit(disp=0)\n assert_allclose(res.params, res_cat.params)\n\n\ndef test_formula_missing_exposure():\n # see 2083\n d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],\n 'constant': [1] * 4, 'exposure' : np.random.uniform(size=4),\n 'x': [1, 3, 2, 1.5]}\n df = pd.DataFrame(d)\n\n # should work\n mod1 = smf.poisson('Foo ~ Bar', data=df, exposure=df['exposure'])\n assert_(type(mod1.exposure) is np.ndarray, msg='Exposure is not ndarray')\n\n # make sure this raises\n exposure = pd.Series(np.random.uniform(size=5))\n df.loc[3, 'Bar'] = 4 # nan not relevant for ValueError for shape mismatch\n assert_raises(ValueError, sm.Poisson, df.Foo, df[['constant', 'Bar']],\n exposure=exposure)\n\n\ndef test_predict_with_exposure():\n # Case where CountModel.predict is called with exog = None and exposure\n # or offset not-None\n # See 3565\n\n # Setup copied from test_formula_missing_exposure\n import pandas as pd\n d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, 4],\n 'constant': [1] * 4, 'exposure' : [np.exp(1)]*4,\n 'x': [1, 3, 2, 1.5]}\n df = pd.DataFrame(d)\n\n mod1 = CountModel.from_formula('Foo ~ Bar', data=df,\n exposure=df['exposure'])\n\n params = np.array([1, .4])\n pred = mod1.predict(params, linear=True)\n # No exposure is passed, so default to using mod1.exposure, which\n # should have been logged\n X = df[['constant', 'Bar']].values # mod1.exog\n expected = np.dot(X, params) + 1\n assert_allclose(pred, expected)\n # The above should have passed without the current patch. 
The next\n # test would fail under the old code\n\n pred2 = mod1.predict(params, exposure=[np.exp(2)]*4, linear=True)\n expected2 = expected + 1\n assert_allclose(pred2, expected2)\n\n\ndef test_binary_pred_table_zeros():\n # see 2968\n nobs = 10\n y = np.zeros(nobs)\n y[[1,3]] = 1\n\n res = Logit(y, np.ones(nobs)).fit(disp=0)\n expected = np.array([[ 8., 0.], [ 2., 0.]])\n assert_equal(res.pred_table(), expected)\n\n res = MNLogit(y, np.ones(nobs)).fit(disp=0)\n expected = np.array([[ 8., 0.], [ 2., 0.]])\n assert_equal(res.pred_table(), expected)\n\n\nclass TestGeneralizedPoisson_p2(object):\n # Test Generalized Poisson model\n\n @classmethod\n def setup_class(cls):\n data = load_randhie()\n data.exog = sm.add_constant(data.exog, prepend=False)\n mod = GeneralizedPoisson(data.endog, data.exog, p=2)\n cls.res1 = mod.fit(method='newton', disp=0)\n res2 = RandHIE.generalizedpoisson_gp2\n cls.res2 = res2\n\n def test_bse(self):\n assert_allclose(self.res1.bse, self.res2.bse, atol=1e-5)\n\n def test_params(self):\n assert_allclose(self.res1.params, self.res2.params, atol=1e-5)\n\n def test_alpha(self):\n assert_allclose(self.res1.lnalpha, self.res2.lnalpha)\n assert_allclose(self.res1.lnalpha_std_err,\n self.res2.lnalpha_std_err, atol=1e-5)\n\n def test_conf_int(self):\n assert_allclose(self.res1.conf_int(), self.res2.conf_int,\n atol=1e-3)\n\n def test_aic(self):\n assert_allclose(self.res1.aic, self.res2.aic)\n\n def test_bic(self):\n assert_allclose(self.res1.bic, self.res2.bic)\n\n def test_df(self):\n assert_equal(self.res1.df_model, self.res2.df_model)\n\n def test_llf(self):\n assert_allclose(self.res1.llf, self.res2.llf)\n\n def test_wald(self):\n result = self.res1.wald_test(np.eye(len(self.res1.params))[:-2])\n assert_allclose(result.statistic, self.res2.wald_statistic)\n assert_allclose(result.pvalue, self.res2.wald_pvalue, atol=1e-15)\n\n def test_t(self):\n unit_matrix = np.identity(self.res1.params.size)\n t_test = self.res1.t_test(unit_matrix)\n assert_allclose(self.res1.tvalues, t_test.tvalue)\n\n\nclass TestGeneralizedPoisson_transparams(object):\n # Test Generalized Poisson model\n\n @classmethod\n def setup_class(cls):\n data = load_randhie()\n data.exog = sm.add_constant(data.exog, prepend=False)\n cls.res1 = GeneralizedPoisson(data.endog, data.exog, p=2).fit(\n method='newton', disp=0)\n res2 = RandHIE.generalizedpoisson_gp2\n cls.res2 = res2\n\n def test_bse(self):\n assert_allclose(self.res1.bse, self.res2.bse, atol=1e-5)\n\n def test_params(self):\n assert_allclose(self.res1.params, self.res2.params, atol=1e-5)\n\n def test_alpha(self):\n assert_allclose(self.res1.lnalpha, self.res2.lnalpha)\n assert_allclose(self.res1.lnalpha_std_err,\n self.res2.lnalpha_std_err, atol=1e-5)\n\n def test_conf_int(self):\n assert_allclose(self.res1.conf_int(), self.res2.conf_int,\n atol=1e-3)\n\n def test_aic(self):\n assert_allclose(self.res1.aic, self.res2.aic)\n\n def test_bic(self):\n assert_allclose(self.res1.bic, self.res2.bic)\n\n def test_df(self):\n assert_equal(self.res1.df_model, self.res2.df_model)\n\n def test_llf(self):\n assert_allclose(self.res1.llf, self.res2.llf)\n\n\nclass TestGeneralizedPoisson_p1(object):\n # Test Generalized Poisson model\n\n @classmethod\n def setup_class(cls):\n cls.data = load_randhie()\n cls.data.exog = sm.add_constant(cls.data.exog, prepend=False)\n cls.res1 = GeneralizedPoisson(\n cls.data.endog, cls.data.exog, p=1).fit(method='newton', disp=0)\n\n def test_llf(self):\n poisson_llf = sm.Poisson(\n self.data.endog, self.data.exog).loglike(\n 
self.res1.params[:-1])\n genpoisson_llf = sm.GeneralizedPoisson(\n self.data.endog, self.data.exog, p=1).loglike(\n list(self.res1.params[:-1]) + [0])\n assert_allclose(genpoisson_llf, poisson_llf)\n\n def test_score(self):\n poisson_score = sm.Poisson(\n self.data.endog, self.data.exog).score(\n self.res1.params[:-1])\n genpoisson_score = sm.GeneralizedPoisson(\n self.data.endog, self.data.exog, p=1).score(\n list(self.res1.params[:-1]) + [0])\n assert_allclose(genpoisson_score[:-1], poisson_score, atol=1e-9)\n\n def test_hessian(self):\n poisson_score = sm.Poisson(\n self.data.endog, self.data.exog).hessian(\n self.res1.params[:-1])\n genpoisson_score = sm.GeneralizedPoisson(\n self.data.endog, self.data.exog, p=1).hessian(\n list(self.res1.params[:-1]) + [0])\n assert_allclose(genpoisson_score[:-1,:-1], poisson_score, atol=1e-10)\n\n def test_t(self):\n unit_matrix = np.identity(self.res1.params.size)\n t_test = self.res1.t_test(unit_matrix)\n assert_allclose(self.res1.tvalues, t_test.tvalue)\n\n def test_fit_regularized(self):\n model = self.res1.model\n\n # do not penalize constant and dispersion parameter\n alpha = np.ones(len(self.res1.params))\n alpha[-2:] = 0\n # the first prints currently a warning, irrelevant here\n res_reg1 = model.fit_regularized(alpha=alpha*0.01, disp=0)\n res_reg2 = model.fit_regularized(alpha=alpha*100, disp=0)\n res_reg3 = model.fit_regularized(alpha=alpha*1000, disp=0)\n\n assert_allclose(res_reg1.params, self.res1.params, atol=5e-5)\n assert_allclose(res_reg1.bse, self.res1.bse, atol=1e-5)\n\n # check shrinkage, regression numbers\n assert_allclose((self.res1.params[:-2]**2).mean(), 0.016580955543320779)\n assert_allclose((res_reg1.params[:-2]**2).mean(), 0.016580734975068664)\n assert_allclose((res_reg2.params[:-2]**2).mean(), 0.010672558641545994)\n assert_allclose((res_reg3.params[:-2]**2).mean(), 0.00035544919793048415)\n\n def test_init_kwds(self):\n kwds = self.res1.model._get_init_kwds()\n assert_('p' in kwds)\n assert_equal(kwds['p'], 1)\n\n\nclass TestGeneralizedPoisson_underdispersion(object):\n\n @classmethod\n def setup_class(cls):\n cls.expected_params = [1, -0.5, -0.05]\n np.random.seed(1234)\n nobs = 200\n exog = np.ones((nobs, 2))\n exog[:nobs//2, 1] = 2\n mu_true = np.exp(exog.dot(cls.expected_params[:-1]))\n cls.endog = sm.distributions.genpoisson_p.rvs(mu_true,\n cls.expected_params[-1], 1, size=len(mu_true))\n model_gp = sm.GeneralizedPoisson(cls.endog, exog, p=1)\n cls.res = model_gp.fit(method='nm', xtol=1e-6, maxiter=5000,\n maxfun=5000, disp=0)\n\n def test_basic(self):\n res = self.res\n endog = res.model.endog\n # check random data generation, regression test\n assert_allclose(endog.mean(), 1.42, rtol=1e-3)\n assert_allclose(endog.var(), 1.2836, rtol=1e-3)\n\n # check estimation\n assert_allclose(res.params, self.expected_params, atol=0.07, rtol=0.1)\n assert_(res.mle_retvals['converged'] is True)\n assert_allclose(res.mle_retvals['fopt'], 1.418753161722015, rtol=0.01)\n\n def test_newton(self):\n # check newton optimization with start_params\n res = self.res\n res2 = res.model.fit(start_params=res.params, method='newton', disp=0)\n assert_allclose(res.model.score(res.params),\n np.zeros(len(res2.params)), atol=0.01)\n assert_allclose(res.model.score(res2.params),\n np.zeros(len(res2.params)), atol=1e-10)\n assert_allclose(res.params, res2.params, atol=1e-4)\n\n def test_mean_var(self):\n assert_allclose(self.res.predict().mean(), self.endog.mean(),\n atol=1e-1, rtol=1e-1)\n\n assert_allclose(\n self.res.predict().mean() * 
self.res._dispersion_factor.mean(),\n self.endog.var(), atol=2e-1, rtol=2e-1)\n\n def test_predict_prob(self):\n res = self.res\n endog = res.model.endog\n freq = np.bincount(endog.astype(int))\n\n pr = res.predict(which='prob')\n pr2 = sm.distributions.genpoisson_p.pmf(np.arange(6)[:, None],\n res.predict(), res.params[-1], 1).T\n assert_allclose(pr, pr2, rtol=1e-10, atol=1e-10)\n\n expected = pr.sum(0)\n # add expected obs from right tail to last bin\n expected[-1] += pr.shape[0] - expected.sum()\n # scipy requires observed and expected add to the same at rtol=1e-8\n assert_allclose(freq.sum(), expected.sum(), rtol=1e-13)\n\n from scipy import stats\n chi2 = stats.chisquare(freq, expected)\n # numbers are regression test, we should not reject\n assert_allclose(chi2[:], (0.5511787456691261, 0.9901293016678583),\n rtol=0.01)\n\n\nclass TestNegativeBinomialPNB2Newton(CheckNegBinMixin, CheckModelResults):\n\n @classmethod\n def setup_class(cls):\n data = load_randhie()\n exog = sm.add_constant(data.exog, prepend=False)\n mod = NegativeBinomialP(data.endog, exog, p=2)\n cls.res1 = mod.fit(method='newton', disp=0)\n res2 = RandHIE.negativebinomial_nb2_bfgs\n cls.res2 = res2\n\n #NOTE: The bse is much closer in precision to stata\n def test_bse(self):\n assert_allclose(self.res1.bse, self.res2.bse,\n atol=1e-3, rtol=1e-3)\n\n def test_params(self):\n assert_allclose(self.res1.params, self.res2.params,\n atol=1e-7)\n\n def test_alpha(self):\n self.res1.bse # attaches alpha_std_err\n assert_allclose(self.res1.lnalpha, self.res2.lnalpha)\n assert_allclose(self.res1.lnalpha_std_err,\n self.res2.lnalpha_std_err,\n atol=1e-7)\n\n def test_conf_int(self):\n assert_allclose(self.res1.conf_int(), self.res2.conf_int,\n atol=1e-3, rtol=1e-3)\n\n def test_zstat(self): # Low precision because Z vs. 
t\n assert_allclose(self.res1.pvalues[:-1], self.res2.pvalues,\n atol=5e-3, rtol=5e-3)\n\n def test_fittedvalues(self):\n assert_allclose(self.res1.fittedvalues[:10],\n self.res2.fittedvalues[:10])\n\n def test_predict(self):\n assert_allclose(self.res1.predict()[:10],\n np.exp(self.res2.fittedvalues[:10]))\n\n def test_predict_xb(self):\n assert_allclose(self.res1.predict(which='linear')[:10],\n self.res2.fittedvalues[:10])\n\n\nclass TestNegativeBinomialPNB1Newton(CheckNegBinMixin, CheckModelResults):\n\n @classmethod\n def setup_class(cls):\n data = load_randhie()\n exog = sm.add_constant(data.exog, prepend=False)\n mod = NegativeBinomialP(data.endog, exog, p=1)\n cls.res1 = mod.fit(method=\"newton\", maxiter=100, disp=0)\n res2 = RandHIE.negativebinomial_nb1_bfgs\n cls.res2 = res2\n\n def test_zstat(self):\n assert_allclose(self.res1.tvalues, self.res2.z,\n atol=5e-3, rtol=5e-3)\n\n def test_lnalpha(self):\n self.res1.bse # attaches alpha_std_err\n assert_allclose(self.res1.lnalpha, self.res2.lnalpha)\n assert_allclose(self.res1.lnalpha_std_err,\n self.res2.lnalpha_std_err)\n\n def test_params(self):\n assert_allclose(self.res1.params, self.res2.params)\n\n def test_conf_int(self):\n # the bse for alpha is not high precision from the hessian\n # approximation\n assert_allclose(self.res1.conf_int(), self.res2.conf_int,\n atol=1e-3, rtol=1e-3)\n\n def test_predict(self):\n assert_allclose(self.res1.predict()[:10],\n np.exp(self.res2.fittedvalues[:10]),\n atol=1e-3, rtol=1e-3)\n\n def test_predict_xb(self):\n assert_allclose(self.res1.predict(which='linear')[:10],\n self.res2.fittedvalues[:10],\n atol=1e-3, rtol=1e-3)\n\n\nclass TestNegativeBinomialPNB2BFGS(CheckNegBinMixin, CheckModelResults):\n\n @classmethod\n def setup_class(cls):\n data = load_randhie()\n exog = sm.add_constant(data.exog, prepend=False)\n cls.res1 = NegativeBinomialP(data.endog, exog, p=2).fit(\n method='bfgs', disp=0,\n maxiter=1000)\n res2 = RandHIE.negativebinomial_nb2_bfgs\n cls.res2 = res2\n\n #NOTE: The bse is much closer in precision to stata\n def test_bse(self):\n assert_allclose(self.res1.bse, self.res2.bse,\n atol=1e-3, rtol=1e-3)\n\n def test_params(self):\n assert_allclose(self.res1.params, self.res2.params,\n atol=1e-3, rtol=1e-3)\n\n def test_alpha(self):\n self.res1.bse # attaches alpha_std_err\n assert_allclose(self.res1.lnalpha, self.res2.lnalpha,\n atol=1e-5, rtol=1e-5)\n assert_allclose(self.res1.lnalpha_std_err,\n self.res2.lnalpha_std_err,\n atol=1e-5, rtol=1e-5)\n\n def test_conf_int(self):\n assert_allclose(self.res1.conf_int(), self.res2.conf_int,\n atol=1e-3, rtol=1e-3)\n\n def test_zstat(self): # Low precision because Z vs. 
t\n assert_allclose(self.res1.pvalues[:-1], self.res2.pvalues,\n atol=5e-3, rtol=5e-3)\n\n def test_fittedvalues(self):\n assert_allclose(self.res1.fittedvalues[:10],\n self.res2.fittedvalues[:10],\n atol=1e-4, rtol=1e-4)\n\n def test_predict(self):\n assert_allclose(self.res1.predict()[:10],\n np.exp(self.res2.fittedvalues[:10]),\n atol=1e-3, rtol=1e-3)\n\n def test_predict_xb(self):\n assert_allclose(self.res1.predict(which='linear')[:10],\n self.res2.fittedvalues[:10],\n atol=1e-3, rtol=1e-3)\n\n\nclass TestNegativeBinomialPNB1BFGS(CheckNegBinMixin, CheckModelResults):\n\n @classmethod\n def setup_class(cls):\n data = load_randhie()\n exog = sm.add_constant(data.exog, prepend=False)\n cls.res1 = NegativeBinomialP(data.endog, exog, p=1).fit(method=\"bfgs\",\n maxiter=100,\n disp=0)\n res2 = RandHIE.negativebinomial_nb1_bfgs\n cls.res2 = res2\n\n def test_bse(self):\n assert_allclose(self.res1.bse, self.res2.bse,\n atol=5e-3, rtol=5e-3)\n\n def test_aic(self):\n assert_allclose(self.res1.aic, self.res2.aic,\n atol=0.5, rtol=0.5)\n\n def test_bic(self):\n assert_allclose(self.res1.bic, self.res2.bic,\n atol=0.5, rtol=0.5)\n\n def test_llf(self):\n assert_allclose(self.res1.llf, self.res2.llf,\n atol=1e-3, rtol=1e-3)\n\n def test_llr(self):\n assert_allclose(self.res1.llf, self.res2.llf,\n atol=1e-3, rtol=1e-3)\n\n def test_zstat(self):\n assert_allclose(self.res1.tvalues, self.res2.z,\n atol=0.5, rtol=0.5)\n\n def test_lnalpha(self):\n assert_allclose(self.res1.lnalpha, self.res2.lnalpha,\n atol=1e-3, rtol=1e-3)\n assert_allclose(self.res1.lnalpha_std_err,\n self.res2.lnalpha_std_err,\n atol=1e-3, rtol=1e-3)\n\n def test_params(self):\n assert_allclose(self.res1.params, self.res2.params,\n atol=5e-2, rtol=5e-2)\n\n def test_conf_int(self):\n # the bse for alpha is not high precision from the hessian\n # approximation\n assert_allclose(self.res1.conf_int(), self.res2.conf_int,\n atol=5e-2, rtol=5e-2)\n\n def test_predict(self):\n assert_allclose(self.res1.predict()[:10],\n np.exp(self.res2.fittedvalues[:10]),\n atol=5e-3, rtol=5e-3)\n\n def test_predict_xb(self):\n assert_allclose(self.res1.predict(which='linear')[:10],\n self.res2.fittedvalues[:10],\n atol=5e-3, rtol=5e-3)\n\n def test_init_kwds(self):\n kwds = self.res1.model._get_init_kwds()\n assert_('p' in kwds)\n assert_equal(kwds['p'], 1)\n\n\nclass TestNegativeBinomialPL1Compatability(CheckL1Compatability):\n @classmethod\n def setup_class(cls):\n cls.kvars = 10 # Number of variables\n cls.m = 7 # Number of unregularized parameters\n rand_data = load_randhie()\n rand_data.endog = np.asarray(rand_data.endog)\n rand_data.exog = np.asarray(rand_data.exog, dtype=float)\n rand_exog = rand_data.exog.view(float).reshape(len(rand_data.exog), -1)\n rand_exog_st = (rand_exog - rand_exog.mean(0)) / rand_exog.std(0)\n rand_exog = sm.add_constant(rand_exog_st, prepend=True)\n # Drop some columns and do an unregularized fit\n exog_no_PSI = rand_exog[:, :cls.m]\n mod_unreg = sm.NegativeBinomialP(rand_data.endog, exog_no_PSI)\n cls.res_unreg = mod_unreg.fit(method=\"newton\", disp=0)\n # Do a regularized fit with alpha, effectively dropping the last column\n alpha = 10 * len(rand_data.endog) * np.ones(cls.kvars + 1)\n alpha[:cls.m] = 0\n alpha[-1] = 0 # do not penalize alpha\n\n mod_reg = sm.NegativeBinomialP(rand_data.endog, rand_exog)\n cls.res_reg = mod_reg.fit_regularized(\n method='l1', alpha=alpha, disp=False, acc=1e-10, maxiter=2000,\n trim_mode='auto')\n cls.k_extra = 1 # 1 extra parameter in nb2\n\n\nclass 
TestNegativeBinomialPPredictProb(object):\n\n def test_predict_prob_p1(self):\n expected_params = [1, -0.5]\n np.random.seed(1234)\n nobs = 200\n exog = np.ones((nobs, 2))\n exog[:nobs//2, 1] = 2\n mu_true = np.exp(exog.dot(expected_params))\n alpha = 0.05\n size = 1. / alpha * mu_true\n prob = size / (size + mu_true)\n endog = nbinom.rvs(size, prob, size=len(mu_true))\n\n res = sm.NegativeBinomialP(endog, exog).fit(disp=0)\n\n mu = res.predict()\n size = 1. / alpha * mu\n prob = size / (size + mu)\n\n probs = res.predict(which='prob')\n assert_allclose(probs,\n nbinom.pmf(np.arange(8)[:,None], size, prob).T,\n atol=1e-2, rtol=1e-2)\n\n probs_ex = res.predict(exog=exog[[0, -1]], which='prob')\n assert_allclose(probs_ex, probs[[0, -1]], rtol=1e-10, atol=1e-15)\n\n def test_predict_prob_p2(self):\n expected_params = [1, -0.5]\n np.random.seed(1234)\n nobs = 200\n exog = np.ones((nobs, 2))\n exog[:nobs//2, 1] = 2\n mu_true = np.exp(exog.dot(expected_params))\n alpha = 0.05\n size = 1. / alpha\n prob = size / (size + mu_true)\n endog = nbinom.rvs(size, prob, size=len(mu_true))\n\n res = sm.NegativeBinomialP(endog, exog, p=2).fit(disp=0)\n\n mu = res.predict()\n size = 1. / alpha\n prob = size / (size + mu)\n\n assert_allclose(res.predict(which='prob'),\n nbinom.pmf(np.arange(8)[:,None], size, prob).T,\n atol=1e-2, rtol=1e-2)\n\n\nclass CheckNull(object):\n\n @classmethod\n def _get_data(cls):\n x = np.array([ 20., 25., 30., 35., 40., 45., 50.])\n nobs = len(x)\n exog = np.column_stack((np.ones(nobs), x))\n endog = np.array([ 469, 5516, 6854, 6837, 5952, 4066, 3242])\n return endog, exog\n\n def test_llnull(self):\n res = self.model.fit(start_params=self.start_params, disp=0)\n res._results._attach_nullmodel = True\n llf0 = res.llnull\n res_null0 = res.res_null\n assert_allclose(llf0, res_null0.llf, rtol=1e-6)\n\n res_null1 = self.res_null\n assert_allclose(llf0, res_null1.llf, rtol=1e-6)\n # Note default convergence tolerance does not get lower rtol\n # from different starting values (using bfgs)\n assert_allclose(res_null0.params, res_null1.params, rtol=5e-5)\n\n\nclass TestPoissonNull(CheckNull):\n\n @classmethod\n def setup_class(cls):\n endog, exog = cls._get_data()\n cls.model = Poisson(endog, exog)\n cls.res_null = Poisson(endog, exog[:, 0]).fit(start_params=[8.5], disp=0)\n # use start params to avoid warnings\n cls.start_params = [8.5, 0]\n\n\nclass TestNegativeBinomialNB1Null(CheckNull):\n\n @classmethod\n def setup_class(cls):\n endog, exog = cls._get_data()\n cls.model = NegativeBinomial(endog, exog, loglike_method='nb1')\n cls.model_null = NegativeBinomial(endog, exog[:, 0],\n loglike_method='nb1')\n cls.res_null = cls.model_null.fit(start_params=[8, 1000],\n method='bfgs', gtol=1e-08,\n maxiter=300, disp=0)\n # for convergence with bfgs, I needed to round down alpha start_params\n cls.start_params = np.array([7.730452, 2.01633068e-02, 1763.0])\n\n\nclass TestNegativeBinomialNB2Null(CheckNull):\n\n @classmethod\n def setup_class(cls):\n endog, exog = cls._get_data()\n cls.model = NegativeBinomial(endog, exog, loglike_method='nb2')\n cls.model_null = NegativeBinomial(endog, exog[:, 0],\n loglike_method='nb2')\n cls.res_null = cls.model_null.fit(start_params=[8, 0.5],\n method='bfgs', gtol=1e-06,\n maxiter=300, disp=0)\n cls.start_params = np.array([8.07216448, 0.01087238, 0.44024134])\n\n\nclass TestNegativeBinomialNBP2Null(CheckNull):\n\n @classmethod\n def setup_class(cls):\n endog, exog = cls._get_data()\n cls.model = NegativeBinomialP(endog, exog, p=2)\n cls.model_null = 
NegativeBinomialP(endog, exog[:, 0], p=2)\n cls.res_null = cls.model_null.fit(start_params=[8, 1],\n method='bfgs', gtol=1e-06,\n maxiter=300, disp=0)\n cls.start_params = np.array([8.07216448, 0.01087238, 0.44024134])\n\n def test_start_null(self):\n endog, exog = self.model.endog, self.model.exog\n model_nb2 = NegativeBinomial(endog, exog, loglike_method='nb2')\n sp1 = model_nb2._get_start_params_null()\n sp0 = self.model._get_start_params_null()\n assert_allclose(sp0, sp1, rtol=1e-12)\n\n\nclass TestNegativeBinomialNBP1Null(CheckNull):\n\n @classmethod\n def setup_class(cls):\n endog, exog = cls._get_data()\n cls.model = NegativeBinomialP(endog, exog, p=1.)\n cls.model_null = NegativeBinomialP(endog, exog[:, 0], p=1)\n cls.res_null = cls.model_null.fit(start_params=[8, 1],\n method='bfgs', gtol=1e-06,\n maxiter=300, disp=0)\n cls.start_params = np.array([7.730452, 2.01633068e-02, 1763.0])\n\n def test_start_null(self):\n endog, exog = self.model.endog, self.model.exog\n model_nb2 = NegativeBinomial(endog, exog, loglike_method='nb1')\n sp1 = model_nb2._get_start_params_null()\n sp0 = self.model._get_start_params_null()\n assert_allclose(sp0, sp1, rtol=1e-12)\n\n\nclass TestGeneralizedPoissonNull(CheckNull):\n\n @classmethod\n def setup_class(cls):\n endog, exog = cls._get_data()\n cls.model = GeneralizedPoisson(endog, exog, p=1.5)\n cls.model_null = GeneralizedPoisson(endog, exog[:, 0], p=1.5)\n cls.res_null = cls.model_null.fit(start_params=[8.4, 1],\n method='bfgs', gtol=1e-08,\n maxiter=300, disp=0)\n cls.start_params = np.array([6.91127148, 0.04501334, 0.88393736])\n\n\ndef test_null_options():\n # this is a \"nice\" case because we only check that options are used\n # correctly\n nobs = 10\n exog = np.ones((20, 2))\n exog[:nobs // 2, 1] = 0\n mu = np.exp(exog.sum(1))\n endog = np.random.poisson(mu) # Note no size=nobs in np.random\n res = Poisson(endog, exog).fit(start_params=np.log([1, 1]), disp=0)\n llnull0 = res.llnull\n assert_(hasattr(res, 'res_llnull') is False)\n res.set_null_options(attach_results=True)\n # default optimization\n lln = res.llnull # access to trigger computation\n assert_allclose(res.res_null.mle_settings['start_params'],\n np.log(endog.mean()), rtol=1e-10)\n assert_equal(res.res_null.mle_settings['optimizer'], 'bfgs')\n assert_allclose(lln, llnull0)\n\n res.set_null_options(attach_results=True, start_params=[0.5], method='nm')\n lln = res.llnull # access to trigger computation\n assert_allclose(res.res_null.mle_settings['start_params'], [0.5],\n rtol=1e-10)\n assert_equal(res.res_null.mle_settings['optimizer'], 'nm')\n\n res.summary() # call to fill cache\n assert_('prsquared' in res._cache)\n assert_equal(res._cache['llnull'], lln)\n\n assert_('prsquared' in res._cache)\n assert_equal(res._cache['llnull'], lln)\n\n # check setting cache\n res.set_null_options(llnull=999)\n assert_('prsquared' not in res._cache)\n assert_equal(res._cache['llnull'], 999)\n\n\ndef test_optim_kwds_prelim():\n # test that fit options for preliminary fit is correctly transmitted\n\n cur_dir = os.path.dirname(os.path.abspath(__file__))\n filepath = os.path.join(cur_dir, \"results\", \"sm3533.csv\")\n df = pd.read_csv(filepath)\n\n features = ['pp']\n X = (df[features] - df[features].mean())/df[features].std()\n y = df['num'].values\n exog = sm.add_constant(X[features].copy())\n # offset=np.log(df['population'].values + 1)\n # offset currently not used\n offset = None\n\n # we use \"nm\", \"bfgs\" does not work for Poisson/exp with older scipy\n optim_kwds_prelim = 
dict(method='nm', maxiter=5000)\n model = Poisson(y, exog, offset=offset) #\n res_poi = model.fit(disp=0, **optim_kwds_prelim)\n\n model = NegativeBinomial(y, exog, offset=offset)\n res = model.fit(disp=0, optim_kwds_prelim=optim_kwds_prelim)\n\n assert_allclose(res.mle_settings['start_params'][:-1], res_poi.params,\n rtol=1e-4)\n assert_equal(res.mle_settings['optim_kwds_prelim'], optim_kwds_prelim)\n assert_allclose(res.predict().mean(), y.mean(), rtol=0.1)\n\n # NBP22 and GPP p=1.5 also fail on older scipy with bfgs, use nm instead\n optim_kwds_prelim = dict(method='nm', maxiter=5000)\n model = NegativeBinomialP(y, exog, offset=offset, p=2)\n res = model.fit(disp=0, optim_kwds_prelim=optim_kwds_prelim)\n\n assert_allclose(res.mle_settings['start_params'][:-1], res_poi.params,\n rtol=1e-4)\n assert_equal(res.mle_settings['optim_kwds_prelim'], optim_kwds_prelim)\n assert_allclose(res.predict().mean(), y.mean(), rtol=0.1)\n\n # GPP with p=1.5 converges correctly,\n # GPP fails when p=2 even with good start_params\n model = GeneralizedPoisson(y, exog, offset=offset, p=1.5)\n res = model.fit(disp=0, maxiter=200, optim_kwds_prelim=optim_kwds_prelim)\n\n assert_allclose(res.mle_settings['start_params'][:-1], res_poi.params,\n rtol=1e-4)\n assert_equal(res.mle_settings['optim_kwds_prelim'], optim_kwds_prelim)\n # rough check that convergence makes sense\n assert_allclose(res.predict().mean(), y.mean(), rtol=0.1)\n\n\ndef test_unchanging_degrees_of_freedom():\n data = load_randhie()\n # see GH3734\n warnings.simplefilter('error')\n model = sm.NegativeBinomial(data.endog, data.exog, loglike_method='nb2')\n params = np.array([-0.05654134, -0.21213734, 0.08783102, -0.02991825,\n 0.22902315, 0.06210253, 0.06799444, 0.08406794,\n 0.18530092, 1.36645186])\n\n res1 = model.fit(start_params=params, disp=0)\n assert_equal(res1.df_model, 8)\n\n reg_params = np.array([-0.04854 , -0.15019404, 0.08363671, -0.03032834, 0.17592454,\n 0.06440753, 0.01584555, 0. , 0. 
, 1.36984628])\n\n res2 = model.fit_regularized(alpha=100, start_params=reg_params, disp=0)\n assert_(res2.df_model != 8)\n # If res2.df_model == res1.df_model, then this test is invalid.\n\n res3 = model.fit(start_params=params, disp=0)\n # Test that the call to `fit_regularized` did not\n # modify model.df_model inplace.\n assert_equal(res3.df_model, res1.df_model)\n assert_equal(res3.df_resid, res1.df_resid)\n\n\ndef test_mnlogit_float_name():\n df = pd.DataFrame({\"A\": [0., 1.1, 0, 0, 1.1], \"B\": [0, 1, 0, 1, 1]})\n with pytest.warns(SpecificationWarning,\n match='endog contains values are that not int-like'):\n result = smf.mnlogit(formula=\"A ~ B\", data=df).fit()\n summ = result.summary().as_text()\n assert 'A=1.1' in summ\n\n\ndef test_cov_confint_pandas():\n data = sm.datasets.anes96.load_pandas()\n exog = sm.add_constant(data.exog, prepend=False)\n res1 = sm.MNLogit(data.endog, exog).fit(method=\"newton\", disp=0)\n cov = res1.cov_params()\n ci = res1.conf_int()\n se = np.sqrt(np.diag(cov))\n se2 = (ci.iloc[:, 1] - ci.iloc[:, 0]) / (2 * stats.norm.ppf(0.975))\n assert_allclose(se, se2)\n assert_index_equal(ci.index, cov.index)\n assert_index_equal(cov.index, cov.columns)\n assert isinstance(ci.index, pd.MultiIndex)\n\n\ndef test_t_test():\n # GH669, check t_test works in multivariate model\n data = load_anes96()\n exog = sm.add_constant(data.exog, prepend=False)\n res1 = sm.MNLogit(data.endog, exog).fit(disp=0)\n r = np.ones(res1.cov_params().shape[0])\n t1 = res1.t_test(r)\n f1 = res1.f_test(r)\n\n data = sm.datasets.anes96.load()\n exog = sm.add_constant(data.exog, prepend=False)\n endog, exog = np.asarray(data.endog), np.asarray(exog)\n res2 = sm.MNLogit(endog, exog).fit(disp=0)\n t2 = res2.t_test(r)\n f2 = res2.f_test(r)\n\n assert_allclose(t1.effect, t2.effect)\n assert_allclose(f1.statistic, f2.statistic)\n"
]
| [
[
"numpy.testing.assert_allclose",
"numpy.dot",
"numpy.exp",
"numpy.size",
"numpy.cumsum",
"numpy.random.random",
"pandas.read_csv",
"numpy.bincount",
"numpy.log",
"pandas.DataFrame",
"numpy.random.poisson",
"numpy.eye",
"numpy.random.randint",
"numpy.arange",
"numpy.column_stack",
"numpy.testing.assert_raises",
"scipy.stats.chisquare",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_equal",
"numpy.testing.assert_almost_equal",
"numpy.random.randn",
"numpy.testing.assert_",
"numpy.identity",
"numpy.loadtxt",
"scipy.stats.norm.ppf",
"numpy.asarray",
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"numpy.ones",
"numpy.testing.assert_array_less",
"numpy.random.uniform",
"numpy.diag"
]
]
|
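The statsmodels test file above checks the discrete count and choice models (Poisson, GeneralizedPoisson, NegativeBinomial/NegativeBinomialP, Logit, MNLogit) against stored reference results. A minimal standalone sketch of the offset/exposure identity that test_poisson_predict asserts, using synthetic data instead of the RAND HIE dataset (the data and coefficients below are illustrative assumptions, not values from the tests):

import numpy as np
import statsmodels.api as sm

np.random.seed(0)
x = sm.add_constant(np.random.randn(500, 2), prepend=True)
y = np.random.poisson(np.exp(x @ np.array([0.5, 0.2, -0.1])))

res = sm.Poisson(y, x).fit(disp=0)
pred1 = res.predict()
# exposure enters multiplicatively, offset additively on the linear predictor,
# so exposure=2 and offset=log(2) both double the predicted mean
pred2 = res.predict(x, offset=0, exposure=2)
np.testing.assert_allclose(2 * pred1, pred2)
pred3 = res.predict(x, offset=np.log(2), exposure=1)
np.testing.assert_allclose(2 * pred1, pred3)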
nci/drish | [
"89cd8b740239c5b2c8222dffd4e27432fde170a1"
]
| [
"bin/assets/scripts/unet3Plus/unet_collection/layer_utils.py"
]
| [
"from __future__ import absolute_import\n\nfrom tensorflow.keras.layers import MaxPooling2D, MaxPooling3D, AveragePooling2D, AveragePooling3D, UpSampling2D, UpSampling3D, Conv2DTranspose, Conv3DTranspose, GlobalAveragePooling2D, GlobalAveragePooling3D\nfrom tensorflow.keras.layers import Conv2D, DepthwiseConv2D, Conv3D, Lambda\nfrom tensorflow.keras.layers import BatchNormalization, Activation, concatenate, multiply, add\nfrom tensorflow.keras.layers import ReLU, LeakyReLU, PReLU, ELU, Softmax\n\ndef decode_layer(X, channel, pool_size, unpool, kernel_size=3, \n activation='ReLU', batch_norm=False, name='decode'):\n '''\n An overall decode layer, based on either upsampling or trans conv.\n \n decode_layer(X, channel, pool_size, unpool, kernel_size=3,\n activation='ReLU', batch_norm=False, name='decode')\n \n Input\n ----------\n X: input tensor.\n pool_size: the decoding factor.\n channel: (for trans conv only) number of convolution filters.\n unpool: True or 'bilinear' for Upsampling2D with bilinear interpolation.\n 'nearest' for Upsampling2D with nearest interpolation.\n False for Conv2DTranspose + batch norm + activation. \n kernel_size: size of convolution kernels. \n If kernel_size='auto', then it equals to the `pool_size`.\n activation: one of the `tensorflow.keras.layers` interface, e.g., ReLU.\n batch_norm: True for batch normalization, False otherwise.\n name: prefix of the created keras layers.\n \n Output\n ----------\n X: output tensor.\n \n * The defaut: `kernel_size=3`, is suitable for `pool_size=2`.\n \n '''\n # parsers\n if unpool is False:\n # trans conv configurations\n bias_flag = not batch_norm\n \n elif unpool == 'nearest':\n # upsample2d configurations\n unpool = True\n interp = 'nearest'\n \n elif (unpool is True) or (unpool == 'bilinear'):\n # upsample2d configurations\n unpool = True\n interp = 'bilinear'\n \n else:\n raise ValueError('Invalid unpool keyword')\n \n if unpool:\n X = UpSampling2D(size=(pool_size, pool_size), interpolation=interp, name='{}_unpool'.format(name))(X)\n else:\n if kernel_size == 'auto':\n kernel_size = pool_size\n \n X = Conv2DTranspose(channel, kernel_size, strides=(pool_size, pool_size), \n padding='same', name='{}_trans_conv'.format(name))(X)\n \n # batch normalization\n if batch_norm:\n X = BatchNormalization(axis=3, name='{}_bn'.format(name))(X)\n \n # activation\n if activation is not None:\n activation_func = eval(activation)\n X = activation_func(name='{}_activation'.format(name))(X)\n \n return X\n\ndef encode_layer(X, channel, pool_size, pool, kernel_size='auto', \n activation='ReLU', batch_norm=False, name='encode'):\n '''\n An overall encode layer, based on one of the:\n (1) max-pooling, (2) average-pooling, (3) strided conv2d.\n \n encode_layer(X, channel, pool_size, pool, kernel_size='auto', \n activation='ReLU', batch_norm=False, name='encode')\n \n Input\n ----------\n X: input tensor.\n pool_size: the encoding factor.\n channel: (for strided conv only) number of convolution filters.\n pool: True or 'max' for MaxPooling2D.\n 'ave' for AveragePooling2D.\n False for strided conv + batch norm + activation.\n kernel_size: size of convolution kernels. 
\n If kernel_size='auto', then it equals to the `pool_size`.\n activation: one of the `tensorflow.keras.layers` interface, e.g., ReLU.\n batch_norm: True for batch normalization, False otherwise.\n name: prefix of the created keras layers.\n \n Output\n ----------\n X: output tensor.\n \n '''\n # parsers\n if (pool in [False, True, 'max', 'ave']) is not True:\n raise ValueError('Invalid pool keyword')\n \n # maxpooling2d as default\n if pool is True:\n pool = 'max'\n \n elif pool is False:\n # stride conv configurations\n bias_flag = not batch_norm\n \n if pool == 'max':\n X = MaxPooling2D(pool_size=(pool_size, pool_size), name='{}_maxpool'.format(name))(X)\n \n elif pool == 'ave':\n X = AveragePooling2D(pool_size=(pool_size, pool_size), name='{}_avepool'.format(name))(X)\n \n else:\n if kernel_size == 'auto':\n kernel_size = pool_size\n \n # linear convolution with strides\n X = Conv2D(channel, kernel_size, strides=(pool_size, pool_size), \n padding='valid', use_bias=bias_flag, name='{}_stride_conv'.format(name))(X)\n \n # batch normalization\n if batch_norm:\n X = BatchNormalization(axis=3, name='{}_bn'.format(name))(X)\n \n # activation\n if activation is not None:\n activation_func = eval(activation)\n X = activation_func(name='{}_activation'.format(name))(X)\n \n return X\n\n\ndef CONV_stack(X, channel, kernel_size=3, stack_num=2, \n dilation_rate=1, activation='ReLU', \n batch_norm=False, name='conv_stack'):\n '''\n Stacked convolutional layers:\n (Convolutional layer --> batch normalization --> Activation)*stack_num\n \n CONV_stack(X, channel, kernel_size=3, stack_num=2, dilation_rate=1, activation='ReLU', \n batch_norm=False, name='conv_stack')\n \n \n Input\n ----------\n X: input tensor.\n channel: number of convolution filters.\n kernel_size: size of 2-d convolution kernels.\n stack_num: number of stacked Conv2D-BN-Activation layers.\n dilation_rate: optional dilated convolution kernel.\n activation: one of the `tensorflow.keras.layers` interface, e.g., ReLU.\n batch_norm: True for batch normalization, False otherwise.\n name: prefix of the created keras layers.\n \n Output\n ----------\n X: output tensor\n \n '''\n \n bias_flag = not batch_norm\n \n # stacking Convolutional layers\n for i in range(stack_num):\n \n activation_func = eval(activation)\n \n # linear convolution\n X = Conv2D(channel, kernel_size, padding='same', use_bias=bias_flag, \n dilation_rate=dilation_rate, name='{}_{}'.format(name, i))(X)\n \n # batch normalization\n if batch_norm:\n X = BatchNormalization(axis=3, name='{}_{}_bn'.format(name, i))(X)\n \n # activation\n activation_func = eval(activation)\n X = activation_func(name='{}_{}_activation'.format(name, i))(X)\n \n return X\n\n\ndef CONV_output(X, n_labels, kernel_size=1, activation='Softmax', name='conv_output'):\n '''\n Convolutional layer with output activation.\n \n CONV_output(X, n_labels, kernel_size=1, activation='Softmax', name='conv_output')\n \n Input\n ----------\n X: input tensor.\n n_labels: number of classification label(s).\n kernel_size: size of 2-d convolution kernels. 
Default is 1-by-1.\n activation: one of the `tensorflow.keras.layers` or `keras_unet_collection.activations` interface or 'Sigmoid'.\n Default option is 'Softmax'.\n if None is received, then linear activation is applied.\n name: prefix of the created keras layers.\n \n Output\n ----------\n X: output tensor.\n \n '''\n \n X = Conv2D(n_labels, kernel_size, padding='same', use_bias=True, name=name)(X)\n \n if activation:\n \n if activation == 'Sigmoid':\n X = Activation('sigmoid', name='{}_activation'.format(name))(X)\n \n else:\n activation_func = eval(activation)\n X = activation_func(name='{}_activation'.format(name))(X)\n \n return X\n\n#-----------------------------------------------------------\n#-----------------------------------------------------------\n#-----------------------------------------------------------\n\n\ndef decode_layer3D(X, channel, pool_size, unpool, kernel_size=3, \n activation='ReLU', batch_norm=False, name='decode'):\n '''\n An overall decode layer, based on either upsampling or trans conv.\n \n decode_layer(X, channel, pool_size, unpool, kernel_size=3,\n activation='ReLU', batch_norm=False, name='decode')\n \n Input\n ----------\n X: input tensor.\n pool_size: the decoding factor.\n channel: (for trans conv only) number of convolution filters.\n unpool: True or 'bilinear' for Upsampling2D with bilinear interpolation.\n 'nearest' for Upsampling2D with nearest interpolation.\n False for Conv2DTranspose + batch norm + activation. \n kernel_size: size of convolution kernels. \n If kernel_size='auto', then it equals to the `pool_size`.\n activation: one of the `tensorflow.keras.layers` interface, e.g., ReLU.\n batch_norm: True for batch normalization, False otherwise.\n name: prefix of the created keras layers.\n \n Output\n ----------\n X: output tensor.\n \n * The defaut: `kernel_size=3`, is suitable for `pool_size=2`.\n \n '''\n # parsers\n if unpool is False:\n # trans conv configurations\n bias_flag = not batch_norm\n\n\n if unpool:\n X = UpSampling3D(size=(pool_size, pool_size, pool_size), name='{}_unpool'.format(name))(X)\n else:\n if kernel_size == 'auto':\n kernel_size = pool_size\n \n X = Conv3DTranspose(channel, kernel_size, strides=(pool_size, pool_size, pool_size), \n padding='same', name='{}_trans_conv'.format(name))(X)\n \n # batch normalization\n if batch_norm:\n X = BatchNormalization(axis=3, name='{}_bn'.format(name))(X)\n \n # activation\n if activation is not None:\n activation_func = eval(activation)\n X = activation_func(name='{}_activation'.format(name))(X)\n \n return X\n\ndef encode_layer3D(X, channel, pool_size, pool, kernel_size='auto', \n activation='ReLU', batch_norm=False, name='encode'):\n '''\n An overall encode layer, based on one of the:\n (1) max-pooling, (2) average-pooling, (3) strided conv2d.\n \n encode_layer(X, channel, pool_size, pool, kernel_size='auto', \n activation='ReLU', batch_norm=False, name='encode')\n \n Input\n ----------\n X: input tensor.\n pool_size: the encoding factor.\n channel: (for strided conv only) number of convolution filters.\n pool: True or 'max' for MaxPooling2D.\n 'ave' for AveragePooling2D.\n False for strided conv + batch norm + activation.\n kernel_size: size of convolution kernels. 
\n If kernel_size='auto', then it equals to the `pool_size`.\n activation: one of the `tensorflow.keras.layers` interface, e.g., ReLU.\n batch_norm: True for batch normalization, False otherwise.\n name: prefix of the created keras layers.\n \n Output\n ----------\n X: output tensor.\n \n '''\n # parsers\n if (pool in [False, True, 'max', 'ave']) is not True:\n raise ValueError('Invalid pool keyword')\n \n # maxpooling2d as default\n if pool is True:\n pool = 'max'\n \n elif pool is False:\n # stride conv configurations\n bias_flag = not batch_norm\n \n if pool == 'max':\n X = MaxPooling3D(pool_size=(pool_size, pool_size, pool_size), name='{}_maxpool'.format(name))(X)\n \n elif pool == 'ave':\n X = AveragePooling3D(pool_size=(pool_size, pool_size, pool_size), name='{}_avepool'.format(name))(X)\n \n else:\n if kernel_size == 'auto':\n kernel_size = pool_size\n \n # linear convolution with strides\n X = Conv3D(channel, kernel_size, strides=(pool_size, pool_size, pool_size), \n padding='valid', use_bias=bias_flag, name='{}_stride_conv'.format(name))(X)\n \n # batch normalization\n if batch_norm:\n X = BatchNormalization(axis=3, name='{}_bn'.format(name))(X)\n \n # activation\n if activation is not None:\n activation_func = eval(activation)\n X = activation_func(name='{}_activation'.format(name))(X)\n \n return X\n\n\ndef CONV_stack3D(X, channel, kernel_size=3, stack_num=2, \n dilation_rate=1, activation='ReLU', \n batch_norm=False, name='conv_stack'):\n '''\n Stacked convolutional layers:\n (Convolutional layer --> batch normalization --> Activation)*stack_num\n \n CONV_stack(X, channel, kernel_size=3, stack_num=2, dilation_rate=1, activation='ReLU', \n batch_norm=False, name='conv_stack')\n \n \n Input\n ----------\n X: input tensor.\n channel: number of convolution filters.\n kernel_size: size of 2-d convolution kernels.\n stack_num: number of stacked Conv2D-BN-Activation layers.\n dilation_rate: optional dilated convolution kernel.\n activation: one of the `tensorflow.keras.layers` interface, e.g., ReLU.\n batch_norm: True for batch normalization, False otherwise.\n name: prefix of the created keras layers.\n \n Output\n ----------\n X: output tensor\n \n '''\n \n bias_flag = not batch_norm\n \n # stacking Convolutional layers\n for i in range(stack_num):\n \n activation_func = eval(activation)\n \n # linear convolution\n X = Conv3D(channel, kernel_size, padding='same', use_bias=bias_flag, \n dilation_rate=dilation_rate, name='{}_{}'.format(name, i))(X)\n \n # batch normalization\n if batch_norm:\n X = BatchNormalization(axis=3, name='{}_{}_bn'.format(name, i))(X)\n \n # activation\n activation_func = eval(activation)\n X = activation_func(name='{}_{}_activation'.format(name, i))(X)\n \n return X\n\n\ndef CONV_output3D(X, n_labels, kernel_size=1, activation='Softmax', name='conv_output'):\n '''\n Convolutional layer with output activation.\n \n CONV_output(X, n_labels, kernel_size=1, activation='Softmax', name='conv_output')\n \n Input\n ----------\n X: input tensor.\n n_labels: number of classification label(s).\n kernel_size: size of 2-d convolution kernels. 
Default is 1-by-1.\n activation: one of the `tensorflow.keras.layers` or `keras_unet_collection.activations` interface or 'Sigmoid'.\n Default option is 'Softmax'.\n if None is received, then linear activation is applied.\n name: prefix of the created keras layers.\n \n Output\n ----------\n X: output tensor.\n \n '''\n \n X = Conv3D(n_labels, kernel_size, padding='same', use_bias=True, name=name)(X)\n \n if activation:\n \n if activation == 'Sigmoid':\n X = Activation('sigmoid', name='{}_activation'.format(name))(X)\n \n else:\n activation_func = eval(activation)\n X = activation_func(name='{}_activation'.format(name))(X)\n \n return X\n\n"
]
| [
[
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Conv3D"
]
]
|
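The layer_utils module above provides the encode/decode/conv building blocks used by the unet3Plus models. A minimal composition sketch, assuming TensorFlow 2.x and that the file is importable as layer_utils (the input shape and layer names are illustrative):

from tensorflow.keras import Input, Model
from layer_utils import CONV_stack, encode_layer, decode_layer, CONV_output

inputs = Input((64, 64, 3))
x = CONV_stack(inputs, 32, stack_num=2, batch_norm=True, name='enc0')
x = encode_layer(x, 32, pool_size=2, pool='max', name='down0')       # 64 -> 32
x = decode_layer(x, 32, pool_size=2, unpool='bilinear', name='up0')  # 32 -> 64
outputs = CONV_output(x, n_labels=2, activation='Softmax', name='head')
model = Model(inputs, outputs)

Note that these helpers resolve the activation string with eval, so the activation argument must name one of the classes the module imports from tensorflow.keras.layers (ReLU, LeakyReLU, PReLU, ELU, Softmax).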
Sean-Bin-Yang/TPR | [
"5a36bbe032d65a08a5f84695eccce7441723d267"
]
| [
"embed/embedding.py"
]
| [
"import torch.nn as nn\nimport torch\n\n\nclass PathEmbedding(nn.Module):\n \n def __init__(self, node2vec, dropout=0.1):\n \n super().__init__()\n\n self.path_embed = nn.Embedding.from_pretrained(node2vec) \n \n\n # self.rt = nn.Embedding(21, 64) ##road type\n # self.ow = nn.Embedding(2,16) ##one way or not\n # self.lane = nn.Embedding(7,32) ##number of lane\n # self.t_signal = nn.Embedding(2,16) ##traffic signal\n\n self.dropout = nn.Dropout(p=dropout)\n\n\n # def forward(self, Path, Path_RT, Path_OW, Path_Lane, Path_ts):\n # x = torch.cat([self.path_embed(Path), self.rt(Path_RT),self.ow(Path_OW),self.lane(Path_Lane), self.t_signal(Path_ts)],dim=2)\n # return self.dropout(x)\n \n def forward(self, Path):\n x = self.path_embed(Path)\n return self.dropout(x)\n"
]
| [
[
"torch.nn.Embedding.from_pretrained",
"torch.nn.Dropout"
]
]
|
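PathEmbedding wraps a pretrained node2vec lookup table plus dropout. A usage sketch, assuming the class is importable from embed.embedding (the vocabulary size, embedding width, and batch shape are illustrative):

import torch
from embed.embedding import PathEmbedding

node2vec = torch.randn(1000, 128)         # stand-in for a pretrained node2vec table
embed = PathEmbedding(node2vec, dropout=0.1)
paths = torch.randint(0, 1000, (4, 20))   # batch of 4 paths, 20 node ids each
out = embed(paths)                        # -> shape (4, 20, 128)

By default nn.Embedding.from_pretrained freezes the table (freeze=True), so the node vectors are not updated during training unless that flag is changed.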
cwwang15/neural_network_cracking | [
"2bb89da599ca0f30d868ca26ab284d559b56d301"
]
| [
"setup.py"
]
| [
"from distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nimport numpy\n\nsetup(\n cmdclass = { 'build_ext': build_ext },\n version = '0.0.1',\n ext_modules = [Extension(\"generator\", [\"pwd_guess_ctypes.pyx\"],\n include_dirs = [numpy.get_include()])]\n)\n"
]
| [
[
"numpy.get_include"
]
]
|
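This setup.py compiles pwd_guess_ctypes.pyx into an extension module named generator; numpy.get_include() is on include_dirs because the .pyx presumably cimports numpy's C headers. A sketch of the build-and-import cycle, assuming Cython and numpy are installed:

# Build the extension in place first:
#     python setup.py build_ext --inplace
# The Extension is declared with the name "generator", so the compiled
# module is then imported like any other Python module:
import generator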
JohanMabille/proteus | [
"6307c442a74471945a688efe4cfeb347bdd765ca"
]
| [
"proteus/tests/SWFlow/seawall.py"
]
| [
"from __future__ import division\nfrom builtins import object\nfrom past.utils import old_div\nfrom proteus.mprans import (SW2DCV, GN_SW2DCV)\nfrom proteus.Domain import RectangularDomain, PlanarStraightLineGraphDomain\nimport numpy as np\nfrom proteus import (Domain, Context, MeshTools as mt)\nfrom proteus.Profiling import logEvent\nimport proteus.SWFlow.SWFlowProblem as SWFlowProblem\n\n\"\"\"\nWe now consider the experiments of [Hsiao and Lin, 2013]\n performed at the Tainan Hydraulic Laboratory in Taiwan.\n In this is set of experiments a solitary wave over-tops a seawall.\n\"\"\"\n\n# *************************** #\n# ***** GENERAL OPTIONS ***** #\n# *************************** #\nopts = Context.Options([\n ('sw_model', 1, \"sw_model = {0,1} for {SWEs,DSWEs}\"),\n (\"final_time\", 12.0, \"Final time for simulation\"),\n (\"dt_output\", 0.1, \"Time interval to output solution\"),\n (\"cfl\", 0.25, \"Desired CFL restriction\"),\n (\"refinement\", 4, \"Refinement level\"),\n (\"reflecting_BCs\",False,\"Use reflecting BCs\"),\n (\"structured\", True, \"Structured or unstructured mesh\"),\n (\"he\", 0.5, \"Mesh size for unstructured mesh\"),\n (\"mannings\", 0.012, \"Mannings roughness coefficient\")\n])\n\n###################\n# DOMAIN AND MESH #\n###################\nL = (15.0, 2.0) # this is domain length in x direction and y direction\nrefinement = opts.refinement\nrectangle = RectangularDomain(L=L)\n\n# CREATE REFINEMENT #\nnnx0 = 6\nnnx = (nnx0 - 1) * (2**refinement) + 1\nnny = old_div((nnx - 1), 10) + 1\nhe = old_div(L[0], float(nnx - 1))\nif opts.structured:\n domain = rectangle\nelse:\n rectangle.writePoly(\"seawall\")\n domain = PlanarStraightLineGraphDomain(fileprefix=\"seawall\")\n domain.MeshOptions.triangleOptions = \"pAq30Dena%f\" % (0.5 * opts.he**2,)\n nnx = None\n nny = None\n\n###############################\n# CONSTANTS NEEDED FOR SETUP #\n###############################\ng = 9.81\nh0 = 0.2 # water depth\na = 0.35 # relative amplitude\nk_wavenumber = np.sqrt(3.0 * a / (4.0 * h0**3)) # wavenumber\nz = np.sqrt(3.0 * a * h0) / (2.0 * h0 * np.sqrt(h0 * (1.0 + a))) # width\nc = np.sqrt(g * (1.0 + a) * h0) # wave celerity\nx0 = 5.9 # initial location of solitary wave\n\n\n###############################\n# Functions defined here #\n###############################\n\n\ndef solitary_wave(x, t):\n sechSqd = (1.00 / np.cosh(z * (x - x0 - c * t)))**2.00\n return a * h0 * sechSqd\n\n\ndef bathymetry_function(X):\n # need this shift for experimental data\n x = X[0] + 3\n # define conditionals for bathymetry\n conds = [x < 10, (13.6 < x) & (x <= 13.9), (13.9 < x) & (x <= 13.948), \\\n (13.948 < x) & (x<= 14.045)]\n bath = [lambda x: 0 - h0, \\\n lambda x: 3.6 / 20. + 0.076 / (13.9 - 13.6) * (x - 13.6) - h0, \\\n lambda x: 3.6 / 20. + 0.076 - h0, \\\n lambda x: 3.6 / 20. + 0.076 - (0.076 - 0.022) / (14.045 - 13.948) * (x - 13.948) - h0, \\\n lambda x: 1 / 20. * (x - 10.) 
- h0]\n return np.piecewise(x, conds, bath)\n\n##############################\n##### INITIAL CONDITIONS #####\n##############################\nclass water_height_at_t0(object):\n def uOfXT(self, X, t):\n eta = solitary_wave(X[0], 0)\n h = max(eta - bathymetry_function(X), 0.)\n return h\n\n\nclass x_mom_at_t0(object):\n def uOfXT(self, X, t):\n eta = solitary_wave(X[0], 0)\n h = max(eta - bathymetry_function(X), 0.)\n x_mom = h * c * eta / (h0 + eta)\n return x_mom\n\n\nclass Zero(object):\n def uOfXT(self, X, t):\n return 0.\n\n\n\"\"\"\nheta and hw are needed for the modified green naghdi equations.\nFor initial conditions, heta -> h^2 and hw -> h^2div(u).\nNote that the BCs for the heta and hw should be same as h.\nFor more details see: 'Robust explicit relaxation technique for solving\nthe Green-Naghdi equations' by Guermond, Popov, Tovar, Kees.\nJCP 2019\n\"\"\"\n\nclass heta_at_t0(object):\n def uOfXT(self, X, t):\n h = water_height_at_t0().uOfXT(X, t)\n return h**2\n\n\nclass hw_at_t0(object):\n def uOfXT(self, X, t):\n eta = solitary_wave(X[0], 0)\n h = max(eta - bathymetry_function(X), 0.)\n hprime = -2.0 * z * eta * np.tanh(z * (X[0] - x0 - c * t))\n hw = h * (-c * h0 * eta * hprime / (h0 + eta)**2)\n return hw\n\n\n###############################\n##### BOUNDARY CONDITIONS #####\n###############################\nX_coords = (0.0, 15.0)\n\ndef x_mom_DBC(X, flag):\n if X[0] == X_coords[0] or X[0] == X_coords[1]:\n return lambda x, t: 0.0\n\n# ********************************** #\n# ***** Create mySWFlowProblem ***** #\n# ********************************** #\noutputStepping = SWFlowProblem.OutputStepping(\n opts.final_time, dt_output=opts.dt_output)\ninitialConditions = {'water_height': water_height_at_t0(),\n 'x_mom': x_mom_at_t0(),\n 'y_mom': Zero(),\n 'h_times_eta': heta_at_t0(),\n 'h_times_w': hw_at_t0(),\n 'h_times_beta': Zero()}\nboundaryConditions = {'water_height': lambda x, flag: None,\n 'x_mom': x_mom_DBC,\n 'y_mom': lambda x, flag: lambda x, t: 0.0,\n 'h_times_eta': lambda x, flag: None,\n 'h_times_w': lambda x, flag: None,\n 'h_times_beta': x_mom_DBC}\nmySWFlowProblem = SWFlowProblem.SWFlowProblem(sw_model=opts.sw_model,\n cfl=opts.cfl,\n outputStepping=outputStepping,\n structured=opts.structured,\n he=he,\n nnx=nnx,\n nny=nny,\n domain=domain,\n initialConditions=initialConditions,\n boundaryConditions=boundaryConditions,\n reflectingBCs=opts.reflecting_BCs,\n bathymetry=bathymetry_function)\nmySWFlowProblem.physical_parameters['LINEAR_FRICTION'] = 0\nmySWFlowProblem.physical_parameters['mannings'] = opts.mannings\n"
]
| [
[
"numpy.cosh",
"numpy.tanh",
"numpy.piecewise",
"numpy.sqrt"
]
]
|
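The initial free surface in seawall.py is a solitary wave; it can be checked standalone with numpy, reusing the constants defined in the script (no proteus installation needed):

import numpy as np

g, h0, a, x0 = 9.81, 0.2, 0.35, 5.9
z = np.sqrt(3.0 * a * h0) / (2.0 * h0 * np.sqrt(h0 * (1.0 + a)))  # width
c = np.sqrt(g * (1.0 + a) * h0)                                   # wave celerity

def solitary_wave(x, t):
    # sech^2 profile of amplitude a*h0, centred at x0 + c*t
    return a * h0 / np.cosh(z * (x - x0 - c * t)) ** 2

x = np.linspace(0.0, 15.0, 7)
print(solitary_wave(x, 0.0))  # peaks at a*h0 = 0.07 m near x = x0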
steliosploumpis/menpo | [
"473bfc83ce027e8be3290daba31914f54ef84e6e"
]
| [
"menpo/shape/mesh/textured.py"
]
| [
"import numpy as np\n\nfrom menpo.shape import PointCloud\nfrom menpo.transform import tcoords_to_image_coords\n\nfrom ..adjacency import mask_adjacency_array, reindex_adjacency_array\nfrom .base import TriMesh, grid_tcoords\n\n\nclass TexturedTriMesh(TriMesh):\n r\"\"\"\n Combines a :map:`TriMesh` with a texture. Also encapsulates the texture\n coordinates required to render the texture on the mesh.\n\n Parameters\n ----------\n points : ``(n_points, n_dims)`` `ndarray`\n The array representing the points.\n tcoords : ``(N, 2)`` `ndarray`\n The texture coordinates for the mesh.\n texture : :map:`Image`\n The texture for the mesh.\n trilist : ``(M, 3)`` `ndarray` or ``None``, optional\n The triangle list. If ``None``, a Delaunay triangulation of\n the points will be used instead.\n copy: `bool`, optional\n If ``False``, the points, trilist and texture will not be copied on\n assignment.\n In general this should only be used if you know what you are doing.\n \"\"\"\n def __init__(self, points, tcoords, texture, trilist=None, copy=True):\n super(TexturedTriMesh, self).__init__(points, trilist=trilist,\n copy=copy)\n self.tcoords = PointCloud(tcoords, copy=copy)\n\n if not copy:\n self.texture = texture\n else:\n self.texture = texture.copy()\n\n @property\n def n_channels(self):\n r\"\"\"\n The number of channels of colour used (e.g. 3 for RGB).\n\n :type: `int`\n \"\"\"\n return self.texture.n_channels\n\n @classmethod\n def init_2d_grid(cls, shape, spacing=None, tcoords=None, texture=None):\n r\"\"\"\n Create a TexturedTriMesh that exists on a regular 2D grid. The first\n dimension is the number of rows in the grid and the second dimension\n of the shape is the number of columns. ``spacing`` optionally allows\n the definition of the distance between points (uniform over points).\n The spacing may be different for rows and columns.\n\n The triangulation will be right-handed and the diagonal will go from\n the top left to the bottom right of a square on the grid.\n\n If no texture is passed a blank (black) texture is attached with\n correct texture coordinates for texture mapping an image of the same\n size as ``shape``.\n\n Parameters\n ----------\n shape : `tuple` of 2 `int`\n The size of the grid to create, this defines the number of points\n across each dimension in the grid. The first element is the number\n of rows and the second is the number of columns.\n spacing : `int` or `tuple` of 2 `int`, optional\n The spacing between points. If a single `int` is provided, this\n is applied uniformly across each dimension. 
If a `tuple` is\n provided, the spacing is applied non-uniformly as defined e.g.\n ``(2, 3)`` gives a spacing of 2 for the rows and 3 for the\n columns.\n tcoords : ``(N, 2)`` `ndarray`, optional\n The texture coordinates for the mesh.\n texture : :map:`Image`, optional\n The texture for the mesh.\n\n Returns\n -------\n trimesh : :map:`TriMesh`\n A TriMesh arranged in a grid.\n \"\"\"\n pc = TriMesh.init_2d_grid(shape, spacing=spacing)\n points = pc.points\n trilist = pc.trilist\n # Ensure that the tcoords and texture are copied\n if tcoords is not None:\n tcoords = tcoords.copy()\n else:\n tcoords = grid_tcoords(shape)\n if texture is not None:\n texture = texture.copy()\n else:\n from menpo.image import Image\n # Default texture is all black\n texture = Image.init_blank(shape)\n return TexturedTriMesh(points, tcoords, texture, trilist=trilist,\n copy=False)\n\n @classmethod\n def init_from_depth_image(cls, depth_image, tcoords=None, texture=None):\n r\"\"\"\n Return a 3D textured triangular mesh from the given depth image. The\n depth image is assumed to represent height/depth values and the XY\n coordinates are assumed to unit spaced and represent image coordinates.\n This is particularly useful for visualising depth values that have been\n recovered from images.\n\n The optionally passed texture will be textured mapped onto the planar\n surface using the correct texture coordinates for an image of the\n same shape as ``depth_image``.\n\n Parameters\n ----------\n depth_image : :map:`Image` or subclass\n A single channel image that contains depth values - as commonly\n returned by RGBD cameras, for example.\n tcoords : ``(N, 2)`` `ndarray`, optional\n The texture coordinates for the mesh.\n texture : :map:`Image`, optional\n The texture for the mesh.\n\n Returns\n -------\n depth_cloud : ``type(cls)``\n A new 3D TriMesh with unit XY coordinates and the given depth\n values as Z coordinates. The trilist is constructed as in\n :meth:`init_2d_grid`.\n \"\"\"\n from menpo.image import MaskedImage\n\n new_tmesh = cls.init_2d_grid(depth_image.shape, tcoords=tcoords,\n texture=texture)\n if isinstance(depth_image, MaskedImage):\n new_tmesh = new_tmesh.from_mask(depth_image.mask.as_vector())\n return cls(np.hstack([new_tmesh.points,\n depth_image.as_vector(keep_channels=True).T]),\n new_tmesh.tcoords.points,\n new_tmesh.texture,\n trilist=new_tmesh.trilist,\n copy=False)\n\n def tcoords_pixel_scaled(self):\n r\"\"\"\n Returns a :map:`PointCloud` that is modified to be suitable for directly\n indexing into the pixels of the texture (e.g. for manual mapping\n operations). 
The resulting tcoords behave just like image landmarks\n do.\n\n The operations that are performed are:\n\n - Flipping the origin from bottom-left to top-left\n - Scaling the tcoords by the image shape (denormalising them)\n - Permuting the axis so that\n\n Returns\n -------\n tcoords_scaled : :map:`PointCloud`\n A copy of the tcoords that behave like :map:`Image` landmarks\n\n Examples\n --------\n Recovering pixel values for every texture coordinate:\n\n >>> texture = texturedtrimesh.texture\n >>> tc_ps = texturedtrimesh.tcoords_pixel_scaled()\n >>> pixel_values_at_tcs = texture.sample(tc_ps)\n \"\"\"\n return tcoords_to_image_coords(self.texture.shape).apply(self.tcoords)\n\n def from_vector(self, flattened):\n r\"\"\"\n Builds a new :class:`TexturedTriMesh` given the `flattened` 1D vector.\n Note that the trilist, texture, and tcoords will be drawn from self.\n\n Parameters\n ----------\n flattened : ``(N,)`` `ndarray`\n Vector representing a set of points.\n\n Returns\n --------\n trimesh : :map:`TriMesh`\n A new trimesh created from the vector with ``self`` trilist.\n \"\"\"\n return TexturedTriMesh(flattened.reshape([-1, self.n_dims]),\n self.tcoords.points, self.texture,\n trilist=self.trilist)\n\n def from_mask(self, mask):\n \"\"\"\n A 1D boolean array with the same number of elements as the number of\n points in the TexturedTriMesh. This is then broadcast across the\n dimensions of the mesh and returns a new mesh containing only those\n points that were ``True`` in the mask.\n\n Parameters\n ----------\n mask : ``(n_points,)`` `ndarray`\n 1D array of booleans\n\n Returns\n -------\n mesh : :map:`TexturedTriMesh`\n A new mesh that has been masked.\n \"\"\"\n if mask.shape[0] != self.n_points:\n raise ValueError('Mask must be a 1D boolean array of the same '\n 'number of entries as points in this '\n 'TexturedTriMesh.')\n\n ttm = self.copy()\n if np.all(mask): # Fast path for all true\n return ttm\n else:\n # Recalculate the mask to remove isolated vertices\n isolated_mask = self._isolated_mask(mask)\n # Recreate the adjacency array with the updated mask\n masked_adj = mask_adjacency_array(isolated_mask, self.trilist)\n ttm.trilist = reindex_adjacency_array(masked_adj)\n ttm.points = ttm.points[isolated_mask, :]\n ttm.tcoords.points = ttm.tcoords.points[isolated_mask, :]\n return ttm\n\n def clip_texture(self, range=(0., 1.)):\n \"\"\"\n Method that returns a copy of the object with the texture values\n clipped in range ``(0, 1)``.\n\n Parameters\n ----------\n range : ``(float, float)``, optional\n The clipping range.\n\n Returns\n -------\n self : :map:`ColouredTriMesh`\n A copy of self with its texture clipped.\n \"\"\"\n instance = self.copy()\n instance.texture.pixels = np.clip(self.texture.pixels, *range)\n return instance\n\n def rescale_texture(self, minimum, maximum, per_channel=True):\n r\"\"\"\n A copy of this mesh with texture linearly rescaled to fit a range.\n\n Parameters\n ----------\n minimum: `float`\n The minimal value of the rescaled colours\n maximum: `float`\n The maximal value of the rescaled colours\n per_channel: `boolean`, optional\n If ``True``, each channel will be rescaled independently. 
If\n ``False``, the scaling will be over all channels.\n\n Returns\n -------\n textured_mesh : ``type(self)``\n A copy of this mesh with texture linearly rescaled to fit in the\n range provided.\n \"\"\"\n instance = self.copy()\n instance.texture = instance.texture.rescale_pixels(\n minimum, maximum, per_channel=per_channel)\n return instance\n\n def _view_3d(self, figure_id=None, new_figure=True, render_texture=True,\n mesh_type='surface', ambient_light=0.0, specular_light=0.0,\n colour='r', line_width=2, normals=None, normals_colour='k',\n normals_line_width=2, normals_marker_style='2darrow',\n normals_marker_resolution=8, normals_marker_size=None,\n step=None, alpha=1.0):\n r\"\"\"\n Visualize the Textured TriMesh in 3D.\n\n Parameters\n ----------\n figure_id : `object`, optional\n The id of the figure to be used.\n new_figure : `bool`, optional\n If ``True``, a new figure is created.\n render_texture : `bool`, optional\n If ``True``, then the texture is rendered. If ``False``, then only\n the TriMesh is rendered with the specified `colour`.\n mesh_type : ``{'surface', 'wireframe'}``, optional\n The representation type to be used for the mesh.\n ambient_light : `float`, optional\n The ambient light intensity. It must be in range ``[0., 1.]``.\n specular_light : `float`, optional\n The specular light intensity. It must be in range ``[0., 1.]``.\n colour : See Below, optional\n The colour of the mesh if `render_texture` is ``False``.\n Example options ::\n\n {r, g, b, c, m, k, w}\n or\n (3, ) ndarray\n\n line_width : `float`, optional\n The width of the lines, if there are any.\n normals : ``(n_points, 3)`` `ndarray` or ``None``, optional\n If ``None``, then the normals will not be rendered. If `ndarray`,\n then the provided normals will be rendered as well. Note that a\n normal must be provided for each point in the TriMesh.\n normals_colour : See Below, optional\n The colour of the normals.\n Example options ::\n\n {r, g, b, c, m, k, w}\n or\n (3, ) ndarray\n\n normals_line_width : `float`, optional\n The width of the lines of the normals. It only applies if `normals`\n is not ``None``.\n normals_marker_style : `str`, optional\n The style of the markers of the normals. It only applies if `normals`\n is not ``None``.\n Example options ::\n\n {2darrow, 2dcircle, 2dcross, 2ddash, 2ddiamond, 2dhooked_arrow,\n 2dsquare, 2dthick_arrow, 2dthick_cross, 2dtriangle, 2dvertex,\n arrow, axes, cone, cube, cylinder, point, sphere}\n\n normals_marker_resolution : `int`, optional\n The resolution of the markers of the normals. For spheres, for\n instance, this is the number of divisions along theta and phi. It\n only applies if `normals` is not ``None``.\n normals_marker_size : `float` or ``None``, optional\n The size of the markers. This size can be seen as a scale factor\n applied to the size markers, which is by default calculated from\n the inter-marker spacing. If ``None``, then an optimal marker size\n value will be set automatically. It only applies if `normals` is not\n ``None``.\n step : `int` or ``None``, optional\n If `int`, then one every `step` normals will be rendered.\n If ``None``, then all vertexes will be rendered. 
It only applies if\n `normals` is not ``None``.\n alpha : `float`, optional\n Defines the transparency (opacity) of the object.\n\n Returns\n -------\n renderer : `menpo3d.visualize.TexturedTriMeshViewer3D`\n The Menpo3D rendering object.\n \"\"\"\n if render_texture:\n try:\n from menpo3d.visualize import TexturedTriMeshViewer3d\n renderer = TexturedTriMeshViewer3d(figure_id, new_figure,\n self.points, self.trilist,\n self.texture,\n self.tcoords.points)\n renderer.render(\n mesh_type=mesh_type, ambient_light=ambient_light,\n specular_light=specular_light, normals=normals,\n normals_colour=normals_colour,\n normals_line_width=normals_line_width,\n normals_marker_style=normals_marker_style,\n normals_marker_resolution=normals_marker_resolution,\n normals_marker_size=normals_marker_size, step=step,\n alpha=alpha)\n return renderer\n except ImportError:\n from menpo.visualize import Menpo3dMissingError\n raise Menpo3dMissingError()\n else:\n try:\n from menpo3d.visualize import TriMeshViewer3d\n renderer = TriMeshViewer3d(figure_id, new_figure, self.points,\n self.trilist)\n renderer.render(\n mesh_type=mesh_type, line_width=line_width, colour=colour,\n normals=normals, normals_colour=normals_colour,\n normals_line_width=normals_line_width,\n normals_marker_style=normals_marker_style,\n normals_marker_resolution=normals_marker_resolution,\n normals_marker_size=normals_marker_size, step=step,\n alpha=alpha)\n return renderer\n except ImportError:\n from menpo.visualize import Menpo3dMissingError\n raise Menpo3dMissingError()\n\n def _view_2d(self, figure_id=None, new_figure=False, image_view=True,\n render_lines=True, line_colour='r', line_style='-',\n line_width=1., render_markers=True, marker_style='o',\n marker_size=5, marker_face_colour='k', marker_edge_colour='k',\n marker_edge_width=1., render_numbering=False,\n numbers_horizontal_align='center',\n numbers_vertical_align='bottom',\n numbers_font_name='sans-serif', numbers_font_size=10,\n numbers_font_style='normal', numbers_font_weight='normal',\n numbers_font_colour='k', render_axes=True,\n axes_font_name='sans-serif', axes_font_size=10,\n axes_font_style='normal', axes_font_weight='normal',\n axes_x_limits=None, axes_y_limits=None, axes_x_ticks=None,\n axes_y_ticks=None, figure_size=(10, 8),\n label=None, **kwargs):\n r\"\"\"\n Visualization of the TriMesh in 2D. Currently, explicit textured TriMesh\n viewing is not supported, and therefore viewing falls back to untextured\n 2D TriMesh viewing.\n\n Returns\n -------\n figure_id : `object`, optional\n The id of the figure to be used.\n new_figure : `bool`, optional\n If ``True``, a new figure is created.\n image_view : `bool`, optional\n If ``True`` the TexturedTriMesh will be viewed as if it is in the\n image coordinate system.\n render_lines : `bool`, optional\n If ``True``, the edges will be rendered.\n line_colour : See Below, optional\n The colour of the lines.\n Example options::\n\n {r, g, b, c, m, k, w}\n or\n (3, ) ndarray\n\n line_style : ``{-, --, -., :}``, optional\n The style of the lines.\n line_width : `float`, optional\n The width of the lines.\n render_markers : `bool`, optional\n If ``True``, the markers will be rendered.\n marker_style : See Below, optional\n The style of the markers. 
Example options ::\n\n {., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}\n\n marker_size : `int`, optional\n The size of the markers in points.\n marker_face_colour : See Below, optional\n The face (filling) colour of the markers.\n Example options ::\n\n {r, g, b, c, m, k, w}\n or\n (3, ) ndarray\n\n marker_edge_colour : See Below, optional\n The edge colour of the markers.\n Example options ::\n\n {r, g, b, c, m, k, w}\n or\n (3, ) ndarray\n\n marker_edge_width : `float`, optional\n The width of the markers' edge.\n render_numbering : `bool`, optional\n If ``True``, the landmarks will be numbered.\n numbers_horizontal_align : ``{center, right, left}``, optional\n The horizontal alignment of the numbers' texts.\n numbers_vertical_align : ``{center, top, bottom, baseline}``, optional\n The vertical alignment of the numbers' texts.\n numbers_font_name : See Below, optional\n The font of the numbers. Example options ::\n\n {serif, sans-serif, cursive, fantasy, monospace}\n\n numbers_font_size : `int`, optional\n The font size of the numbers.\n numbers_font_style : ``{normal, italic, oblique}``, optional\n The font style of the numbers.\n numbers_font_weight : See Below, optional\n The font weight of the numbers.\n Example options ::\n\n {ultralight, light, normal, regular, book, medium, roman,\n semibold, demibold, demi, bold, heavy, extra bold, black}\n\n numbers_font_colour : See Below, optional\n The font colour of the numbers.\n Example options ::\n\n {r, g, b, c, m, k, w}\n or\n (3, ) ndarray\n\n render_axes : `bool`, optional\n If ``True``, the axes will be rendered.\n axes_font_name : See Below, optional\n The font of the axes.\n Example options ::\n\n {serif, sans-serif, cursive, fantasy, monospace}\n\n axes_font_size : `int`, optional\n The font size of the axes.\n axes_font_style : {``normal``, ``italic``, ``oblique``}, optional\n The font style of the axes.\n axes_font_weight : See Below, optional\n The font weight of the axes.\n Example options ::\n\n {ultralight, light, normal, regular, book, medium, roman,\n semibold, demibold, demi, bold, heavy, extra bold, black}\n\n axes_x_limits : `float` or (`float`, `float`) or ``None``, optional\n The limits of the x axis. If `float`, then it sets padding on the\n right and left of the TriMesh as a percentage of the TriMesh's\n width. If `tuple` or `list`, then it defines the axis limits. If\n ``None``, then the limits are set automatically.\n axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional\n The limits of the y axis. If `float`, then it sets padding on the\n top and bottom of the TriMesh as a percentage of the TriMesh's\n height. If `tuple` or `list`, then it defines the axis limits. 
If\n ``None``, then the limits are set automatically.\n axes_x_ticks : `list` or `tuple` or ``None``, optional\n The ticks of the x axis.\n axes_y_ticks : `list` or `tuple` or ``None``, optional\n The ticks of the y axis.\n figure_size : (`float`, `float`) `tuple` or ``None``, optional\n The size of the figure in inches.\n label : `str`, optional\n The name entry in case of a legend.\n\n Returns\n -------\n viewer : :map:`PointGraphViewer2d`\n The viewer object.\n\n Raises\n ------\n warning\n 2D Viewing of Coloured TriMeshes is not supported, automatically\n falls back to 2D :map:`TriMesh` viewing.\n \"\"\"\n import warnings\n warnings.warn(Warning('2D Viewing of Textured TriMeshes is not '\n 'supported, falling back to TriMesh viewing.'))\n return TriMesh._view_2d(\n self, figure_id=figure_id, new_figure=new_figure,\n image_view=image_view, render_lines=render_lines,\n line_colour=line_colour, line_style=line_style,\n line_width=line_width, render_markers=render_markers,\n marker_style=marker_style, marker_size=marker_size,\n marker_face_colour=marker_face_colour,\n marker_edge_colour=marker_edge_colour,\n marker_edge_width=marker_edge_width,\n render_numbering=render_numbering,\n numbers_horizontal_align=numbers_horizontal_align,\n numbers_vertical_align=numbers_vertical_align,\n numbers_font_name=numbers_font_name,\n numbers_font_size=numbers_font_size,\n numbers_font_style=numbers_font_style,\n numbers_font_weight=numbers_font_weight,\n numbers_font_colour=numbers_font_colour, render_axes=render_axes,\n axes_font_name=axes_font_name, axes_font_size=axes_font_size,\n axes_font_style=axes_font_style, axes_font_weight=axes_font_weight,\n axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,\n axes_x_ticks=axes_x_ticks, axes_y_ticks=axes_y_ticks,\n figure_size=figure_size, label=label)\n\n def __str__(self):\n return '{}\\ntexture_shape: {}, n_texture_channels: {}'.format(\n TriMesh.__str__(self), self.texture.shape, self.texture.n_channels)\n"
]
| [
[
"numpy.all",
"numpy.clip"
]
]
|
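A minimal usage sketch for the depth-image constructor shown in the row above. It assumes menpo is installed; the ramp values and shape are made up for illustration, and the blank default texture comes from the fallback in init_2d_grid.

import numpy as np
from menpo.image import Image
from menpo.shape import TexturedTriMesh

# Synthetic single-channel "depth" image (a smooth ramp).
depth = Image.init_blank((32, 32))
depth.pixels[0] = np.linspace(0, 1, 32 * 32).reshape(32, 32)

# Z values come from the depth image; XY are unit-spaced grid coordinates.
tmesh = TexturedTriMesh.init_from_depth_image(depth)
print(tmesh.n_points, tmesh.texture.shape)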
ciaranmccormick/mm-transcription-server | [
"d7e44756beb703bf24a7a2bfe2cdfeaae8a6b49d"
]
| [
"transcript/models.py"
]
| [
"from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom rest_framework.authtoken.models import Token\nimport numpy as np\nimport numpy.random as nprand\nimport random\n\n\nclass Document(models.Model):\n owner = models.ForeignKey(User)\n filename = models.CharField(max_length=200)\n mp3_filename = models.CharField(max_length=200)\n duration = models.IntegerField()\n date = models.DateField()\n typist = models.CharField(max_length=10)\n\n\nclass Line(models.Model):\n document = models.ForeignKey(Document, related_name='lines')\n line_num = models.IntegerField()\n text = models.CharField(max_length=1000)\n\n\nclass Extract(models.Model):\n document = models.ForeignKey(Document, related_name='extracts')\n context = models.CharField(max_length=3)\n completed = models.BooleanField(default=False)\n flag = models.BooleanField(default=False)\n tag = models.CharField(max_length=512, blank=True)\n\n\nclass ExtractLines(models.Model):\n extract = models.ForeignKey(Extract, related_name='extract_lines')\n line = models.ForeignKey(Line)\n\n\nclass ExtractActors(models.Model):\n extract = models.ForeignKey(Extract)\n app = models.CharField(max_length=256)\n context = models.CharField(max_length=15)\n actor = models.CharField(max_length=256, default=\"\", blank=True)\n conditions = models.CharField(max_length=256, default=\"\", blank=True)\n data_type = models.CharField(max_length=256, default=\"\", blank=True)\n\n\nclass IType(models.Model):\n PERSONAL = 'PR'\n SENSITIVE = 'SN'\n BOTH = \"BO\"\n I_TYPE_CHOICES = (\n (PERSONAL, 'Personal'),\n (SENSITIVE, 'Sensitive'),\n (BOTH, 'Both')\n )\n extract = models.OneToOneField(Extract, related_name='i_type')\n type = models.CharField(max_length=2, choices=I_TYPE_CHOICES)\n\n\nclass IMode(models.Model):\n AUTOMATICS = \"AU\"\n MANUAL = \"MN\"\n I_MODE_CHOICES = (\n (AUTOMATICS, 'Automatic'),\n (MANUAL, \"Manual\")\n )\n extract = models.OneToOneField(Extract, related_name='i_mode')\n mode = models.CharField(max_length=2, choices=I_MODE_CHOICES)\n\n\nclass Purpose(models.Model):\n extract = models.ForeignKey(Extract, related_name='i_purpose')\n purpose = models.CharField(max_length=300)\n\n\nclass RoleRelationship(models.Model):\n extract = models.ForeignKey(Extract, related_name='relationships')\n relationship = models.CharField(max_length=300)\n\n\nclass RoleExpectation(models.Model):\n extract = models.ForeignKey(Extract, related_name='expectations')\n expectation = models.CharField(max_length=300)\n\n\nclass PlaceLocation(models.Model):\n extract = models.ForeignKey(Extract, related_name='locations')\n location = models.CharField(max_length=300)\n\n\nclass PlaceNorm(models.Model):\n extract = models.ForeignKey(Extract, related_name='norms')\n norm = models.CharField(max_length=300)\n\n\nclass IAttrRef(models.Model):\n name = models.CharField(max_length=100, unique=True)\n description = models.CharField(max_length=512)\n label = models.CharField(max_length=100)\n\n\nclass IAttr(models.Model):\n attr = models.ForeignKey(IAttrRef)\n extract = models.ForeignKey(Extract, related_name='i_attrs')\n isAttr = models.BooleanField(default=False)\n\n\nclass InformationFlow(models.Model):\n SENDER_SUB = 'SS'\n SENDER_REC = 'SR'\n THIRD_PARTY = 'TP'\n FEEDBACK = 'FB'\n ALL = 'AL'\n NO_FLOW = 'NF'\n\n INFORMATION_FLOW_CHOICES = (\n (SENDER_SUB, 'Sender-Subject'),\n (SENDER_REC, 'Sender-Receiver'),\n (THIRD_PARTY, 'Third-Party'),\n 
(FEEDBACK, 'Feedback'),\n (ALL, 'All'),\n (NO_FLOW, 'No-Flow')\n )\n extract = models.OneToOneField(Extract, related_name='info_flow')\n flow = models.CharField(max_length=2, choices=INFORMATION_FLOW_CHOICES)\n\n\nclass Recode(models.Model):\n recoder = models.ForeignKey(User)\n\n\nclass RecodeExtract(models.Model):\n extract = models.ForeignKey(Extract)\n recode = models.ForeignKey(Recode)\n recode_context = models.CharField(max_length=3)\n\n\ndef random_extracts(user):\n queryset = Extract.objects.all()\n\n # Filter only fake extracts\n fake_extracts = list(queryset.filter(document__filename='dummy'))\n # get 10 fake extracts\n rand_fake_extracts = random.sample(fake_extracts, 10)\n\n # exclude fake extracts\n queryset = queryset.exclude(document__filename='dummy')\n\n # Get total count\n all_extract_count = queryset.count()\n\n # 10% of count\n ten_percent_count = int(np.ceil(all_extract_count * 0.1))\n\n # exclude the users own extracts\n real_extracts = list(queryset.exclude(document__owner=user))\n # select random sample of 10% of real extracts\n real_extracts_sample = random.sample(real_extracts, ten_percent_count)\n\n all_extracts = rand_fake_extracts + real_extracts_sample\n\n # randomly shuffle extracts\n random.shuffle(all_extracts)\n\n return all_extracts\n\n\n@receiver(post_save, sender=Recode)\ndef add_extracts_to_recode(sender, instance=None, created=False, **kwargs):\n if created:\n extracts = random_extracts(instance.recoder)\n for extract in extracts:\n RecodeExtract.objects.create(extract=extract, recode=instance,\n recode_context=\"noc\")\n\n\n@receiver(post_save, sender=IAttrRef)\ndef add_attr_to_extract(sender, instance=None, created=False, **kwargs):\n if created:\n extracts = Extract.objects.all()\n for extract in extracts:\n IAttr.objects.create(attr=instance, extract=extract)\n\n\n@receiver(post_save, sender=Extract)\ndef create_attrs(sender, instance=None, created=False, **kwargs):\n if created:\n attrs = IAttrRef.objects.all()\n for attr in attrs:\n IAttr.objects.create(attr=attr, extract=instance)\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)\n"
]
| [
[
"numpy.ceil"
]
]
|
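The sampling arithmetic in random_extracts above (10 dummy extracts plus a ceil-rounded 10% of the real ones) can be checked in isolation; the toy counts below are illustrative, not from the project.

import random
import numpy as np

real_extracts = list(range(47))                              # stand-in for the queryset
ten_percent_count = int(np.ceil(len(real_extracts) * 0.1))   # ceil(4.7) -> 5
picked = random.sample(real_extracts, ten_percent_count)
assert len(picked) == 5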
jerinka/LSTM_Video_classifier | [
"46dd9f8bf6f4cc00619a4ee7108fd5d0e3896269"
]
| [
"getdata.py"
]
| [
"import numpy as np\nimport random\n\n\n# generate the next frame in the sequence\ndef next_frame(last_step, last_frame, column):\n # define the scope of the next step\n lower = max(0, last_step-1)\n upper = min(last_frame.shape[0]-1, last_step+1)\n # choose the row index for the next step\n step = random.randint(lower, upper)\n # copy the prior frame\n frame = last_frame.copy()\n # add the new step\n frame[step, column] = 1\n return frame, step\n \n# generate a sequence of frames of a dot moving across an image\ndef build_frames(timesteps,width,height, ch=1):\n frames = list()\n # create the first frame\n frame = np.zeros((width,height))\n step = random.randint(0, timesteps-1)\n # decide if we are heading left or right\n right = 1 if random.random() < 0.5 else 0\n col = 0 if right else timesteps-1\n frame[step, col] = 1\n frames.append(frame)\n # create all remaining frames\n for i in range(1, timesteps):\n col = i if right else timesteps-1-i\n frame, step = next_frame(step, frame, col)\n frames.append(frame)\n return frames, right\n \n# generate multiple sequences of frames and reshape for network input\ndef generate_examples(n_patterns,timesteps,width,height,channels):\n X, y = list(), list()\n for _ in range(n_patterns):\n frames, right = build_frames(timesteps,width,height, ch=channels)\n X.append(frames)\n y.append(right)\n import pdb;pdb.set_trace()\n # resize as [samples, timesteps, width, height, channels]\n X = np.array(X).reshape(n_patterns, timesteps,width,height, channels)\n y = np.array(y).reshape(n_patterns, 1)\n return X, y\n \ntimesteps =5\nwidth=100\nheight=100\nchannels=1\nsamples = 50\n\nX, y = generate_examples(samples, timesteps,width,height,channels)\n\nprint('X',X.shape())\nprint('y',y.shape())\n\n\n"
]
| [
[
"numpy.array",
"numpy.zeros"
]
]
|
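generate_examples above returns X shaped (samples, timesteps, width, height, channels) and a binary left/right label; a CNN-LSTM consumer for that shape is sketched below. The architecture is illustrative (it is not part of this repo) and assumes TensorFlow/Keras is available.

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, Dense, Flatten, LSTM,
                                     MaxPooling2D, TimeDistributed)

model = Sequential([
    # Apply the same small CNN to every frame in the sequence.
    TimeDistributed(Conv2D(2, (2, 2), activation='relu'),
                    input_shape=(5, 100, 100, 1)),
    TimeDistributed(MaxPooling2D((2, 2))),
    TimeDistributed(Flatten()),
    LSTM(50),                        # aggregate the per-frame features
    Dense(1, activation='sigmoid'),  # predict the left/right label
])
model.compile(loss='binary_crossentropy', optimizer='adam')
# model.fit(X, y, epochs=1)  # X, y as produced by generate_examples above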
hoangdzung/pygcn | [
"71276e6361e8f004c3d221e953730de08a414e1f"
]
| [
"pygcn/classify.py"
]
| [
"from __future__ import print_function\nimport os\nfrom sklearn.linear_model import LogisticRegression\nfrom collections import defaultdict\nimport pdb\nimport numpy\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom time import time\n\nclass TopKRanker(OneVsRestClassifier):\n def predict(self, X, top_k_list):\n probs = numpy.asarray(super(TopKRanker, self).predict_proba(X))\n all_labels = []\n for i, k in enumerate(top_k_list):\n probs_ = probs[i, :]\n labels = self.classes_[probs_.argsort()[-k:]].tolist()\n probs_[:] = 0\n probs_[labels] = 1\n all_labels.append(probs_)\n return numpy.asarray(all_labels)\n\n\nclass Classifier(object):\n\n def __init__(self, vectors, clf):\n self.embeddings = vectors\n self.clf = TopKRanker(clf)\n self.binarizer = MultiLabelBinarizer(sparse_output=True)\n\n def train(self, X, Y, Y_all):\n self.binarizer.fit(Y_all)\n X_train = [self.embeddings[x] for x in X]\n Y = self.binarizer.transform(Y)\n self.clf.fit(X_train, Y)\n\n def evaluate(self, X, Y):\n top_k_list = [len(l) for l in Y]\n Y_ = self.predict(X, top_k_list)\n Y = self.binarizer.transform(Y)\n averages = [\"micro\", \"macro\", \"samples\", \"weighted\", \"acc\"]\n results = {}\n for average in averages:\n if average == \"acc\":\n results[average] = accuracy_score(Y, Y_)\n else:\n results[average] = f1_score(Y, Y_, average=average)\n # print('Results, using embeddings of dimensionality', len(self.embeddings[X[0]]))\n # print('-------------------')\n # print(results)\n return results\n # print('-------------------')\n\n def predict(self, X, top_k_list):\n X_ = numpy.asarray([self.embeddings[x] for x in X])\n Y = self.clf.predict(X_, top_k_list=top_k_list)\n return Y\n\n def split_train_evaluate(self, X, Y, train_precent, seed=0):\n state = numpy.random.get_state()\n\n training_size = int(train_precent * len(X))\n numpy.random.seed(seed)\n shuffle_indices = numpy.random.permutation(numpy.arange(len(X)))\n X_train = [X[shuffle_indices[i]] for i in range(training_size)]\n Y_train = [Y[shuffle_indices[i]] for i in range(training_size)]\n X_test = [X[shuffle_indices[i]] for i in range(training_size, len(X))]\n Y_test = [Y[shuffle_indices[i]] for i in range(training_size, len(X))]\n\n self.train(X_train, Y_train, Y)\n numpy.random.set_state(state)\n return self.evaluate(X_test, Y_test)\n\ndef classify(embedding, Y, train_percent):\n vectors = {}\n for i in range(embedding.shape[0]):\n vectors[str(i)] = embedding[i]\n\n print(\"Training classifier using {:.2f}% nodes...\".format(train_percent * 100))\n clf = Classifier(vectors=vectors, clf=LogisticRegression(solver=\"lbfgs\", max_iter=4000))\n scores = clf.split_train_evaluate([str(i) for i in range(embedding.shape[0])], [[str(i)] for i in Y], train_percent)\n return scores\n"
]
| [
[
"numpy.asarray",
"numpy.random.seed",
"sklearn.metrics.f1_score",
"sklearn.metrics.accuracy_score",
"sklearn.linear_model.LogisticRegression",
"numpy.random.get_state",
"sklearn.preprocessing.MultiLabelBinarizer",
"numpy.random.set_state"
]
]
|
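A hedged usage sketch of the classify helper above, with a random (n_nodes, dim) embedding and integer labels standing in for real data.

import numpy as np
# from pygcn.classify import classify  # import path assumed from the listing

n_nodes, dim = 100, 16
embedding = np.random.rand(n_nodes, dim)
Y = np.random.randint(0, 3, size=n_nodes)   # three toy classes

scores = classify(embedding, Y, train_percent=0.5)
print(scores)  # micro/macro/samples/weighted F1 plus accuracy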
QuTech-Delft/qilib | [
"a87892f8a9977ed338c36e8fb1e262b47449cf44"
]
| [
"src/tests/unittests/data_set/test_mongo_data_set_io.py"
]
| [
"import unittest\nfrom collections import namedtuple\nfrom unittest.mock import patch, MagicMock, call\n\nimport numpy as np\nfrom bson import ObjectId\nfrom pymongo.errors import DuplicateKeyError\n\nfrom qilib.data_set import MongoDataSetIO\nfrom qilib.data_set.mongo_data_set_io import DocumentNotFoundError, FieldNotUniqueError\n\n\nclass TestMongoDataSetIO(unittest.TestCase):\n def test_constructor_only_name(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}) as mongo_client:\n mock_mongo_client.find_one.return_value = {'_id': ObjectId('5c9a3457e3306c41f7ae1f3e'),\n 'name': 'test_data_set'}\n mock_mongo_client.insert_one.return_value.inserted_id = ObjectId('5c9a3457e3306c41f7ae1f3e')\n\n mongo_data_set_io = MongoDataSetIO(name='test_data_set')\n\n mongo_client.assert_called_once()\n mock_mongo_client.find_one.assert_called_once_with({'name': 'test_data_set'})\n self.assertEqual(mongo_data_set_io.name, 'test_data_set')\n self.assertEqual(mongo_data_set_io.id, '5c9a3457e3306c41f7ae1f3e')\n\n def test_set_unique_field_fails(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}):\n mock_mongo_client.index_information.return_value = {'name_1': {'unique': False}}\n error = FieldNotUniqueError, \"Field 'name' is not unique in database.\"\n self.assertRaisesRegex(*error, MongoDataSetIO, name='test_data_set')\n\n mock_mongo_client.create_index.side_effect = DuplicateKeyError(\"\")\n error = FieldNotUniqueError, \"Failed to set field 'name' unique.\"\n self.assertRaisesRegex(*error, MongoDataSetIO, name='test_data_set')\n\n def test_constructor_name_not_found(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}) as mongo_client:\n insert_one = namedtuple('insert_one', 'inserted_id')\n insert_one.inserted_id = ObjectId('5c9a3457e3306c41f7ae1f3e')\n mock_mongo_client.find_one.return_value = None\n mock_mongo_client.insert_one.return_value = insert_one\n\n mongo_data_set_io = MongoDataSetIO(name='test_data_set')\n\n mongo_client.assert_called_once()\n mock_mongo_client.find_one.assert_called_once_with({'name': 'test_data_set'})\n self.assertEqual(mongo_data_set_io.name, 'test_data_set')\n self.assertEqual(mongo_data_set_io.id, '5c9a3457e3306c41f7ae1f3e')\n\n def test_constructor_name_not_found_raises_error(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}):\n mock_mongo_client.find_one.return_value = None\n\n error = DocumentNotFoundError, \"Document not found in database.\"\n self.assertRaisesRegex(*error, MongoDataSetIO, name='test_data_set', create_if_not_found=False)\n mock_mongo_client.find_one.assert_called_once_with({'name': 'test_data_set'})\n\n def test_constructor_only_id(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}) as mongo_client:\n mock_mongo_client.find_one.return_value = {'_id': ObjectId('5c9a3457e3306c41f7ae1f3e'),\n 'name': 'test_data_set'}\n mongo_data_set_io = MongoDataSetIO(document_id='5c9a3457e3306c41f7ae1f3e')\n\n mongo_client.assert_called_once()\n mock_mongo_client.find_one.assert_called_once_with({'_id': 
ObjectId('5c9a3457e3306c41f7ae1f3e')})\n self.assertEqual(mongo_data_set_io.name, 'test_data_set')\n self.assertEqual(mongo_data_set_io.id, '5c9a3457e3306c41f7ae1f3e')\n\n def test_constructor_id_not_found(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}):\n mock_mongo_client.find_one.return_value = None\n\n error = DocumentNotFoundError, \"Document not found in database.\"\n self.assertRaisesRegex(*error, MongoDataSetIO, document_id='5c9a3457e3306c41f7ae1f3e')\n mock_mongo_client.find_one.assert_called_once_with({'_id': ObjectId('5c9a3457e3306c41f7ae1f3e')})\n\n def test_constructor_name_and_id(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}) as mongo_client:\n mock_mongo_client.find_one.return_value = {'_id': ObjectId('5c9a3457e3306c41f7ae1f3e'),\n 'name': 'test_data_set'}\n mock_mongo_client.insert_one.return_value.inserted_id = ObjectId('5c9a3457e3306c41f7ae1f3e')\n\n mongo_data_set_io = MongoDataSetIO(name='test_data_set', document_id='5c9a3457e3306c41f7ae1f3e')\n\n mongo_client.assert_called_once()\n mock_mongo_client.find_one.assert_called_once_with(\n {'_id': ObjectId('5c9a3457e3306c41f7ae1f3e'), 'name': 'test_data_set'})\n self.assertEqual(mongo_data_set_io.name, 'test_data_set')\n self.assertEqual(mongo_data_set_io.id, '5c9a3457e3306c41f7ae1f3e')\n\n def test_constructor_name_and_id_not_found(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}):\n mock_mongo_client.find_one.return_value = None\n\n error = DocumentNotFoundError, \"Document not found in database.\"\n self.assertRaisesRegex(*error, MongoDataSetIO, name='test_data_set', document_id='5c9a3457e3306c41f7ae1f3e')\n mock_mongo_client.find_one.assert_called_once_with(\n {'_id': ObjectId('5c9a3457e3306c41f7ae1f3e'), 'name': 'test_data_set'})\n\n def test_constructor_no_name_no_document_id(self):\n error = (DocumentNotFoundError, \"Neither 'name' nor 'document_id' were provided.\")\n self.assertRaisesRegex(*error, MongoDataSetIO)\n\n def test_watch(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}):\n mock_mongo_client.find_one.return_value = {'_id': ObjectId('5c9a3457e3306c41f7ae1f3e'),\n 'name': 'test_data_set'}\n mock_mongo_client.watch.return_value = 'Watching'\n mongo_data_set_io = MongoDataSetIO(name='test_data_set')\n watcher = mongo_data_set_io.watch()\n pipeline = [{'$match': {'fullDocument.name': 'test_data_set'}}]\n mock_mongo_client.watch.assert_called_with(pipeline=pipeline, full_document='updateLookup')\n self.assertEqual('Watching', watcher)\n\n def test_get_document(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}):\n db_document = {'_id': ObjectId('5c9a3457e3306c41f7ae1f3e'),\n 'name': 'test_data_set'}\n mock_mongo_client.find_one.return_value = db_document\n mongo_data_set_io = MongoDataSetIO(name='test_data_set')\n document = mongo_data_set_io.get_document()\n self.assertDictEqual(db_document, document)\n\n def test_finalize(self):\n with patch('qilib.data_set.mongo_data_set_io.MongoClient') as mock_client:\n mongo_data_set_io = 
MongoDataSetIO(name='test_data_set')\n mongo_data_set_io.finalize()\n mock_client.assert_has_calls([call().close()])\n\n def test_append_to_document(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}):\n mongo_data_set_io = MongoDataSetIO(name='test_data_set')\n mongo_data_set_io.append_to_document({'metadata.label': 'test_data'})\n # assert_called_once_with actually verifies the call; the bare\n # called_once_with was a silent no-op on a MagicMock\n mock_mongo_client.update_one.assert_called_once_with({'name': 'test_data_set'},\n {'$push': {'metadata.label': 'test_data'},\n \"$currentDate\": {\"lastModified\": True}})\n\n def test_update_document(self):\n mock_mongo_client = MagicMock()\n with patch('qilib.data_set.mongo_data_set_io.MongoClient',\n return_value={'qilib': {'data_sets': mock_mongo_client}}):\n mongo_data_set_io = MongoDataSetIO(name='test_data_set')\n mongo_data_set_io.update_document({'array_updates': ('(2,2)', {'test': 5})})\n mock_mongo_client.update_one.assert_called_once_with({'name': 'test_data_set'},\n {'$set': {'array_updates': ('(2,2)', {'test': 5})},\n \"$currentDate\": {\"lastModified\": True}})\n\n def test_encode_decode(self):\n d_type = np.float64\n shape = (3, 3)\n array = np.ndarray(shape=shape, dtype=d_type)\n for i in range(3):\n for j in range(3):\n array[i][j] = i\n encoded_array = MongoDataSetIO.encode_numpy_array(array)\n decoded_array = MongoDataSetIO.decode_numpy_array(encoded_array)\n for row in range(3):\n self.assertListEqual([row, row, row], list(decoded_array[row]))\n self.assertIsInstance(decoded_array, np.ndarray)\n self.assertTupleEqual(shape, decoded_array.shape)\n self.assertTrue(np.issubdtype(decoded_array.dtype, d_type))\n"
]
| [
[
"numpy.ndarray",
"numpy.issubdtype"
]
]
|
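The array round-trip exercised by test_encode_decode can be sketched without a MongoDB instance, since the test calls the two helpers as plain static methods; whether other MongoDataSetIO functionality needs a live server is not shown here.

import numpy as np
from qilib.data_set import MongoDataSetIO

array = np.arange(9, dtype=np.float64).reshape(3, 3)
encoded = MongoDataSetIO.encode_numpy_array(array)   # BSON-storable form
decoded = MongoDataSetIO.decode_numpy_array(encoded)
assert np.array_equal(array, decoded) and decoded.dtype == np.float64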
gayatrichitturi/-Building-Detection- | [
"aaa14983cb7e03939d6736605a7cb13c08ab7f29"
]
| [
"utils.py"
]
| [
"from sklearn.metrics import jaccard_similarity_score as iou\nimport numpy as np\n\ndef color_map(N=256, normalized=False):\n \"\"\"\n Python implementation of the color map function for the PASCAL VOC data set\n source: https://gist.github.com/wllhf/a4533e0adebe57e3ed06d4b50c8419ae\n \"\"\"\n def bitget(byteval, idx):\n return ((byteval & (1 << idx)) != 0)\n\n dtype = 'float32' if normalized else 'uint8'\n cmap = np.zeros((N, 3), dtype=dtype)\n for i in range(N):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7-j)\n g = g | (bitget(c, 1) << 7-j)\n b = b | (bitget(c, 2) << 7-j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n cmap = cmap/255 if normalized else cmap\n return cmap\n\ndef multi_iou(a,b):\n jk = 0.0\n vals = np.unique(a)[1:]\n for v in vals:\n ac = a.copy()\n ac[ac != v] = 0\n bc = b.copy()\n bc[bc != v] = 0\n jk += iou(ac,bc)\n return jk/len(vals)"
]
| [
[
"sklearn.metrics.jaccard_similarity_score",
"numpy.array",
"numpy.zeros",
"numpy.unique"
]
]
|
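As flagged in the NOTE above, jaccard_similarity_score is gone from modern scikit-learn. A per-class variant of multi_iou built on jaccard_score is sketched below; it binarises each label before scoring, so it approximates rather than exactly reproduces the removed function's multiclass behaviour.

import numpy as np
from sklearn.metrics import jaccard_score

def multi_iou_modern(a, b):
    # Mean IoU over the non-background label values present in `a`.
    vals = np.unique(a)[1:]
    return np.mean([jaccard_score(a.ravel() == v, b.ravel() == v)
                    for v in vals])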
prestigeworldwide7/Create-Your-Own-Image-Classifier-Project | [
"7b794e5416216ff25aa790710b70d0ee173e94e8"
]
| [
"predict.py"
]
| [
"import argparse\nimport torch\nfrom torch.autograd import Variable\nfrom torchvision import transforms, models\nimport torch.nn.functional as F\nimport numpy as np\nfrom PIL import Image\nimport json\nimport os\nimport random\nfrom utils import load_checkpoint, load_cat_names\n\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('checkpoint', action='store', default='checkpoint.pth')\n parser.add_argument('--top_k', dest='top_k', default='3')\n parser.add_argument('--filepath', dest='filepath', default='flowers/test/1/image_06743.jpg') # use a deafault filepath to a primrose image \n parser.add_argument('--category_names', dest='category_names', default='cat_to_name.json')\n parser.add_argument('--gpu', action='store', default='gpu')\n return parser.parse_args()\n\ndef process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n \n # TODO: Process a PIL image for use in a PyTorch model\n \n img_pil = Image.open(image) # use Image\n \n adjustments = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n \n image = adjustments(img_pil)\n \n return image\n\ndef predict(image_path, model, topk=3, gpu='gpu'):\n ''' Get probability values (indeces) and respective flower classes. \n '''\n \n # TODO: Implement the code to predict the class from an image file\n if gpu == 'gpu':\n model = model.cuda()\n else:\n model = model.cpu()\n \n img_torch = process_image(image_path)\n img_torch = img_torch.unsqueeze_(0)\n img_torch = img_torch.float()\n\n if gpu == 'gpu':\n with torch.no_grad():\n output = model.forward(img_torch.cuda())\n else:\n with torch.no_grad():\n output=model.forward(img_torch)\n \n probability = F.softmax(output.data,dim=1) # use F \n \n probs = np.array(probability.topk(topk)[0][0])\n \n index_to_class = {val: key for key, val in model.class_to_idx.items()} # from reviewer advice\n top_classes = [np.int(index_to_class[each]) for each in np.array(probability.topk(topk)[1][0])]\n \n return probs, top_classes\n\ndef main(): \n args = parse_args()\n gpu = args.gpu\n model = load_checkpoint(args.checkpoint)\n cat_to_name = load_cat_names(args.category_names)\n \n img_path = args.filepath\n probs, classes = predict(img_path, model, int(args.top_k), gpu)\n labels = [cat_to_name[str(index)] for index in classes]\n probability = probs\n print('File selected: ' + img_path)\n \n print(labels)\n print(probability)\n \n i=0 # this prints out top k classes and probs as according to user \n while i < len(labels):\n print(\"{} with a probability of {}\".format(labels[i], probability[i]))\n i += 1 # cycle through\n\nif __name__ == \"__main__\":\n main()"
]
| [
[
"numpy.int",
"torch.no_grad",
"torch.nn.functional.softmax"
]
]
|
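The top-k handling in predict above, in isolation: softmax probabilities are ranked with Tensor.topk and the winning indices are mapped back through an inverted class_to_idx dictionary (the mapping below is toy data).

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])
probability = F.softmax(logits, dim=1)
probs, idx = probability.topk(3)

class_to_idx = {'1': 0, '10': 1, '100': 2, '101': 3}  # toy stand-in
index_to_class = {v: k for k, v in class_to_idx.items()}
top_classes = [int(index_to_class[i]) for i in idx[0].tolist()]
print(probs[0].tolist(), top_classes)  # three probs and [1, 100, 10]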
kokkiemouse/ml-agents | [
"dd0c901f2391c650b55c7f7d98561d97a2c5e710"
]
| [
"ml-agents/mlagents/trainers/tests/test_simple_rl.py"
]
| [
"import math\nimport tempfile\nimport pytest\nimport yaml\nimport numpy as np\nfrom typing import Dict, Any\n\nfrom mlagents.trainers.tests.simple_test_envs import (\n SimpleEnvironment,\n MemoryEnvironment,\n RecordEnvironment,\n)\nfrom mlagents.trainers.trainer_controller import TrainerController\nfrom mlagents.trainers.trainer_util import TrainerFactory\nfrom mlagents.trainers.simple_env_manager import SimpleEnvManager\nfrom mlagents.trainers.sampler_class import SamplerManager\nfrom mlagents.trainers.demo_loader import write_demo\nfrom mlagents.trainers.stats import StatsReporter, StatsWriter, StatsSummary\nfrom mlagents_envs.side_channel.float_properties_channel import FloatPropertiesChannel\nfrom mlagents_envs.communicator_objects.demonstration_meta_pb2 import (\n DemonstrationMetaProto,\n)\nfrom mlagents_envs.communicator_objects.brain_parameters_pb2 import BrainParametersProto\nfrom mlagents_envs.communicator_objects.space_type_pb2 import discrete, continuous\n\nBRAIN_NAME = \"1D\"\n\nPPO_CONFIG = f\"\"\"\n {BRAIN_NAME}:\n trainer: ppo\n batch_size: 16\n beta: 5.0e-3\n buffer_size: 64\n epsilon: 0.2\n hidden_units: 32\n lambd: 0.95\n learning_rate: 5.0e-3\n learning_rate_schedule: constant\n max_steps: 3000\n memory_size: 16\n normalize: false\n num_epoch: 3\n num_layers: 1\n time_horizon: 64\n sequence_length: 64\n summary_freq: 500\n use_recurrent: false\n reward_signals:\n extrinsic:\n strength: 1.0\n gamma: 0.99\n \"\"\"\n\nSAC_CONFIG = f\"\"\"\n {BRAIN_NAME}:\n trainer: sac\n batch_size: 8\n buffer_size: 500\n buffer_init_steps: 100\n hidden_units: 16\n init_entcoef: 0.01\n learning_rate: 5.0e-3\n max_steps: 1000\n memory_size: 16\n normalize: false\n num_update: 1\n train_interval: 1\n num_layers: 1\n time_horizon: 64\n sequence_length: 32\n summary_freq: 100\n tau: 0.01\n use_recurrent: false\n curiosity_enc_size: 128\n demo_path: None\n vis_encode_type: simple\n reward_signals:\n extrinsic:\n strength: 1.0\n gamma: 0.99\n \"\"\"\n\n\ndef generate_config(\n config: str, override_vals: Dict[str, Any] = None\n) -> Dict[str, Any]:\n trainer_config = yaml.safe_load(config)\n if override_vals is not None:\n trainer_config[BRAIN_NAME].update(override_vals)\n return trainer_config\n\n\n# The reward processor is passed as an argument to _check_environment_trains.\n# It is applied to the list pf all final rewards for each brain individually.\n# This is so that we can process all final rewards in different ways for different algorithms.\n# Custom reward processors shuld be built within the test function and passed to _check_environment_trains\n# Default is average over the last 5 final rewards\ndef default_reward_processor(rewards, last_n_rewards=5):\n rewards_to_use = rewards[-last_n_rewards:]\n # For debugging tests\n print(\"Last {} rewards:\".format(last_n_rewards), rewards_to_use)\n return np.array(rewards[-last_n_rewards:], dtype=np.float32).mean()\n\n\nclass DebugWriter(StatsWriter):\n \"\"\"\n Print to stdout so stats can be viewed in pytest\n \"\"\"\n\n def __init__(self):\n self._last_reward_summary: Dict[str, float] = {}\n\n def get_last_rewards(self):\n return self._last_reward_summary\n\n def write_stats(\n self, category: str, values: Dict[str, StatsSummary], step: int\n ) -> None:\n for val, stats_summary in values.items():\n if val == \"Environment/Cumulative Reward\":\n print(step, val, stats_summary.mean)\n self._last_reward_summary[category] = stats_summary.mean\n\n\ndef _check_environment_trains(\n env,\n trainer_config,\n 
reward_processor=default_reward_processor,\n meta_curriculum=None,\n success_threshold=0.9,\n env_manager=None,\n):\n # Create controller and begin training.\n with tempfile.TemporaryDirectory() as dir:\n run_id = \"id\"\n save_freq = 99999\n seed = 1337\n StatsReporter.writers.clear() # Clear StatsReporters so we don't write to file\n debug_writer = DebugWriter()\n StatsReporter.add_writer(debug_writer)\n if env_manager is None:\n env_manager = SimpleEnvManager(env, FloatPropertiesChannel())\n trainer_factory = TrainerFactory(\n trainer_config=trainer_config,\n summaries_dir=dir,\n run_id=run_id,\n model_path=dir,\n keep_checkpoints=1,\n train_model=True,\n load_model=False,\n seed=seed,\n meta_curriculum=meta_curriculum,\n multi_gpu=False,\n )\n\n tc = TrainerController(\n trainer_factory=trainer_factory,\n summaries_dir=dir,\n model_path=dir,\n run_id=run_id,\n meta_curriculum=meta_curriculum,\n train=True,\n training_seed=seed,\n sampler_manager=SamplerManager(None),\n resampling_interval=None,\n save_freq=save_freq,\n )\n\n # Begin training\n tc.start_learning(env_manager)\n if (\n success_threshold is not None\n ): # For tests where we are just checking setup and not reward\n processed_rewards = [\n reward_processor(rewards) for rewards in env.final_rewards.values()\n ]\n assert all(not math.isnan(reward) for reward in processed_rewards)\n assert all(reward > success_threshold for reward in processed_rewards)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_simple_ppo(use_discrete):\n env = SimpleEnvironment([BRAIN_NAME], use_discrete=use_discrete)\n config = generate_config(PPO_CONFIG)\n _check_environment_trains(env, config)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_2d_ppo(use_discrete):\n env = SimpleEnvironment(\n [BRAIN_NAME], use_discrete=use_discrete, action_size=2, step_size=0.5\n )\n config = generate_config(PPO_CONFIG)\n _check_environment_trains(env, config)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\n@pytest.mark.parametrize(\"num_visual\", [1, 2])\ndef test_visual_ppo(num_visual, use_discrete):\n env = SimpleEnvironment(\n [BRAIN_NAME],\n use_discrete=use_discrete,\n num_visual=num_visual,\n num_vector=0,\n step_size=0.2,\n )\n override_vals = {\"learning_rate\": 3.0e-4}\n config = generate_config(PPO_CONFIG, override_vals)\n _check_environment_trains(env, config)\n\n\n@pytest.mark.parametrize(\"num_visual\", [1, 2])\n@pytest.mark.parametrize(\"vis_encode_type\", [\"resnet\", \"nature_cnn\"])\ndef test_visual_advanced_ppo(vis_encode_type, num_visual):\n env = SimpleEnvironment(\n [BRAIN_NAME],\n use_discrete=True,\n num_visual=num_visual,\n num_vector=0,\n step_size=0.5,\n vis_obs_size=(36, 36, 3),\n )\n override_vals = {\n \"learning_rate\": 3.0e-4,\n \"vis_encode_type\": vis_encode_type,\n \"max_steps\": 500,\n \"summary_freq\": 100,\n }\n config = generate_config(PPO_CONFIG, override_vals)\n # The number of steps is pretty small for these encoders\n _check_environment_trains(env, config, success_threshold=0.5)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_recurrent_ppo(use_discrete):\n env = MemoryEnvironment([BRAIN_NAME], use_discrete=use_discrete)\n override_vals = {\n \"max_steps\": 5000,\n \"batch_size\": 64,\n \"buffer_size\": 128,\n \"learning_rate\": 1e-3,\n \"use_recurrent\": True,\n }\n config = generate_config(PPO_CONFIG, override_vals)\n _check_environment_trains(env, config, success_threshold=0.9)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_simple_sac(use_discrete):\n env = 
SimpleEnvironment([BRAIN_NAME], use_discrete=use_discrete)\n config = generate_config(SAC_CONFIG)\n _check_environment_trains(env, config)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_2d_sac(use_discrete):\n env = SimpleEnvironment(\n [BRAIN_NAME], use_discrete=use_discrete, action_size=2, step_size=0.8\n )\n override_vals = {\"buffer_init_steps\": 2000, \"max_steps\": 10000}\n config = generate_config(SAC_CONFIG, override_vals)\n _check_environment_trains(env, config, success_threshold=0.8)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\n@pytest.mark.parametrize(\"num_visual\", [1, 2])\ndef test_visual_sac(num_visual, use_discrete):\n env = SimpleEnvironment(\n [BRAIN_NAME],\n use_discrete=use_discrete,\n num_visual=num_visual,\n num_vector=0,\n step_size=0.2,\n )\n override_vals = {\"batch_size\": 16, \"learning_rate\": 3e-4}\n config = generate_config(SAC_CONFIG, override_vals)\n _check_environment_trains(env, config)\n\n\n@pytest.mark.parametrize(\"num_visual\", [1, 2])\n@pytest.mark.parametrize(\"vis_encode_type\", [\"resnet\", \"nature_cnn\"])\ndef test_visual_advanced_sac(vis_encode_type, num_visual):\n env = SimpleEnvironment(\n [BRAIN_NAME],\n use_discrete=True,\n num_visual=num_visual,\n num_vector=0,\n step_size=0.5,\n vis_obs_size=(36, 36, 3),\n )\n override_vals = {\n \"batch_size\": 16,\n \"learning_rate\": 3.0e-4,\n \"vis_encode_type\": vis_encode_type,\n \"buffer_init_steps\": 0,\n \"max_steps\": 100,\n }\n config = generate_config(SAC_CONFIG, override_vals)\n # The number of steps is pretty small for these encoders\n _check_environment_trains(env, config, success_threshold=0.5)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_recurrent_sac(use_discrete):\n env = MemoryEnvironment([BRAIN_NAME], use_discrete=use_discrete)\n override_vals = {\n \"batch_size\": 64,\n \"use_recurrent\": True,\n \"max_steps\": 3000,\n \"learning_rate\": 1e-3,\n \"buffer_init_steps\": 500,\n }\n config = generate_config(SAC_CONFIG, override_vals)\n _check_environment_trains(env, config)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_simple_ghost(use_discrete):\n env = SimpleEnvironment(\n [BRAIN_NAME + \"?team=0\", BRAIN_NAME + \"?team=1\"], use_discrete=use_discrete\n )\n override_vals = {\n \"max_steps\": 2500,\n \"self_play\": {\n \"play_against_latest_model_ratio\": 1.0,\n \"save_steps\": 2000,\n \"swap_steps\": 2000,\n },\n }\n config = generate_config(PPO_CONFIG, override_vals)\n _check_environment_trains(env, config)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_simple_ghost_fails(use_discrete):\n env = SimpleEnvironment(\n [BRAIN_NAME + \"?team=0\", BRAIN_NAME + \"?team=1\"], use_discrete=use_discrete\n )\n # This config should fail because the ghosted policy is never swapped with a competent policy.\n # Swap occurs after max step is reached.\n override_vals = {\n \"max_steps\": 2500,\n \"self_play\": {\n \"play_against_latest_model_ratio\": 1.0,\n \"save_steps\": 2000,\n \"swap_steps\": 4000,\n },\n }\n config = generate_config(PPO_CONFIG, override_vals)\n _check_environment_trains(env, config, success_threshold=None)\n processed_rewards = [\n default_reward_processor(rewards) for rewards in env.final_rewards.values()\n ]\n success_threshold = 0.9\n assert any(reward > success_threshold for reward in processed_rewards) and any(\n reward < success_threshold for reward in processed_rewards\n )\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_simple_asymm_ghost(use_discrete):\n # Make opponent for asymmetric case\n 
brain_name_opp = BRAIN_NAME + \"Opp\"\n env = SimpleEnvironment(\n [BRAIN_NAME + \"?team=0\", brain_name_opp + \"?team=1\"], use_discrete=use_discrete\n )\n override_vals = {\n \"max_steps\": 2000,\n \"self_play\": {\n \"play_against_latest_model_ratio\": 1.0,\n \"save_steps\": 5000,\n \"swap_steps\": 5000,\n \"team_change\": 2000,\n },\n }\n config = generate_config(PPO_CONFIG, override_vals)\n config[brain_name_opp] = config[BRAIN_NAME]\n _check_environment_trains(env, config)\n\n\[email protected](\"use_discrete\", [True, False])\ndef test_simple_asymm_ghost_fails(use_discrete):\n # Make opponent for asymmetric case\n brain_name_opp = BRAIN_NAME + \"Opp\"\n env = SimpleEnvironment(\n [BRAIN_NAME + \"?team=0\", brain_name_opp + \"?team=1\"], use_discrete=use_discrete\n )\n # This config should fail because the team that us not learning when both have reached\n # max step should be executing the initial, untrained poliy.\n override_vals = {\n \"max_steps\": 2000,\n \"self_play\": {\n \"play_against_latest_model_ratio\": 0.0,\n \"save_steps\": 5000,\n \"swap_steps\": 5000,\n \"team_change\": 2000,\n },\n }\n config = generate_config(PPO_CONFIG, override_vals)\n config[brain_name_opp] = config[BRAIN_NAME]\n _check_environment_trains(env, config, success_threshold=None)\n processed_rewards = [\n default_reward_processor(rewards) for rewards in env.final_rewards.values()\n ]\n success_threshold = 0.9\n assert any(reward > success_threshold for reward in processed_rewards) and any(\n reward < success_threshold for reward in processed_rewards\n )\n\n\[email protected](scope=\"session\")\ndef simple_record(tmpdir_factory):\n def record_demo(use_discrete, num_visual=0, num_vector=1):\n env = RecordEnvironment(\n [BRAIN_NAME],\n use_discrete=use_discrete,\n num_visual=num_visual,\n num_vector=num_vector,\n n_demos=100,\n )\n # If we want to use true demos, we can solve the env in the usual way\n # Otherwise, we can just call solve to execute the optimal policy\n env.solve()\n agent_info_protos = env.demonstration_protos[BRAIN_NAME]\n meta_data_proto = DemonstrationMetaProto()\n brain_param_proto = BrainParametersProto(\n vector_action_size=[1],\n vector_action_descriptions=[\"\"],\n vector_action_space_type=discrete if use_discrete else continuous,\n brain_name=BRAIN_NAME,\n is_training=True,\n )\n action_type = \"Discrete\" if use_discrete else \"Continuous\"\n demo_path_name = \"1DTest\" + action_type + \".demo\"\n demo_path = str(tmpdir_factory.mktemp(\"tmp_demo\").join(demo_path_name))\n write_demo(demo_path, meta_data_proto, brain_param_proto, agent_info_protos)\n return demo_path\n\n return record_demo\n\n\[email protected](\"use_discrete\", [True, False])\[email protected](\"trainer_config\", [PPO_CONFIG, SAC_CONFIG])\ndef test_gail(simple_record, use_discrete, trainer_config):\n demo_path = simple_record(use_discrete)\n env = SimpleEnvironment([BRAIN_NAME], use_discrete=use_discrete, step_size=0.2)\n override_vals = {\n \"max_steps\": 500,\n \"behavioral_cloning\": {\"demo_path\": demo_path, \"strength\": 1.0, \"steps\": 1000},\n \"reward_signals\": {\n \"gail\": {\n \"strength\": 1.0,\n \"gamma\": 0.99,\n \"encoding_size\": 32,\n \"demo_path\": demo_path,\n }\n },\n }\n config = generate_config(trainer_config, override_vals)\n _check_environment_trains(env, config, success_threshold=0.9)\n\n\[email protected](\"use_discrete\", [True, False])\ndef test_gail_visual_ppo(simple_record, use_discrete):\n demo_path = simple_record(use_discrete, num_visual=1, num_vector=0)\n env = 
SimpleEnvironment(\n [BRAIN_NAME],\n num_visual=1,\n num_vector=0,\n use_discrete=use_discrete,\n step_size=0.2,\n )\n override_vals = {\n \"max_steps\": 500,\n \"learning_rate\": 3.0e-4,\n \"behavioral_cloning\": {\"demo_path\": demo_path, \"strength\": 1.0, \"steps\": 1000},\n \"reward_signals\": {\n \"gail\": {\n \"strength\": 1.0,\n \"gamma\": 0.99,\n \"encoding_size\": 32,\n \"demo_path\": demo_path,\n }\n },\n }\n config = generate_config(PPO_CONFIG, override_vals)\n _check_environment_trains(env, config, success_threshold=0.9)\n\n\n@pytest.mark.parametrize(\"use_discrete\", [True, False])\ndef test_gail_visual_sac(simple_record, use_discrete):\n demo_path = simple_record(use_discrete, num_visual=1, num_vector=0)\n env = SimpleEnvironment(\n [BRAIN_NAME],\n num_visual=1,\n num_vector=0,\n use_discrete=use_discrete,\n step_size=0.2,\n )\n override_vals = {\n \"max_steps\": 500,\n \"batch_size\": 16,\n \"learning_rate\": 3.0e-4,\n \"behavioral_cloning\": {\"demo_path\": demo_path, \"strength\": 1.0, \"steps\": 1000},\n \"reward_signals\": {\n \"gail\": {\n \"strength\": 1.0,\n \"gamma\": 0.99,\n \"encoding_size\": 32,\n \"demo_path\": demo_path,\n }\n },\n }\n config = generate_config(SAC_CONFIG, override_vals)\n _check_environment_trains(env, config, success_threshold=0.9)\n"
]
| [
[
"numpy.array"
]
]
|
alexwim/CS263A_Project | [
"f955711a02e4920bf0b7c4fbecfdf00b18e213ec"
]
| [
"utils.py"
]
| [
"from nltk.tokenize import WordPunctTokenizer\nimport nltk.data\nimport numpy as np\nimport re\nimport os\n\nroot = os.path.dirname(os.path.abspath(__file__))\n\n##################\n# TEXTS INVOLVED #\n##################\n##Alexandre Dumas\n# 0:The Three Musketeers\n# 1:Twenty Years After (D'Artagnan Series: Part Two)\n# 2:The Count of Monte Cristo\n##Mark Twain\n# 3:Adventures of Huckleberry Finn\n# 4:The American Claimant\n##Jules Verne\n# 5:Around the World in 80 Days\n# 6:Twenty Thousand Leagues Under the Sea\n##################\n\n# These pull out the core text of their respective stories.\nrulesStory = [\n\tr'our history\\.\\n{5}(.*)\\s+----',\n\tr'Conclusion\\.\\n{5}(.*)\\s+----',\n\tr', Pere\\n{5}(.*)\\n{6}End of',\n\tr'years ago\\n{5}(.*)THE END\\. YOURS TRULY, HUCK FINN\\.',\n\tr'goes along.\\n{6}(.*)\\n{6}APPENDIX',\n\tr'\\n{5}(.*)\\n{10}',\n\tr'\\n{6}(.*)\\n{10}'\n\t]\n\t\n# These represent meta elements of the text that must be stripped out, e.g. chapter headings.\nrulesMeta = [\n\tr'\\n(\\d+.*)\\n',\n\tr'\\n(\\d+\\..*)\\n',\n\tr'\\n(Chapter \\d+\\..*)\\n',\n\tr'\\n(Chapter [XVIL]+\\.)\\n',\n\tr'\\n(Chapter [XVIL]+\\.)\\n',\n\tr'\\n{2}(Chapter [XVIL]+)\\n',\n\tr'\\n{2}(Chapter [XVIL]+)\\n'\n\t]\n\ndef getText(idx):\n\tfile = open(root+'/'+str(idx)+'.book', encoding='utf8').read()\n\tm = re.search(rulesStory[idx],re.sub(rulesMeta[idx], '', file),re.DOTALL)\n\tif m:\n\t\ttokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\t\ttext = [WordPunctTokenizer().tokenize(s) for s in tokenizer.tokenize(m.group(1).rstrip().replace('\\n', ' '))]\n\t\tt = []\n\t\tfor sentence in text:\n\t\t\ts = []\n\t\t\tfor word in sentence:\n\t\t\t\tr = re.search(r'(-|.)(-|\")', word)\n\t\t\t\ts+=[r.group(1),r.group(2)] if r else [word]\n\t\t\tt+=[s]\n\t\treturn t\n\t\t# return([w for s in t for w in s if w not in '.,:;()!?\"\\'_-'])\n\telse:\n\t\traise Exception('Story regex failure in '+str(idx)+'.')\n\ndef getFuzzyList(word):\n\treturn [word, word.lower()]+\\\n\t\t([word[:-1], word[:-1].lower()] if word[-1] == 's' else [])+\\\n\t\t([word[:-2], word[:-2].lower()] if word[-2:] == 'ed' else [])+\\\n\t\t([word[:-2], word[:-2].lower()] if word[-2:] == 'er' else [])+\\\n\t\t([word[:-3], word[:-3].lower()] if word[-3:] == 'ing' else [])+\\\n\t\t([word[:-3]+'y', word[:-3].lower()+'y'] if word[-3:] == 'ied' else [])\n\t\t\ndef getFuzzyMatch(word, dict):\n\tfor w in getFuzzyList(word):\n\t\tif w in dict:\n\t\t\treturn w\n\treturn None\n\ndef isAdmissible(sentence, dict):\n\tfor word in sentence:\n\t\tif not getFuzzyMatch(word, dict):\n\t\t\treturn False\n\treturn True\n\ndef rate(pos, neg):\n\treturn pos/(pos+neg)\n\n#This sampling code taken from lstm_example.py in the Keras examples subfolder\ndef sample(a, temperature=1.0):\n a = np.log(a) / temperature\n a = np.exp(a) / np.sum(np.exp(a))\n return np.argmax(np.random.multinomial(1, a, 1))\n"
]
| [
[
"numpy.random.multinomial",
"numpy.exp",
"numpy.log"
]
]
|
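The temperature-sampling helper at the end of the row above is self-contained; a quick demonstration on a toy distribution shows how low temperature sharpens the choice and high temperature flattens it.

import numpy as np

def sample(a, temperature=1.0):
    # identical to the helper above (originally from the Keras LSTM example)
    a = np.log(a) / temperature
    a = np.exp(a) / np.sum(np.exp(a))
    return np.argmax(np.random.multinomial(1, a, 1))

p = np.array([0.1, 0.2, 0.7])
print([sample(p, 0.2) for _ in range(8)])  # almost always index 2
print([sample(p, 5.0) for _ in range(8)])  # close to uniform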
googleinterns/multimodal-long-transformer-2021 | [
"8161dfd1698967d2eb76a262b46cbf44b9dd4739"
]
| [
"src/tasks/classification.py"
]
| [
"# Copyright 2021 Google LLC\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# https://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Classification task.\"\"\"\nimport collections\nimport dataclasses\nfrom typing import List, Union, Optional, Tuple, Mapping\n\nfrom absl import logging\nimport numpy as np\nimport orbit\nfrom official.core import base_task\nfrom official.core import config_definitions as cfg\nfrom official.core import task_factory\nfrom official.nlp.data import data_loader_factory\nfrom official.nlp.modeling import layers\nimport tensorflow as tf\n\nimport input_utils\nfrom configs import mmt\nfrom configs import encoders\nfrom modeling import models\nfrom modeling import losses\n\n\nMETRIC_TYPES = frozenset(['accuracy', 'auc'])\n\n\[email protected]\nclass ClassificationConfig(cfg.TaskConfig):\n \"\"\"The classification model config.\"\"\"\n\n model: mmt.ClassificationModelConfig = mmt.ClassificationModelConfig()\n scale_loss: bool = False\n\n train_data: cfg.DataConfig = cfg.DataConfig()\n validation_data: cfg.DataConfig = cfg.DataConfig()\n\n init_checkpoint: str = ''\n init_cls_pooler: bool = False\n metric_type: str = 'accuracy'\n\n\n@task_factory.register_task_cls(ClassificationConfig)\nclass ClassificationTask(base_task.Task):\n \"\"\"Task object for classification.\"\"\"\n\n def __init__(self,\n params: cfg.TaskConfig,\n logging_dir: Optional[str] = None,\n name: Optional[str] = None):\n super().__init__(params, logging_dir, name=name)\n if params.metric_type not in METRIC_TYPES:\n raise ValueError(f'Invalid metric_type: {params.metric_type}')\n self.metric_type = params.metric_type\n self.label_field = params.train_data.label_field or 'label_ids'\n self.logits_field = params.train_data.logits_field or 'logits'\n self.label_weights_field = (params.train_data.label_weights_field or\n 'label_weights')\n self.pos_weights_field = (params.train_data.pos_weights_field or\n 'pos_weights')\n\n self.task_name = 'classification'\n\n def _build_encoder(self, encoder_cfg):\n return encoders.build_encoder(encoder_cfg)\n\n def build_model(self):\n config = self.task_config.model\n encoder_cfg = config.encoder\n encoder = self._build_encoder(encoder_cfg)\n\n data_cfg = self.task_config.train_data\n\n cls_heads = []\n for cfg in config.cls_heads:\n cls_head = layers.ClassificationHead(**cfg.as_dict())\n cls_heads.append(cls_head)\n\n model = models.MmtClassificationModel(\n encoder=encoder,\n classification_heads=cls_heads)\n\n inputs = input_utils.create_mmt_encoder_inputs(data_cfg)\n model(**inputs)\n\n return model\n\n def build_losses(self,\n labels,\n model_outputs,\n metrics,\n aux_losses=None) -> tf.Tensor:\n\n label_ids = labels[self.label_field]\n logits = model_outputs[self.logits_field]\n\n label_weights = labels[self.label_weights_field]\n pos_weights = labels[self.pos_weights_field]\n\n if self.task_config.model.num_classes == 1:\n loss_fn = losses.weighted_binary_crossentropy_loss\n logits = tf.reshape(logits, (-1,))\n else:\n loss_fn = losses.weighted_sparse_categorical_crossentropy_loss\n\n loss = 
loss_fn(logits, label_ids, label_weights,\n metrics, self.task_name, pos_weights=pos_weights)\n\n total_loss = loss\n\n if aux_losses:\n total_loss += tf.add_n(aux_losses)\n\n return total_loss\n\n def build_inputs(self, params, input_context=None):\n \"\"\"Returns tf.data.Dataset for classification task.\"\"\"\n return data_loader_factory.get_data_loader(params).load(input_context)\n\n def build_metrics(self, training=None):\n del training\n if self.task_config.model.num_classes == 1:\n metrics = [\n tf.keras.metrics.AUC(name='auc', curve='PR')\n ]\n elif self.task_config.model.num_classes == 2:\n metrics = [\n tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),\n tf.keras.metrics.AUC(name='auc', curve='PR'),\n ]\n else:\n metrics = [\n tf.keras.metrics.SparseCategoricalAccuracy(name='cls_accuracy'),\n ]\n metrics.append(tf.keras.metrics.Mean(name=f'{self.task_name}_loss'))\n return metrics\n\n def process_metrics(self, metrics, labels, model_outputs):\n label_ids = labels[self.label_field]\n logits = model_outputs[self.logits_field]\n label_weights = labels[self.label_weights_field]\n\n with tf.name_scope('ClassificationTask/process_metrics'):\n for metric in metrics:\n if metric.name == 'auc':\n if self.task_config.model.num_classes == 1:\n logits = tf.reshape(logits, (-1,))\n probs = tf.sigmoid(logits)\n elif self.task_config.model.num_classes == 2:\n # Converts the logit to probability and extract the prob of True.\n probs = tf.nn.softmax(logits)[:, 1]\n else:\n raise ValueError('auc requires # classes either 1 or 2.')\n\n metric.update_state(label_ids, probs, label_weights)\n\n if metric.name == 'cls_accuracy':\n metric.update_state(label_ids, logits, label_weights)\n\n def train_step(self,\n inputs: Tuple[Mapping[str, tf.Tensor], Mapping[str, tf.Tensor]],\n model: tf.keras.Model,\n optimizer: tf.keras.optimizers.Optimizer,\n metrics: Mapping[str, tf.keras.metrics.Metric]):\n \"\"\"Does forward and backward pass.\n\n Args:\n inputs: a pair of dictionaries of input and label tensors.\n model: the model, forward pass definition.\n optimizer: the optimizer for this training step.\n metrics: a nested structure of metrics objects.\n\n Returns:\n A dictionary of logs.\n\n \"\"\"\n\n inputs, labels = inputs\n\n with tf.GradientTape() as tape:\n # Computes per-replica loss.\n outputs = model(**inputs, training=True)\n loss = self.build_losses(\n labels=labels,\n model_outputs=outputs,\n metrics=metrics,\n aux_losses=model.losses)\n if self.task_config.scale_loss:\n # Scales loss as the default gradients allreduce performs sum inside the\n # optimizer.\n scaled_loss = loss / tf.distribute.get_strategy().num_replicas_in_sync\n\n tvars = model.trainable_variables\n if self.task_config.scale_loss:\n grads = tape.gradient(scaled_loss, tvars)\n else:\n grads = tape.gradient(loss, tvars)\n optimizer.apply_gradients(list(zip(grads, tvars)))\n self.process_metrics(metrics, labels, outputs)\n return {self.loss: loss}\n\n def validation_step(self, inputs, model: tf.keras.Model, metrics):\n inputs, labels = inputs\n outputs = self.inference_step(inputs, model)\n loss = self.build_losses(\n labels=labels,\n model_outputs=outputs,\n metrics=metrics,\n aux_losses=model.losses)\n logs = {self.loss: loss}\n self.process_metrics(metrics, labels, outputs)\n return logs\n\n def inference_step(self, inputs, model: tf.keras.Model):\n return model(**inputs, training=False)\n\n def initialize(self, model):\n \"\"\"Loads a pretrained checkpoint (if exists).\"\"\"\n\n ckpt_dir_or_file = 
self.task_config.init_checkpoint\n    if not ckpt_dir_or_file:\n      logging.info('task_config.init_checkpoint is empty. Train from scratch.')\n      return\n    if tf.io.gfile.isdir(ckpt_dir_or_file):\n      ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)\n\n    pretrain2finetune_mapping = {\n        'encoder': model.checkpoint_items['encoder'],\n    }\n\n    # Restores pretrained cls_heads from the checkpoint if the cls_heads exist in\n    # the finetuning model.\n    if self.task_config.model.cls_heads:\n      for cls_head in self.task_config.model.cls_heads:\n        for key, item in model.checkpoint_items.items():\n          if cls_head.name in key:\n            pretrain2finetune_mapping[key] = item\n    ckpt = tf.train.Checkpoint(**pretrain2finetune_mapping)\n    status = ckpt.read(ckpt_dir_or_file)\n    status.expect_partial().assert_existing_objects_matched()\n    logging.info(f'Finished loading pretrained checkpoint from {ckpt_dir_or_file}.')\n\n\ndef predict(task: ClassificationTask,\n            params: cfg.DataConfig,\n            model: tf.keras.Model) -> List[Union[int, float]]:\n  \"\"\"Predicts on the input data.\n\n  Args:\n    task: A `ClassificationTask` object.\n    params: A `cfg.DataConfig` object.\n    model: A keras.Model.\n\n  Returns:\n    A list of predictions with length of `num_examples`.\n\n  \"\"\"\n\n  RawResult = collections.namedtuple('RawResult',\n                                     ['image_index',\n                                      'text_index',\n                                      'gt_image_index',\n                                      'output'])\n\n  strategy = tf.distribute.get_strategy()\n\n  def get_raw_results(predictions):\n    \"\"\"Converts multi-replica predictions to RawResult.\"\"\"\n    for img_idx, txt_idx, gt_img_idx, logits in zip(predictions['image_index'],\n                                                    predictions['text_index'],\n                                                    predictions['gt_image_index'],\n                                                    predictions['logits']):\n\n      if task.task_config.model.num_classes == 1:\n        outputs = tf.sigmoid(logits)\n      elif task.task_config.model.num_classes == 2:\n        # Gets the scores of the positive class (num_classes == 2).\n        outputs = tf.nn.softmax(logits, axis=1)[:, 1]\n      else:\n        # Gets the classes with maximum scores (num_classes >= 2).\n        outputs = tf.argmax(logits, axis=1)\n\n      for values in zip(img_idx.numpy(), txt_idx.numpy(),\n                        gt_img_idx.numpy(), outputs.numpy()):\n        yield RawResult(image_index=values[0],\n                        text_index=values[1],\n                        gt_image_index=values[2],\n                        output=values[3].tolist())\n\n  @tf.function\n  def predict_step(batch):\n    \"\"\"Replicates prediction calculation.\"\"\"\n\n    def _replicated_step(inputs):\n      inputs, labels = inputs\n      image_index = inputs.pop('image_index')\n      text_index = inputs.pop('text_index')\n      gt_image_index = inputs.pop('gt_image_index')\n      outputs = task.inference_step(inputs, model)\n      return dict(image_index=image_index,\n                  text_index=text_index,\n                  gt_image_index=gt_image_index,\n                  logits=outputs['itm_logits'])\n\n    outputs = strategy.run(_replicated_step, args=(batch,))\n    return tf.nest.map_structure(strategy.experimental_local_results, outputs)\n\n  dataset = orbit.utils.make_distributed_dataset(strategy,\n                                                 task.build_inputs,\n                                                 params)\n  dataset = iter(dataset)\n\n  results = []\n  for step, batch in enumerate(dataset, start=1):\n    predictions = predict_step(batch)\n    results.extend(list(get_raw_results(predictions)))\n\n    if step % 5 == 0:\n      logging.info(f'Made predictions for {len(results)} examples.')\n\n  logging.info(f'Finished predictions for {len(results)} examples.')\n  return results\n"
]
| [
[
"tensorflow.keras.metrics.Mean",
"tensorflow.keras.metrics.AUC",
"tensorflow.train.latest_checkpoint",
"tensorflow.GradientTape",
"tensorflow.distribute.get_strategy",
"tensorflow.sigmoid",
"tensorflow.argmax",
"tensorflow.reshape",
"tensorflow.io.gfile.isdir",
"tensorflow.nest.map_structure",
"tensorflow.add_n",
"tensorflow.name_scope",
"tensorflow.nn.softmax",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.train.Checkpoint"
]
]
|
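The classification task above branches on `num_classes` in both `build_losses` and `process_metrics`: a single logit is flattened and passed through a sigmoid, while two-class logits go through a softmax and only the positive-class probability feeds the PR-curve AUC metric. A minimal standalone sketch of that conversion (the tensors below are illustrative, not from the task):

```python
import tensorflow as tf

# num_classes == 1: one logit per example, flatten then sigmoid.
logits_1 = tf.constant([[0.3], [-1.2], [2.0]])
probs_1 = tf.sigmoid(tf.reshape(logits_1, (-1,)))

# num_classes == 2: softmax, keep the probability of the positive class.
logits_2 = tf.constant([[0.1, 0.9], [1.5, -0.5], [-0.2, 0.2]])
probs_2 = tf.nn.softmax(logits_2)[:, 1]

# Either probability vector can update the PR-curve AUC metric.
auc = tf.keras.metrics.AUC(name='auc', curve='PR')
auc.update_state([1.0, 0.0, 1.0], probs_2)
print(float(auc.result()))
```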
nflu/softlearning | [
"b4db23ad266f594c891357d9dabe981ecf9bcdea"
]
| [
"softlearning/replay_pools/flexible_replay_pool.py"
]
| [
"from dataclasses import dataclass\nfrom typing import Union, Callable\nfrom numbers import Number\nimport gzip\nimport pickle\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom flatten_dict import flatten, unflatten\nfrom .replay_pool import ReplayPool\n\n\n@dataclass\nclass Field:\n name: str\n dtype: Union[str, np.dtype, tf.DType]\n shape: Union[tuple, tf.TensorShape]\n initializer: Callable = np.zeros\n default_value: Number = 0.0\n\n\nINDEX_FIELDS = {\n 'episode_index_forwards': Field(\n name='episode_index_forwards',\n dtype='uint64',\n shape=(1, ),\n ),\n 'episode_index_backwards': Field(\n name='episode_index_backwards',\n dtype='uint64',\n shape=(1, ),\n ),\n}\n\n\nclass FlexibleReplayPool(ReplayPool):\n def __init__(self, max_size, fields):\n super(FlexibleReplayPool, self).__init__()\n\n max_size = int(max_size)\n self._max_size = max_size\n\n self.data = {}\n self.fields = {**fields, **INDEX_FIELDS}\n self.fields_flat = flatten(self.fields)\n self._initialize_data()\n\n self._pointer = 0\n self._size = 0\n self._samples_since_save = 0\n\n @property\n def size(self):\n return self._size\n\n def _initialize_field(self, field):\n field_shape = (self._max_size, *field.shape)\n field_values = field.initializer(\n field_shape, dtype=field.dtype)\n\n return field_values\n\n def _initialize_data(self):\n \"\"\"Initialize data for the pool.\"\"\"\n fields = flatten(self.fields)\n for field_name, field_attrs in fields.items():\n self.data[field_name] = self._initialize_field(field_attrs)\n\n def _advance(self, count=1):\n \"\"\"Handles bookkeeping after adding samples to the pool.\n\n * Moves the pointer (`self._pointer`)\n * Updates the size (`self._size`)\n * Fixes the `episode_index_backwards` field, which might have become\n out of date when the pool is full and we start overriding old\n samples.\n \"\"\"\n self._pointer = (self._pointer + count) % self._max_size\n self._size = min(self._size + count, self._max_size)\n\n if self.data[('episode_index_forwards', )][self._pointer] != 0:\n episode_tail_length = int(self.data[\n ('episode_index_backwards', )\n ][self._pointer, 0] + 1)\n self.data[\n ('episode_index_forwards', )\n ][np.arange(\n self._pointer, self._pointer + episode_tail_length\n ) % self._max_size] = np.arange(episode_tail_length)[..., None]\n\n self._samples_since_save += count\n\n def add_sample(self, sample):\n sample_flat = flatten(sample)\n samples_flat = type(sample)([\n (field_name_flat, np.array(sample_flat[field_name_flat])[None, ...])\n for field_name_flat in sample_flat.keys()\n ])\n samples = unflatten(samples_flat)\n\n self.add_samples(samples)\n\n def add_samples(self, samples):\n samples = flatten(samples)\n\n field_names = tuple(samples.keys())\n num_samples = samples[field_names[0]].shape[0]\n\n index = np.arange(\n self._pointer, self._pointer + num_samples) % self._max_size\n\n for field_name, values in samples.items():\n default_value = self.fields_flat[field_name].default_value\n values = samples.get(field_name, default_value)\n assert values.shape[0] == num_samples\n self.data[field_name][index] = values\n\n self._advance(num_samples)\n\n def add_path(self, path):\n path = path.copy()\n\n path_flat = flatten(path)\n path_length = path_flat[next(iter(path_flat.keys()))].shape[0]\n path.update({\n 'episode_index_forwards': np.arange(\n path_length,\n dtype=self.fields['episode_index_forwards'].dtype\n )[..., None],\n 'episode_index_backwards': np.arange(\n path_length,\n dtype=self.fields['episode_index_backwards'].dtype\n )[::-1, None],\n })\n\n 
return self.add_samples(path)\n\n def random_indices(self, batch_size):\n if self._size == 0: return np.arange(0, 0)\n return np.random.randint(0, self._size, batch_size)\n\n def random_batch(self, batch_size, field_name_filter=None, **kwargs):\n random_indices = self.random_indices(batch_size)\n return self.batch_by_indices(\n random_indices, field_name_filter=field_name_filter, **kwargs)\n\n def last_n_batch(self, last_n, field_name_filter=None, **kwargs):\n last_n_indices = np.arange(\n self._pointer - min(self.size, int(last_n)), self._pointer,\n dtype=int\n ) % self._max_size\n\n return self.batch_by_indices(\n last_n_indices, field_name_filter=field_name_filter, **kwargs)\n\n def filter_fields(self, field_names, field_name_filter):\n if isinstance(field_name_filter, str):\n field_name_filter = [field_name_filter]\n\n if isinstance(field_name_filter, (list, tuple)):\n field_name_list = field_name_filter\n\n def filter_fn(field_name):\n return field_name in field_name_list\n\n else:\n filter_fn = field_name_filter\n\n filtered_field_names = [\n field_name for field_name in field_names\n if filter_fn(field_name)\n ]\n\n return filtered_field_names\n\n def batch_by_indices(self, indices, field_name_filter=None):\n if np.any(indices % self._max_size > self.size):\n raise ValueError(\n \"Tried to retrieve batch with indices greater than current\"\n \" size\")\n\n field_names_flat = self.fields_flat.keys()\n if field_name_filter is not None:\n field_names_flat = self.filter_fields(\n field_names_flat, field_name_filter)\n\n batch_flat = {\n field_name: self.data[field_name][indices]\n for field_name in field_names_flat\n }\n\n batch = unflatten(batch_flat)\n return batch\n\n def save_latest_experience(self, pickle_path):\n latest_samples = self.last_n_batch(self._samples_since_save)\n\n with gzip.open(pickle_path, 'wb') as f:\n pickle.dump(latest_samples, f)\n\n self._samples_since_save = 0\n\n def load_experience(self, experience_path):\n with gzip.open(experience_path, 'rb') as f:\n latest_samples = pickle.load(f)\n\n latest_samples_flat = flatten(latest_samples)\n\n key = list(latest_samples_flat.keys())[0]\n num_samples = latest_samples_flat[key].shape[0]\n for data in latest_samples_flat.values():\n assert data.shape[0] == num_samples, data.shape\n\n self.add_samples(latest_samples)\n self._samples_since_save = 0\n\n # def __getstate__(self):\n # state = self.__dict__.copy()\n # state['fields'] = {\n # field_name: self.data[field_name][:self.size]\n # for field_name in self.field_names\n # }\n\n # return state\n\n # def __setstate__(self, state):\n # if state['_size'] < state['_max_size']:\n # pad_size = state['_max_size'] - state['_size']\n # for field_name in state['data'].keys():\n # field_shape = state['fields'][field_name]['shape']\n # state['fields'][field_name] = np.concatenate((\n # state['fields'][field_name],\n # np.zeros((pad_size, *field_shape))\n # ), axis=0)\n\n # self.__dict__ = state\n"
]
| [
[
"numpy.any",
"numpy.arange",
"numpy.array",
"numpy.random.randint"
]
]
|
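`FlexibleReplayPool` stores samples in a fixed-size ring buffer: `add_samples` writes at `pointer .. pointer + n` modulo `max_size`, so once the pool is full the oldest entries are overwritten and `_advance` repairs the episode-index bookkeeping for the overwritten tail. A self-contained sketch of the wrap-around arithmetic (names are illustrative):

```python
import numpy as np

max_size, pointer, size = 5, 3, 5          # a pool that is already full
num_samples = 4

# Destination slots wrap around the end of the buffer.
index = np.arange(pointer, pointer + num_samples) % max_size
print(index)                               # [3 4 0 1]: slots 0 and 1 are overwritten

# Bookkeeping mirrors _advance().
pointer = (pointer + num_samples) % max_size   # -> 2
size = min(size + num_samples, max_size)       # stays capped at 5
```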
mohanliu/qmpy | [
"4dd7f6206df06213f8cbf335f992c1c54690ef5b"
]
| [
"qmpy/analysis/symmetry/routines.py"
]
| [
"import fractions as frac\nimport numpy as np\nimport logging\n\nimport qmpy\nif qmpy.FOUND_SPGLIB:\n import pyspglib._spglib as spg\n\nimport qmpy.data as data\nfrom qmpy.utils import *\n\nlogger = logging.getLogger(__name__)\n\nif not qmpy.FOUND_SPGLIB:\n logger.critical('Must install spglib to be able to do symmetry analysis')\n\n## spglib functions | http://spglib.sourceforge.net/ v1.8.3\n\ndef find_structure_symmetry(structure, method='spglib',\n symprec=1e-5, angle_tolerance=-1.0):\n \"\"\"\n Return the rotatiosn and translations which are possessed by the structure.\n \n Examples::\n\n >>> from qmpy.io import read\n >>> from qmpy.analysis.symmetry import find_structure_symmetry\n >>> structure = read('POSCAR')\n >>> find_structure_symmetry(structure)\n \n \"\"\"\n # Get number of symmetry operations and allocate symmetry operations\n multi = 48 * len(structure)\n rotation = np.zeros((multi, 3, 3), dtype='intc')\n translation = np.zeros((multi, 3))\n\n cell = structure.cell.T.copy()\n coords = structure.site_coords.copy()\n numbers = np.array(structure.site_ids, dtype='intc')\n \n # Get symmetry operations\n magmoms = structure.magmoms\n if not any(magmoms):\n num_sym = spg.symmetry(rotation,\n translation,\n cell,\n coords,\n numbers,\n symprec,\n angle_tolerance)\n else:\n num_sym = spg.symmetry_with_collinear_spin(rotation,\n translation,\n cell,\n coords,\n numbers,\n magmoms,\n symprec,\n angle_tolerance)\n \n return rotation[:num_sym], translation[:num_sym]\n\ndef get_symmetry_dataset(structure, symprec=1e-3, angle_tolerance=-1.0):\n \"\"\"\n Return a full set of symmetry information from a given input structure.\n\n Mapping values:\n number: International space group number\n international: International symbol\n hall: Hall symbol\n transformation_matrix:\n Transformation matrix from lattice of input cell to Bravais lattice\n L^bravais = L^original * Tmat\n origin shift: Origin shift in the setting of 'Bravais lattice'\n rotations, translations:\n Rotation matrices and translation vectors\n Space group operations are obtained by\n [(r,t) for r, t in zip(rotations, translations)]\n wyckoffs:\n Wyckoff letters\n\n Examples::\n\n >>> from qmpy.io import read\n >>> from qmpy.analysis.symmetry import get_symmetry_dataset\n >>> structure = read('POSCAR')\n >>> get_symmetry_dataset(structure)\n\n \"\"\"\n keys = ('number',\n 'hall_number',\n 'international',\n 'hall',\n 'transformation_matrix',\n 'origin_shift',\n 'rotations',\n 'translations',\n 'wyckoffs',\n 'equivalent_atoms',\n 'std_lattice',\n 'std_types',\n 'std_positions',\n 'pointgroup_number',\n 'pointgroup')\n\n cell = structure.cell.T.copy()\n coords = np.array(structure.site_coords)\n comps = structure.site_compositions\n numbers = [ comps.index(c) for c in comps ]\n numbers = np.array(numbers, dtype='intc')\n\n dataset = {}\n for key, data in zip(keys, spg.dataset(cell,\n coords,\n numbers,\n symprec,\n angle_tolerance)):\n dataset[key] = data\n\n dataset['international'] = dataset['international'].strip()\n dataset['hall'] = dataset['hall'].strip()\n dataset['transformation_matrix'] = np.array(dataset['transformation_matrix'], dtype='double', order='C')\n dataset['origin_shift'] = np.array(dataset['origin_shift'], dtype='double')\n dataset['rotations'] = np.array(dataset['rotations'], dtype='intc', order='C')\n dataset['translations'] = np.array(dataset['translations'], dtype='double', order='C')\n letters = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n dataset['wyckoffs'] = [letters[x] for x in 
dataset['wyckoffs']]\n    dataset['equivalent_atoms'] = np.array(dataset['equivalent_atoms'], dtype='intc')\n    dataset['std_lattice'] = np.array(np.transpose(dataset['std_lattice']), dtype='double', order='C')\n    dataset['std_types'] = np.array(dataset['std_types'], dtype='intc')\n    dataset['std_positions'] = np.array(dataset['std_positions'], dtype='double', order='C')\n    dataset['pointgroup'] = dataset['pointgroup'].strip()\n\n    return dataset\n\ndef get_spacegroup(structure, symprec=1e-5, angle_tolerance=-1.0):\n    \"\"\"\n    Return space group in international table symbol and number\n    as a string.\n    \"\"\"\n    cell = structure.cell.T.copy()\n    coords = structure.site_coords.copy()\n    comps = structure.site_compositions\n    numbers = [ comps.index(c) for c in comps ]\n    numbers = np.array(numbers, dtype='intc')\n    # Atomic positions have to be specified by scaled positions for spglib.\n    return int(spg.spacegroup(cell,\n                              coords,\n                              numbers,\n                              symprec,\n                              angle_tolerance).strip(' ()'))\n\ndef get_pointgroup(rotations):\n    \"\"\"\n    Return point group in international table symbol and number.\n    \"\"\"\n\n    # (symbol, pointgroup_number, transformation_matrix)\n    return spg.pointgroup(rotations)\n\ndef refine_cell(structure, symprec=1e-5, angle_tolerance=-1.0):\n    \"\"\"\n    Return refined cell\n    \"\"\"\n    # Atomic positions have to be specified by scaled positions for spglib.\n    num_atom = len(structure.sites)\n    cell = structure.cell.T.copy()\n    coords = np.zeros((num_atom * 4, 3), dtype='double')\n    coords[:num_atom] = structure.site_coords.copy()\n    comps = structure.site_compositions\n    numbers = [ comps.index(c) for c in comps ]\n    numbers = np.array(numbers*4, dtype='intc')\n\n    num_atom_bravais = spg.refine_cell(cell,\n                                       coords,\n                                       numbers,\n                                       num_atom,\n                                       symprec,\n                                       angle_tolerance)\n\n    coords = wrap(coords)\n    comps = [ comps[i] for i in numbers ]\n    if num_atom_bravais > 0:\n        structure.cell = cell.T\n        structure.set_nsites_manager(coords[:num_atom_bravais])\n        #structure.set_nsites(num_atom_bravais)\n        #structure.site_coords = coords[:num_atom_bravais]\n        structure.site_compositions = comps[:num_atom_bravais]\n        return structure\n    else:\n        return structure\n\n\ndef find_primitive(structure, symprec=1e-4, angle_tolerance=-1.0):\n    \"\"\"\n    A primitive cell in the input cell is searched and returned\n    as an object of Atoms class.\n    If no primitive cell is found, (None, None, None) is returned.\n    \"\"\"\n    cell = structure.cell.T.copy()\n    coords = np.array(structure.site_coords.copy(), dtype='double')\n    comps = structure.site_compositions\n    numbers = [ comps.index(c) for c in comps ]\n    numbers = np.array(numbers, dtype='intc')\n\n    num_atom_prim = spg.primitive(cell,\n                                  coords,\n                                  numbers,\n                                  symprec,\n                                  angle_tolerance)\n\n    coords = wrap(coords)\n    comps = [ comps[i] for i in numbers ]\n    if num_atom_prim > 0:\n        structure.cell = cell.T\n        structure.set_nsites_manager(coords[:num_atom_prim])\n        #structure.set_nsites(num_atom_prim)\n        #structure.site_coords = coords[:num_atom_prim]\n        structure.site_compositions = comps[:num_atom_prim]\n        return structure\n    else:\n        return structure\n\ndef parse_sitesym(sitesym, sep=','):\n    rot = np.zeros((3, 3))\n    trans = np.zeros(3)\n    for i, s in enumerate(sitesym.split(sep)):\n        s = s.lower().strip()\n        while s:\n            sign = 1\n            if s[0] in '+-':\n                if s[0] == '-':\n                    sign = -1\n                s = s[1:]\n            if s[0] in 'xyz':\n                j = ord(s[0]) - ord('x')\n                rot[i, j] = sign\n                s = s[1:]\n            elif s[0].isdigit() or s[0] == '.':\n                n = 0\n                while n < len(s) and (s[n].isdigit() or s[n] in '/.'):\n                    n += 1\n                t = s[:n]\n                s = s[n:]\n                trans[i] = 
float(frac.Fraction(t))\n else:\n raise ValueError('Failed to parse symmetry of %s' % (sitesym))\n return rot, trans\n"
]
| [
[
"numpy.array",
"numpy.transpose",
"numpy.zeros"
]
]
|
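A hedged usage sketch for `parse_sitesym` above, assuming qmpy is installed so the file is importable from its listed path; `'-y, x, z+1/2'` is a standard site-symmetry string, and the expected outputs are checked with numpy:

```python
import numpy as np
from qmpy.analysis.symmetry.routines import parse_sitesym

# '-y, x, z+1/2' encodes a fourfold rotation about z plus a half translation.
rot, trans = parse_sitesym('-y, x, z+1/2')
assert np.allclose(rot, [[0, -1, 0], [1, 0, 0], [0, 0, 1]])
assert np.allclose(trans, [0.0, 0.0, 0.5])
```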
KonstantinWilleke/lucent | [
"4809cf3429316ad97847d883a4257bf07042e242"
]
| [
"lucent/optvis/objectives.py"
]
| [
"# Copyright 2020 The Lucent Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom decorator import decorator\nfrom lucent.optvis.objectives_util import _make_arg_str, _extract_act_pos, _T_handle_batch\n\n\nclass Objective():\n\n def __init__(self, objective_func, name=\"\", description=\"\"):\n self.objective_func = objective_func\n self.name = name\n self.description = description\n\n def __call__(self, model):\n return self.objective_func(model)\n\n def __add__(self, other):\n if isinstance(other, (int, float)):\n objective_func = lambda model: other + self(model)\n name = self.name\n description = self.description\n else:\n objective_func = lambda model: self(model) + other(model)\n name = \", \".join([self.name, other.name])\n description = \"Sum(\" + \" +\\n\".join([self.description, other.description]) + \")\"\n return Objective(objective_func, name=name, description=description)\n\n @staticmethod\n def sum(objs):\n objective_func = lambda T: sum([obj(T) for obj in objs])\n descriptions = [obj.description for obj in objs]\n description = \"Sum(\" + \" +\\n\".join(descriptions) + \")\"\n names = [obj.name for obj in objs]\n name = \", \".join(names)\n return Objective(objective_func, name=name, description=description)\n\n def __neg__(self):\n return -1 * self\n\n def __sub__(self, other):\n return self + (-1 * other)\n\n def __mul__(self, other):\n if isinstance(other, (int, float)):\n objective_func = lambda model: other * self(model)\n return Objective(objective_func, name=self.name, description=self.description)\n else:\n # Note: In original Lucid library, objectives can be multiplied with non-numbers\n # Removing for now until we find a good use case\n raise TypeError('Can only multiply by int or float. Received type ' + str(type(other)))\n\n def __truediv__(self, other):\n if isinstance(other, (int, float)):\n return self.__mul__(1 / other)\n else:\n raise TypeError('Can only divide by int or float. Received type ' + str(type(other)))\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __radd__(self, other):\n return self.__add__(other)\n\n\ndef wrap_objective():\n @decorator\n def inner(func, *args, **kwds):\n objective_func = func(*args, **kwds)\n objective_name = func.__name__\n args_str = \" [\" + \", \".join([_make_arg_str(arg) for arg in args]) + \"]\"\n description = objective_name.title() + args_str\n return Objective(objective_func, objective_name, description)\n return inner\n\n\ndef handle_batch(batch=None):\n return lambda f: lambda model: f(_T_handle_batch(model, batch=batch))\n\n\n@wrap_objective()\ndef neuron(layer, n_channel, x=None, y=None, batch=None):\n \"\"\"Visualize a single neuron of a single channel.\n\n Defaults to the center neuron. 
When width and height are even numbers, we\n choose the neuron in the bottom right of the center 2x2 neurons.\n\n Odd width & height: Even width & height:\n\n +---+---+---+ +---+---+---+---+\n | | | | | | | | |\n +---+---+---+ +---+---+---+---+\n | | X | | | | | | |\n +---+---+---+ +---+---+---+---+\n | | | | | | | X | |\n +---+---+---+ +---+---+---+---+\n | | | | |\n +---+---+---+---+\n\n \"\"\"\n @handle_batch(batch)\n def inner(model):\n layer_t = model(layer)\n layer_t = _extract_act_pos(layer_t, x, y)\n return -layer_t[:, n_channel].mean()\n return inner\n\n\n@wrap_objective()\ndef channel(layer, n_channel, batch=None):\n \"\"\"Visualize a single channel\"\"\"\n @handle_batch(batch)\n def inner(model):\n return -model(layer)[:, n_channel].mean()\n return inner\n\n@wrap_objective()\ndef neuron_weight(layer, weight, x=None, y=None, batch=None):\n \"\"\" Linearly weighted channel activation at one location as objective\n weight: a torch Tensor vector same length as channel.\n \"\"\"\n @handle_batch(batch)\n def inner(model):\n layer_t = model(layer)\n layer_t = _extract_act_pos(layer_t, x, y)\n if weight is None:\n return -layer_t.mean()\n else:\n return -(layer_t.squeeze() * weight).mean()\n return inner\n\n@wrap_objective()\ndef channel_weight(layer, weight, batch=None):\n \"\"\" Linearly weighted channel activation as objective\n weight: a torch Tensor vector same length as channel. \"\"\"\n @handle_batch(batch)\n def inner(model):\n layer_t = model(layer)\n return -(layer_t * weight.view(1, -1, 1, 1)).mean()\n return inner\n\n@wrap_objective()\ndef localgroup_weight(layer, weight=None, x=None, y=None, wx=1, wy=1, batch=None):\n \"\"\" Linearly weighted channel activation around some spot as objective\n weight: a torch Tensor vector same length as channel. \"\"\"\n @handle_batch(batch)\n def inner(model):\n layer_t = model(layer)\n if weight is None:\n return -(layer_t[:, :, y:y + wy, x:x + wx]).mean()\n else:\n return -(layer_t[:, :, y:y + wy, x:x + wx] * weight.view(1, -1, 1, 1)).mean()\n return inner\n\n@wrap_objective()\ndef direction(layer, direction, batch=None):\n \"\"\"Visualize a direction\n\n InceptionV1 example:\n > direction = torch.rand(512, device=device)\n > obj = objectives.direction(layer='mixed4c', direction=direction)\n\n Args:\n layer: Name of layer in model (string)\n direction: Direction to visualize. torch.Tensor of shape (num_channels,)\n batch: Batch number (int)\n\n Returns:\n Objective\n\n \"\"\"\n\n @handle_batch(batch)\n def inner(model):\n return -torch.nn.CosineSimilarity(dim=1)(direction.reshape(\n (1, -1, 1, 1)), model(layer)).mean()\n\n return inner\n\n\n@wrap_objective()\ndef direction_neuron(layer,\n direction,\n x=None,\n y=None,\n batch=None):\n \"\"\"Visualize a single (x, y) position along the given direction\n\n Similar to the neuron objective, defaults to the center neuron.\n\n InceptionV1 example:\n > direction = torch.rand(512, device=device)\n > obj = objectives.direction_neuron(layer='mixed4c', direction=direction)\n\n Args:\n layer: Name of layer in model (string)\n direction: Direction to visualize. 
torch.Tensor of shape (num_channels,)\n    batch: Batch number (int)\n\n  Returns:\n    Objective\n\n  \"\"\"\n\n  @handle_batch(batch)\n  def inner(model):\n    layer_t = model(layer)\n    layer_t = _extract_act_pos(layer_t, x, y)\n    return -torch.nn.CosineSimilarity(dim=1)(direction.reshape(\n      (1, -1, 1, 1)), layer_t).mean()\n\n  return inner\n\n\n@wrap_objective()\ndef output_neuron(unit_index, data_key=None, batch=None):\n  \"\"\"Visualize a single output neuron\"\"\"\n  def inner(model):\n    return -model(data_key=data_key, unit_index=unit_index)\n  return inner\n\n\ndef _torch_blur(tensor, out_c=3):\n  device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n  depth = tensor.shape[1]\n  weight = np.zeros([depth, depth, out_c, out_c])\n  for ch in range(depth):\n    weight_ch = weight[ch, ch, :, :]\n    weight_ch[ : , : ] = 0.5\n    weight_ch[1:-1, 1:-1] = 1.0\n  weight_t = torch.tensor(weight).float().to(device)\n  conv_f = lambda t: F.conv2d(t, weight_t, None, 1, 1)\n  return conv_f(tensor) / conv_f(torch.ones_like(tensor))\n\n\n@wrap_objective()\ndef blur_input_each_step():\n  \"\"\"Minimizing this objective is equivalent to blurring input each step.\n  Optimizing (-k)*blur_input_each_step() is equivalent to:\n  input <- (1-k)*input + k*blur(input)\n  An operation that was used in early feature visualization work.\n  See Nguyen, et al., 2015.\n  \"\"\"\n  def inner(T):\n    t_input = T(\"input\")\n    with torch.no_grad():\n      t_input_blurred = _torch_blur(t_input)\n    return -0.5*torch.sum((t_input - t_input_blurred)**2)\n  return inner\n\n\n@wrap_objective()\ndef channel_interpolate(layer1, n_channel1, layer2, n_channel2):\n  \"\"\"Interpolate between layer1, n_channel1 and layer2, n_channel2.\n  Optimize for a convex combination of layer1, n_channel1 and\n  layer2, n_channel2, transitioning across the batch.\n  Args:\n    layer1: layer to optimize 100% at batch=0.\n    n_channel1: neuron index to optimize 100% at batch=0.\n    layer2: layer to optimize 100% at batch=N.\n    n_channel2: neuron index to optimize 100% at batch=N.\n  Returns:\n    Objective\n  \"\"\"\n  def inner(model):\n    batch_n = list(model(layer1).shape)[0]\n    arr1 = model(layer1)[:, n_channel1]\n    arr2 = model(layer2)[:, n_channel2]\n    weights = np.arange(batch_n) / (batch_n - 1)\n    sum_loss = 0\n    for n in range(batch_n):\n      sum_loss -= (1 - weights[n]) * arr1[n].mean()\n      sum_loss -= weights[n] * arr2[n].mean()\n    return sum_loss\n  return inner\n\n\n@wrap_objective()\ndef alignment(layer, decay_ratio=2):\n  \"\"\"Encourage neighboring images to be similar.\n  When visualizing the interpolation between two objectives, it's often\n  desirable to encourage analogous objects to be drawn in the same position,\n  to make them more comparable.\n  This term penalizes L2 distance between neighboring images, as evaluated at\n  layer.\n  In general, we find this most effective if used with a parameterization that\n  shares across the batch. 
(In fact, that works quite well by itself, so this\n function may just be obsolete.)\n Args:\n layer: layer to penalize at.\n decay_ratio: how much to decay penalty as images move apart in batch.\n Returns:\n Objective.\n \"\"\"\n def inner(model):\n batch_n = list(model(layer).shape)[0]\n layer_t = model(layer)\n accum = 0\n for d in [1, 2, 3, 4]:\n for i in range(batch_n - d):\n a, b = i, i + d\n arr_a, arr_b = layer_t[a], layer_t[b]\n accum += ((arr_a - arr_b) ** 2).mean() / decay_ratio ** float(d)\n return accum\n return inner\n\n\n@wrap_objective()\ndef diversity(layer):\n \"\"\"Encourage diversity between each batch element.\n\n A neural net feature often responds to multiple things, but naive feature\n visualization often only shows us one. If you optimize a batch of images,\n this objective will encourage them all to be different.\n\n In particular, it calculates the correlation matrix of activations at layer\n for each image, and then penalizes cosine similarity between them. This is\n very similar to ideas in style transfer, except we're *penalizing* style\n similarity instead of encouraging it.\n\n Args:\n layer: layer to evaluate activation correlations on.\n\n Returns:\n Objective.\n \"\"\"\n def inner(model):\n layer_t = model(layer)\n batch, channels, _, _ = layer_t.shape\n flattened = layer_t.view(batch, channels, -1)\n grams = torch.matmul(flattened, torch.transpose(flattened, 1, 2))\n grams = F.normalize(grams, p=2, dim=(1, 2))\n return -sum([ sum([ (grams[i]*grams[j]).sum()\n for j in range(batch) if j != i])\n for i in range(batch)]) / batch\n return inner\n\n\ndef as_objective(obj):\n \"\"\"Convert obj into Objective class.\n\n Strings of the form \"layer:n\" become the Objective channel(layer, n).\n Objectives are returned unchanged.\n\n Args:\n obj: string or Objective.\n\n Returns:\n Objective\n \"\"\"\n if isinstance(obj, Objective):\n return obj\n if callable(obj):\n return obj\n if isinstance(obj, str):\n layer, chn = obj.split(\":\")\n layer, chn = layer.strip(), int(chn)\n return channel(layer, chn)\n"
]
| [
[
"torch.nn.functional.normalize",
"numpy.zeros",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.arange",
"torch.tensor",
"torch.ones_like",
"torch.transpose",
"torch.nn.CosineSimilarity",
"torch.nn.functional.conv2d",
"torch.sum"
]
]
|
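Because `Objective` overloads `+`, `-`, and scalar `*`, the objectives above compose into weighted sums without any extra machinery. A sketch assuming lucent is installed; the layer name is illustrative and depends on the model being visualized:

```python
from lucent.optvis import objectives

# Maximize channel 42 of "mixed4a" while mildly penalizing batch similarity.
obj = objectives.channel("mixed4a", 42) - 1e-2 * objectives.diversity("mixed4a")
print(obj.description)
```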
wjmaddox/online_gp | [
"3bff4c347263a9b8b1f0aa801a986f4aaa019a66"
]
| [
"online_gp/utils/data.py"
]
| [
"import numpy as np\nimport pandas as pd\nimport torch\n\n\ndef get_datasets(name):\n if name == 'banana':\n train_x = pd.read_csv(\n \"https://raw.githubusercontent.com/thangbui/streaming_sparse_gp/master/data/banana_train_x.txt\",\n header=None)\n train_y = pd.read_csv(\n \"https://raw.githubusercontent.com/thangbui/streaming_sparse_gp/master/data/banana_train_y.txt\",\n header=None)\n train_x = torch.tensor(train_x.values).float().view(-1, 2)\n train_y = torch.tensor(train_y.values).long().view(-1)\n train_y[train_y < 0] = 0\n if torch.cuda.is_available():\n train_x, train_y = train_x.cuda(), train_y.cuda()\n train_dataset = torch.utils.data.TensorDataset(train_x, train_y)\n\n test_x = pd.read_csv(\n \"https://raw.githubusercontent.com/thangbui/streaming_sparse_gp/master/data/banana_test_x.txt\",\n header=None)\n test_y = pd.read_csv(\n \"https://raw.githubusercontent.com/thangbui/streaming_sparse_gp/master/data/banana_test_y.txt\",\n header=None)\n test_x = torch.tensor(test_x.values).float().view(-1, 2)\n test_y = torch.tensor(test_y.values).long().view(-1)\n test_y[test_y < 0] = 0\n if torch.cuda.is_available():\n test_x, test_y = test_x.cuda(), test_y.cuda()\n test_dataset = torch.utils.data.TensorDataset(test_x, test_y)\n\n if name == 'criteo':\n criteo_df = pd.read_csv('../data/criteo/train.txt', sep='\\t', header=None, low_memory=True, memory_map=True,\n nrows=1000)\n\n labels = criteo_df[0]\n int_features = criteo_df[list(range(1, 14))]\n cat_features = criteo_df[list(range(14, 40))]\n\n # log transform large values, standardize, and mean-fill\n int_features = int_features.applymap(lambda x: np.log(x) ** 2 if x > 2 else x)\n int_features = (int_features - int_features.mean()) / int_features.std()\n int_features.fillna(0, inplace=True)\n\n # TODO drop any categories in the test set that do not appear in the train set\n # drop low-frequency categories, convert to one-hot\n # cat_features = cat_features.apply(lambda x: x.mask(x.map(x.value_counts()) < 8, float('NaN')))\n cat_features = cat_features.apply(lambda x: x.astype('category'))\n cat_features = pd.get_dummies(cat_features, dummy_na=True)\n\n all_features = np.concatenate([int_features.values, cat_features.values], axis=1)\n all_features = torch.tensor(all_features).float()\n labels = torch.tensor(labels.values).long()\n row_perm = torch.randperm(all_features.size(0))\n all_features = all_features[row_perm]\n labels = labels[row_perm]\n\n num_train = int(all_features.size(0) * 0.9)\n num_test = all_features.size(0) - num_train\n dataset = torch.utils.data.TensorDataset(\n all_features,\n labels\n )\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [num_train, num_test])\n train_dataset = balance_classes(train_dataset, num_classes=2)\n test_dataset = balance_classes(test_dataset, num_classes=2)\n\n return train_dataset, test_dataset\n\n\ndef balance_classes(dataset, num_classes=2):\n inputs, targets = dataset[:]\n num_train = inputs.size(0)\n balanced_inputs, balanced_targets = [], []\n for class_idx in range(num_classes):\n num_class_examples = num_train // num_classes\n mask = (targets == class_idx)\n masked_inputs, masked_targets = inputs[mask], targets[mask]\n idxs = torch.randint(masked_inputs.size(0), (num_class_examples,))\n balanced_inputs.append(masked_inputs[idxs])\n balanced_targets.append(masked_targets[idxs])\n balanced_inputs = torch.cat(balanced_inputs)\n balanced_targets = torch.cat(balanced_targets)\n row_perm = torch.randperm(balanced_inputs.size(0))\n balanced_dataset = 
torch.utils.data.TensorDataset(\n balanced_inputs[row_perm],\n balanced_targets[row_perm]\n )\n return balanced_dataset\n"
]
| [
[
"numpy.concatenate",
"torch.cat",
"numpy.log",
"torch.utils.data.random_split",
"torch.cuda.is_available",
"torch.tensor",
"pandas.read_csv",
"torch.utils.data.TensorDataset",
"pandas.get_dummies"
]
]
|
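`balance_classes` equalizes the classes by sampling the same number of rows per class with replacement (`torch.randint`) and then reshuffling. A self-contained sketch of the same idea on toy tensors:

```python
import torch

inputs = torch.randn(10, 3)
targets = torch.tensor([0, 0, 0, 0, 0, 0, 0, 1, 1, 1])
per_class = inputs.size(0) // 2
parts = []
for c in (0, 1):
    masked = inputs[targets == c]
    idxs = torch.randint(masked.size(0), (per_class,))  # sample with replacement
    parts.append(masked[idxs])
balanced = torch.cat(parts)
balanced = balanced[torch.randperm(balanced.size(0))]   # 5 rows per class, shuffled
```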
IslamAlam/JUMPING-JIVE-Geodetic-Observatory-Wettzell | [
"53fef7a673a4bcdebd249d11016e8f5b0a2a67e0"
]
| [
"data/scripts/update_grafana.py"
]
| [
"# ! pip install grafana-api\n\nimport os \ndir_path = os.getcwd() #os.path.dirname(os.path.realpath(__file__))\nprint(dir_path)\n\nimport time\nfrom datetime import datetime\nimport pandas as pd\n\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import date, time, timedelta\n\nimport json\n\nfrom grafana_api.grafana_face import GrafanaFace\n\ngrafana_api = GrafanaFace(auth='eyJrIjoibDZvZGdzN3RFUDlOc0JQdnNSN0RmTWtnU3hWaWdJM1giLCJuIjoicHl0aG9uIiwiaWQiOjF9', host='138.246.233.65:3000')\ncurrent_time = datetime.now() # - relativedelta(days=1, hours=4)\nurl = \"https://ivscc.gsfc.nasa.gov/sessions/\" + str(date.today().year) + \"/\"\n\n\ndef add_datetime(x):\n return datetime.combine(x[\"start_date\"], x[\"start_time\"])\n\ndef calc_end_schedule(x):\n hours, minutes = map(int, x[\"Dur\"].split(':'))\n duration = timedelta(hours=hours, minutes=minutes)\n return x[\"start_schedule\"] + duration\n\ndef session_to_datafram(session_url):\n session_df = pd.read_html(session_url, header=0, parse_dates=True)[0] #, match='.+', flavor=None, header=None, index_col=None, skiprows=None, attrs=None, parse_dates=False, thousands=', ', encoding=None, decimal='.', converters=None, na_values=None, keep_default_na=True, displayed_only=True\n session_df = session_df.dropna(how='all', subset=['Code'])\n new_dates, new_times = zip(*[(datetime.strptime(d, '%Y-%m-%d %H:%M').date(), datetime.strptime(d, '%Y-%m-%d %H:%M').time()) for d in session_df['Start']]) #datetime.datetime.strptime(d, '%Y-%m-%d %H:%M')\n session_df = session_df.assign(start_date=new_dates, start_time=new_times)\n Stations = session_df[\"Stations\"].str.split(\"\\s\\s\")\n session_df[\"Stations_Codes\"] = Stations\n # Create row for start_schedule\n session_df[\"start_schedule\"] = session_df.apply(add_datetime, axis=1) #passes a Series object, row-wise\n session_df[\"end_schedule\"] = session_df.apply(calc_end_schedule, axis=1) #passes a Series object, row-wise\n\n return session_df[['Name','Code','start_schedule','end_schedule', 'Stations_Codes','DB Code', 'Ops Center', 'Correlator', 'Status', 'Analysis']]\n\ndef session_at_time(session_df, current_time):\n return session_df.loc[(session_df['start_schedule'] <= current_time) & (session_df['end_schedule'] >= current_time )]\n\ndef telescope_in_session(current_session_df, telescope_name):\n if any(telescope_name in s for s in current_session_df[\"Stations_Codes\"].values.tolist()):\n print(telescope_name + \" is in the session list\")\n telescope_participation = True\n else:\n print(telescope_name + \" is not in the session list\")\n telescope_participation = False\n return telescope_participation\n\n\nfinal_session_df = session_to_datafram(url)\ncurrent_session_df = session_at_time(final_session_df, current_time)\n\n#json_str = df_today.to_json(orient='records')\nheader = ['Name','Code','start_schedule','end_schedule', 'Stations_Codes','DB Code', 'Ops Center', 'Correlator', 'Status', 'Analysis']\ncurrent_session_df.to_csv(dir_path + \"/\" +'sessions.csv', columns = header)\ncurrent_session_df\n\n# WETTZ13S (Ws)\nWETTZ13S = telescope_in_session(current_session_df, \"Ws\")\n\n# WETTZ13N (Wn)\nWETTZ13N = telescope_in_session(current_session_df, \"Wn\")\n\n# WETTZELL (Wz)\nWETTZELL = telescope_in_session(current_session_df, \"Wz\")\n\n# Change here imp to have new col for star_sch + Dur \ncurrent_session_df\n\n\nrawQwery_template = \\\n\"\"\"\nSELECT \n h.name AS \"station_id\" ,\n h.available AS \"triggers\", \n i.location_lat AS \"lat\", \n i.location_lon AS \"lon\"\nFROM 
host_inventory AS i, hosts AS h\nWHERE \n    i.hostid=h.hostid\n    AND i.location_lat!='' AND i.location_lon!=''\n    AND ( \n    h.name LIKE '%WETTZELL%'\n    OR h.name LIKE '%WETTZ13S%' \n    OR h.name LIKE '%WETTZ13N%'\n    )\n\"\"\"\n\n\ndef de_activate_telescope(rawQwery_template, telescope_active, telescope_name):\n    if telescope_active:\n        print(telescope_name + \" is in the current session\")\n        return rawQwery_template.replace(\"NULL_\" + telescope_name, telescope_name)\n        # grafana_table_dashboard['dashboard']['panels'][0]['targets'][0]['hide'] = False\n    else:\n        print(telescope_name + \" is not in the current session\")\n        return rawQwery_template.replace(telescope_name, \"NULL_\" + telescope_name)\n\n\nrawQwery = de_activate_telescope(rawQwery_template, WETTZELL, \"WETTZELL\")\nrawQwery = de_activate_telescope(rawQwery, WETTZ13S, \"WETTZ13S\")\nrawQwery = de_activate_telescope(rawQwery, WETTZ13N, \"WETTZ13N\")\n\n\n# Create or update a dashboard\ngrafana_table_dashboard_json = \\\n{'dashboard': {'annotations': {'list': [{'builtIn': 1,\n      'datasource': '-- Grafana --',\n      'enable': True,\n      'hide': True,\n      'iconColor': 'rgba(0, 211, 255, 1)',\n      'name': 'Annotations & Alerts',\n      'type': 'dashboard'}]},\n  'editable': True,\n  'gnetId': None,\n  'graphTooltip': 0,\n  'id': 6,\n  'links': [],\n  'panels': [{'columns': [],\n    'datasource': 'PostgreSQL',\n    'fontSize': '100%',\n    'gridPos': {'h': 9, 'w': 12, 'x': 0, 'y': 0},\n    'id': 2,\n    'links': [],\n    'options': {},\n    'pageSize': None,\n    'scroll': True,\n    'showHeader': True,\n    'sort': {'col': 0, 'desc': True},\n    'styles': [{'alias': 'Time',\n      'dateFormat': 'YYYY-MM-DD HH:mm:ss',\n      'pattern': 'Time',\n      'type': 'date'},\n     {'alias': '',\n      'colorMode': None,\n      'colors': ['rgba(245, 54, 54, 0.9)',\n       'rgba(237, 129, 40, 0.89)',\n       'rgba(50, 172, 45, 0.97)'],\n      'decimals': 2,\n      'pattern': '/.*/',\n      'thresholds': [],\n      'type': 'number',\n      'unit': 'short'}],\n    'targets': [{'format': 'table',\n      'group': [],\n      'hide': False,\n      'metricColumn': 'none',\n      'rawQuery': True,\n      'rawSql': 'SELECT \\r\\n    h.name AS \"station_id\" , /* LIKE \\'D%\\'    SUBSTRING(h.name, \\'WETTZELL*([0-9]{1,3})\\') AS \"station_id\" */\\r\\n    h.available AS \"triggers\", \\r\\n    i.location_lat AS \"lat\", \\r\\n    i.location_lon AS \"lon\"\\r\\nFROM host_inventory AS i, hosts AS h\\r\\nWHERE \\r\\n    i.hostid=h.hostid AND h.name LIKE \\'%WETTZELL%\\' /* AND i.location_lon!=\\'\\'  AND h.name SIMILAR TO \\'*.WETTZELL.*\\' */\\r\\n    \\r\\n/* GROUP BY \"station_id\" */',\n      'refId': 'A',\n      'select': [[{'params': ['value'], 'type': 'column'}]],\n      'timeColumn': 'time',\n      'where': [{'name': '$__timeFilter', 'params': [], 'type': 'macro'}]}],\n    'timeFrom': None,\n    'timeShift': None,\n    'title': 'Panel Title',\n    'transform': 'table',\n    'type': 'table'},\n   {'circleMaxSize': '50',\n    'circleMinSize': '10',\n    'colors': ['#56a64b',\n     'rgba(151, 170, 179, 0.89)',\n     'rgba(116, 153, 255, 0.97)',\n     'rgba(255, 200, 89, 0.97)',\n     'rgba(255, 160, 89, 0.97)',\n     'rgba(233, 118, 89, 0.97)',\n     '#e02f44'],\n    'datasource': 'PostgreSQL',\n    'decimals': 0,\n    'esMetric': 'Count',\n    'gridPos': {'h': 9, 'w': 12, 'x': 12, 'y': 0},\n    'hideEmpty': False,\n    'hideZero': False,\n    'id': 4,\n    'initialZoom': '14',\n    'locationData': 'table',\n    'mapCenter': 'custom',\n    'mapCenterLatitude': '49.145',\n    'mapCenterLongitude': '12.89 ',\n    'maxDataPoints': 1,\n    'mouseWheelZoom': False,\n    'options': {},\n    'showLegend': False,\n    'stickyLabels': False,\n    'tableQueryOptions': {'geohashField': 'geohash',\n     'labelField': 'station_id',\n     'latitudeField': 'lat',\n     
'longitudeField': 'lon',\n 'metricField': 'triggers',\n 'queryType': 'coordinates'},\n 'targets': [{'format': 'table',\n 'group': [],\n 'hide': False,\n 'metricColumn': 'none',\n 'rawQuery': True,\n 'rawSql': 'SELECT \\r\\n h.name AS \"station_id\" , /* LIKE \\'D%\\' SUBSTRING(h.name, \\'WETTZELL*([0-9]{1,3})\\') AS \"station_id\" */\\r\\n h.available AS \"triggers\", \\r\\n i.location_lat AS \"lat\", \\r\\n i.location_lon AS \"lon\"\\r\\nFROM host_inventory AS i, hosts AS h\\r\\nWHERE \\r\\n i.hostid=h.hostid AND h.name LIKE \\'%WETTZELL%\\' \\r\\n OR h.name LIKE \\'%WETTZ13S%\\' \\r\\n OR h.name LIKE \\'%WETTZ13N%\\'\\r\\n \\r\\n /* AND i.location_lon!=\\'\\' AND h.name SIMILAR TO \\'*.WETTZELL.*\\' */\\r\\n \\r\\n/* GROUP BY \"station_id\" */\\r\\n',\n 'refId': 'A',\n 'select': [[{'params': ['value'], 'type': 'column'}]],\n 'timeColumn': 'time',\n 'where': [{'name': '$__timeFilter', 'params': [], 'type': 'macro'}]}],\n 'thresholds': '0,1,2,3,4,5',\n 'timeFrom': None,\n 'timeShift': None,\n 'title': 'Panel Title',\n 'type': 'grafana-worldmap-panel',\n 'unitPlural': '',\n 'unitSingle': '',\n 'valueName': 'current'}],\n 'schemaVersion': 18,\n 'style': 'dark',\n 'tags': ['python_map'],\n 'templating': {'list': []},\n 'time': {'from': 'now-6h', 'to': 'now'},\n 'timepicker': {'refresh_intervals': ['5s',\n '10s',\n '30s',\n '1m',\n '5m',\n '15m',\n '30m',\n '1h',\n '2h',\n '1d'],\n 'time_options': ['5m', '15m', '1h', '6h', '12h', '24h', '2d', '7d', '30d']},\n 'timezone': '',\n 'title': 'Python Dashboard',\n 'uid': '-0BCN4sWz',\n 'version': 15},\n 'meta': {'canAdmin': False,\n 'canEdit': True,\n 'canSave': True,\n 'canStar': True,\n 'created': '2020-01-21T07:39:23Z',\n 'createdBy': 'admin',\n 'expires': '0001-01-01T00:00:00Z',\n 'folderId': 0,\n 'folderTitle': 'General',\n 'folderUrl': '',\n 'hasAcl': False,\n 'isFolder': False,\n 'provisioned': False,\n 'provisionedExternalId': '',\n 'slug': 'python-dashboard',\n 'type': 'db',\n 'updated': '2020-01-21T17:26:03Z',\n 'updatedBy': 'admin',\n 'url': '/d/-0BCN4sWz/python-dashboard',\n 'version': 15}}\n\nprint(\"Update Table\")\ngrafana_table_dashboard_json['dashboard']['panels'][0]['targets'][0]['rawSql'] = rawQwery\nprint(\"Update Map\")\ngrafana_table_dashboard_json['dashboard']['panels'][1]['targets'][0]['rawSql'] = rawQwery\n\n# Create or update a dashboard\n# grafana_api.dashboard.update_dashboard(dashboard={'dashboard': {...}, 'folderId': 0, 'overwrite': True})\nupdate = True\nif update == True:\n if 'overwrite' not in grafana_table_dashboard_json.keys(): # make sure to overwrite not to have any issue\n grafana_table_dashboard_json['overwrite'] = True\n grafana_api.dashboard.update_dashboard(grafana_table_dashboard_json)\n print(\"Dashboard is updated!!\")\n\n"
]
| [
[
"pandas.read_html"
]
]
|
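`de_activate_telescope` in the script above toggles stations in the dashboard's raw SQL purely by string rewriting: prefixing a host name with `NULL_` makes the `LIKE` pattern match no real host, and stripping the prefix re-enables it. A tiny standalone illustration:

```python
query = "h.name LIKE '%WETTZ13S%' OR h.name LIKE '%WETTZ13N%'"

# Deactivate Ws: '%NULL_WETTZ13S%' matches no host in the inventory.
query = query.replace("WETTZ13S", "NULL_WETTZ13S")

# Reactivate later by removing the prefix again.
query = query.replace("NULL_WETTZ13S", "WETTZ13S")
print(query)
```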
dials-src/dials | [
"25055c1f6164dc33e672e7c5c6a9c5a35e870660"
]
| [
"src/dials/report/plots.py"
]
| [
"\"\"\"\nThis module defines a number of general plots, which may be relevant to\nfor reports of several programs.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom io import StringIO\n\nimport numpy as np\nfrom scipy.optimize import least_squares\nfrom scipy.stats import norm\n\nfrom cctbx import uctbx\nfrom dxtbx import flumpy\nfrom mmtbx.scaling.absolute_scaling import expected_intensity, scattering_information\nfrom mmtbx.scaling.matthews import matthews_rupp\nfrom scitbx.array_family import flex\n\nlogger = logging.getLogger(\"dials\")\n\n\ndef make_image_range_table(experiments, batch_manager):\n \"\"\"Make a summary table of image ranges.\"\"\"\n table = [\n [\n \"Experiment number\",\n \"scan image range\",\n \"image range in use\",\n \"associated batch range\",\n \"Image template\",\n ]\n ]\n for i, exp in enumerate(experiments):\n if exp.scan:\n valid_image_ranges = \",\".join(\n str(j) for j in exp.scan.get_valid_image_ranges(exp.identifier)\n )\n image_range = exp.scan.get_image_range()\n template = exp.imageset.get_template()\n b_0 = batch_manager._batch_increments[i]\n batch_params = batch_manager.batch_params[i]\n batch_range = batch_params[\"range\"]\n batches = (b_0, b_0 + (batch_range[1] - batch_range[0]))\n table.append(\n [\n str(batch_params[\"id\"]),\n image_range,\n valid_image_ranges,\n batches,\n template,\n ]\n )\n return table\n\n\ndef scale_rmerge_vs_batch_plot(batch_manager, rmerge_vs_b, scales_vs_b=None):\n reduced_batches = batch_manager.reduced_batches\n shapes, annotations, text = batch_manager.batch_plot_shapes_and_annotations()\n if len(annotations) > 30:\n # at a certain point the annotations become unreadable\n annotations = None\n\n return {\n \"scale_rmerge_vs_batch\": {\n \"data\": [\n (\n {\n \"x\": reduced_batches,\n \"y\": scales_vs_b,\n \"type\": \"scatter\",\n \"name\": \"Scale\",\n \"opacity\": 0.75,\n \"text\": text,\n }\n if scales_vs_b is not None\n else {}\n ),\n {\n \"x\": reduced_batches,\n \"y\": rmerge_vs_b,\n \"yaxis\": \"y2\",\n \"type\": \"scatter\",\n \"name\": \"R<sub>merge</sub>\",\n \"opacity\": 0.75,\n \"text\": text,\n },\n ],\n \"layout\": {\n \"title\": \"Scale and R<sub>merge</sub> vs batch\",\n \"xaxis\": {\"title\": \"N\"},\n \"yaxis\": {\"title\": \"Scale\", \"rangemode\": \"tozero\"},\n \"yaxis2\": {\n \"title\": \"R<sub>merge</sub>\",\n \"overlaying\": \"y\",\n \"side\": \"right\",\n \"rangemode\": \"tozero\",\n },\n \"shapes\": shapes,\n \"annotations\": annotations,\n },\n }\n }\n\n\ndef i_over_sig_i_vs_batch_plot(batch_manager, i_sig_i_vs_batch):\n\n reduced_batches = batch_manager.reduced_batches\n shapes, annotations, text = batch_manager.batch_plot_shapes_and_annotations()\n if len(annotations) > 30:\n # at a certain point the annotations become unreadable\n annotations = None\n\n return {\n \"i_over_sig_i_vs_batch\": {\n \"data\": [\n {\n \"x\": reduced_batches,\n \"y\": i_sig_i_vs_batch,\n \"type\": \"scatter\",\n \"name\": \"I/sigI vs batch\",\n \"opacity\": 0.75,\n \"text\": text,\n }\n ],\n \"layout\": {\n \"title\": \"<I/σ(I)> vs batch\",\n \"xaxis\": {\"title\": \"N\"},\n \"yaxis\": {\"title\": \"<I/σ(I)>\", \"rangemode\": \"tozero\"},\n \"shapes\": shapes,\n \"annotations\": annotations,\n },\n }\n }\n\n\ndef i_over_sig_i_vs_i_plot(intensities, sigmas, label=None):\n \"\"\"Plot unscaled I / sigma_adjusted vs unscaled I.\"\"\"\n sel = (intensities > 0) & (sigmas > 0)\n intensities = intensities.select(sel)\n sigmas = sigmas.select(sel)\n x = flex.log10(intensities)\n y = intensities / 
sigmas\n\n H, xedges, yedges = np.histogram2d(\n x.as_numpy_array(), y.as_numpy_array(), bins=(200, 200)\n )\n nonzeros = np.nonzero(H)\n z = np.empty(H.shape)\n z[:] = np.NAN\n z[nonzeros] = H[nonzeros]\n key = f\"i_over_sig_i_vs_i_{label}\" if label is not None else \"i_over_sig_i_vs_i\"\n title = \"I/σ(I) vs I\"\n title = title + f\" (error model {label})\" if label is not None else title\n return {\n key: {\n \"data\": [\n {\n \"x\": xedges.tolist(),\n \"y\": yedges.tolist(),\n \"z\": z.transpose().tolist(),\n \"type\": \"heatmap\",\n \"name\": \"Isigma distribution\",\n \"colorbar\": {\n \"title\": \"Number of reflections\",\n \"titleside\": \"right\",\n },\n \"colorscale\": \"Viridis\",\n }\n ],\n \"layout\": {\n \"title\": title,\n \"xaxis\": {\"title\": \"log I\"},\n \"yaxis\": {\"title\": \"I/σ(I)\"},\n },\n \"help\": \"\"\"\\\nThis plot shows the distribution of I/σ(I) as a function of I, which can\ngive indication of the errors within the dataset. The I/σ(I) asymptotic\nlimit can be seen at the plateau in the top-right of the plot, if the measured\ndata are strong enough.\n\n[1] Diederichs, K. (2010). Acta Cryst. D, 66(6), 733-740.\nhttps://doi.org/10.1107/S0907444910014836\n\"\"\",\n }\n }\n\n\ndef d_star_sq_to_d_ticks(d_star_sq, nticks):\n min_d_star_sq = min(d_star_sq)\n dstep = (max(d_star_sq) - min_d_star_sq) / nticks\n tickvals = [min_d_star_sq + (i * dstep) for i in range(nticks)]\n ticktext = [f\"{uctbx.d_star_sq_as_d(dsq):.2f}\" for dsq in tickvals]\n return tickvals, ticktext\n\n\nclass IntensityStatisticsPlots:\n \"\"\"Generate plots for intensity-derived statistics.\"\"\"\n\n def __init__(\n self,\n intensities,\n anomalous=False,\n n_resolution_bins=20,\n xtriage_analyses=None,\n run_xtriage_analysis=True,\n ):\n self.n_bins = n_resolution_bins\n self._xanalysis = xtriage_analyses\n if anomalous:\n intensities = intensities.as_anomalous_array()\n intensities.setup_binner(n_bins=self.n_bins)\n merged = intensities.merge_equivalents()\n self.binner = intensities.binner()\n self.merged_intensities = merged.array()\n self.multiplicities = merged.redundancies().complete_array(new_data_value=0)\n intensities.setup_binner_d_star_sq_step(auto_binning=True)\n self.wilson_plot_result = intensities.wilson_plot(use_binning=True)\n mr = matthews_rupp(intensities.crystal_symmetry(), out=StringIO())\n self.n_residues = mr.n_residues\n if not self._xanalysis and run_xtriage_analysis:\n # imports needed here or won't work, unsure why.\n from mmtbx.scaling.xtriage import master_params as xtriage_master_params\n from mmtbx.scaling.xtriage import xtriage_analyses\n\n xtriage_params = xtriage_master_params.fetch(sources=[]).extract()\n xtriage_params.scaling.input.xray_data.skip_sanity_checks = True\n try:\n self._xanalysis = xtriage_analyses(\n miller_obs=self.merged_intensities,\n unmerged_obs=intensities,\n text_out=\"silent\",\n params=xtriage_params,\n )\n except RuntimeError:\n logger.warning(\"Xtriage analysis failed.\", exc_info=True)\n self._xanalysis = None\n\n def generate_resolution_dependent_plots(self):\n d = self.second_moments_plot()\n d.update(self.wilson_plot())\n return d\n\n def generate_miscellanous_plots(self):\n d = self.cumulative_intensity_distribution_plot()\n d.update(self.l_test_plot())\n d.update(self.multiplicity_histogram())\n return d\n\n def multiplicity_histogram(self):\n \"\"\"Generate histogram data for acentric and centric multiplicities.\"\"\"\n mult_acentric = self.multiplicities.select_acentric().data()\n mult_centric = 
self.multiplicities.select_centric().data()\n\n multiplicities_acentric = {}\n multiplicities_centric = {}\n\n for x in sorted(set(mult_acentric)):\n multiplicities_acentric[x] = mult_acentric.count(x)\n for x in sorted(set(mult_centric)):\n multiplicities_centric[x] = mult_centric.count(x)\n\n return {\n \"multiplicities\": {\n \"data\": [\n {\n \"x\": list(multiplicities_acentric.keys()),\n \"y\": list(multiplicities_acentric.values()),\n \"type\": \"bar\",\n \"name\": \"Acentric\",\n \"opacity\": 0.75,\n },\n {\n \"x\": list(multiplicities_centric.keys()),\n \"y\": list(multiplicities_centric.values()),\n \"type\": \"bar\",\n \"name\": \"Centric\",\n \"opacity\": 0.75,\n },\n ],\n \"layout\": {\n \"title\": \"Distribution of multiplicities\",\n \"xaxis\": {\"title\": \"Multiplicity\"},\n \"yaxis\": {\n \"title\": \"Frequency\",\n # 'rangemode': 'tozero'\n },\n \"bargap\": 0,\n \"barmode\": \"overlay\",\n },\n }\n }\n\n def wilson_plot(self):\n if not self._xanalysis or not self._xanalysis.wilson_scaling:\n return {}\n\n dstarsq = self.wilson_plot_result.binner.bin_centers(2)\n observed = self.wilson_plot_result.data[1:-1]\n # The binning of the wilson plot can result in some bins with 'None' values\n if None in observed:\n observed = [i for i in observed if i is not None]\n dstarsq = flex.double(\n [i for i, j in zip(dstarsq, observed) if j is not None]\n )\n if not observed:\n return {}\n expected = expected_intensity(\n scattering_information(n_residues=self.n_residues),\n dstarsq,\n b_wilson=self._xanalysis.iso_b_wilson,\n p_scale=self._xanalysis.wilson_scaling.iso_p_scale,\n )\n\n x1 = observed\n x2 = expected.mean_intensity\n # ignore the start and end of the plot, which may be unreliable\n if len(x1) > 10:\n x1 = x1[1:-3]\n x2 = x2[1:-3]\n\n def residuals(k):\n \"\"\"Calculate the residual for an overall scale factor\"\"\"\n return x1 - k * x2\n\n best = least_squares(residuals, 1.0)\n\n mean_I_obs_theory = expected.mean_intensity * best.x\n tickvals_wilson, ticktext_wilson = d_star_sq_to_d_ticks(dstarsq, nticks=5)\n\n return {\n \"wilson_intensity_plot\": {\n \"data\": (\n [\n {\n \"x\": list(dstarsq),\n \"y\": list(observed),\n \"type\": \"scatter\",\n \"name\": \"Observed\",\n },\n {\n \"x\": list(dstarsq),\n \"y\": list(mean_I_obs_theory),\n \"type\": \"scatter\",\n \"name\": \"Expected\",\n },\n ]\n ),\n \"layout\": {\n \"title\": \"Wilson intensity plot\",\n \"xaxis\": {\n \"title\": \"Resolution (Å)\",\n \"tickvals\": tickvals_wilson,\n \"ticktext\": ticktext_wilson,\n },\n \"yaxis\": {\"type\": \"log\", \"title\": \"Mean(I)\", \"rangemode\": \"tozero\"},\n },\n }\n }\n\n def cumulative_intensity_distribution_plot(self):\n if not self._xanalysis or not self._xanalysis.twin_results:\n return {}\n nz_test = self._xanalysis.twin_results.nz_test\n return {\n \"cumulative_intensity_distribution\": {\n \"data\": [\n {\n \"x\": list(nz_test.z),\n \"y\": list(nz_test.ac_obs),\n \"type\": \"scatter\",\n \"name\": \"Acentric observed\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(31, 119, 180)\"},\n },\n {\n \"x\": list(nz_test.z),\n \"y\": list(nz_test.c_obs),\n \"type\": \"scatter\",\n \"name\": \"Centric observed\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(255, 127, 14)\"},\n },\n {\n \"x\": list(nz_test.z),\n \"y\": list(nz_test.ac_untwinned),\n \"type\": \"scatter\",\n \"name\": \"Acentric theory\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(31, 119, 180)\", \"dash\": \"dot\"},\n \"opacity\": 0.8,\n },\n {\n \"x\": list(nz_test.z),\n \"y\": 
list(nz_test.c_untwinned),\n \"type\": \"scatter\",\n \"name\": \"Centric theory\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(255, 127, 14)\", \"dash\": \"dot\"},\n \"opacity\": 0.8,\n },\n ],\n \"layout\": {\n \"title\": \"Cumulative intensity distribution\",\n \"xaxis\": {\"title\": \"z\", \"range\": (0, 1)},\n \"yaxis\": {\"title\": \"P(Z <= Z)\", \"range\": (0, 1)},\n },\n }\n }\n\n def l_test_plot(self):\n if not self._xanalysis or not self._xanalysis.twin_results:\n return {}\n l_test = self._xanalysis.twin_results.l_test\n return {\n \"l_test\": {\n \"data\": [\n {\n \"x\": list(l_test.l_values),\n \"y\": list(l_test.l_cumul_untwinned),\n \"type\": \"scatter\",\n \"name\": \"Untwinned\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(31, 119, 180)\", \"dash\": \"dashdot\"},\n },\n {\n \"x\": list(l_test.l_values),\n \"y\": list(l_test.l_cumul_perfect_twin),\n \"type\": \"scatter\",\n \"name\": \"Perfect twin\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(31, 119, 180)\", \"dash\": \"dot\"},\n \"opacity\": 0.8,\n },\n {\n \"x\": list(l_test.l_values),\n \"y\": list(l_test.l_cumul),\n \"type\": \"scatter\",\n \"name\": \"Observed\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(255, 127, 14)\"},\n },\n ],\n \"layout\": {\n \"title\": \"L test (Padilla and Yeates)\",\n \"xaxis\": {\"title\": \"|l|\", \"range\": (0, 1)},\n \"yaxis\": {\"title\": \"P(L >= l)\", \"range\": (0, 1)},\n },\n }\n }\n\n def second_moments_plot(self):\n\n acentric = self.merged_intensities.select_acentric()\n centric = self.merged_intensities.select_centric()\n if acentric.size():\n acentric.setup_binner(n_bins=self.n_bins)\n second_moments_acentric = acentric.second_moment_of_intensities(\n use_binning=True\n )\n else:\n second_moments_acentric = None\n if centric.size():\n centric.setup_binner(n_bins=self.n_bins)\n second_moments_centric = centric.second_moment_of_intensities(\n use_binning=True\n )\n else:\n second_moments_centric = None\n\n second_moment_d_star_sq = []\n if acentric.size():\n second_moment_d_star_sq.extend(\n second_moments_acentric.binner.bin_centers(2)\n )\n if centric.size():\n second_moment_d_star_sq.extend(second_moments_centric.binner.bin_centers(2))\n tickvals_2nd_moment, ticktext_2nd_moment = d_star_sq_to_d_ticks(\n second_moment_d_star_sq, nticks=5\n )\n\n return {\n \"second_moments\": {\n \"data\": [\n (\n {\n \"x\": list(\n second_moments_acentric.binner.bin_centers(2)\n ), # d_star_sq\n \"y\": second_moments_acentric.data[1:-1],\n \"type\": \"scatter\",\n \"name\": \"<I<sup>2</sub>> acentric\",\n }\n if acentric.size()\n else {}\n ),\n (\n {\n \"x\": list(\n second_moments_centric.binner.bin_centers(2)\n ), # d_star_sq\n \"y\": second_moments_centric.data[1:-1],\n \"type\": \"scatter\",\n \"name\": \"<I<sup>2</sub>> centric\",\n }\n if centric.size()\n else {}\n ),\n ],\n \"layout\": {\n \"title\": \"Second moment of I\",\n \"xaxis\": {\n \"title\": \"Resolution (Å)\",\n \"tickvals\": tickvals_2nd_moment,\n \"ticktext\": ticktext_2nd_moment,\n },\n \"yaxis\": {\"title\": \"<I<sup>2</sub>>\", \"rangemode\": \"tozero\"},\n },\n }\n }\n\n\nclass ResolutionPlotsAndStats:\n \"\"\"\n Use iotbx dataset statistics objects to make plots and tables for reports.\n\n This class allows the generation of plots of various properties as a\n function of resolution as well as a statistics table and summary table,\n using the data from two iotbx.dataset_statistics objects, with\n anomalous=False/True.\n \"\"\"\n\n def __init__(\n self, dataset_statistics, 
anomalous_dataset_statistics, is_centric=False\n ):\n self.dataset_statistics = dataset_statistics\n self.anomalous_dataset_statistics = anomalous_dataset_statistics\n self.d_star_sq_bins = [\n 0.5 * (uctbx.d_as_d_star_sq(b.d_max) + uctbx.d_as_d_star_sq(b.d_min))\n for b in self.dataset_statistics.bins\n ]\n self.d_star_sq_tickvals, self.d_star_sq_ticktext = d_star_sq_to_d_ticks(\n self.d_star_sq_bins, nticks=5\n )\n self.is_centric = is_centric\n\n def make_all_plots(self, cc_one_half_method=None):\n \"\"\"Make a dictionary containing all available resolution-dependent plots.\"\"\"\n d = self.cc_one_half_plot(method=cc_one_half_method)\n d.update(self.i_over_sig_i_plot())\n d.update(self.completeness_plot())\n d.update(self.multiplicity_vs_resolution_plot())\n d.update(self.r_pim_plot())\n return d\n\n def cc_one_half_plot(self, method=None):\n \"\"\"Make a plot of cc half against resolution.\"\"\"\n\n if method == \"sigma_tau\":\n cc_one_half_bins = [\n bin_stats.cc_one_half_sigma_tau\n if bin_stats.cc_one_half_sigma_tau\n else 0.0\n for bin_stats in self.dataset_statistics.bins\n ]\n cc_one_half_critical_value_bins = [\n bin_stats.cc_one_half_sigma_tau_critical_value\n if bin_stats.cc_one_half_sigma_tau_critical_value\n else 0.0\n for bin_stats in self.dataset_statistics.bins\n ]\n else:\n cc_one_half_bins = [\n bin_stats.cc_one_half if bin_stats.cc_one_half else 0.0\n for bin_stats in self.dataset_statistics.bins\n ]\n cc_one_half_critical_value_bins = [\n bin_stats.cc_one_half_critical_value\n if bin_stats.cc_one_half_critical_value\n else 0.0\n for bin_stats in self.dataset_statistics.bins\n ]\n cc_anom_bins = [\n bin_stats.cc_anom if bin_stats.cc_anom else 0.0\n for bin_stats in self.dataset_statistics.bins\n ]\n cc_anom_critical_value_bins = [\n bin_stats.cc_anom_critical_value\n if bin_stats.cc_anom_critical_value\n else 0.0\n for bin_stats in self.dataset_statistics.bins\n ]\n\n return {\n \"cc_one_half\": cc_half_plot(\n d_star_sq=self.d_star_sq_bins,\n cc_half=cc_one_half_bins,\n cc_anom=cc_anom_bins if not self.is_centric else None,\n cc_half_critical_values=cc_one_half_critical_value_bins,\n cc_anom_critical_values=cc_anom_critical_value_bins\n if not self.is_centric\n else None,\n cc_half_fit=None,\n d_min=None,\n )\n }\n\n def i_over_sig_i_plot(self):\n \"\"\"Make a plot of <I/sigI> against resolution.\"\"\"\n i_over_sig_i_bins = [\n bin_stats.i_over_sigma_mean for bin_stats in self.dataset_statistics.bins\n ]\n\n return {\n \"i_over_sig_i\": {\n \"data\": [\n {\n \"x\": self.d_star_sq_bins, # d_star_sq\n \"y\": i_over_sig_i_bins,\n \"type\": \"scatter\",\n \"name\": \"I/sigI vs resolution\",\n }\n ],\n \"layout\": {\n \"title\": \"<I/σ(I)> vs resolution\",\n \"xaxis\": {\n \"title\": \"Resolution (Å)\",\n \"tickvals\": self.d_star_sq_tickvals,\n \"ticktext\": self.d_star_sq_ticktext,\n },\n \"yaxis\": {\"title\": \"<I/σ(I)>\", \"rangemode\": \"tozero\"},\n },\n }\n }\n\n def r_pim_plot(self):\n \"\"\"Make a plot of <I/sigI> against resolution.\"\"\"\n r_pim_bins = [bin_stats.r_pim for bin_stats in self.dataset_statistics.bins]\n\n return {\n \"r_pim\": {\n \"data\": [\n {\n \"x\": self.d_star_sq_bins, # d_star_sq\n \"y\": r_pim_bins,\n \"type\": \"scatter\",\n \"name\": \"R<sub>pim</sub> vs resolution\",\n }\n ],\n \"layout\": {\n \"title\": \"R<sub>pim</sub> vs resolution\",\n \"xaxis\": {\n \"title\": \"Resolution (Å)\",\n \"tickvals\": self.d_star_sq_tickvals,\n \"ticktext\": self.d_star_sq_ticktext,\n },\n \"yaxis\": {\"title\": \"R<sub>pim</sub>\", \"rangemode\": 
\"tozero\"},\n },\n }\n }\n\n def completeness_plot(self):\n \"\"\"Make a plot of completeness against resolution.\"\"\"\n completeness_bins = [\n bin_stats.completeness for bin_stats in self.dataset_statistics.bins\n ]\n if self.anomalous_dataset_statistics:\n anom_completeness_bins = [\n bin_stats.anom_completeness\n for bin_stats in self.anomalous_dataset_statistics.bins\n ]\n\n return {\n \"completeness\": {\n \"data\": [\n {\n \"x\": self.d_star_sq_bins,\n \"y\": completeness_bins,\n \"type\": \"scatter\",\n \"name\": \"Completeness\",\n },\n (\n {\n \"x\": self.d_star_sq_bins,\n \"y\": anom_completeness_bins,\n \"type\": \"scatter\",\n \"name\": \"Anomalous completeness\",\n }\n if not self.is_centric and self.anomalous_dataset_statistics\n else {}\n ),\n ],\n \"layout\": {\n \"title\": \"Completeness vs resolution\",\n \"xaxis\": {\n \"title\": \"Resolution (Å)\",\n \"tickvals\": self.d_star_sq_tickvals,\n \"ticktext\": self.d_star_sq_ticktext,\n },\n \"yaxis\": {\"title\": \"Completeness\", \"range\": (0, 1)},\n },\n }\n }\n\n def multiplicity_vs_resolution_plot(self):\n \"\"\"Make a plot of multiplicity against resolution.\"\"\"\n multiplicity_bins = [\n bin_stats.mean_redundancy for bin_stats in self.dataset_statistics.bins\n ]\n if self.anomalous_dataset_statistics:\n anom_multiplicity_bins = [\n bin_stats.mean_redundancy\n for bin_stats in self.anomalous_dataset_statistics.bins\n ]\n\n return {\n \"multiplicity_vs_resolution\": {\n \"data\": [\n {\n \"x\": self.d_star_sq_bins,\n \"y\": multiplicity_bins,\n \"type\": \"scatter\",\n \"name\": \"Multiplicity\",\n },\n (\n {\n \"x\": self.d_star_sq_bins,\n \"y\": anom_multiplicity_bins,\n \"type\": \"scatter\",\n \"name\": \"Anomalous multiplicity\",\n }\n if not self.is_centric and self.anomalous_dataset_statistics\n else {}\n ),\n ],\n \"layout\": {\n \"title\": \"Multiplicity vs resolution\",\n \"xaxis\": {\n \"title\": \"Resolution (Å)\",\n \"tickvals\": self.d_star_sq_tickvals,\n \"ticktext\": self.d_star_sq_ticktext,\n },\n \"yaxis\": {\"title\": \"Multiplicity\", \"rangemode\": \"tozero\"},\n },\n }\n }\n\n def merging_statistics_table(self, cc_half_method=None):\n\n headers = [\n \"Resolution (Å)\",\n \"N(obs)\",\n \"N(unique)\",\n \"Multiplicity\",\n \"Completeness\",\n \"Mean I\",\n \"Mean I/σ(I)\",\n \"R<sub>merge</sub>\",\n \"R<sub>meas</sub>\",\n \"R<sub>pim</sub>\",\n \"R<sub>anom</sub>\",\n \"CC<sub>½</sub>\",\n ]\n if not self.is_centric:\n headers.append(\"CC<sub>ano</sub>\")\n rows = []\n\n def safe_format(format_str, item):\n return format_str % item if item is not None else \"\"\n\n for bin_stats in self.dataset_statistics.bins:\n row = [\n f\"{bin_stats.d_max:.2f} - {bin_stats.d_min:.2f}\",\n bin_stats.n_obs,\n bin_stats.n_uniq,\n f\"{bin_stats.mean_redundancy:.2f}\",\n f\"{100 * bin_stats.completeness:.2f}\",\n f\"{bin_stats.i_mean:.1f}\",\n f\"{bin_stats.i_over_sigma_mean:.1f}\",\n safe_format(\"%.3f\", bin_stats.r_merge),\n safe_format(\"%.3f\", bin_stats.r_meas),\n safe_format(\"%.3f\", bin_stats.r_pim),\n safe_format(\"%.3f\", bin_stats.r_anom),\n ]\n if cc_half_method == \"sigma_tau\":\n row.append(\n \"%.3f%s\"\n % (\n bin_stats.cc_one_half_sigma_tau,\n \"*\" if bin_stats.cc_one_half_sigma_tau_significance else \"\",\n )\n )\n else:\n row.append(\n \"%.3f%s\"\n % (\n bin_stats.cc_one_half,\n \"*\" if bin_stats.cc_one_half_significance else \"\",\n )\n )\n\n if not self.is_centric:\n row.append(\n \"%.3f%s\"\n % (bin_stats.cc_anom, \"*\" if bin_stats.cc_anom_significance else \"\")\n )\n 
rows.append(row)\n\n merging_stats_table = [headers]\n merging_stats_table.extend(rows)\n\n return merging_stats_table\n\n def overall_statistics_table(self, cc_half_method=None):\n\n headers = [\"\", \"Overall\", \"Low resolution\", \"High resolution\"]\n\n stats = (\n self.dataset_statistics.overall,\n self.dataset_statistics.bins[0],\n self.dataset_statistics.bins[-1],\n )\n\n rows = [\n [\"Resolution (Å)\"] + [f\"{s.d_max:.2f} - {s.d_min:.2f}\" for s in stats],\n [\"Observations\"] + [\"%i\" % s.n_obs for s in stats],\n [\"Unique reflections\"] + [\"%i\" % s.n_uniq for s in stats],\n [\"Multiplicity\"] + [f\"{s.mean_redundancy:.1f}\" for s in stats],\n [\"Completeness\"] + [f\"{s.completeness * 100:.2f}%\" for s in stats],\n # ['Mean intensity'] + ['%.1f' %s.i_mean for s in stats],\n [\"Mean I/σ(I)\"] + [f\"{s.i_over_sigma_mean:.1f}\" for s in stats],\n [\"R<sub>merge</sub>\"] + [f\"{s.r_merge:.3f}\" for s in stats],\n [\"R<sub>meas</sub>\"] + [f\"{s.r_meas:.3f}\" for s in stats],\n [\"R<sub>pim</sub>\"] + [f\"{s.r_pim:.3f}\" for s in stats],\n ]\n\n if cc_half_method == \"sigma_tau\":\n rows.append(\n [\"CC<sub>½</sub>\"] + [f\"{s.cc_one_half_sigma_tau:.3f}\" for s in stats]\n )\n else:\n rows.append([\"CC<sub>½</sub>\"] + [f\"{s.cc_one_half:.3f}\" for s in stats])\n rows = [[f\"<strong>{r[0]}</strong>\"] + r[1:] for r in rows]\n\n overall_stats_table = [headers]\n overall_stats_table.extend(rows)\n\n return overall_stats_table\n\n def statistics_tables(self):\n \"\"\"Generate the overall and by-resolution tables.\"\"\"\n return (self.overall_statistics_table(), self.merging_statistics_table())\n\n\nclass AnomalousPlotter:\n def __init__(self, anomalous_array, strong_cutoff=0.0, n_bins=20):\n self.intensities_anom = anomalous_array.map_to_asu()\n self.merged = self.intensities_anom.merge_equivalents(\n use_internal_variance=False\n ).array()\n self.n_bins = n_bins\n self.strong_cutoff = strong_cutoff\n if strong_cutoff > 0.0:\n self.low_res_intensities_anom = self.intensities_anom.resolution_filter(\n d_min=strong_cutoff\n )\n self.strong_merged = self.low_res_intensities_anom.merge_equivalents(\n use_internal_variance=False\n ).array()\n\n def make_plots(self):\n d = {}\n if self.strong_cutoff > 0.0:\n d.update(self.del_anom_normal_plot(self.strong_merged, self.strong_cutoff))\n d.update(\n self.del_anom_scatter_plot(\n self.low_res_intensities_anom, self.strong_cutoff\n )\n )\n else:\n d.update(self.del_anom_normal_plot(self.merged))\n d.update(self.del_anom_correlation_ratio(self.intensities_anom))\n return d\n\n def del_anom_correlation_ratio(self, unmerged_intensities):\n\n acentric = unmerged_intensities.select_acentric()\n centric = unmerged_intensities.select_centric()\n correl_ratios_acentric, correl_ratios_centric = ([], [])\n\n def calc_correl_ratios(data):\n correl_ratios = []\n data.setup_binner(n_bins=self.n_bins)\n for i_bin in data.binner().range_used():\n sel = data.binner().selection(i_bin)\n data_sel = data.select(sel)\n if data_sel.size() > 0:\n arr1, arr2 = data_sel.half_dataset_anomalous_correlation(\n return_split_datasets=1\n )\n dano1 = arr1.anomalous_differences().data()\n dano2 = arr2.anomalous_differences().data()\n if dano1.size() > 0:\n rmsd_11 = (\n flex.sum(flex.pow2(dano1 - dano2)) / (2.0 * dano1.size())\n ) ** 0.5\n rmsd_1min1 = (\n flex.sum(flex.pow2(dano1 + dano2)) / (2.0 * dano1.size())\n ) ** 0.5\n correl_ratios.append(rmsd_1min1 / rmsd_11)\n else:\n correl_ratios.append(0.0)\n else:\n correl_ratios.append(0.0)\n return correl_ratios\n\n if 
acentric.size() > 0:\n correl_ratios_acentric = calc_correl_ratios(acentric)\n if all(list(flex.double(correl_ratios_acentric) == 0.0)):\n correl_ratios_acentric = []\n else:\n d_star_sq_acentric = acentric.binner().bin_centers(2)\n actickvals, acticktext = d_star_sq_to_d_ticks(\n d_star_sq_acentric, nticks=5\n )\n if centric.size() > 0:\n correl_ratios_centric = calc_correl_ratios(centric)\n if all(list(flex.double(correl_ratios_centric) == 0.0)):\n correl_ratios_centric = []\n else:\n d_star_sq_centric = centric.binner().bin_centers(2)\n ctickvals, cticktext = d_star_sq_to_d_ticks(\n d_star_sq_acentric, nticks=5\n )\n\n if not (correl_ratios_acentric or correl_ratios_centric):\n return {}\n if correl_ratios_acentric:\n tickvals = actickvals\n ticktext = acticktext\n else:\n tickvals = ctickvals\n ticktext = cticktext\n return {\n \"anom_correl_plot\": {\n \"data\": [\n (\n {\n \"x\": list(d_star_sq_acentric),\n \"y\": correl_ratios_acentric,\n \"type\": \"lines\",\n \"name\": \"Anomalous correlation ratio (acentric)\",\n }\n if correl_ratios_acentric\n else {}\n ),\n (\n {\n \"x\": list(d_star_sq_centric),\n \"y\": correl_ratios_centric,\n \"type\": \"lines\",\n \"name\": \"Anomalous correlation ratio (centric)\",\n }\n if correl_ratios_centric\n else {}\n ),\n ],\n \"layout\": {\n \"title\": \"Anomalous R.M.S. correlation ratio (acentric reflections)\",\n \"xaxis\": {\n \"title\": \"Resolution (Å)\",\n \"tickvals\": tickvals,\n \"ticktext\": ticktext,\n },\n \"yaxis\": {\"anchor\": \"x\", \"title\": \"rms correlation ratio\"},\n },\n \"help\": \"\"\"\\\nThis plot shows the significance of the anomalous signal, as shown in the\nanomalous scatter plot, by calculating the ratio of the width of the signal along\nthe diagonal (a measure of the anomalous signal) over the width of the signal\nperpendicular to the diagonal (a measure of the error).\n\n[1] P. Evans, Acta Cryst. (2006). D62, 72-82\nhttps://doi.org/10.1107/S0907444905036693\n\"\"\",\n }\n }\n\n def del_anom_scatter_plot(self, unmerged_intensities, strong_cutoff=0.0):\n \"\"\"Make a scatter plot of the anomalous differences of half sets.\"\"\"\n\n acentric = unmerged_intensities.select_acentric()\n if acentric.size() == 0:\n return {}\n arr1, arr2 = acentric.half_dataset_anomalous_correlation(\n return_split_datasets=1\n )\n dano1 = arr1.anomalous_differences()\n dano2 = arr2.anomalous_differences()\n assert dano1.indices().all_eq(dano2.indices())\n if dano1.size() == 0:\n return {}\n max_val = max(flex.max(dano1.data()), flex.max(dano2.data()))\n min_val = min(flex.min(dano1.data()), flex.min(dano2.data()))\n\n title = \"Correlation of half-set differences\"\n plotname = \"anom_scatter_plot\"\n if strong_cutoff > 0.0:\n title += f\" (d > {strong_cutoff:.2f})\"\n plotname += \"_lowres\"\n else:\n title += \" (all data)\"\n return {\n plotname: {\n \"data\": [\n {\n \"x\": list(dano1.data()),\n \"y\": list(dano2.data()),\n \"type\": \"scatter\",\n \"mode\": \"markers\",\n \"size\": 1,\n \"name\": \"half-set anomalous differences (acentrics)\",\n },\n {\n \"x\": [min_val - 1, max_val + 1],\n \"y\": [min_val - 1, max_val + 1],\n \"type\": \"scatter\",\n \"mode\": \"lines\",\n \"name\": \"D1 = D2\",\n \"color\": \"rgb(0,0,0)\",\n },\n ],\n \"layout\": {\n \"title\": title,\n \"xaxis\": {\"anchor\": \"y\", \"title\": \"Delta I1\"},\n \"yaxis\": {\"anchor\": \"x\", \"title\": \"Delta I2\"},\n },\n \"help\": \"\"\"\\\nThis plot shows the correlation of the anomalous differences for the data divided\ninto two half sets. 
For each reflection, the I+ and I- observations are divided\ninto two sets, and two differences are calculated; Delta I1 = I+(1) - I-(1),\nDelta I2 = I+(2) - I-(2). Perfect data would therefore have all points along\nthe diagonal, in reality an elliptical distribution is seen in the presence of\nanomalous signal, or a spherical distribution for data with no anomalous signal.\n\n[1] P. Evans, Acta Cryst. (2006). D62, 72-82\nhttps://doi.org/10.1107/S0907444905036693\n\"\"\",\n }\n }\n\n @staticmethod\n def del_anom_normal_plot(intensities, strong_cutoff=0.0):\n \"\"\"Make a normal probability plot of the normalised anomalous differences.\"\"\"\n diff_array = intensities.anomalous_differences()\n if not diff_array.data().size():\n return {}\n delta = diff_array.data() / diff_array.sigmas()\n\n n = delta.size()\n y = np.sort(flumpy.to_numpy(delta))\n d = 0.5 / n\n v = np.linspace(start=d, stop=1.0 - d, endpoint=True, num=n)\n x = norm.ppf(v)\n\n H, xedges, yedges = np.histogram2d(x, y, bins=(200, 200))\n nonzeros = np.nonzero(H)\n z = np.empty(H.shape)\n z[:] = np.NAN\n z[nonzeros] = H[nonzeros]\n\n # also make a histogram\n histy = flex.histogram(flumpy.from_numpy(y), n_slots=100)\n # make a gaussian for reference also\n n = y.size\n width = histy.slot_centers()[1] - histy.slot_centers()[0]\n gaussian = []\n from math import exp, pi\n\n for x in histy.slot_centers():\n gaussian.append(n * width * exp(-(x**2) / 2.0) / ((2.0 * pi) ** 0.5))\n\n title = \"Normal probability plot of anomalous differences\"\n plotname = \"normal_distribution_plot\"\n if strong_cutoff > 0.0:\n title += f\" (d > {strong_cutoff:.2f})\"\n plotname += \"_lowres\"\n else:\n title += \" (all data)\"\n plotname += \"_highres\"\n return {\n plotname: {\n \"data\": [\n {\n \"x\": xedges.tolist(),\n \"y\": yedges.tolist(),\n \"z\": z.transpose().tolist(),\n \"type\": \"heatmap\",\n \"name\": \"normalised deviations\",\n \"colorbar\": {\n \"title\": \"Number of reflections\",\n \"titleside\": \"right\",\n },\n \"colorscale\": \"Viridis\",\n },\n {\n \"x\": [-5, 5],\n \"y\": [-5, 5],\n \"type\": \"scatter\",\n \"mode\": \"lines\",\n \"name\": \"z = m\",\n \"color\": \"rgb(0,0,0)\",\n },\n ],\n \"layout\": {\n \"title\": title,\n \"xaxis\": {\n \"anchor\": \"y\",\n \"title\": \"expected delta\",\n \"range\": [-4, 4],\n },\n \"yaxis\": {\n \"anchor\": \"x\",\n \"title\": \"observed delta\",\n \"range\": [-5, 5],\n },\n },\n \"help\": \"\"\"\\\n This plot shows the normalised anomalous differences, sorted in order and\n plotted against the expected order based on a normal distribution model.\n A true normal distribution of deviations would give the straight line indicated.\n\n [1] P. L. Howell and G. D. Smith, J. Appl. Cryst. (1992). 25, 81-86\n https://doi.org/10.1107/S0021889891010385\n [2] P. Evans, Acta Cryst. (2006). 
D62, 72-82\n https://doi.org/10.1107/S0907444905036693\n \"\"\",\n }\n }\n\n\ndef cc_half_plot(\n d_star_sq,\n cc_half,\n cc_anom=None,\n cc_half_critical_values=None,\n cc_anom_critical_values=None,\n cc_half_fit=None,\n d_min=None,\n):\n d_star_sq_tickvals, d_star_sq_ticktext = d_star_sq_to_d_ticks(d_star_sq, nticks=5)\n return {\n \"data\": [\n {\n \"x\": list(d_star_sq),\n \"y\": list(cc_half),\n \"type\": \"scatter\",\n \"name\": \"CC<sub>½</sub>\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(31, 119, 180)\"},\n },\n (\n {\n \"x\": list(d_star_sq),\n \"y\": list(cc_half_critical_values),\n \"type\": \"scatter\",\n \"name\": \"CC<sub>½</sub> critical value (p=0.01)\",\n \"line\": {\"color\": \"rgb(31, 119, 180)\", \"dash\": \"dot\"},\n }\n if cc_half_critical_values\n else {}\n ),\n (\n {\n \"x\": list(d_star_sq),\n \"y\": list(cc_anom),\n \"type\": \"scatter\",\n \"name\": \"CC-anom\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(255, 127, 14)\"},\n }\n if cc_anom\n else {}\n ),\n (\n {\n \"x\": list(d_star_sq),\n \"y\": list(cc_anom_critical_values),\n \"type\": \"scatter\",\n \"name\": \"CC-anom critical value (p=0.01)\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(255, 127, 14)\", \"dash\": \"dot\"},\n }\n if cc_anom_critical_values\n else {}\n ),\n (\n {\n \"x\": list(d_star_sq),\n \"y\": list(cc_half_fit),\n \"type\": \"scatter\",\n \"name\": \"CC<sub>½</sub> fit\",\n \"line\": {\"color\": \"rgb(47, 79, 79)\"},\n }\n if cc_half_fit\n else {}\n ),\n (\n {\n \"x\": [uctbx.d_as_d_star_sq(d_min)] * 2,\n \"y\": [0, 1],\n \"type\": \"scatter\",\n \"name\": f\"d_min = {d_min:.2f} Å\",\n \"mode\": \"lines\",\n \"line\": {\"color\": \"rgb(169, 169, 169)\", \"dash\": \"dot\"},\n }\n if d_min\n else {}\n ),\n ],\n \"layout\": {\n \"title\": \"CC<sub>½</sub> vs resolution\",\n \"xaxis\": {\n \"title\": \"Resolution (Å)\",\n \"tickvals\": d_star_sq_tickvals,\n \"ticktext\": d_star_sq_ticktext,\n },\n \"yaxis\": {\n \"title\": \"CC<sub>½</sub>\",\n \"range\": [min(cc_half + cc_anom if cc_anom else [] + [0]), 1],\n },\n },\n \"help\": \"\"\"\\\nThe correlation coefficients, CC<sub>½</sub>, between random half-datasets. A correlation\ncoefficient of +1 indicates good correlation, and 0 indicates no correlation.\nCC<sub>½</sub> is typically close to 1 at low resolution, falling off to close to zero at\nhigher resolution. A typical resolution cutoff based on CC<sub>½</sub> is around 0.3-0.5.\n\n[1] Karplus, P. A., & Diederichs, K. (2012). Science, 336(6084), 1030-1033.\n https://doi.org/10.1126/science.1218231\n[2] Diederichs, K., & Karplus, P. A. (2013). Acta Cryst D, 69(7), 1215-1222.\n https://doi.org/10.1107/S0907444913001121\n[3] Evans, P. R., & Murshudov, G. N. (2013). Acta Cryst D, 69(7), 1204-1214.\n https://doi.org/10.1107/S0907444913000061\n\"\"\",\n }\n"
]
| [
[
"numpy.histogram2d",
"scipy.stats.norm.ppf",
"numpy.empty",
"numpy.nonzero",
"numpy.linspace",
"scipy.optimize.least_squares"
]
]
|
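A minimal sketch of the numpy/scipy calls catalogued for the plotting row above: the NaN-masked 2D histogram and the single-scale-factor fit mirror its normal-probability and Wilson-plot patterns. All data here are synthetic assumptions, not values from the source file.

import numpy as np
from scipy.stats import norm
from scipy.optimize import least_squares

rng = np.random.default_rng(0)
observed = np.sort(rng.normal(size=1000))
# expected quantiles for a normal probability plot
quantiles = np.linspace(0.5 / 1000, 1 - 0.5 / 1000, num=1000)
expected = norm.ppf(quantiles)

# 2D histogram with empty bins blanked out (NaN) for heatmap display
H, xedges, yedges = np.histogram2d(expected, observed, bins=(50, 50))
z = np.empty(H.shape)
z[:] = np.nan
nonzeros = np.nonzero(H)
z[nonzeros] = H[nonzeros]

# least-squares fit of one overall scale factor k (observed ~ k * expected)
fit = least_squares(lambda k: observed - k * expected, x0=1.0)
print(round(float(fit.x[0]), 3))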
daniele21/Stock_Analysis | [
"6f6a936e1ccce80e20143c698f065b7e4637df6b"
]
| [
"core/preprocessing/embeddings.py"
]
| [
"from tqdm import tqdm\nfrom typing import Dict, Text\nfrom constants.config import EMBEDDING_DIM\nfrom constants.paths import GLOVE_PATH\nimport numpy as np\n\nfrom core.preprocessing.tokenizers import MyTokenizer\n\n\ndef load_pretrained_glove_embeddings(tokenizer: MyTokenizer,\n embedding_path: Text =GLOVE_PATH):\n embeddings_index = {}\n\n f = open(embedding_path)\n\n for line in tqdm(f, desc='> Loading Embeddings'):\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n embedding_matrix = np.zeros((tokenizer.n_words+1, EMBEDDING_DIM))\n for word, i in tqdm(tokenizer.vocab().items(), total=tokenizer.n_words):\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix\n\n\n"
]
| [
[
"numpy.asarray",
"numpy.zeros"
]
]
|
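A minimal sketch of the embedding-matrix pattern in the row above, using the numpy.asarray/numpy.zeros calls it lists; the two inline vectors stand in for a GloVe file and the vocabulary is a made-up assumption.

import numpy as np

glove_lines = ["the 0.1 0.2 0.3", "cat 0.4 0.5 0.6"]  # stand-in for a GloVe file
embeddings_index = {}
for line in glove_lines:
    values = line.split()
    embeddings_index[values[0]] = np.asarray(values[1:], dtype="float32")

vocab = {"the": 1, "cat": 2, "dog": 3}       # word -> row index, row 0 reserved
embedding_matrix = np.zeros((len(vocab) + 1, 3))
for word, i in vocab.items():
    vector = embeddings_index.get(word)
    if vector is not None:                   # unknown words stay all-zeros
        embedding_matrix[i] = vector
print(embedding_matrix)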
arknoll/laspy | [
"8b57c38ac3fe666e527406589fa734a50f9ee86a"
]
| [
"tests/test_common.py"
]
| [
"from pathlib import Path\n\nimport numpy as np\nimport pytest\n\nimport laspy\nfrom laspy.lib import write_then_read_again\nfrom . import conftest\n\nsimple_las = conftest.SIMPLE_LAS_FILE_PATH\nsimple_laz = conftest.SIMPLE_LAZ_FILE_PATH\nvegetation1_3_las = conftest.VEGETATION1_3_LAS_FILE_PATH\ntest1_4_las = conftest.TEST1_4_LAS_FILE_PATH\nextra_bytes_las = conftest.EXTRA_BYTES_LAS_FILE_PATH\nextra_bytes_laz = conftest.EXTRA_BYTES_LAZ_FILE_PATH\nplane_laz = conftest.PLANE_LAZ_FILE_PATH\n\nif not laspy.LazBackend.detect_available():\n do_compression = [False]\n all_file_paths = [simple_las, vegetation1_3_las, test1_4_las, extra_bytes_las]\nelse:\n do_compression = [False, True]\n all_file_paths = [\n simple_las,\n simple_laz,\n vegetation1_3_las,\n test1_4_las,\n plane_laz,\n extra_bytes_laz,\n extra_bytes_las,\n ]\n\n\[email protected](params=all_file_paths)\ndef las(request):\n return laspy.read(request.param)\n\n\[email protected](params=[simple_las, vegetation1_3_las])\ndef all_las_but_1_4(request):\n return laspy.read(request.param)\n\n\[email protected](params=[simple_las, vegetation1_3_las, test1_4_las, extra_bytes_las])\ndef las_path_fixture(request):\n return request.param\n\n\[email protected](params=[simple_laz, extra_bytes_laz, plane_laz])\ndef all_laz_path(request):\n return request.param\n\n\ndef dim_does_not_exists(las, dim_name):\n try:\n _ = getattr(las, dim_name)\n except AttributeError:\n return True\n return False\n\n\ndef dim_does_exists(las, dim_name):\n try:\n _ = getattr(las, dim_name)\n except AttributeError:\n return False\n return True\n\n\ndef test_change_format(las):\n in_version = las.header.version\n\n las = laspy.convert(las, point_format_id=2)\n las = write_then_read_again(las)\n assert las.points.point_format.id == 2\n assert las.header.point_format.id == 2\n assert las.header.version == in_version\n assert dim_does_not_exists(las, \"gps_time\")\n\n las = laspy.convert(las, point_format_id=1)\n las = write_then_read_again(las)\n assert las.points.point_format.id == 1\n assert las.header.point_format.id == 1\n assert las.header.version == in_version\n assert dim_does_not_exists(las, \"red\")\n assert dim_does_not_exists(las, \"green\")\n assert dim_does_not_exists(las, \"blue\")\n\n las = laspy.convert(las, point_format_id=0)\n las = write_then_read_again(las)\n assert las.points.point_format.id == 0\n assert las.header.point_format.id == 0\n assert las.header.version == in_version\n assert dim_does_not_exists(las, \"red\")\n assert dim_does_not_exists(las, \"green\")\n assert dim_does_not_exists(las, \"blue\")\n assert dim_does_not_exists(las, \"gps_time\")\n\n las = laspy.convert(las, point_format_id=8)\n las = write_then_read_again(las)\n assert str(las.header.version) == \"1.4\"\n assert las.points.point_format.id == 8\n assert las.header.point_format.id == 8\n assert dim_does_exists(las, \"red\")\n assert dim_does_exists(las, \"green\")\n assert dim_does_exists(las, \"blue\")\n assert dim_does_exists(las, \"nir\")\n\n las = laspy.convert(las, point_format_id=7)\n las = write_then_read_again(las)\n assert str(las.header.version) == \"1.4\"\n assert las.points.point_format.id == 7\n assert las.header.point_format.id == 7\n assert dim_does_exists(las, \"red\")\n assert dim_does_exists(las, \"green\")\n assert dim_does_exists(las, \"blue\")\n assert dim_does_not_exists(las, \"nir\")\n\n las = laspy.convert(las, point_format_id=6)\n las = write_then_read_again(las)\n assert str(las.header.version) == \"1.4\"\n assert las.points.point_format.id == 6\n assert 
las.header.point_format.id == 6\n assert dim_does_not_exists(las, \"red\")\n assert dim_does_not_exists(las, \"green\")\n assert dim_does_not_exists(las, \"blue\")\n assert dim_does_not_exists(las, \"nir\")\n\n\ndef test_rw_all_set_one(las):\n for dim_name in las.point_format.dimension_names:\n las[dim_name][:] = 1\n\n for dim_name in las.point_format.dimension_names:\n assert np.alltrue(las[dim_name] == 1), \"{} not equal\".format(dim_name)\n\n las2 = write_then_read_again(las)\n\n for dim_name in las.point_format.dimension_names:\n assert np.alltrue(las[dim_name] == las2[dim_name]), \"{} not equal\".format(\n dim_name\n )\n\n\ndef test_coords_do_not_break(las):\n xs, ys, zs = las.x, las.y, las.z\n\n las.x = xs\n las.y = ys\n las.z = zs\n\n assert np.allclose(xs, las.x)\n assert np.allclose(ys, las.y)\n assert np.allclose(zs, las.z)\n\n\ndef test_coords_when_setting_offsets_and_scales(las):\n new_las = laspy.create()\n\n new_las.header.offsets = las.header.offsets\n new_las.header.scales = las.header.scales\n\n new_las.x = las.x\n new_las.y = las.y\n new_las.z = las.z\n\n assert np.allclose(las.x, new_las.x)\n assert np.allclose(las.y, new_las.y)\n assert np.allclose(las.z, new_las.z)\n\n\ndef test_coords_when_using_create_from_header(las):\n new_las = laspy.LasData(las.header)\n\n new_las.x = las.x\n new_las.y = las.y\n new_las.z = las.z\n\n assert np.allclose(las.x, new_las.x)\n assert np.allclose(las.y, new_las.y)\n assert np.allclose(las.z, new_las.z)\n\n\ndef test_slicing(las):\n las.points = las.points[len(las.points) // 2 :]\n\n\[email protected](\"do_compress\", do_compression)\ndef test_can_write_then_re_read_files(las, do_compress):\n _las = write_then_read_again(las, do_compress=do_compress)\n\n\ndef test_point_record_setitem_scaled_view():\n las = laspy.read(simple_las)\n las.add_extra_dim(\n laspy.ExtraBytesParams(\n \"lol\", \"uint64\", scales=np.array([2.0]), offsets=np.array([0.0])\n )\n )\n\n new_values = np.ones(len(las.points)) * 4\n las.lol = new_values\n\n assert np.allclose(las.lol, new_values)\n\n\ndef test_laspy_file_raises():\n with pytest.raises(laspy.errors.LaspyException):\n laspy.file.File(\"some path\")\n"
]
| [
[
"numpy.allclose",
"numpy.array",
"numpy.alltrue"
]
]
|
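A minimal sketch of the three numpy assertions the test row above lists. Note np.alltrue is a deprecated alias of np.all (removed in NumPy 2.0), so this assumes a NumPy 1.x environment like the tests themselves.

import numpy as np

coords = np.array([1.0, 2.0, 3.0])
roundtrip = coords + 1e-9                  # tiny write/read perturbation
assert np.allclose(coords, roundtrip)      # equal within tolerance
assert np.alltrue(coords == coords)        # strict element-wise check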
wconnell/pytorch-metric-learning | [
"bf2b7675b7b80e5762b75428d51e4ab0a861e710"
]
| [
"tests/miners/test_batch_hard_miner.py"
]
| [
"import unittest \r\nfrom .. import TEST_DTYPES\r\nimport torch\r\nfrom pytorch_metric_learning.miners import BatchHardMiner\r\nfrom pytorch_metric_learning.utils import common_functions as c_f\r\nimport numpy as np\r\nfrom pytorch_metric_learning.distances import CosineSimilarity, LpDistance\r\n\r\nclass TestBatchHardMiner(unittest.TestCase):\r\n\r\n @classmethod\r\n def setUpClass(self):\r\n self.device = torch.device('cuda')\r\n self.dist_miner = BatchHardMiner(distance=LpDistance(normalize_embeddings=False))\r\n self.normalized_dist_miner = BatchHardMiner(distance=LpDistance(normalize_embeddings=True))\r\n self.normalized_dist_miner_squared = BatchHardMiner(distance=LpDistance(normalize_embeddings=True, power=2))\r\n self.sim_miner = BatchHardMiner(distance=CosineSimilarity())\r\n self.labels = torch.LongTensor([0, 0, 1, 1, 0, 2, 1, 1, 1])\r\n self.correct_a = torch.LongTensor([0, 1, 2, 3, 4, 6, 7, 8]).to(self.device)\r\n self.correct_p = torch.LongTensor([4, 4, 8, 8, 0, 2, 2, 2]).to(self.device)\r\n self.correct_n = [torch.LongTensor([2, 2, 1, 4, 3, 5, 5, 5]).to(self.device), torch.LongTensor([2, 2, 1, 4, 5, 5, 5, 5]).to(self.device)]\r\n\r\n def test_dist_mining(self):\r\n for dtype in TEST_DTYPES:\r\n embeddings = torch.arange(9).type(dtype).unsqueeze(1).to(self.device)\r\n a, p, n = self.dist_miner(embeddings, self.labels)\r\n self.helper(a, p, n)\r\n self.assertTrue(self.dist_miner.hardest_pos_pair_dist == 6)\r\n self.assertTrue(self.dist_miner.hardest_neg_pair_dist == 1)\r\n\r\n def test_normalized_dist_mining(self):\r\n for dtype in TEST_DTYPES:\r\n angles = [0, 20, 40, 60, 80, 100, 120, 140, 160]\r\n embeddings = torch.tensor([c_f.angle_to_coord(a) for a in angles], dtype=dtype).to(self.device)\r\n a, p, n = self.normalized_dist_miner(embeddings, self.labels)\r\n self.helper(a, p, n)\r\n correct_hardest_pos_pair_dist = torch.sqrt(torch.sum((embeddings[2]-embeddings[8])**2)).item()\r\n correct_hardest_neg_pair_dist = torch.sqrt(torch.sum((embeddings[1]-embeddings[2])**2)).item()\r\n places = 2 if dtype == torch.float16 else 5\r\n self.assertAlmostEqual(self.normalized_dist_miner.hardest_pos_pair_dist, correct_hardest_pos_pair_dist, places=places)\r\n self.assertAlmostEqual(self.normalized_dist_miner.hardest_neg_pair_dist, correct_hardest_neg_pair_dist, places=places)\r\n\r\n def test_normalized_dist_squared_mining(self):\r\n for dtype in TEST_DTYPES:\r\n angles = [0, 20, 40, 60, 80, 100, 120, 140, 160]\r\n embeddings = torch.tensor([c_f.angle_to_coord(a) for a in angles], dtype=dtype).to(self.device)\r\n a, p, n = self.normalized_dist_miner_squared(embeddings, self.labels)\r\n self.helper(a, p, n)\r\n correct_hardest_pos_pair_dist = torch.sum((embeddings[2]-embeddings[8])**2).item()\r\n correct_hardest_neg_pair_dist = torch.sum((embeddings[1]-embeddings[2])**2).item()\r\n places = 2 if dtype == torch.float16 else 5\r\n self.assertAlmostEqual(self.normalized_dist_miner_squared.hardest_pos_pair_dist, correct_hardest_pos_pair_dist, places=places)\r\n self.assertAlmostEqual(self.normalized_dist_miner_squared.hardest_neg_pair_dist, correct_hardest_neg_pair_dist, places=places) \r\n\r\n def test_sim_mining(self):\r\n for dtype in TEST_DTYPES:\r\n angles = [0, 20, 40, 60, 80, 100, 120, 140, 160]\r\n embeddings = torch.tensor([c_f.angle_to_coord(a) for a in angles], dtype=dtype).to(self.device)\r\n a, p, n = self.sim_miner(embeddings, self.labels)\r\n self.helper(a, p, n)\r\n places = 2 if dtype == torch.float16 else 5\r\n self.assertAlmostEqual(self.sim_miner.hardest_pos_pair_dist, 
np.cos(np.radians(120)), places=places)\r\n self.assertAlmostEqual(self.sim_miner.hardest_neg_pair_dist, np.cos(np.radians(20)), places=places)\r\n\r\n def helper(self, a, p, n):\r\n self.assertTrue(torch.equal(a, self.correct_a))\r\n self.assertTrue(torch.equal(p, self.correct_p))\r\n self.assertTrue(any(torch.equal(n, cn) for cn in self.correct_n))\r\n\r\n def test_empty_output(self):\r\n batch_size = 32\r\n for dtype in TEST_DTYPES:\r\n embeddings = torch.randn(batch_size, 64).type(dtype).to(self.device)\r\n labels = torch.arange(batch_size)\r\n for miner in [self.dist_miner, self.normalized_dist_miner, self.normalized_dist_miner_squared, self.sim_miner]:\r\n a, p, n = miner(embeddings, labels)\r\n self.assertTrue(len(a)==0)\r\n self.assertTrue(len(p)==0)\r\n self.assertTrue(len(n)==0)\r\n self.assertTrue(miner.hardest_pos_pair_dist == 0)\r\n self.assertTrue(miner.hardest_neg_pair_dist == 0)\r\n self.assertTrue(miner.hardest_triplet_dist == 0)\r\n"
]
| [
[
"torch.device",
"torch.arange",
"numpy.radians",
"torch.LongTensor",
"torch.equal",
"torch.randn",
"torch.sum"
]
]
|
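A minimal sketch of the torch/numpy calls in the miner tests above, on CPU so it runs without CUDA; the labels and the printed comparison are illustrative, not a claim about the miner's behaviour.

import numpy as np
import torch

device = torch.device("cpu")
embeddings = torch.arange(9).float().unsqueeze(1).to(device)
labels = torch.LongTensor([0, 0, 1, 1, 0, 2, 1, 1, 1]).to(device)

# squared distance between two embeddings, as the tests compute it
pair_dist = torch.sum((embeddings[2] - embeddings[8]) ** 2).item()
print(pair_dist, np.cos(np.radians(120)))          # 36.0, -0.5

assert torch.equal(labels, labels.clone())
batch = torch.randn(32, 64)    # random embeddings, as in the empty-output test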
shikivi/- | [
"e83cc9342115801e1464e9907a971801dbd68335"
]
| [
"Image Inpainting/model/net.py"
]
| [
"import torch as t\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom model.basemodel import BaseModel\nfrom model.basenet import BaseNet\nfrom model.loss import WGANLoss, IDMRFLoss\nfrom model.layer import init_weights, PureUpsampling, ConfidenceDrivenMaskLayer, SpectralNorm, GatedConv, GatedDilatedConv\nfrom util.utils import generate_mask\nimport numpy as np\n\n\nclass GMCNN(BaseNet):\n def __init__(self, in_channels, out_channels, cnum=32, act=F.elu, norm=F.instance_norm, using_norm=False):\n super(GMCNN, self).__init__()\n self.act = act\n self.using_norm = using_norm\n if using_norm is True:\n self.norm = norm\n else:\n self.norm = None\n channel = cnum\n\n self.EB1 = []\n self.EB2 = []\n self.EB3 = []\n self.decoding_layers = []\n self.EB1_pad = []\n self.EB2_pad = []\n self.EB3_pad = []\n\n self.EB1.append(GatedConv(in_channels, channel, kernel_size=7, stride=1))\n self.EB1.append(GatedConv(channel, channel * 2, kernel_size=7, stride=2))\n self.EB1.append(GatedConv(channel * 2, channel * 2, kernel_size=7, stride=1))\n self.EB1.append(GatedConv(channel * 2, channel * 4, kernel_size=7, stride=2))\n self.EB1.append(GatedConv(channel * 4, channel * 4, kernel_size=7, stride=1))\n self.EB1.append(GatedConv(channel * 4, channel * 4, kernel_size=7, stride=1))\n self.EB1.append(GatedDilatedConv(channel * 4, channel* 4, kernel_size=7, stride=1, dilation=2))\n self.EB1.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=7, stride=1, dilation=4))\n self.EB1.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=7, stride=1, dilation=8))\n self.EB1.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=7, stride=1, dilation=16))\n self.EB1.append(GatedConv(channel* 4, channel* 4, kernel_size=7, stride=1))\n self.EB1.append(GatedConv(channel* 4, channel* 4, kernel_size=7, stride=1, act=None))\n self.EB1.append(PureUpsampling(scale=4))\n self.EB1_pad = [3, 3, 3, 3, 3, 3, 6, 12, 24, 48, 3, 3, 0]\n\n self.EB2.append(GatedConv(in_channels, channel, kernel_size=5, stride=1))\n self.EB2.append(GatedConv(channel, channel* 2, kernel_size=5, stride=2))\n self.EB2.append(GatedConv(channel* 2, channel* 2, kernel_size=5, stride=1))\n self.EB2.append(GatedConv(channel* 2, channel* 4, kernel_size=5, stride=2))\n self.EB2.append(GatedConv(channel* 4, channel* 4, kernel_size=5, stride=1))\n self.EB2.append(GatedConv(channel* 4, channel* 4, kernel_size=5, stride=1))\n self.EB2.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=5, stride=1, dilation=2))\n self.EB2.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=5, stride=1, dilation=4))\n self.EB2.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=5, stride=1, dilation=8))\n self.EB2.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=5, stride=1, dilation=16))\n self.EB2.append(GatedConv(channel* 4, channel* 4, kernel_size=5, stride=1))\n self.EB2.append(GatedConv(channel* 4, channel* 4, kernel_size=5, stride=1,act=None))\n self.EB2.append(PureUpsampling(scale=2, mode='nearest'))\n self.EB2.append(GatedConv(channel* 4, channel* 2, kernel_size=5, stride=1))\n self.EB2.append(GatedConv(channel* 2, channel* 2, kernel_size=5, stride=1,act=None))\n self.EB2.append(PureUpsampling(scale=2))\n self.EB2_pad = [2, 2, 2, 2, 2, 2, 4, 8, 16, 32, 2, 2, 0, 2, 2, 0]\n\n self.EB3.append(GatedConv(in_channels, channel, kernel_size=3, stride=1))\n self.EB3.append(GatedConv(channel, channel* 2, kernel_size=3, stride=2))\n self.EB3.append(GatedConv(channel* 2, channel* 2, kernel_size=3, stride=1))\n 
self.EB3.append(GatedConv(channel* 2, channel* 4, kernel_size=3, stride=2))\n self.EB3.append(GatedConv(channel* 4, channel* 4, kernel_size=3, stride=1))\n self.EB3.append(GatedConv(channel* 4, channel* 4, kernel_size=3, stride=1))\n self.EB3.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=3, stride=1, dilation=2))\n self.EB3.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=3, stride=1, dilation=4))\n self.EB3.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=3, stride=1, dilation=8))\n self.EB3.append(GatedDilatedConv(channel* 4, channel* 4, kernel_size=3, stride=1, dilation=16))\n self.EB3.append(GatedConv(channel* 4, channel* 4, kernel_size=3, stride=1))\n self.EB3.append(GatedConv(channel* 4, channel* 4, kernel_size=3, stride=1,act=None))\n self.EB3.append(PureUpsampling(scale=2, mode='nearest'))\n self.EB3.append(GatedConv(channel* 4, channel* 2, kernel_size=3, stride=1))\n self.EB3.append(GatedConv(channel* 2, channel* 2, kernel_size=3, stride=1,act=None))\n self.EB3.append(PureUpsampling(scale=2, mode='nearest'))\n self.EB3.append(GatedConv(channel* 2, channel, kernel_size=3, stride=1))\n self.EB3.append(GatedConv(channel, channel, kernel_size=3, stride=1))\n self.EB3_pad = [1, 1, 1, 1, 1, 1, 2, 4, 8, 16, 1, 1, 0, 1, 1, 0, 1, 1]\n\n self.decoding_layers.append(GatedConv(channel* 7, channel// 2, kernel_size=3, stride=1,act=None))\n self.decoding_layers.append(GatedConv(channel// 2, out_channels, kernel_size=3, stride=1,act=None))\n self.decoding_pad_rec = [1, 1]\n\n self.EB1 = nn.ModuleList(self.EB1)\n self.EB2 = nn.ModuleList(self.EB2)\n self.EB3 = nn.ModuleList(self.EB3)\n self.decoding_layers = nn.ModuleList(self.decoding_layers)\n self.pads = [0] * 49\n for i in range(49):\n self.pads[i] = nn.ReflectionPad2d(i)\n self.pads = nn.ModuleList(self.pads)\n\n def forward(self, x):\n x1, x2, x3 = x, x, x\n for i, layer in enumerate(self.EB1):\n pad_idx = self.EB1_pad[i]\n x1 = layer(self.pads[pad_idx](x1))\n if self.using_norm:\n x1 = self.norm(x1)\n\n for i, layer in enumerate(self.EB2):\n pad_idx = self.EB2_pad[i]\n x2 = layer(self.pads[pad_idx](x2))\n if self.using_norm:\n x2 = self.norm(x2)\n\n for i, layer in enumerate(self.EB3):\n pad_idx = self.EB3_pad[i]\n x3 = layer(self.pads[pad_idx](x3))\n if self.using_norm:\n x3 = self.norm(x3)\n\n x_d = t.cat((x1, x2, x3), 1)\n x_d = self.pads[self.decoding_pad_rec[0]](x_d)\n x_d = self.decoding_layers[0](x_d)\n x_d = self.act(x_d)\n x_d = self.pads[self.decoding_pad_rec[1]](x_d)\n x_d = self.decoding_layers[1](x_d)\n x_out = t.clamp(x_d, -1, 1)\n return x_out\n\n\n\nclass Discriminator(BaseNet):\n def __init__(self, in_channels, cnum=32, fc_channels=8*8*32*4, act=F.elu, norm=None):\n super(Discriminator, self).__init__()\n self.act = act\n self.norm = norm\n self.embedding = None\n self.logit = None\n\n channel = cnum\n self.layers = []\n self.layers.append(SpectralNorm(nn.Conv2d(in_channels, channel, kernel_size=5, padding=2, stride=2)))\n self.layers.append(SpectralNorm(nn.Conv2d(channel, channel* 2, kernel_size=5, padding=2, stride=2)))\n self.layers.append(SpectralNorm(nn.Conv2d(channel* 2, channel* 4, kernel_size=5, padding=2, stride=2)))\n self.layers.append(SpectralNorm(nn.Conv2d(channel* 4, channel* 4, kernel_size=5, padding=2, stride=2)))\n self.layers.append(SpectralNorm(nn.Linear(fc_channels, 1)))\n self.layers = nn.ModuleList(self.layers)\n\n def forward(self, x):\n for layer in self.layers[:-1]:\n x = self.act(layer(x))\n self.embedding = x.view(x.size(0), -1)\n self.logit = 
self.layers[-1](self.embedding)\n        return self.logit\n\n\n\nclass GlobalLocalDiscriminator(BaseNet):\n    def __init__(self, in_channels, cnum=32, g_fc_channels=16*16*32*4, l_fc_channels=8*8*32*4, act=F.elu, norm=None,):\n        super(GlobalLocalDiscriminator, self).__init__()\n        self.act = act\n        self.norm = norm\n        self.global_discriminator = Discriminator(in_channels=in_channels, fc_channels=g_fc_channels, cnum=cnum,\n                                                  act=act, norm=norm)\n        self.local_discriminator = Discriminator(in_channels=in_channels, fc_channels=l_fc_channels, cnum=cnum,\n                                                 act=act, norm=norm)\n\n    def forward(self, x_g, x_l):\n        return self.global_discriminator(x_g), self.local_discriminator(x_l)\n\n\n\nclass InpaintingModel_GMCNN(BaseModel):\n    def __init__(self, in_channels, act=F.elu, norm=None, opt=None):\n        super(InpaintingModel_GMCNN, self).__init__()\n        self.opt = opt\n        self.init(opt)\n        self.confidence_mask_layer = ConfidenceDrivenMaskLayer()\n        self.netGM = GMCNN(in_channels, out_channels=3, cnum=opt.g_cnum, act=act, norm=norm).cuda()\n        init_weights(self.netGM)\n        self.model_names = ['GM']\n        if self.opt.phase == 'test':\n            return\n\n        self.netD = None\n        self.optimizer_G = t.optim.Adam(self.netGM.parameters(), lr=opt.lr, betas=(0.5, 0.9))\n        self.optimizer_D = None\n        self.wganloss = None\n        self.recloss = nn.L1Loss()\n        self.aeloss = nn.L1Loss()\n        self.mrfloss = None\n        self.lambda_adv = opt.lambda_adv\n        self.lambda_rec = opt.lambda_rec\n        self.lambda_ae = opt.lambda_ae\n        self.lambda_gp = opt.lambda_gp\n        self.lambda_mrf = opt.lambda_mrf\n        self.G_loss, self.G_loss_reconstruction, self.G_loss_mrf = None, None, None\n        self.G_loss_adv, self.G_loss_adv_local, self.G_loss_ae = None, None, None\n        self.D_loss, self.D_loss_local = None, None\n        self.GAN_loss = None\n        self.gt, self.gt_local = None, None\n        self.mask, self.mask_01 = None, None\n        self.rect = None\n        self.im_in, self.gin = None, None\n        self.completed, self.completed_local = None, None\n        self.completed_logit, self.completed_local_logit = None, None\n        self.gt_logit, self.gt_local_logit = None, None\n\n        self.pred = None\n\n        if self.opt.pretrain_network is False:\n            img_channels=opt.img_shapes[0]//16*opt.img_shapes[1]//16*opt.d_cnum*4\n            mask_channels = opt.mask_shapes[0]//16*opt.mask_shapes[1]//16*opt.d_cnum*4\n            if self.opt.mask_type == 'rect':\n                self.netD = GlobalLocalDiscriminator(3, cnum=opt.d_cnum, act=act, g_fc_channels=img_channels,\n                                                     l_fc_channels=mask_channels,).cuda()\n            else:\n                self.netD = GlobalLocalDiscriminator(3, cnum=opt.d_cnum, act=act, g_fc_channels=img_channels,\n                                                     l_fc_channels=img_channels).cuda()\n            init_weights(self.netD)\n            self.optimizer_D = t.optim.Adam(filter(lambda x: x.requires_grad, self.netD.parameters()),\n                                            lr=opt.lr, betas=(0.5, 0.9))\n            self.wganloss = WGANLoss()\n            self.mrfloss = IDMRFLoss()\n\n    def initVariables(self):\n        self.gt = self.input['gt']\n        mask, rect = generate_mask(self.opt.mask_type, self.opt.img_shapes, self.opt.mask_shapes)\n        self.mask_01 = t.from_numpy(mask).cuda().repeat([self.opt.batch_size, 1, 1, 1])\n        self.mask = self.confidence_mask_layer(self.mask_01)\n        if self.opt.mask_type == 'rect':\n            self.rect = [rect[0, 0], rect[0, 1], rect[0, 2], rect[0, 3]]\n            self.gt_local = self.gt[:, :, self.rect[0]:self.rect[0] + self.rect[1],\n                            self.rect[2]:self.rect[2] + self.rect[3]]\n        else:\n            self.gt_local = self.gt\n        self.im_in = self.gt * (1 - self.mask_01)\n        self.gin = t.cat((self.im_in, self.mask_01), 1)\n\n    # the practical meaning of the loss terms below is a bit unclear\n    def forward_G(self):\n        self.G_loss_reconstruction = self.recloss(self.completed * self.mask, self.gt.detach() * 
self.mask)\n self.G_loss_reconstruction = self.G_loss_reconstruction / t.mean(self.mask_01)\n self.G_loss_ae = self.aeloss(self.pred * (1 - self.mask_01), self.gt.detach() * (1 - self.mask_01))\n self.G_loss_ae = self.G_loss_ae / t.mean(1 - self.mask_01)\n self.G_loss = self.lambda_rec * self.G_loss_reconstruction + self.lambda_ae * self.G_loss_ae\n\n if self.opt.pretrain_network is False:\n self.completed_logit, self.completed_local_logit = self.netD(self.completed, self.completed_local)\n self.G_loss_mrf = self.mrfloss((self.completed_local+1)/2.0, (self.gt_local.detach()+1)/2.0)\n self.G_loss = self.G_loss + self.lambda_mrf * self.G_loss_mrf\n self.G_loss_adv = -self.completed_logit.mean()\n self.G_loss_adv_local = -self.completed_local_logit.mean()\n self.G_loss = self.G_loss + self.lambda_adv * (self.G_loss_adv + self.G_loss_adv_local)\n\n def forward_D(self):\n self.completed_logit, self.completed_local_logit = self.netD(self.completed.detach(), self.completed_local.detach())\n self.gt_logit, self.gt_local_logit = self.netD(self.gt, self.gt_local)\n self.D_loss_local = nn.ReLU()(1.0 - self.gt_local_logit).mean() + nn.ReLU()(1.0 + self.completed_local_logit).mean()\n self.D_loss = nn.ReLU()(1.0 - self.gt_logit).mean() + nn.ReLU()(1.0 + self.completed_logit).mean()\n self.D_loss = self.D_loss + self.D_loss_local\n\n def backward_G(self):\n self.G_loss.backward()\n\n def backward_D(self):\n self.D_loss.backward(retain_graph=True)\n\n def optimize_parameters(self):\n self.initVariables()\n\n self.pred = self.netGM(self.gin)\n self.completed = self.pred * self.mask_01 + self.gt * (1 - self.mask_01)\n if self.opt.mask_type == 'rect':\n self.completed_local = self.completed[:, :, self.rect[0]:self.rect[0] + self.rect[1],\n self.rect[2]:self.rect[2] + self.rect[3]]\n else:\n self.completed_local = self.completed\n\n if self.opt.pretrain_network is False:\n for i in range(self.opt.D_max_iters):\n self.optimizer_D.zero_grad()\n self.optimizer_G.zero_grad()\n self.forward_D()\n self.backward_D()\n self.optimizer_D.step()\n\n self.optimizer_G.zero_grad()\n self.forward_G()\n self.backward_G()\n self.optimizer_G.step()\n\n def get_current_losses(self):\n l = {'G_loss': self.G_loss.item(), 'G_loss_rec': self.G_loss_reconstruction.item(),\n 'G_loss_ae': self.G_loss_ae.item()}\n if self.opt.pretrain_network is False:\n l.update({'G_loss_adv': self.G_loss_adv.item(),\n 'G_loss_adv_local': self.G_loss_adv_local.item(),\n 'D_loss': self.D_loss.item(),\n 'G_loss_mrf': self.G_loss_mrf.item()})\n return l\n\n\n def get_current_visuals_tensor(self):\n return {'input': self.im_in.cpu().detach(), 'gt': self.gt.cpu().detach(),\n 'completed': self.completed.cpu().detach()}\n\n def evaluate(self, img, mask):\n img = t.from_numpy(img).type(t.FloatTensor).cuda() / 127.5 - 1\n mask = t.from_numpy(mask).type(t.FloatTensor).cuda()\n img = img * (1-mask)\n xin = t.cat((img, mask), 1)\n ret = (self.netGM(xin) * mask + img * (1-mask)).cpu().detach().numpy()\n ret = (ret+ 1) * 127.5\n uint8_rect=ret.astype(np.uint8)\n return uint8_rect\n"
]
| [
[
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.clamp",
"torch.nn.L1Loss",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.ReflectionPad2d",
"torch.mean"
]
]
|
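A minimal sketch wiring together the torch building blocks the GMCNN row lists: external reflection padding, a ModuleList of convs, clamp on the generator output, a masked L1 loss, and a Linear head over concatenated features. All shapes and the two-layer depth are illustrative assumptions, not the model's architecture.

import numpy as np
import torch as t
import torch.nn as nn

layers = nn.ModuleList([
    nn.Conv2d(3, 8, kernel_size=3),   # padding applied externally, as in the model
    nn.Conv2d(8, 3, kernel_size=3),
])
pad = nn.ReflectionPad2d(1)
act = nn.ReLU()

x = t.from_numpy(np.random.rand(1, 3, 16, 16).astype("float32"))
h = act(layers[0](pad(x)))
out = t.clamp(layers[1](pad(h)), -1, 1)          # bound the output range

mask = t.ones_like(x)
rec_loss = nn.L1Loss()(out * mask, x * mask) / t.mean(mask)

features = t.cat((h, h), 1)                      # channel-wise concat of branches
logit = nn.Linear(features[0].numel(), 1)(features.flatten(1))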
janekfleper/RayTracing | [
"bb34f5eb045fe48384f9c937b2a619b31e628110"
]
| [
"raytracing/tests/testsExamples.py"
]
| [
"import envtest # modifies path\nimport subprocess\nimport matplotlib as mpl\nmpl.use('Agg')\nfrom matplotlib import patches, transforms\nfrom unittest.mock import Mock, patch\n\nfrom raytracing import *\n\nclass TestExamples(envtest.RaytracingTestCase):\n\n def testRegex(self):\n pattern = r'^(ex\\d+|fig.+)\\.py$'\n matchObj = re.match(pattern, \"fig8-bla.py\")\n self.assertIsNotNone(matchObj)\n self.assertIsNotNone(matchObj.group(1) == 'fig8-bla')\n matchObj = re.match(pattern, \"ex08.py\")\n self.assertIsNotNone(matchObj)\n self.assertIsNotNone(matchObj.group(1) == 'ex08')\n\n def testExamplesArePresent(self):\n import raytracing.examples as ex\n self.assertTrue(len(ex.short) > 0)\n\n @patch('matplotlib.pyplot.show', new=Mock())\n def testExamplesRun(self):\n import raytracing.examples as ex\n for ex in ex.short:\n self.assertTrue(len(ex[\"title\"])!=0)\n self.assertTrue(len(ex[\"sourceCode\"])!=0)\n print(\".\", end='', file=sys.stderr)\n print(ex[\"name\"], end='', file=sys.stderr)\n with envtest.redirect_stdout(self.stdout):\n ex[\"code\"]()\n\n def testExamplesHaveSrcCode(self):\n import raytracing.examples as ex\n for ex in ex.short:\n self.assertTrue(len(ex[\"sourceCode\"])!=0)\n\n def testExamplesHaveBmpSrcCode(self):\n import raytracing.examples as ex\n for ex in ex.short:\n self.assertIsNotNone(ex[\"bmpSourceCode\"])\n\n @envtest.skipUnless(envtest.performanceTests, \"Skipping long performance tests\")\n def testScriptsRun(self):\n import raytracing.examples as ex\n for scripts in ex.long:\n err = subprocess.run([sys.executable, scripts[\"path\"]], capture_output=True)\n self.assertTrue(err == 0)\n \n\n\nif __name__ == '__main__':\n envtest.main()\n"
]
| [
[
"matplotlib.use"
]
]
|
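A minimal sketch of the single API the test row above lists: selecting the non-interactive Agg backend before pyplot is imported, so figures render headlessly (the filename is illustrative).

import matplotlib as mpl
mpl.use("Agg")                      # must precede the pyplot import
import matplotlib.pyplot as plt

plt.plot([0, 1], [0, 1])
plt.savefig("example.png")          # draws to a file; no display required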
SimonKleine/ModeDetection | [
"768f7e5dd8b2ce00f34cd6657606ac160ae4d1df"
]
| [
"utils/smoothing.py"
]
| [
"from abc import ABC\n\nimport numpy as np\nimport more_itertools as mit\n\n\nclass Smoother(ABC):\n def smooth(self, modes):\n pass\n\n\nclass MajorityVoteSmoother(Smoother):\n def __init__(self, num_iterations, window_size, step_size=1):\n self.num_iterations = num_iterations\n self.half_window_size = int(window_size / 2)\n self.step_size = step_size\n self._dummy_mode = 'dummy'\n\n def _pad(self, modes):\n return np.concatenate((\n self.half_window_size * [self._dummy_mode],\n modes,\n self.half_window_size * [self._dummy_mode]))\n\n def smooth(self, modes):\n tmp_modes = modes.copy()\n\n for _ in range(self.num_iterations):\n tmp_modes = self._smooth_step(tmp_modes)\n\n return tmp_modes\n\n def _smooth_step(self, modes):\n padded_modes = self._pad(modes)\n smoothed_modes = []\n for window in mit.windowed(padded_modes, n=2*self.half_window_size + 1):\n\n contained_modes, mode_counts = np.unique(\n [w for w in window if w != self._dummy_mode],\n return_counts=True)\n\n most_prevalent_mode_index = np.argmax(mode_counts)\n most_prevalent_mode = contained_modes[most_prevalent_mode_index]\n smoothed_modes.append(most_prevalent_mode)\n # print(f'Major mode:\\t{most_prevalent_mode}\\tin{window}')\n\n assert len(modes) == len(smoothed_modes)\n return np.array(smoothed_modes, dtype=np.str_)\n"
]
| [
[
"numpy.concatenate",
"numpy.array",
"numpy.argmax",
"numpy.unique"
]
]
|
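A minimal sketch of a single majority-vote pass using the numpy calls the smoothing row lists; the window size of 3 and the mode labels are made-up assumptions.

import numpy as np

modes = np.array(["walk", "bike", "walk", "walk", "bike"])
padded = np.concatenate((["dummy"], modes, ["dummy"]))   # half-window padding

smoothed = []
for i in range(len(modes)):
    window = [m for m in padded[i:i + 3] if m != "dummy"]
    values, counts = np.unique(window, return_counts=True)
    smoothed.append(values[np.argmax(counts)])            # most frequent label
print(np.array(smoothed))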
NCAS-CMS/cfdm | [
"8e6ac54c1a2966ad5c07cd51ef609005a1fd70cc"
]
| [
"cfdm/cellmethod.py"
]
| [
"import logging\nfrom copy import deepcopy\n\nimport numpy\n\nfrom . import core, mixin\nfrom .data import Data\nfrom .decorators import _manage_log_level_via_verbosity\n\nlogger = logging.getLogger(__name__)\n\n\nclass CellMethod(mixin.Container, core.CellMethod):\n \"\"\"A cell method construct of the CF data model.\n\n One or more cell method constructs describe how the cell values of\n the field construct represent the variation of the physical\n quantity within its cells, i.e. the structure of the data at a\n higher resolution.\n\n A single cell method construct consists of a set of axes, a\n \"method\" property which describes how a value of the field\n construct's data array describes the variation of the quantity\n within a cell over those axes (e.g. a value might represent the\n cell area average), and descriptive qualifiers serving to indicate\n more precisely how the method was applied (e.g. recording the\n spacing of the original data, or the fact that the method was\n applied only over El Nino years).\n\n .. versionadded:: (cfdm) 1.7.0\n\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n \"\"\"This must be overridden in subclasses.\n\n .. versionadded:: (cfdm) 1.8.7.0\n\n \"\"\"\n instance = super().__new__(cls)\n instance._Data = Data\n return instance\n\n def __str__(self):\n \"\"\"Called by the `str` built-in function.\n\n x.__str__() <==> str(x)\n\n Returns a CF-netCDF-like string of the cell method.\n\n Note that if the intention is to use this string in a CF-netCDF\n cell_methods attribute then, unless they are standard names, the\n axes names will need to be modified to be netCDF dimension names.\n\n .. versionadded:: (cfdm) 1.7.0\n\n \"\"\"\n string = [f\"{axis}:\" for axis in self.get_axes(())]\n\n string.append(self.get_method(\"\"))\n\n for portion in (\"within\", \"where\", \"over\"):\n q = self.get_qualifier(portion, None)\n if q is not None:\n string.extend((portion, q))\n\n interval = self.get_qualifier(\"interval\", ())\n comment = self.get_qualifier(\"comment\", None)\n\n if interval:\n x = [\"(\"]\n\n y = [f\"interval: {data}\" for data in interval]\n x.append(\" \".join(y))\n\n if comment is not None:\n x.append(f\" comment: {comment}\")\n\n x.append(\")\")\n\n string.append(\"\".join(x))\n\n elif comment is not None:\n string.append(f\"({comment})\")\n\n return \" \".join(string)\n\n def _identities_iter(self):\n \"\"\"Return all possible identities.\n\n See `identities` for details and examples.\n\n :Returns:\n\n generator\n The identities.\n\n \"\"\"\n n = self.get_method(None)\n if n is not None:\n yield f\"method:{n}\"\n\n def creation_commands(\n self, namespace=None, indent=0, string=True, name=\"c\", header=True\n ):\n \"\"\"Returns the commands to create the cell method construct.\n\n .. versionadded:: (cfdm) 1.8.7.0\n\n .. seealso:: `{{package}}.Data.creation_commands`,\n `{{package}}.Field.creation_commands`\n\n :Parameters:\n\n {{namespace: `str`, optional}}\n\n {{indent: `int`, optional}}\n\n {{string: `bool`, optional}}\n\n {{name: `str`, optional}}\n\n {{header: `bool`, optional}}\n\n :Returns:\n\n {{returns creation_commands}}\n\n **Examples:**\n\n >>> x = {{package}}.CellMethod(\n ... axes=['area'],\n ... qualifiers={'over': 'land'}\n ... 
)\n >>> print(x.creation_commands(header=False))\n c = {{package}}.CellMethod()\n c.set_axes(('area',))\n c.set_qualifier('over', 'land')\n\n \"\"\"\n namespace0 = namespace\n if namespace is None:\n namespace = self._package() + \".\"\n elif namespace and not namespace.endswith(\".\"):\n namespace += \".\"\n\n out = []\n\n method = self.get_method(None)\n\n if header:\n out.append(\"#\")\n out.append(f\"# {self.construct_type}:\")\n if method is not None:\n out[-1] += f\" {method}\"\n\n out.append(f\"{name} = {namespace}{self.__class__.__name__}()\")\n\n if method is not None:\n out.append(f\"{name}.set_method({method!r})\")\n\n axes = self.get_axes(None)\n if axes is not None:\n out.append(f\"{name}.set_axes({axes!r})\")\n\n for term, value in self.qualifiers().items():\n if term == \"interval\":\n value = deepcopy(value)\n for i, data in enumerate(value[:]):\n if isinstance(data, self._Data):\n value[i] = data.creation_commands(\n name=None,\n namespace=namespace0,\n indent=0,\n string=True,\n )\n else:\n value[i] = str(data)\n\n value = \", \".join(value)\n value = f\"[{value}]\"\n else:\n value = repr(value)\n\n out.append(f\"{name}.set_qualifier({term!r}, {value})\")\n\n if string:\n indent = \" \" * indent\n out[0] = indent + out[0]\n out = (\"\\n\" + indent).join(out)\n\n return out\n\n def dump(self, display=True, _title=None, _level=0):\n \"\"\"A full description of the cell method construct.\n\n Returns a description the method, all qualifiers and the axes\n to which it applies.\n\n .. versionadded:: (cfdm) 1.7.0\n\n :Parameters:\n\n display: `bool`, optional\n If False then return the description as a string. By\n default the description is printed.\n\n :Returns:\n\n {{returns dump}}\n\n \"\"\"\n indent0 = \" \" * _level\n\n if _title is None:\n _title = \"Cell Method: \"\n\n return indent0 + _title + str(self)\n\n @_manage_log_level_via_verbosity\n def equals(\n self,\n other,\n rtol=None,\n atol=None,\n verbose=None,\n ignore_qualifiers=(),\n ignore_type=False,\n ):\n \"\"\"Whether two cell method constructs are the same.\n\n Equality is strict by default. This means that:\n\n * the descriptive qualifiers must be the same (see the\n *ignore_qualifiers* parameter).\n\n The axes of the cell method constructs are *not* considered,\n because they may only be correctly interpreted by the field\n constructs that contain the cell method constructs in\n question. They are, however, taken into account when two\n fields constructs are tested for equality.\n\n {{equals tolerance}}\n\n Any type of object may be tested but, in general, equality is\n only possible with another cell method construct, or a\n subclass of one. See the *ignore_type* parameter.\n\n {{equals tolerance}}\n\n .. 
versionadded:: (cfdm) 1.7.0\n\n :Parameters:\n\n other:\n The object to compare for equality.\n\n {{atol: number, optional}}\n\n {{rtol: number, optional}}\n\n {{verbose: `int` or `str` or `None`, optional}}\n\n ignore_qualifiers: sequence of `str`, optional\n The names of qualifiers to omit from the comparison.\n\n {{ignore_type: `bool`, optional}}\n\n :Returns:\n\n `bool`\n Whether the two cell method constructs are equal.\n\n **Examples:**\n\n >>> c = {{package}}.CellMethod()\n >>> c.equals(c)\n True\n >>> c.equals(c.copy())\n True\n >>> c.equals('not a cell method')\n False\n\n \"\"\"\n pp = super()._equals_preprocess(\n other, verbose=verbose, ignore_type=ignore_type\n )\n if pp is True or pp is False:\n return pp\n\n other = pp\n\n # ------------------------------------------------------------\n # Check the methods\n # ------------------------------------------------------------\n if self.get_method(None) != other.get_method(None):\n logger.info(\n f\"{self.__class__.__name__}: Different methods: \"\n f\"{self.get_method(None)!r} != {other.get_method(None)!r}\"\n ) # pragma: no cover\n return False\n\n # ------------------------------------------------------------\n # Check the qualifiers\n # ------------------------------------------------------------\n self_qualifiers = self.qualifiers()\n other_qualifiers = other.qualifiers()\n\n for prop in tuple(ignore_qualifiers) + (\"interval\",):\n self_qualifiers.pop(prop, None)\n other_qualifiers.pop(prop, None)\n\n if set(self_qualifiers) != set(other_qualifiers):\n for q in set(self_qualifiers).symmetric_difference(\n other_qualifiers\n ):\n logger.info(\n f\"{self.__class__.__name__}: Non-common qualifier: {q!r}\"\n ) # pragma: no cover\n return False\n\n for qualifier, x in self_qualifiers.items():\n y = other_qualifiers[qualifier]\n\n if not self._equals(\n x,\n y,\n rtol=rtol,\n atol=atol,\n ignore_data_type=True,\n verbose=verbose,\n basic=True,\n ):\n logger.info(\n f\"{self.__class__.__name__}: Different {qualifier} \"\n f\"qualifiers: {x!r}, {y!r}\"\n ) # pragma: no cover\n return False\n\n if \"interval\" in ignore_qualifiers:\n return True\n\n intervals0 = self.get_qualifier(\"interval\", ())\n intervals1 = other.get_qualifier(\"interval\", ())\n if intervals0:\n if not intervals1:\n logger.info(\n f\"{self.__class__.__name__}: Different interval \"\n f\"qualifiers: {intervals0!r} != {intervals1!r}\"\n ) # pragma: no cover\n return False\n\n if len(intervals0) != len(intervals1):\n logger.info(\n f\"{self.__class__.__name__}: Different numbers of \"\n f\"interval qualifiers: {intervals0!r} != {intervals1!r}\"\n ) # pragma: no cover\n return False\n\n for data0, data1 in zip(intervals0, intervals1):\n if not self._equals(\n data0,\n data1,\n rtol=rtol,\n atol=atol,\n verbose=verbose,\n ignore_data_type=True,\n ignore_fill_value=True,\n ):\n logger.info(\n f\"{self.__class__.__name__}: Different interval \"\n f\"qualifiers: {intervals0!r} != {intervals1!r}\"\n ) # pragma: no cover\n return False\n\n elif intervals1:\n logger.info(\n f\"{self.__class__.__name__}: Different intervals: \"\n f\"{intervals0!r} != {intervals1!r}\"\n ) # pragma: no cover\n return False\n\n # ------------------------------------------------------------\n # Do NOT check the axes\n # ------------------------------------------------------------\n\n return True\n\n def identity(self, default=\"\"):\n \"\"\"Return the canonical identity for the cell method construct.\n\n By default the identity is the first found of the following:\n\n 1. 
The method, preceded by 'method:'\n 2. The value of the *default* parameter.\n\n .. versionadded:: (cfdm) 1.7.0\n\n .. seealso:: `identities`\n\n :Parameters:\n\n default: optional\n If no identity can be found then return the value of the\n default parameter.\n\n :Returns:\n\n The identity.\n\n **Examples:**\n\n >>> f = {{package}}.example_field(1)\n >>> c = f.get_construct('cellmethod1')\n >>> c.get_method()\n 'maximum'\n >>> c.identity()\n 'method:maximum'\n >>> c.del_method()\n 'maximum'\n >>> c.identity()\n ''\n >>> c.identity(default='no identity')\n 'no identity'\n\n \"\"\"\n n = self.get_method(None)\n if n is not None:\n return f\"method:{n}\"\n\n return default\n\n def identities(self, generator=False, **kwargs):\n \"\"\"Return all possible identities.\n\n The identities comprise:\n\n * The method, preceded by 'method:'.\n\n .. versionadded:: (cfdm) 1.7.0\n\n .. seealso:: `identity`\n\n :Parameters:\n\n generator: `bool`, optional\n If True then return a generator for the identities,\n rather than a list.\n\n .. versionadded:: (cfdm) 1.8.9.0\n\n kwargs: optional\n Additional configuration parameters. Currently\n none. Unrecognised parameters are ignored.\n\n .. versionadded:: (cfdm) 1.8.9.0\n\n :Returns:\n\n `list` or generator\n The identities.\n\n **Examples:**\n\n >>> f = {{package}}.example_field(1)\n >>> c = f.get_construct('cellmethod1')\n >>> c.get_method()\n 'maximum'\n >>> c.identities()\n ['method:maximum']\n >>> c.del_method()\n 'maximum'\n >>> c.identities()\n []\n >>> for i in c.identities(generator=True):\n ... print(i)\n ...\n\n \"\"\"\n g = self._iter(body=self._identities_iter(), **kwargs)\n if generator:\n return g\n\n return list(g)\n\n def sorted(self, indices=None):\n \"\"\"Return a new cell method construct with sorted axes.\n\n The axes are sorted by domain axis construct identifier or\n standard name, and any intervals are sorted accordingly.\n\n .. versionadded:: (cfdm) 1.7.0\n\n :Parameters:\n\n indices: ordered sequence of `int`, optional\n Sort the axes with the given indices. By default the axes\n are sorted by domain axis construct identifier or standard\n name.\n\n :Returns:\n\n `CellMethod`\n A new cell method construct with sorted axes.\n\n **Examples:**\n\n >>> cm = {{package}}.CellMethod(axes=['domainaxis1', 'domainaxis0'],\n ... method='mean',\n ... qualifiers={'interval': [1, 2]})\n >>> cm\n <{{repr}}CellMethod: domainaxis1: domainaxis0: mean (interval: 1 interval: 2)>\n >>> cm.sorted()\n <{{repr}}CellMethod: domainaxis0: domainaxis1: mean (interval: 2 interval: 1)>\n\n >>> cm = {{package}}.CellMethod(axes=['domainaxis0', 'area'],\n ... method='mean',\n ... qualifiers={'interval': [1, 2]})\n >>> cm\n <{{repr}}CellMethod: domainaxis0: area: mean (interval: 1 interval: 2)>\n >>> cm.sorted()\n <{{repr}}CellMethod: area: domainaxis0: mean (interval: 2 interval: 1)>\n\n \"\"\"\n new = self.copy()\n\n axes = new.get_axes(())\n if len(axes) == 1:\n return new\n\n if indices is None:\n indices = numpy.argsort(axes)\n elif len(indices) != len(axes):\n raise ValueError(\n f\"Can't sort cell method axes. The given indices ({indices}) \"\n f\"do not correspond to the number of axes ({len(axes)})\"\n )\n\n axes2 = []\n for i in indices:\n axes2.append(axes[i])\n\n new.set_axes(tuple(axes2))\n\n intervals = new.get_qualifier(\"interval\", ())\n if len(intervals) <= 1:\n return new\n\n intervals2 = []\n for i in indices:\n intervals2.append(intervals[i])\n\n new.set_qualifier(\"interval\", tuple(intervals2))\n\n return new\n"
]
| [
[
"numpy.argsort"
]
]
|
ryan-jonesford/Semantic-Segmentation | [
"ca891cb6c4e1aecf0736d28936edaa11f1fcb474"
]
| [
"helper.py"
]
| [
"import re\nimport random\nimport numpy as np\nimport os.path\nimport scipy.misc\nimport shutil\nimport zipfile\nimport time\nimport tensorflow as tf\nfrom glob import glob\nfrom urllib.request import urlretrieve\n\n\ndef maybe_download_pretrained_vgg(data_dir):\n \"\"\"\n Download and extract pretrained vgg model if it doesn't exist\n :param data_dir: Directory to download the model to\n \"\"\"\n vgg_filename = 'vgg.zip'\n vgg_path = os.path.join(data_dir, 'vgg')\n vgg_files = [\n os.path.join(vgg_path, 'variables/variables.data-00000-of-00001'),\n os.path.join(vgg_path, 'variables/variables.index'),\n os.path.join(vgg_path, 'saved_model.pb')]\n\n missing_vgg_files = [\n vgg_file for vgg_file in vgg_files if not os.path.exists(vgg_file)]\n if missing_vgg_files:\n # Clean vgg dir\n if os.path.exists(vgg_path):\n shutil.rmtree(vgg_path)\n os.makedirs(vgg_path)\n\n # Download vgg\n print('Downloading pre-trained vgg model...')\n urlretrieve(\n 'https://s3-us-west-1.amazonaws.com/udacity-selfdrivingcar/vgg.zip',\n os.path.join(vgg_path, vgg_filename))\n\n # Extract vgg\n print('Extracting model...')\n zip_ref = zipfile.ZipFile(os.path.join(vgg_path, vgg_filename), 'r')\n zip_ref.extractall(data_dir)\n zip_ref.close()\n\n # Remove zip file to save space\n os.remove(os.path.join(vgg_path, vgg_filename))\n\n\ndef gen_batch_function(data_folder, image_shape):\n \"\"\"\n Generate function to create batches of data\n :param data_folder: Path to folder that contains all the datasets\n :param image_shape: Tuple - Shape of image\n :return:\n \"\"\"\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(\n scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(\n scipy.misc.imread(gt_image_file), image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn\n\n\ndef create_validation_set(data_folder, validation_size=8):\n \"\"\"\n Function to set asside data from the training set for validation\n :param data_folder: Path to folder that contains all the datasets\n :param validation_size: Number of data to set asside for validation\n :return:\n \"\"\"\n output_dir = os.path.join('./data', 'data_road', \"cross_val\")\n\n # if the dataset already exists, exit without doing anything\n if os.path.exists(output_dir):\n return\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'image_2'))\n os.makedirs(os.path.join(output_dir, 'gt_image_2'))\n\n print('Creating Validation set in: {}'.format(output_dir))\n\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', 
'*_road_*.png'))}\n\n random.shuffle(image_paths)\n for i in range(0, validation_size):\n os.rename(image_paths[i], os.path.join(\n output_dir, 'image_2', os.path.basename(image_paths[i])))\n os.rename(label_paths[os.path.basename(image_paths[i])], os.path.join(\n output_dir, 'gt_image_2', os.path.basename(label_paths[os.path.basename(image_paths[i])])))\n\n\ndef gen_test_output(sess, logits, keep_prob, image_pl, data_folder, image_shape):\n \"\"\"\n Generate test output using the test images\n :param sess: TF session\n :param logits: TF Tensor for the logits\n :param keep_prob: TF Placeholder for the dropout keep probability\n :param image_pl: TF Placeholder for the input image\n :param data_folder: Path to the folder that contains the datasets\n :param image_shape: Tuple - Shape of image\n :return: Output for each test image\n \"\"\"\n for image_file in glob(os.path.join(data_folder, 'image_2', '*.png')):\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n\n im_softmax = sess.run(\n [tf.nn.softmax(logits)],\n {keep_prob: 1.0, image_pl: [image]})\n im_softmax = im_softmax[0][:, 1].reshape(\n image_shape[0], image_shape[1])\n segmentation = (im_softmax > 0.5).reshape(\n image_shape[0], image_shape[1], 1)\n mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))\n mask = scipy.misc.toimage(mask, mode=\"RGBA\")\n street_im = scipy.misc.toimage(image)\n street_im.paste(mask, box=None, mask=mask)\n\n yield os.path.basename(image_file), np.array(street_im)\n\n\ndef save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image):\n # Make folder for current run\n output_dir = os.path.join(runs_dir, str(time.time()))\n if os.path.exists(output_dir):\n shutil.rmtree(output_dir)\n os.makedirs(output_dir)\n\n # Run NN on test images and save them to HD\n print('Training Finished. Saving test images to: {}'.format(output_dir))\n image_outputs = gen_test_output(\n sess, logits, keep_prob, input_image, os.path.join(data_dir, 'data_road/testing'), image_shape)\n for name, image in image_outputs:\n scipy.misc.imsave(os.path.join(output_dir, name), image)\n"
]
| [
[
"numpy.all",
"numpy.invert",
"numpy.array",
"tensorflow.nn.softmax"
]
]
|
NapITlab/lncRNAmiRNA | [
"f241383ed7e23e9807137c1efb3f03cc04a9695a"
]
| [
"lncRNAmiRNA/lncRNAmiRNA_model/utils/util_utils.py"
]
| [
"import datetime\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport traceback\n\nimport pandas as pd\nfrom django.conf import settings\nfrom rest_framework.response import Response\n\nlogger = logging.getLogger(\"lncRNAmiRNA_model\")\ntelegram_logger = logging.getLogger(\"lncRNAmiRNA_model.telegram\")\n\n\ndef run_or_cached(fp: str, func, args: list, kwargs: dict):\n \"\"\"\n if cached get function output from cache\n :param fp: filepath to cached file\n :param func: func to run if not cached\n :param args: func args\n :param kwargs: func kwargs\n :return:\n \"\"\"\n error_msg = f\"Cache for extension of file '{fp}' is not implemented\"\n\n if not os.path.isfile(fp) or not settings.CACHE_FILES:\n to_return = func(*args, **kwargs)\n\n if settings.CACHE_FILES:\n if fp.endswith(\".json\"):\n with open(fp, \"w\") as json_f:\n json.dump(to_return, json_f)\n elif fp.endswith(\".csv\"):\n to_return.to_csv(fp, index=False)\n else:\n raise NotImplementedError(error_msg)\n else:\n logger.debug(f\"Skipped run of '{func.__name__}', using cached results from '{fp}'\")\n if fp.endswith(\".json\"):\n with open(fp, \"r\") as json_f:\n to_return = json.load(json_f)\n elif fp.endswith(\".csv\"):\n to_return = pd.read_csv(fp)\n else:\n raise NotImplementedError(error_msg)\n\n return to_return\n\n\ndef csv_to_fasta(csv_fp: str, fasta_fp: str):\n csv = pd.read_csv(csv_fp, header=0)\n\n with open(fasta_fp, \"w\") as fasta:\n for i, row in csv.iterrows():\n # seq_len = len(row['Sequence'])\n # fasta_fp_len = fast_fp_prefix + \"_\" + str(seq_len) + \".fasta\"\n\n fasta.write(f\">{i}_{row['gene']}\\n{row['Sequence']}\\n\")\n\n\ndef create_timestamp(historical_moment: str, beginning: float):\n ts = time.time()\n logger.debug(f\"{historical_moment} elapsed: {datetime.timedelta(seconds=ts-beginning)}\")\n return ts\n\n\ndef make_error_msg(what):\n ex_type, ex_value, ex_trace = sys.exc_info()\n traceback_str = traceback.format_exc()\n\n return ex_value, traceback_str, f\"{what} failed:\\n\" \\\n f\"ex_type: {ex_type}\\n\" \\\n f\"ex_value: {ex_value}\\n\" \\\n f\"trace: {traceback_str}\"\n\n\ndef ErrorResponse(request, traceback, message, error_status):\n url = ''\n try:\n url = request.stream.path\n except Exception as ex:\n pass\n log = \"user: {}\\nurl: {}\\nmessage: {}\\ntraceback: {}\".format(request.user.username, url, message, traceback)\n\n telegram_logger.error(log)\n logger.error(log)\n return Response({'message': message}, status=error_status)\n"
]
| [
[
"pandas.read_csv"
]
]
|